diff --git a/CMakeLists.txt b/CMakeLists.txt
index 4772f0b..804b2f6 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -81,15 +81,30 @@ set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
 
 if(AARE_FETCH_LMFIT)
     #TODO! Should we fetch lmfit from the web or inlcude a tar.gz in the repo?
-    set(lmfit_patch git apply ${CMAKE_CURRENT_SOURCE_DIR}/patches/lmfit.patch)
-    FetchContent_Declare(
-        lmfit
-        GIT_REPOSITORY https://jugit.fz-juelich.de/mlz/lmfit.git
-        GIT_TAG main
-        PATCH_COMMAND ${lmfit_patch}
-        UPDATE_DISCONNECTED 1
-        EXCLUDE_FROM_ALL 1
-    )
+    set(LMFIT_PATCH_COMMAND git apply ${CMAKE_CURRENT_SOURCE_DIR}/patches/lmfit.patch)
+
+    # For cmake < 3.28 we can't supply EXCLUDE_FROM_ALL to FetchContent_Declare
+    # so we need this workaround
+    if (${CMAKE_VERSION} VERSION_LESS "3.28")
+        FetchContent_Declare(
+            lmfit
+            GIT_REPOSITORY https://jugit.fz-juelich.de/mlz/lmfit.git
+            GIT_TAG main
+            PATCH_COMMAND ${LMFIT_PATCH_COMMAND}
+            UPDATE_DISCONNECTED 1
+        )
+    else()
+        FetchContent_Declare(
+            lmfit
+            GIT_REPOSITORY https://jugit.fz-juelich.de/mlz/lmfit.git
+            GIT_TAG main
+            PATCH_COMMAND ${LMFIT_PATCH_COMMAND}
+            UPDATE_DISCONNECTED 1
+            EXCLUDE_FROM_ALL 1
+        )
+    endif()
+
+    #Disable what we don't need from lmfit
     set(BUILD_TESTING OFF CACHE BOOL "")
     set(LMFIT_CPPTEST OFF CACHE BOOL "")
@@ -97,8 +112,15 @@ if(AARE_FETCH_LMFIT)
     set(LMFIT_CPPTEST OFF CACHE BOOL "")
     set(BUILD_SHARED_LIBS OFF CACHE BOOL "")
 
+    if (${CMAKE_VERSION} VERSION_LESS "3.28")
+        if(NOT lmfit_POPULATED)
+            FetchContent_Populate(lmfit)
+            add_subdirectory(${lmfit_SOURCE_DIR} ${lmfit_BINARY_DIR} EXCLUDE_FROM_ALL)
+        endif()
+    else()
+        FetchContent_MakeAvailable(lmfit)
+    endif()
 
-    FetchContent_MakeAvailable(lmfit)
     set_property(TARGET lmfit PROPERTY POSITION_INDEPENDENT_CODE ON)
 else()
     find_package(lmfit REQUIRED)
@@ -111,10 +133,13 @@ if(AARE_FETCH_ZMQ)
     if (${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.30")
         cmake_policy(SET CMP0169 OLD)
     endif()
+    set(ZMQ_PATCH_COMMAND git apply ${CMAKE_CURRENT_SOURCE_DIR}/patches/libzmq_cmake_version.patch)
     FetchContent_Declare(
         libzmq
         GIT_REPOSITORY https://github.com/zeromq/libzmq.git
         GIT_TAG v4.3.4
+        PATCH_COMMAND ${ZMQ_PATCH_COMMAND}
+        UPDATE_DISCONNECTED 1
     )
     # Disable unwanted options from libzmq
     set(BUILD_TESTS OFF CACHE BOOL "Switch off libzmq test build")
@@ -396,6 +421,7 @@ if(AARE_TESTS)
         ${CMAKE_CURRENT_SOURCE_DIR}/src/NDView.test.cpp
         ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFinder.test.cpp
         ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterVector.test.cpp
+        ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFile.test.cpp
         ${CMAKE_CURRENT_SOURCE_DIR}/src/Pedestal.test.cpp
         ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.test.cpp
         ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyHelpers.test.cpp
diff --git a/conda-recipe/meta.yaml b/conda-recipe/meta.yaml
index 120854b..560e831 100644
--- a/conda-recipe/meta.yaml
+++ b/conda-recipe/meta.yaml
@@ -1,6 +1,7 @@
 package:
   name: aare
-  version: 2025.3.18 #TODO! how to not duplicate this?
+  version: 2025.4.1 #TODO! how to not duplicate this?
+
diff --git a/include/aare/Cluster.hpp b/include/aare/Cluster.hpp
new file mode 100644
index 0000000..48f9ef0
--- /dev/null
+++ b/include/aare/Cluster.hpp
@@ -0,0 +1,36 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+
+namespace aare {
+
+//TODO! Template this?
+struct Cluster3x3 {
+    int16_t x;
+    int16_t y;
+    int32_t data[9];
+
+    int32_t sum_2x2() const{
+        std::array total;
+        total[0] = data[0] + data[1] + data[3] + data[4];
+        total[1] = data[1] + data[2] + data[4] + data[5];
+        total[2] = data[3] + data[4] + data[6] + data[7];
+        total[3] = data[4] + data[5] + data[7] + data[8];
+        return *std::max_element(total.begin(), total.end());
+    }
+
+    int32_t sum() const{
+        return std::accumulate(data, data + 9, 0);
+    }
+};
+struct Cluster2x2 {
+    int16_t x;
+    int16_t y;
+    int32_t data[4];
+};
+
+} // namespace aare
\ No newline at end of file
diff --git a/include/aare/ClusterFile.hpp b/include/aare/ClusterFile.hpp
index 5bea342..bea9f48 100644
--- a/include/aare/ClusterFile.hpp
+++ b/include/aare/ClusterFile.hpp
@@ -1,25 +1,17 @@
 #pragma once
 
+#include "aare/Cluster.hpp"
 #include "aare/ClusterVector.hpp"
 #include "aare/NDArray.hpp"
 #include "aare/defs.hpp"
 
 #include
 #include
+#include
 
 namespace aare {
 
-//TODO! Template this?
-struct Cluster3x3 {
-    int16_t x;
-    int16_t y;
-    int32_t data[9];
-};
-struct Cluster2x2 {
-    int16_t x;
-    int16_t y;
-    int32_t data[4];
-};
+//TODO! Legacy enums, migrate to enum class
 typedef enum {
     cBottomLeft = 0,
     cBottomRight = 1,
@@ -53,15 +45,7 @@ struct ClusterAnalysis {
     double etay;
 };
 
-/*
-Binary cluster file. Expects data to be layed out as:
-int32_t frame_number
-uint32_t number_of_clusters
-int16_t x, int16_t y, int32_t data[9] x number_of_clusters
-int32_t frame_number
-uint32_t number_of_clusters
-....
-*/
+
 
 /**
  * @brief Class to read and write cluster files
@@ -70,16 +54,19 @@ uint32_t number_of_clusters
  *
  * int32_t frame_number
  * uint32_t number_of_clusters
- * int16_t x, int16_t y, int32_t data[9] x number_of_clusters
+ * int16_t x, int16_t y, int32_t data[9] * number_of_clusters
  * int32_t frame_number
  * uint32_t number_of_clusters
  * etc.
  */
 class ClusterFile {
     FILE *fp{};
-    uint32_t m_num_left{};
-    size_t m_chunk_size{};
-    const std::string m_mode;
+    uint32_t m_num_left{};    /*Number of photons left in frame*/
+    size_t m_chunk_size{};    /*Number of clusters to read at a time*/
+    const std::string m_mode; /*Mode to open the file in*/
+    std::optional m_roi;      /*Region of interest, will be applied if set*/
+    std::optional> m_noise_map; /*Noise map to cut photons, will be applied if set*/
+    std::optional> m_gain_map;  /*Gain map to apply to the clusters, will be applied if set*/
 
   public:
     /**
@@ -117,29 +104,49 @@ class ClusterFile {
 
     void write_frame(const ClusterVector &clusters);
 
-    // Need to be migrated to support NDArray and return a ClusterVector
-    // std::vector
-    // read_cluster_with_cut(size_t n_clusters, double *noise_map, int nx, int ny);
-
     /**
      * @brief Return the chunk size
     */
     size_t chunk_size() const { return m_chunk_size; }
+
+    /**
+     * @brief Set the region of interest to use when reading clusters. If set only clusters within
+     * the ROI will be read.
+     */
+    void set_roi(ROI roi);
+
+    /**
+     * @brief Set the noise map to use when reading clusters. If set clusters below the noise
+     * level will be discarded. Selection criteria one of: Central pixel above noise, highest
+     * 2x2 sum above 2 * noise, total sum above 3 * noise.
+     */
+    void set_noise_map(const NDView noise_map);
+
+    /**
+     * @brief Set the gain map to use when reading clusters. If set the gain map will be applied
+     * to the clusters that pass ROI and noise_map selection.
+     */
+    void set_gain_map(const NDView gain_map);
 
     /**
     * @brief Close the file. If not closed the file will be closed in the destructor
     */
    void close();
+
+  private:
+    ClusterVector read_clusters_with_cut(size_t n_clusters);
+    ClusterVector read_clusters_without_cut(size_t n_clusters);
+    ClusterVector read_frame_with_cut();
+    ClusterVector read_frame_without_cut();
+    bool is_selected(Cluster3x3 &cl);
+    Cluster3x3 read_one_cluster();
 };
 
-int analyze_data(int32_t *data, int32_t *t2, int32_t *t3, char *quad,
-                 double *eta2x, double *eta2y, double *eta3x, double *eta3y);
-int analyze_cluster(Cluster3x3 &cl, int32_t *t2, int32_t *t3, char *quad,
-                    double *eta2x, double *eta2y, double *eta3x, double *eta3y);
-
+//TODO! helper functions that doesn't really belong here
 NDArray calculate_eta2(ClusterVector &clusters);
 Eta2 calculate_eta2(Cluster3x3 &cl);
 Eta2 calculate_eta2(Cluster2x2 &cl);
+
 } // namespace aare
diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp
index 1c15a22..b91278c 100644
--- a/include/aare/ClusterVector.hpp
+++ b/include/aare/ClusterVector.hpp
@@ -8,6 +8,9 @@
 
 #include
 
+#include "aare/Cluster.hpp"
+#include "aare/NDView.hpp"
+
 namespace aare {
 
 /**
@@ -265,6 +268,28 @@ template class ClusterVector {
         m_size = new_size;
     }
 
+    void apply_gain_map(const NDView gain_map){
+        //in principle we need to know the size of the image for this lookup
+        //TODO! check orientations
+        std::array xcorr = {-1, 0, 1, -1, 0, 1, -1, 0, 1};
+        std::array ycorr = {-1, -1, -1, 0, 0, 0, 1, 1, 1};
+        for (size_t i=0; i(i);
+
+            if (cl.x > 0 && cl.y > 0 && cl.x < gain_map.shape(1)-1 && cl.y < gain_map.shape(0)-1){
+                for (size_t j=0; j<9; j++){
+                    size_t x = cl.x + xcorr[j];
+                    size_t y = cl.y + ycorr[j];
+                    cl.data[j] = static_cast(cl.data[j] * gain_map(y, x));
+                }
+            }else{
+                memset(cl.data, 0, 9*sizeof(T)); //clear edge clusters
+            }
+
+
+        }
+    }
+
   private:
     void allocate_buffer(size_t new_capacity) {
         size_t num_bytes = item_size() * new_capacity;
diff --git a/include/aare/defs.hpp b/include/aare/defs.hpp
index 4559882..4d22bd4 100644
--- a/include/aare/defs.hpp
+++ b/include/aare/defs.hpp
@@ -1,11 +1,9 @@
 #pragma once
 
 #include "aare/Dtype.hpp"
-// #include "aare/utils/logger.hpp"
 
 #include
 #include
-
 #include
 #include
 #include
@@ -43,6 +41,7 @@ inline constexpr size_t bits_per_byte = 8;
 
 void assert_failed(const std::string &msg);
 
+
 class DynamicCluster {
   public:
     int cluster_sizeX;
@@ -215,6 +214,9 @@ struct ROI{
     int64_t height() const { return ymax - ymin; }
     int64_t width() const { return xmax - xmin; }
 
+    bool contains(int64_t x, int64_t y) const {
+        return x >= xmin && x < xmax && y >= ymin && y < ymax;
+    }
 };
diff --git a/patches/libzmq_cmake_version.patch b/patches/libzmq_cmake_version.patch
new file mode 100644
index 0000000..4e421d3
--- /dev/null
+++ b/patches/libzmq_cmake_version.patch
@@ -0,0 +1,18 @@
+diff --git a/CMakeLists.txt b/CMakeLists.txt
+index dd3d8eb9..c0187747 100644
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -1,11 +1,8 @@
+ # CMake build script for ZeroMQ
+ project(ZeroMQ)
+ 
+-if(${CMAKE_SYSTEM_NAME} STREQUAL Darwin)
+-  cmake_minimum_required(VERSION 3.0.2)
+-else()
+-  cmake_minimum_required(VERSION 2.8.12)
+-endif()
++cmake_minimum_required(VERSION 3.15)
++message(STATUS "Patched cmake version")
+ 
+ include(CheckIncludeFiles)
+ include(CheckCCompilerFlag)
diff --git a/pyproject.toml b/pyproject.toml
index b9bf7d2..60128c9 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,8 @@ build-backend = "scikit_build_core.build"
 
 [project]
 name = "aare"
-version = "2025.3.18"
+version = "2025.4.1"
+
diff --git a/python/src/cluster_file.hpp b/python/src/cluster_file.hpp
index f587443..ff46043 100644
--- a/python/src/cluster_file.hpp
+++ b/python/src/cluster_file.hpp
@@ -31,26 +31,22 @@ void define_cluster_file_io_bindings(py::module &m) {
                 auto v = new ClusterVector(self.read_clusters(n_clusters));
                 return v;
             },py::return_value_policy::take_ownership)
-        .def("read_clusters",
-             [](ClusterFile &self, size_t n_clusters, ROI roi) {
-                auto v = new ClusterVector(self.read_clusters(n_clusters, roi));
-                return v;
-            },py::return_value_policy::take_ownership)
         .def("read_frame",
             [](ClusterFile &self) {
                 auto v = new ClusterVector(self.read_frame());
                 return v;
             })
+        .def("set_roi", &ClusterFile::set_roi)
+        .def("set_noise_map", [](ClusterFile &self, py::array_t noise_map) {
+            auto view = make_view_2d(noise_map);
+            self.set_noise_map(view);
+        })
+        .def("set_gain_map", [](ClusterFile &self, py::array_t gain_map) {
+            auto view = make_view_2d(gain_map);
+            self.set_gain_map(view);
+        })
+        .def("close", &ClusterFile::close)
         .def("write_frame", &ClusterFile::write_frame)
-        // .def("read_cluster_with_cut",
-        //      [](ClusterFile &self, size_t n_clusters,
-        //         py::array_t noise_map, int nx, int ny) {
-        //         auto view = make_view_2d(noise_map);
-        //         auto *vec =
-        //             new std::vector(self.read_cluster_with_cut(
-        //                 n_clusters, view.data(), nx, ny));
-        //         return return_vector(vec);
-        //      })
         .def("__enter__", [](ClusterFile &self) { return &self; })
         .def("__exit__",
              [](ClusterFile &self,
diff --git a/src/ClusterFile.cpp b/src/ClusterFile.cpp
index 2e23e09..f77ac92 100644
--- a/src/ClusterFile.cpp
+++ b/src/ClusterFile.cpp
@@ -31,6 +31,18 @@ ClusterFile::ClusterFile(const std::filesystem::path &fname, size_t chunk_size,
     }
 }
 
+void ClusterFile::set_roi(ROI roi){
+    m_roi = roi;
+}
+
+void ClusterFile::set_noise_map(const NDView noise_map){
+    m_noise_map = NDArray(noise_map);
+}
+
+void ClusterFile::set_gain_map(const NDView gain_map){
+    m_gain_map = NDArray(gain_map);
+}
+
 ClusterFile::~ClusterFile() { close(); }
 
 void ClusterFile::close() {
@@ -48,14 +60,37 @@ void ClusterFile::write_frame(const ClusterVector &clusters) {
         !(clusters.cluster_size_y() == 3)) {
         throw std::runtime_error("Only 3x3 clusters are supported");
     }
+    //First write the frame number - 4 bytes
     int32_t frame_number = clusters.frame_number();
-    fwrite(&frame_number, sizeof(frame_number), 1, fp);
+    if(fwrite(&frame_number, sizeof(frame_number), 1, fp)!=1){
+        throw std::runtime_error(LOCATION + "Could not write frame number");
+    }
+
+    //Then write the number of clusters - 4 bytes
     uint32_t n_clusters = clusters.size();
-    fwrite(&n_clusters, sizeof(n_clusters), 1, fp);
-    fwrite(clusters.data(), clusters.item_size(), clusters.size(), fp);
+    if(fwrite(&n_clusters, sizeof(n_clusters), 1, fp)!=1){
+        throw std::runtime_error(LOCATION + "Could not write number of clusters");
+    }
+
+    //Now write the clusters in the frame
+    if(fwrite(clusters.data(), clusters.item_size(), clusters.size(), fp)!=clusters.size()){
+        throw std::runtime_error(LOCATION + "Could not write clusters");
+    }
 }
 
-ClusterVector ClusterFile::read_clusters(size_t n_clusters) {
+
+ClusterVector ClusterFile::read_clusters(size_t n_clusters){
+    if (m_mode != "r") {
+        throw std::runtime_error("File not opened for reading");
+    }
+    if (m_noise_map || m_roi){
+        return read_clusters_with_cut(n_clusters);
+    }else{
+        return read_clusters_without_cut(n_clusters);
+    }
+}
+
+ClusterVector ClusterFile::read_clusters_without_cut(size_t n_clusters) {
     if (m_mode != "r") {
         throw std::runtime_error("File not opened for reading");
     }
@@ -86,6 +121,7 @@ ClusterVector ClusterFile::read_clusters(size_t n_clusters) {
     if (nph_read < n_clusters) {
         // keep on reading frames and photons until reaching n_clusters
         while (fread(&iframe, sizeof(iframe), 1, fp)) {
+            clusters.set_frame_number(iframe);
             // read number of clusters in frame
             if (fread(&nph, sizeof(nph), 1, fp)) {
                 if (nph > (n_clusters - nph_read))
@@ -105,83 +141,111 @@ ClusterVector ClusterFile::read_clusters(size_t n_clusters) {
     // Resize the vector to the number of clusters.
     // No new allocation, only change bounds.
     clusters.resize(nph_read);
+    if(m_gain_map)
+        clusters.apply_gain_map(m_gain_map->view());
     return clusters;
 }
 
-ClusterVector ClusterFile::read_clusters(size_t n_clusters, ROI roi) {
-    if (m_mode != "r") {
-        throw std::runtime_error("File not opened for reading");
-    }
-
+
+ClusterVector ClusterFile::read_clusters_with_cut(size_t n_clusters) {
     ClusterVector clusters(3,3);
     clusters.reserve(n_clusters);
 
-    int32_t iframe = 0; // frame number needs to be 4 bytes!
-    size_t nph_read = 0;
-    uint32_t nn = m_num_left;
-    uint32_t nph = m_num_left; // number of clusters in frame needs to be 4
-
-    // auto buf = reinterpret_cast(clusters.data());
-    // auto buf = clusters.data();
-
-    Cluster3x3 tmp; //this would break if the cluster size changes
-
     // if there are photons left from previous frame read them first
-    if (nph) {
-        if (nph > n_clusters) {
-            // if we have more photons left in the frame then photons to read we
-            // read directly the requested number
-            nn = n_clusters;
-        } else {
-            nn = nph;
-        }
-        //Read one cluster, in the ROI push back
-        // nph_read += fread((buf + nph_read*clusters.item_size()),
-        //                   clusters.item_size(), nn, fp);
-        for(size_t i = 0; i < nn; i++){
-            fread(&tmp, sizeof(tmp), 1, fp);
-            if(tmp.x >= roi.xmin && tmp.x <= roi.xmax && tmp.y >= roi.ymin && tmp.y <= roi.ymax){
-                clusters.push_back(tmp.x, tmp.y, reinterpret_cast(tmp.data));
-                nph_read++;
+    if (m_num_left) {
+        while(m_num_left && clusters.size() < n_clusters){
+            Cluster3x3 c = read_one_cluster();
+            if(is_selected(c)){
+                clusters.push_back(c.x, c.y, reinterpret_cast(c.data));
             }
         }
-
-        m_num_left = nph - nn; // write back the number of photons left
     }
 
-    if (nph_read < n_clusters) {
-        // keep on reading frames and photons until reaching n_clusters
-        while (fread(&iframe, sizeof(iframe), 1, fp)) {
-            // read number of clusters in frame
-            if (fread(&nph, sizeof(nph), 1, fp)) {
-                if (nph > (n_clusters - nph_read))
-                    nn = n_clusters - nph_read;
-                else
-                    nn = nph;
-
-                // nph_read += fread((buf + nph_read*clusters.item_size()),
-                //                   clusters.item_size(), nn, fp);
-                for(size_t i = 0; i < nn; i++){
-                    fread(&tmp, sizeof(tmp), 1, fp);
-                    if(tmp.x >= roi.xmin && tmp.x <= roi.xmax && tmp.y >= roi.ymin && tmp.y <= roi.ymax){
-                        clusters.push_back(tmp.x, tmp.y, reinterpret_cast(tmp.data));
-                        nph_read++;
+    // we did not have enough clusters left in the previous frame
+    // keep on reading frames until reaching n_clusters
+    if (clusters.size() < n_clusters) {
+        // sanity check
+        if (m_num_left) {
+            throw std::runtime_error(LOCATION + "Entered second loop with clusters left\n");
+        }
+
+        int32_t frame_number = 0; // frame number needs to be 4 bytes!
+        while (fread(&frame_number, sizeof(frame_number), 1, fp)) {
+            if (fread(&m_num_left, sizeof(m_num_left), 1, fp)) {
+                clusters.set_frame_number(frame_number); //cluster vector will hold the last frame number
+                while(m_num_left && clusters.size() < n_clusters){
+                    Cluster3x3 c = read_one_cluster();
+                    if(is_selected(c)){
+                        clusters.push_back(c.x, c.y, reinterpret_cast(c.data));
                     }
                 }
-                m_num_left = nph - nn;
             }
-            if (nph_read >= n_clusters)
+
+            // we have enough clusters, break out of the outer while loop
+            if (clusters.size() >= n_clusters)
                 break;
         }
-    }
-    // Resize the vector to the number of clusters.
-    // No new allocation, only change bounds.
-    clusters.resize(nph_read);
+    }
+    if(m_gain_map)
+        clusters.apply_gain_map(m_gain_map->view());
+
     return clusters;
 }
 
-ClusterVector ClusterFile::read_frame() {
+Cluster3x3 ClusterFile::read_one_cluster(){
+    Cluster3x3 c;
+    auto rc = fread(&c, sizeof(c), 1, fp);
+    if (rc != 1) {
+        throw std::runtime_error(LOCATION + "Could not read cluster");
+    }
+    --m_num_left;
+    return c;
+}
+
+ClusterVector ClusterFile::read_frame(){
+    if (m_mode != "r") {
+        throw std::runtime_error(LOCATION + "File not opened for reading");
+    }
+    if (m_noise_map || m_roi){
+        return read_frame_with_cut();
+    }else{
+        return read_frame_without_cut();
+    }
+}
+
+ClusterVector ClusterFile::read_frame_without_cut() {
+    if (m_mode != "r") {
+        throw std::runtime_error("File not opened for reading");
+    }
+    if (m_num_left) {
+        throw std::runtime_error(
+            "There are still photons left in the last frame");
+    }
+    int32_t frame_number;
+    if (fread(&frame_number, sizeof(frame_number), 1, fp) != 1) {
+        throw std::runtime_error(LOCATION + "Could not read frame number");
+    }
+
+    int32_t n_clusters; // Saved as 32bit integer in the cluster file
+    if (fread(&n_clusters, sizeof(n_clusters), 1, fp) != 1) {
+        throw std::runtime_error(LOCATION + "Could not read number of clusters");
+    }
+
+    ClusterVector clusters(3, 3, n_clusters);
+    clusters.set_frame_number(frame_number);
+
+    if (fread(clusters.data(), clusters.item_size(), n_clusters, fp) !=
+        static_cast(n_clusters)) {
+        throw std::runtime_error(LOCATION + "Could not read clusters");
+    }
+    clusters.resize(n_clusters);
+    if (m_gain_map)
+        clusters.apply_gain_map(m_gain_map->view());
+    return clusters;
+}
+
+ClusterVector ClusterFile::read_frame_with_cut() {
     if (m_mode != "r") {
         throw std::runtime_error("File not opened for reading");
     }
@@ -194,149 +258,47 @@ ClusterVector ClusterFile::read_frame() {
         throw std::runtime_error("Could not read frame number");
     }
 
-    int32_t n_clusters; // Saved as 32bit integer in the cluster file
-    if (fread(&n_clusters, sizeof(n_clusters), 1, fp) != 1) {
+
+    if (fread(&m_num_left, sizeof(m_num_left), 1, fp) != 1) {
         throw std::runtime_error("Could not read number of clusters");
     }
-    // std::vector clusters(n_clusters);
-    ClusterVector clusters(3, 3, n_clusters);
+
+    ClusterVector clusters(3, 3);
+    clusters.reserve(m_num_left);
     clusters.set_frame_number(frame_number);
-
-    if (fread(clusters.data(), clusters.item_size(), n_clusters, fp) !=
-        static_cast(n_clusters)) {
-        throw std::runtime_error("Could not read clusters");
+    while(m_num_left){
+        Cluster3x3 c = read_one_cluster();
+        if(is_selected(c)){
+            clusters.push_back(c.x, c.y, reinterpret_cast(c.data));
+        }
     }
-    clusters.resize(n_clusters);
+    if (m_gain_map)
+        clusters.apply_gain_map(m_gain_map->view());
     return clusters;
 }
 
-// std::vector ClusterFile::read_cluster_with_cut(size_t n_clusters,
-//                                                double *noise_map,
-//                                                int nx, int ny) {
-//     if (m_mode != "r") {
-//         throw std::runtime_error("File not opened for reading");
-//     }
-//     std::vector clusters(n_clusters);
-//     // size_t read_clusters_with_cut(FILE *fp, size_t n_clusters, Cluster *buf,
-//     //                               uint32_t *n_left, double *noise_map, int
-//     //                               nx, int ny) {
-//     int iframe = 0;
-//     // uint32_t nph = *n_left;
-//     uint32_t nph = m_num_left;
-//     // uint32_t nn = *n_left;
-//     uint32_t nn = m_num_left;
-//     size_t nph_read = 0;
-//     int32_t t2max, tot1;
-//     int32_t tot3;
-//     // Cluster *ptr = buf;
-//     Cluster3x3 *ptr = clusters.data();
-//     int good = 1;
-//     double noise;
-//     // read photons left from previous frame
-//     if (noise_map)
-//         printf("Using noise map\n");
+bool ClusterFile::is_selected(Cluster3x3 &cl) {
+    //Should fail fast
+    if (m_roi) {
+        if (!(m_roi->contains(cl.x, cl.y))) {
+            return false;
+        }
+    }
+    if (m_noise_map){
+        int32_t sum_1x1 = cl.data[4]; // central pixel
+        int32_t sum_2x2 = cl.sum_2x2(); // highest sum of 2x2 subclusters
+        int32_t sum_3x3 = cl.sum(); // sum of all pixels
 
-//     if (nph) {
-//         if (nph > n_clusters) {
-//             // if we have more photons left in the frame then photons to
-//             // read we read directly the requested number
-//             nn = n_clusters;
-//         } else {
-//             nn = nph;
-//         }
-//         for (size_t iph = 0; iph < nn; iph++) {
-//             // read photons 1 by 1
-//             size_t n_read =
-//                 fread(reinterpret_cast(ptr), sizeof(Cluster3x3), 1, fp);
-//             if (n_read != 1) {
-//                 clusters.resize(nph_read);
-//                 return clusters;
-//             }
-//             // TODO! error handling on read
-//             good = 1;
-//             if (noise_map) {
-//                 if (ptr->x >= 0 && ptr->x < nx && ptr->y >= 0 && ptr->y < ny) {
-//                     tot1 = ptr->data[4];
-//                     analyze_cluster(*ptr, &t2max, &tot3, NULL, NULL, NULL, NULL,
-//                                     NULL);
-//                     noise = noise_map[ptr->y * nx + ptr->x];
-//                     if (tot1 > noise || t2max > 2 * noise || tot3 > 3 * noise) {
-//                         ;
-//                     } else {
-//                         good = 0;
-//                         printf("%d %d %f %d %d %d\n", ptr->x, ptr->y, noise,
-//                                tot1, t2max, tot3);
-//                     }
-//                 } else {
-//                     printf("Bad pixel number %d %d\n", ptr->x, ptr->y);
-//                     good = 0;
-//                 }
-//             }
-//             if (good) {
-//                 ptr++;
-//                 nph_read++;
-//             }
-//             (m_num_left)--;
-//             if (nph_read >= n_clusters)
-//                 break;
-//         }
-//     }
-//     if (nph_read < n_clusters) {
-//         //         // keep on reading frames and photons until reaching
-//         //         n_clusters
-//         while (fread(&iframe, sizeof(iframe), 1, fp)) {
-//             //             // printf("%d\n",nph_read);
 
-//             if (fread(&nph, sizeof(nph), 1, fp)) {
-//                 //                 // printf("** %d\n",nph);
-//                 m_num_left = nph;
-//                 for (size_t iph = 0; iph < nph; iph++) {
-//                     //                     // read photons 1 by 1
-//                     size_t n_read = fread(reinterpret_cast(ptr),
-//                                           sizeof(Cluster3x3), 1, fp);
-//                     if (n_read != 1) {
-//                         clusters.resize(nph_read);
-//                         return clusters;
-//                         // return nph_read;
-//                     }
-//                     good = 1;
-//                     if (noise_map) {
-//                         if (ptr->x >= 0 && ptr->x < nx &&
-//                             ptr->y < ny) {
-//                             tot1 = ptr->data[4];
-//                             analyze_cluster(*ptr, &t2max, &tot3, NULL, NULL,
-//                                             NULL, NULL, NULL);
-//                             // noise = noise_map[ptr->y * nx + ptr->x];
-//                             noise = noise_map[ptr->y + ny * ptr->x];
-//                             if (tot1 > noise || t2max > 2 * noise ||
-//                                 tot3 > 3 * noise) {
-//                                 ;
-//                             } else
-//                                 good = 0;
-//                         } else {
-//                             printf("Bad pixel number %d %d\n", ptr->x, ptr->y);
-//                             good = 0;
-//                         }
-//                     }
-//                     if (good) {
-//                         ptr++;
-//                         nph_read++;
-//                     }
-//                     (m_num_left)--;
-//                     if (nph_read >= n_clusters)
-//                         break;
-//                 }
-//             }
-//             if (nph_read >= n_clusters)
-//                 break;
-//         }
-//     }
-//     // printf("%d\n",nph_read);
-//     clusters.resize(nph_read);
-//     return clusters;
-// }
+        auto noise = (*m_noise_map)(cl.y, cl.x); //TODO! check if this is correct
+        if (sum_1x1 <= noise || sum_2x2 <= 2 * noise || sum_3x3 <= 3 * noise) {
+            return false;
+        }
+    }
+    //we passed all checks
+    return true;
+}
 
 NDArray calculate_eta2(ClusterVector &clusters) {
     //TOTO! make work with 2x2 clusters
@@ -431,111 +393,4 @@ Eta2 calculate_eta2(Cluster2x2 &cl) {
 }
 
-
-int analyze_cluster(Cluster3x3 &cl, int32_t *t2, int32_t *t3, char *quad,
-                    double *eta2x, double *eta2y, double *eta3x,
-                    double *eta3y) {
-
-    return analyze_data(cl.data, t2, t3, quad, eta2x, eta2y, eta3x, eta3y);
-}
-
-int analyze_data(int32_t *data, int32_t *t2, int32_t *t3, char *quad,
-                 double *eta2x, double *eta2y, double *eta3x, double *eta3y) {
-
-    int ok = 1;
-
-    int32_t tot2[4];
-    int32_t t2max = 0;
-    char c = 0;
-    int32_t val, tot3;
-
-    tot3 = 0;
-    for (int i = 0; i < 4; i++)
-        tot2[i] = 0;
-
-    for (int ix = 0; ix < 3; ix++) {
-        for (int iy = 0; iy < 3; iy++) {
-            val = data[iy * 3 + ix];
-            // printf ("%d ",data[iy * 3 + ix]);
-            tot3 += val;
-            if (ix <= 1 && iy <= 1)
-                tot2[cBottomLeft] += val;
-            if (ix >= 1 && iy <= 1)
-                tot2[cBottomRight] += val;
-            if (ix <= 1 && iy >= 1)
-                tot2[cTopLeft] += val;
-            if (ix >= 1 && iy >= 1)
-                tot2[cTopRight] += val;
-        }
-        // printf ("\n");
-    }
-    // printf ("\n");
-
-    if (t2 || quad) {
-
-        t2max = tot2[0];
-        c = cBottomLeft;
-        for (int i = 1; i < 4; i++) {
-            if (tot2[i] > t2max) {
-                t2max = tot2[i];
-                c = i;
-            }
-        }
-        // printf("*** %d %d %d %d --
-        // %d\n",tot2[0],tot2[1],tot2[2],tot2[3],t2max);
-        if (quad)
-            *quad = c;
-        if (t2)
-            *t2 = t2max;
-    }
-
-    if (t3)
-        *t3 = tot3;
-
-    if (eta2x || eta2y) {
-        if (eta2x)
-            *eta2x = 0;
-        if (eta2y)
-            *eta2y = 0;
-        switch (c) {
-        case cBottomLeft:
-            if (eta2x && (data[3] + data[4]) != 0)
-                *eta2x = static_cast(data[4]) / (data[3] + data[4]);
-            if (eta2y && (data[1] + data[4]) != 0)
-                *eta2y = static_cast(data[4]) / (data[1] + data[4]);
-            break;
-        case cBottomRight:
-            if (eta2x && (data[2] + data[5]) != 0)
-                *eta2x = static_cast(data[5]) / (data[4] + data[5]);
-            if (eta2y && (data[1] + data[4]) != 0)
-                *eta2y = static_cast(data[4]) / (data[1] + data[4]);
-            break;
-        case cTopLeft:
-            if (eta2x && (data[7] + data[4]) != 0)
-                *eta2x = static_cast(data[4]) / (data[3] + data[4]);
-            if (eta2y && (data[7] + data[4]) != 0)
-                *eta2y = static_cast(data[7]) / (data[7] + data[4]);
-            break;
-        case cTopRight:
-            if (eta2x && t2max != 0)
-                *eta2x = static_cast(data[5]) / (data[5] + data[4]);
-            if (eta2y && t2max != 0)
-                *eta2y = static_cast(data[7]) / (data[7] + data[4]);
-            break;
-        default:;
-        }
-    }
-
-    if (eta3x || eta3y) {
-        if (eta3x && (data[3] + data[4] + data[5]) != 0)
-            *eta3x = static_cast(-data[3] + data[3 + 2]) /
-                     (data[3] + data[4] + data[5]);
-        if (eta3y && (data[1] + data[4] + data[7]) != 0)
-            *eta3y = static_cast(-data[1] + data[2 * 3 + 1]) /
-                     (data[1] + data[4] + data[7]);
-    }
-
-    return ok;
-}
-
 } // namespace aare
\ No newline at end of file
diff --git a/src/ClusterFile.test.cpp b/src/ClusterFile.test.cpp
new file mode 100644
index 0000000..a0eed04
--- /dev/null
+++ b/src/ClusterFile.test.cpp
@@ -0,0 +1,80 @@
+#include "aare/ClusterFile.hpp"
+#include "test_config.hpp"
+
+
+#include "aare/defs.hpp"
+#include
+#include
+
+
+
+
+using aare::ClusterFile;
+
+TEST_CASE("Read one frame from a a cluster file", "[.integration]") {
+    //We know that the frame has 97 clusters
+    auto fpath = test_data_path() / "clusters" / "single_frame_97_clustrers.clust";
+    REQUIRE(std::filesystem::exists(fpath));
+
+    ClusterFile f(fpath);
+    auto clusters = f.read_frame();
+    REQUIRE(clusters.size() == 97);
+    REQUIRE(clusters.frame_number() == 135);
+}
+
+TEST_CASE("Read one frame using ROI", "[.integration]") {
+    //We know that the frame has 97 clusters
+    auto fpath = test_data_path() / "clusters" / "single_frame_97_clustrers.clust";
+    REQUIRE(std::filesystem::exists(fpath));
+
+    ClusterFile f(fpath);
+    aare::ROI roi;
+    roi.xmin = 0;
+    roi.xmax = 50;
+    roi.ymin = 200;
+    roi.ymax = 249;
+    f.set_roi(roi);
+    auto clusters = f.read_frame();
+    REQUIRE(clusters.size() == 49);
+    REQUIRE(clusters.frame_number() == 135);
+
+    //Check that all clusters are within the ROI
+    for (size_t i = 0; i < clusters.size(); i++) {
+        auto c = clusters.at(i);
+        REQUIRE(c.x >= roi.xmin);
+        REQUIRE(c.x <= roi.xmax);
+        REQUIRE(c.y >= roi.ymin);
+        REQUIRE(c.y <= roi.ymax);
+    }
+
+}
+
+
+TEST_CASE("Read clusters from single frame file", "[.integration]") {
+
+    auto fpath = test_data_path() / "clusters" / "single_frame_97_clustrers.clust";
+    REQUIRE(std::filesystem::exists(fpath));
+
+    SECTION("Read fewer clusters than available") {
+        ClusterFile f(fpath);
+        auto clusters = f.read_clusters(50);
+        REQUIRE(clusters.size() == 50);
+        REQUIRE(clusters.frame_number() == 135);
+    }
+    SECTION("Read more clusters than available") {
+        ClusterFile f(fpath);
+        // 100 is the maximum number of clusters read
+        auto clusters = f.read_clusters(100);
+        REQUIRE(clusters.size() == 97);
+        REQUIRE(clusters.frame_number() == 135);
+    }
+    SECTION("Read all clusters") {
+        ClusterFile f(fpath);
+        auto clusters = f.read_clusters(97);
+        REQUIRE(clusters.size() == 97);
+        REQUIRE(clusters.frame_number() == 135);
+    }
+
+
+
+}
diff --git a/tests/test_config.hpp.in b/tests/test_config.hpp.in
index 62993b7..e314b8f 100644
--- a/tests/test_config.hpp.in
+++ b/tests/test_config.hpp.in
@@ -7,6 +7,6 @@ inline auto test_data_path(){
     if(const char* env_p = std::getenv("AARE_TEST_DATA")){
         return std::filesystem::path(env_p);
     }else{
-        throw std::runtime_error("AARE_TEST_DATA_PATH not set");
+        throw std::runtime_error("Path to test data: $AARE_TEST_DATA not set");
    }
 }
\ No newline at end of file
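
A minimal usage sketch of the new ClusterFile selection API introduced in this patch (set_roi together with read_clusters/read_frame). The file path below is hypothetical, and the noise-map and gain-map setters are left out because their NDView element types are not visible in this diff; everything else uses only calls declared above.

```cpp
#include "aare/ClusterFile.hpp"
#include "aare/defs.hpp"

#include <filesystem>
#include <iostream>

int main() {
    // Hypothetical path to a .clust file written by ClusterFile::write_frame
    std::filesystem::path fpath("clusters/some_run.clust");

    aare::ClusterFile f(fpath); // default chunk size and mode, as in the tests above

    // Restrict reading to a region of interest; only clusters inside it are returned
    aare::ROI roi;
    roi.xmin = 0;
    roi.xmax = 50;
    roi.ymin = 200;
    roi.ymax = 249;
    f.set_roi(roi);

    // Read up to 1000 clusters that pass the ROI cut (fewer are returned at end of file)
    auto clusters = f.read_clusters(1000);
    std::cout << "Read " << clusters.size() << " clusters, last frame number "
              << clusters.frame_number() << "\n";

    f.close(); // optional, the destructor also closes the file
    return 0;
}
```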