From b7a47576a15cdb808d30ec67c2b9556510fddc19 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Wed, 19 Feb 2025 07:19:59 +0100 Subject: [PATCH 01/23] Multi threaded fitting and returning chi2 (#132) Co-authored-by: Patrick Co-authored-by: JulianHeymes Co-authored-by: Dhanya Thattil --- CMakeLists.txt | 8 +- conda-recipe/meta.yaml | 3 +- include/aare/Fit.hpp | 42 ++++-- include/aare/NDArray.hpp | 29 ++++ include/aare/NDView.hpp | 11 +- include/aare/utils/par.hpp | 18 +++ pyproject.toml | 3 +- python/CMakeLists.txt | 2 +- python/examples/play.py | 34 +---- python/src/fit.hpp | 77 ++++++---- src/Fit.cpp | 294 +++++++++++++++++-------------------- src/NDArray.test.cpp | 28 ++++ 12 files changed, 317 insertions(+), 232 deletions(-) create mode 100644 include/aare/utils/par.hpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 62a3878..b93b513 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -85,7 +85,7 @@ if(AARE_FETCH_LMFIT) GIT_TAG main PATCH_COMMAND ${lmfit_patch} UPDATE_DISCONNECTED 1 - EXCLUDE_FROM_ALL + EXCLUDE_FROM_ALL 1 ) #Disable what we don't need from lmfit set(BUILD_TESTING OFF CACHE BOOL "") @@ -97,9 +97,6 @@ if(AARE_FETCH_LMFIT) FetchContent_MakeAvailable(lmfit) set_property(TARGET lmfit PROPERTY POSITION_INDEPENDENT_CODE ON) - - target_include_directories (lmfit PUBLIC "${libzmq_SOURCE_DIR}/lib") - message(STATUS "lmfit include dir: ${lmfit_SOURCE_DIR}/lib") else() find_package(lmfit REQUIRED) endif() @@ -370,7 +367,8 @@ target_link_libraries( ${STD_FS_LIB} # from helpers.cmake PRIVATE aare_compiler_flags - lmfit + "$" + ) set_target_properties(aare_core PROPERTIES diff --git a/conda-recipe/meta.yaml b/conda-recipe/meta.yaml index c405e90..ffa95a7 100644 --- a/conda-recipe/meta.yaml +++ b/conda-recipe/meta.yaml @@ -1,6 +1,7 @@ package: name: aare - version: 2025.2.12 #TODO! how to not duplicate this? + version: 2025.2.18 #TODO! how to not duplicate this? 
+ source: diff --git a/include/aare/Fit.hpp b/include/aare/Fit.hpp index 20ef4ef..6fd10aa 100644 --- a/include/aare/Fit.hpp +++ b/include/aare/Fit.hpp @@ -17,6 +17,14 @@ NDArray pol1(NDView x, NDView par); } // namespace func + +/** + * @brief Estimate the initial parameters for a Gaussian fit + */ +std::array gaus_init_par(const NDView x, const NDView y); + +std::array pol1_init_par(const NDView x, const NDView y); + static constexpr int DEFAULT_NUM_THREADS = 4; /** @@ -33,7 +41,11 @@ NDArray fit_gaus(NDView x, NDView y); * @param y y vales, layout [row, col, values] * @param n_threads number of threads to use */ -NDArray fit_gaus(NDView x, NDView y, int n_threads = DEFAULT_NUM_THREADS); + +NDArray fit_gaus(NDView x, NDView y, + int n_threads = DEFAULT_NUM_THREADS); + + /** @@ -45,10 +57,12 @@ NDArray fit_gaus(NDView x, NDView y, int n_thre * @param par_err_out output error parameters */ void fit_gaus(NDView x, NDView y, NDView y_err, - NDView par_out, NDView par_err_out); + NDView par_out, NDView par_err_out, + double& chi2); /** - * @brief Fit a 1D Gaussian to each pixel with error estimates. Data layout [row, col, values] + * @brief Fit a 1D Gaussian to each pixel with error estimates. 
Data layout + * [row, col, values] * @param x x values * @param y y vales, layout [row, col, values] * @param y_err error in y, layout [row, col, values] @@ -57,20 +71,22 @@ void fit_gaus(NDView x, NDView y, NDView y_err, * @param n_threads number of threads to use */ void fit_gaus(NDView x, NDView y, NDView y_err, - NDView par_out, NDView par_err_out, int n_threads = DEFAULT_NUM_THREADS); - + NDView par_out, NDView par_err_out, NDView chi2_out, + int n_threads = DEFAULT_NUM_THREADS + ); NDArray fit_pol1(NDView x, NDView y); -NDArray fit_pol1(NDView x, NDView y, int n_threads = DEFAULT_NUM_THREADS); +NDArray fit_pol1(NDView x, NDView y, + int n_threads = DEFAULT_NUM_THREADS); -void fit_pol1(NDView x, NDView y, - NDView y_err, NDView par_out, - NDView par_err_out); +void fit_pol1(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, double& chi2); + +// TODO! not sure we need to offer the different version in C++ +void fit_pol1(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out,NDView chi2_out, + int n_threads = DEFAULT_NUM_THREADS); -//TODO! 
not sure we need to offer the different version in C++ -void fit_pol1(NDView x, NDView y, - NDView y_err, NDView par_out, - NDView par_err_out, int n_threads = DEFAULT_NUM_THREADS); } // namespace aare \ No newline at end of file diff --git a/include/aare/NDArray.hpp b/include/aare/NDArray.hpp index 15beb02..cfa5b5c 100644 --- a/include/aare/NDArray.hpp +++ b/include/aare/NDArray.hpp @@ -69,6 +69,11 @@ class NDArray : public ArrayExpr, Ndim> { std::copy(v.begin(), v.end(), begin()); } + template + NDArray(const std::array& arr) : NDArray({Size}) { + std::copy(arr.begin(), arr.end(), begin()); + } + // Move constructor NDArray(NDArray &&other) noexcept : shape_(other.shape_), strides_(c_strides(shape_)), @@ -105,6 +110,20 @@ class NDArray : public ArrayExpr, Ndim> { NDArray &operator-=(const NDArray &other); NDArray &operator*=(const NDArray &other); + //Write directly to the data array, or create a new one + template + NDArray& operator=(const std::array &other){ + if(Size != size_){ + delete[] data_; + size_ = Size; + data_ = new T[size_]; + } + for (size_t i = 0; i < Size; ++i) { + data_[i] = other[i]; + } + return *this; + } + // NDArray& operator/=(const NDArray& other); template NDArray &operator/=(const NDArray &other) { @@ -135,6 +154,11 @@ class NDArray : public ArrayExpr, Ndim> { NDArray &operator&=(const T & /*mask*/); + + + + + void sqrt() { for (int i = 0; i < size_; ++i) { data_[i] = std::sqrt(data_[i]); @@ -318,6 +342,9 @@ NDArray &NDArray::operator+=(const T &value) { return *this; } + + + template NDArray NDArray::operator+(const T &value) { NDArray result = *this; @@ -418,4 +445,6 @@ NDArray load(const std::string &pathname, return img; } + + } // namespace aare \ No newline at end of file diff --git a/include/aare/NDView.hpp b/include/aare/NDView.hpp index e3a6d30..f53f758 100644 --- a/include/aare/NDView.hpp +++ b/include/aare/NDView.hpp @@ -1,5 +1,5 @@ #pragma once - +#include "aare/defs.hpp" #include "aare/ArrayExpr.hpp" #include @@ -99,6 
+99,15 @@ template class NDView : public ArrayExpr()); } + + template + NDView& operator=(const std::array &arr) { + if(size() != arr.size()) + throw std::runtime_error(LOCATION + "Array and NDView size mismatch"); + std::copy(arr.begin(), arr.end(), begin()); + return *this; + } + NDView &operator=(const T val) { for (auto it = begin(); it != end(); ++it) *it = val; diff --git a/include/aare/utils/par.hpp b/include/aare/utils/par.hpp new file mode 100644 index 0000000..efb1c77 --- /dev/null +++ b/include/aare/utils/par.hpp @@ -0,0 +1,18 @@ +#include +#include +#include + +namespace aare { + + template + void RunInParallel(F func, const std::vector>& tasks) { + // auto tasks = split_task(0, y.shape(0), n_threads); + std::vector threads; + for (auto &task : tasks) { + threads.push_back(std::thread(func, task.first, task.second)); + } + for (auto &thread : threads) { + thread.join(); + } + } +} // namespace aare \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 74e624f..6dc941e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,8 @@ build-backend = "scikit_build_core.build" [project] name = "aare" -version = "2025.2.12" +version = "2025.2.18" + [tool.scikit-build] cmake.verbose = true diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index 2aaa222..09de736 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -50,10 +50,10 @@ set(PYTHON_EXAMPLES ) - # Copy the python examples to the build directory foreach(FILE ${PYTHON_EXAMPLES}) configure_file(${FILE} ${CMAKE_BINARY_DIR}/${FILE} ) + message(STATUS "Copying ${FILE} to ${CMAKE_BINARY_DIR}/${FILE}") endforeach(FILE ${PYTHON_EXAMPLES}) diff --git a/python/examples/play.py b/python/examples/play.py index f1a869b..37754df 100644 --- a/python/examples/play.py +++ b/python/examples/play.py @@ -8,38 +8,20 @@ import numpy as np import boost_histogram as bh import time -<<<<<<< HEAD -from aare import File, ClusterFinder, VarClusterFinder, ClusterFile, CtbRawFile -from 
aare import gaus, fit_gaus -base = Path('/mnt/sls_det_storage/moench_data/Julian/MOENCH05/20250113_first_xrays_redo/raw_files/') -cluster_file = Path('/home/l_msdetect/erik/tmp/Cu.clust') +import aare -t0 = time.perf_counter() -offset= -0.5 -hist3d = bh.Histogram( - bh.axis.Regular(160, 0+offset, 160+offset), #x - bh.axis.Regular(150, 0+offset, 150+offset), #y - bh.axis.Regular(200, 0, 6000), #ADU -) +data = np.random.normal(10, 1, 1000) -total_clusters = 0 -with ClusterFile(cluster_file, chunk_size = 1000) as f: - for i, clusters in enumerate(f): - arr = np.array(clusters) - total_clusters += clusters.size - hist3d.fill(arr['y'],arr['x'], clusters.sum_2x2()) #python talks [row, col] cluster finder [x,y] -======= -from aare import RawFile +hist = bh.Histogram(bh.axis.Regular(10, 0, 20)) +hist.fill(data) -f = RawFile('/mnt/sls_det_storage/jungfrau_data1/vadym_tests/jf12_M431/laser_scan/laserScan_pedestal_G0_master_0.json') -print(f'{f.frame_number(1)}') +x = hist.axes[0].centers +y = hist.values() +y_err = np.sqrt(y)+1 +res = aare.fit_gaus(x, y, y_err, chi2 = True) -for i in range(10): - header, img = f.read_frame() - print(header['frameNumber'], img.shape) ->>>>>>> developer t_elapsed = time.perf_counter()-t0 diff --git a/python/src/fit.hpp b/python/src/fit.hpp index 60cdecc..8e6cfef 100644 --- a/python/src/fit.hpp +++ b/python/src/fit.hpp @@ -7,6 +7,8 @@ #include "aare/Fit.hpp" namespace py = pybind11; +using namespace pybind11::literals; + void define_fit_bindings(py::module &m) { @@ -29,7 +31,8 @@ void define_fit_bindings(py::module &m) { The points at which to evaluate the Gaussian function. par : array_like The parameters of the Gaussian function. The first element is the amplitude, the second element is the mean, and the third element is the standard deviation. 
- )", py::arg("x"), py::arg("par")); + )", + py::arg("x"), py::arg("par")); m.def( "pol1", @@ -49,7 +52,9 @@ void define_fit_bindings(py::module &m) { The points at which to evaluate the polynomial function. par : array_like The parameters of the polynomial function. The first element is the intercept, and the second element is the slope. - )", py::arg("x"), py::arg("par")); + )", + py::arg("x"), py::arg("par")); + m.def( "fit_gaus", @@ -72,7 +77,8 @@ void define_fit_bindings(py::module &m) { throw std::runtime_error("Data must be 1D or 3D"); } }, -R"( + R"( + Fit a 1D Gaussian to data. Parameters @@ -90,8 +96,9 @@ n_threads : int, optional "fit_gaus", [](py::array_t x, py::array_t y, - py::array_t - y_err, int n_threads) { + py::array_t y_err, + int n_threads) { + if (y.ndim() == 3) { // Allocate memory for the output // Need to have pointers to allow python to manage @@ -99,15 +106,20 @@ n_threads : int, optional auto par = new NDArray({y.shape(0), y.shape(1), 3}); auto par_err = new NDArray({y.shape(0), y.shape(1), 3}); + auto chi2 = new NDArray({y.shape(0), y.shape(1)}); + // Make views of the numpy arrays auto y_view = make_view_3d(y); auto y_view_err = make_view_3d(y_err); auto x_view = make_view_1d(x); + aare::fit_gaus(x_view, y_view, y_view_err, par->view(), - par_err->view(), n_threads); - // return return_image_data(par); - return py::make_tuple(return_image_data(par), - return_image_data(par_err)); + par_err->view(), chi2->view(), n_threads); + + return py::dict("par"_a = return_image_data(par), + "par_err"_a = return_image_data(par_err), + "chi2"_a = return_image_data(chi2), + "Ndf"_a = y.shape(2) - 3); } else if (y.ndim() == 1) { // Allocate memory for the output // Need to have pointers to allow python to manage @@ -120,15 +132,21 @@ n_threads : int, optional auto y_view_err = make_view_1d(y_err); auto x_view = make_view_1d(x); + + double chi2 = 0; aare::fit_gaus(x_view, y_view, y_view_err, par->view(), - par_err->view()); - return 
py::make_tuple(return_image_data(par), - return_image_data(par_err)); + par_err->view(), chi2); + + return py::dict("par"_a = return_image_data(par), + "par_err"_a = return_image_data(par_err), + "chi2"_a = chi2, "Ndf"_a = y.size() - 3); + } else { throw std::runtime_error("Data must be 1D or 3D"); } }, -R"( + R"( + Fit a 1D Gaussian to data with error estimates. Parameters @@ -172,11 +190,11 @@ n_threads : int, optional "fit_pol1", [](py::array_t x, py::array_t y, - py::array_t - y_err, int n_threads) { + py::array_t y_err, + int n_threads) { if (y.ndim() == 3) { - auto par = - new NDArray({y.shape(0), y.shape(1), 2}); + auto par = new NDArray({y.shape(0), y.shape(1), 2}); + auto par_err = new NDArray({y.shape(0), y.shape(1), 2}); @@ -184,10 +202,15 @@ n_threads : int, optional auto y_view_err = make_view_3d(y_err); auto x_view = make_view_1d(x); - aare::fit_pol1(x_view, y_view,y_view_err, par->view(), - par_err->view(), n_threads); - return py::make_tuple(return_image_data(par), - return_image_data(par_err)); + auto chi2 = new NDArray({y.shape(0), y.shape(1)}); + + aare::fit_pol1(x_view, y_view, y_view_err, par->view(), + par_err->view(), chi2->view(), n_threads); + return py::dict("par"_a = return_image_data(par), + "par_err"_a = return_image_data(par_err), + "chi2"_a = return_image_data(chi2), + "Ndf"_a = y.shape(2) - 2); + } else if (y.ndim() == 1) { auto par = new NDArray({2}); @@ -197,15 +220,19 @@ n_threads : int, optional auto y_view_err = make_view_1d(y_err); auto x_view = make_view_1d(x); + double chi2 = 0; + aare::fit_pol1(x_view, y_view, y_view_err, par->view(), - par_err->view()); - return py::make_tuple(return_image_data(par), - return_image_data(par_err)); + par_err->view(), chi2); + return py::dict("par"_a = return_image_data(par), + "par_err"_a = return_image_data(par_err), + "chi2"_a = chi2, "Ndf"_a = y.size() - 2); + } else { throw std::runtime_error("Data must be 1D or 3D"); } }, -R"( + R"( Fit a 1D polynomial to data with error estimates. 
Parameters diff --git a/src/Fit.cpp b/src/Fit.cpp index 08ecaec..3001efd 100644 --- a/src/Fit.cpp +++ b/src/Fit.cpp @@ -1,11 +1,13 @@ #include "aare/Fit.hpp" #include "aare/utils/task.hpp" - +#include "aare/utils/par.hpp" #include #include - #include +#include + + namespace aare { namespace func { @@ -35,33 +37,11 @@ NDArray pol1(NDView x, NDView par) { } // namespace func NDArray fit_gaus(NDView x, NDView y) { - NDArray result({3}, 0); - lm_control_struct control = lm_control_double; + NDArray result = gaus_init_par(x, y); + lm_status_struct status; - // Estimate the initial parameters for the fit - std::vector start_par{0, 0, 0}; - auto e = std::max_element(y.begin(), y.end()); - auto idx = std::distance(y.begin(), e); - - start_par[0] = *e; // For amplitude we use the maximum value - start_par[1] = - x[idx]; // For the mean we use the x value of the maximum value - - // For sigma we estimate the fwhm and divide by 2.35 - // assuming equally spaced x values - auto delta = x[1] - x[0]; - start_par[2] = - std::count_if(y.begin(), y.end(), - [e, delta](double val) { return val > *e / 2; }) * - delta / 2.35; - - lmfit::result_t res(start_par); - lmcurve(res.par.size(), res.par.data(), x.size(), x.data(), y.data(), - aare::func::gaus, &control, &res.status); - - result(0) = res.par[0]; - result(1) = res.par[1]; - result(2) = res.par[2]; + lmcurve(result.size(), result.data(), x.size(), x.data(), y.data(), + aare::func::gaus, &lm_control_double, &status); return result; } @@ -81,65 +61,17 @@ NDArray fit_gaus(NDView x, NDView y, } } }; - auto tasks = split_task(0, y.shape(0), n_threads); - std::vector threads; - for (auto &task : tasks) { - threads.push_back(std::thread(process, task.first, task.second)); - } - for (auto &thread : threads) { - thread.join(); - } + auto tasks = split_task(0, y.shape(0), n_threads); + RunInParallel(process, tasks); return result; } -void fit_gaus(NDView x, NDView y, NDView y_err, - NDView par_out, NDView par_err_out, - int n_threads) { - 
- auto process = [&](ssize_t first_row, ssize_t last_row) { - for (ssize_t row = first_row; row < last_row; row++) { - for (ssize_t col = 0; col < y.shape(1); col++) { - NDView y_view(&y(row, col, 0), {y.shape(2)}); - NDView y_err_view(&y_err(row, col, 0), - {y_err.shape(2)}); - NDView par_out_view(&par_out(row, col, 0), - {par_out.shape(2)}); - NDView par_err_out_view(&par_err_out(row, col, 0), - {par_err_out.shape(2)}); - fit_gaus(x, y_view, y_err_view, par_out_view, par_err_out_view); - } - } - }; - - auto tasks = split_task(0, y.shape(0), n_threads); - std::vector threads; - for (auto &task : tasks) { - threads.push_back(std::thread(process, task.first, task.second)); - } - for (auto &thread : threads) { - thread.join(); - } -} - -void fit_gaus(NDView x, NDView y, NDView y_err, - NDView par_out, NDView par_err_out) { - // Check that we have the correct sizes - if (y.size() != x.size() || y.size() != y_err.size() || - par_out.size() != 3 || par_err_out.size() != 3) { - throw std::runtime_error("Data, x, data_err must have the same size " - "and par_out, par_err_out must have size 3"); - } - - lm_control_struct control = lm_control_double; - - // Estimate the initial parameters for the fit - std::vector start_par{0, 0, 0}; - std::vector start_par_err{0, 0, 0}; - std::vector start_cov{0, 0, 0, 0, 0, 0, 0, 0, 0}; - +std::array gaus_init_par(const NDView x, const NDView y) { + std::array start_par{0, 0, 0}; auto e = std::max_element(y.begin(), y.end()); auto idx = std::distance(y.begin(), e); + start_par[0] = *e; // For amplitude we use the maximum value start_par[1] = x[idx]; // For the mean we use the x value of the maximum value @@ -152,66 +84,83 @@ void fit_gaus(NDView x, NDView y, NDView y_err, [e, delta](double val) { return val > *e / 2; }) * delta / 2.35; - lmfit::result_t res(start_par); - lmfit::result_t res_err(start_par_err); - lmfit::result_t cov(start_cov); - - // TODO can we make lmcurve write the result directly where is should be? 
- lmcurve2(res.par.size(), res.par.data(), res_err.par.data(), cov.par.data(), - x.size(), x.data(), y.data(), y_err.data(), aare::func::gaus, - &control, &res.status); - - par_out(0) = res.par[0]; - par_out(1) = res.par[1]; - par_out(2) = res.par[2]; - par_err_out(0) = res_err.par[0]; - par_err_out(1) = res_err.par[1]; - par_err_out(2) = res_err.par[2]; + return start_par; } -void fit_pol1(NDView x, NDView y, NDView y_err, - NDView par_out, NDView par_err_out) { + +std::array pol1_init_par(const NDView x, const NDView y){ + // Estimate the initial parameters for the fit + std::array start_par{0, 0}; + + + auto y2 = std::max_element(y.begin(), y.end()); + auto x2 = x[std::distance(y.begin(), y2)]; + auto y1 = std::min_element(y.begin(), y.end()); + auto x1 = x[std::distance(y.begin(), y1)]; + + start_par[0] = + (*y2 - *y1) / (x2 - x1); // For amplitude we use the maximum value + start_par[1] = + *y1 - ((*y2 - *y1) / (x2 - x1)) * + x1; // For the mean we use the x value of the maximum value + return start_par; +} + +void fit_gaus(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, + double &chi2) { + // Check that we have the correct sizes if (y.size() != x.size() || y.size() != y_err.size() || - par_out.size() != 2 || par_err_out.size() != 2) { + par_out.size() != 3 || par_err_out.size() != 3) { throw std::runtime_error("Data, x, data_err must have the same size " - "and par_out, par_err_out must have size 2"); + "and par_out, par_err_out must have size 3"); } - lm_control_struct control = lm_control_double; - // Estimate the initial parameters for the fit - std::vector start_par{0, 0}; - std::vector start_par_err{0, 0}; - std::vector start_cov{0, 0, 0, 0}; + // /* Collection of output parameters for status info. */ + // typedef struct { + // double fnorm; /* norm of the residue vector fvec. */ + // int nfev; /* actual number of iterations. */ + // int outcome; /* Status indicator. 
Nonnegative values are used as + // index + // for the message text lm_infmsg, set in lmmin.c. */ + // int userbreak; /* Set when function evaluation requests termination. + // */ + // } lm_status_struct; - auto y2 = std::max_element(y.begin(), y.end()); - auto x2 = x[std::distance(y.begin(), y2)]; - auto y1 = std::min_element(y.begin(), y.end()); - auto x1 = x[std::distance(y.begin(), y1)]; - start_par[0] = - (*y2 - *y1) / (x2 - x1); // For amplitude we use the maximum value - start_par[1] = - *y1 - ((*y2 - *y1) / (x2 - x1)) * - x1; // For the mean we use the x value of the maximum value + lm_status_struct status; + par_out = gaus_init_par(x, y); + std::array cov{0, 0, 0, 0, 0, 0, 0 , 0 , 0}; - lmfit::result_t res(start_par); - lmfit::result_t res_err(start_par_err); - lmfit::result_t cov(start_cov); + // void lmcurve2( const int n_par, double *par, double *parerr, double *covar, const int m_dat, const double *t, const double *y, const double *dy, double (*f)( const double ti, const double *par ), const lm_control_struct *control, lm_status_struct *status); + // n_par - Number of free variables. Length of parameter vector par. + // par - Parameter vector. On input, it must contain a reasonable guess. On output, it contains the solution found to minimize ||r||. + // parerr - Parameter uncertainties vector. Array of length n_par or NULL. On output, unless it or covar is NULL, it contains the weighted parameter uncertainties for the found parameters. + // covar - Covariance matrix. Array of length n_par * n_par or NULL. On output, unless it is NULL, it contains the covariance matrix. + // m_dat - Number of data points. Length of vectors t, y, dy. Must statisfy n_par <= m_dat. + // t - Array of length m_dat. Contains the abcissae (time, or "x") for which function f will be evaluated. + // y - Array of length m_dat. Contains the ordinate values that shall be fitted. + // dy - Array of length m_dat. Contains the standard deviations of the values y. 
+ // f - A user-supplied parametric function f(ti;par). + // control - Parameter collection for tuning the fit procedure. In most cases, the default &lm_control_double is adequate. If f is only computed with single-precision accuracy, &lm_control_float should be used. Parameters are explained in lmmin2(3). + // status - A record used to return information about the minimization process: For details, see lmmin2(3). - lmcurve2(res.par.size(), res.par.data(), res_err.par.data(), cov.par.data(), - x.size(), x.data(), y.data(), y_err.data(), aare::func::pol1, - &control, &res.status); + lmcurve2(par_out.size(), par_out.data(), par_err_out.data(), cov.data(), + x.size(), x.data(), y.data(), y_err.data(), aare::func::gaus, + &lm_control_double, &status); - par_out(0) = res.par[0]; - par_out(1) = res.par[1]; - par_err_out(0) = res_err.par[0]; - par_err_out(1) = res_err.par[1]; + // Calculate chi2 + chi2 = 0; + for (size_t i = 0; i < y.size(); i++) { + chi2 += std::pow((y(i) - func::gaus(x(i), par_out.data())) / y_err(i), 2); + } } -void fit_pol1(NDView x, NDView y, NDView y_err, - NDView par_out, NDView par_err_out, +void fit_gaus(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, NDView chi2_out, + int n_threads) { auto process = [&](ssize_t first_row, ssize_t last_row) { @@ -224,21 +173,69 @@ void fit_pol1(NDView x, NDView y, NDView y_err, {par_out.shape(2)}); NDView par_err_out_view(&par_err_out(row, col, 0), {par_err_out.shape(2)}); - fit_pol1(x, y_view, y_err_view, par_out_view, par_err_out_view); + + fit_gaus(x, y_view, y_err_view, par_out_view, par_err_out_view, + chi2_out(row, col)); + } } }; auto tasks = split_task(0, y.shape(0), n_threads); - std::vector threads; - for (auto &task : tasks) { - threads.push_back(std::thread(process, task.first, task.second)); + RunInParallel(process, tasks); +} + +void fit_pol1(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, double& chi2) { + + // Check that we have the correct sizes + 
if (y.size() != x.size() || y.size() != y_err.size() || + par_out.size() != 2 || par_err_out.size() != 2) { + throw std::runtime_error("Data, x, data_err must have the same size " + "and par_out, par_err_out must have size 2"); } - for (auto &thread : threads) { - thread.join(); + + lm_status_struct status; + par_out = pol1_init_par(x, y); + std::array cov{0, 0, 0, 0}; + + lmcurve2(par_out.size(), par_out.data(), par_err_out.data(), cov.data(), + x.size(), x.data(), y.data(), y_err.data(), aare::func::pol1, + &lm_control_double, &status); + + // Calculate chi2 + chi2 = 0; + for (size_t i = 0; i < y.size(); i++) { + chi2 += std::pow((y(i) - func::pol1(x(i), par_out.data())) / y_err(i), 2); } } +void fit_pol1(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, NDView chi2_out, + int n_threads) { + + auto process = [&](ssize_t first_row, ssize_t last_row) { + for (ssize_t row = first_row; row < last_row; row++) { + for (ssize_t col = 0; col < y.shape(1); col++) { + NDView y_view(&y(row, col, 0), {y.shape(2)}); + NDView y_err_view(&y_err(row, col, 0), + {y_err.shape(2)}); + NDView par_out_view(&par_out(row, col, 0), + {par_out.shape(2)}); + NDView par_err_out_view(&par_err_out(row, col, 0), + {par_err_out.shape(2)}); + + fit_pol1(x, y_view, y_err_view, par_out_view, par_err_out_view, chi2_out(row, col)); + + } + } + }; + + auto tasks = split_task(0, y.shape(0), n_threads); + RunInParallel(process, tasks); + +} + NDArray fit_pol1(NDView x, NDView y) { // // Check that we have the correct sizes // if (y.size() != x.size() || y.size() != y_err.size() || @@ -246,28 +243,12 @@ NDArray fit_pol1(NDView x, NDView y) { // throw std::runtime_error("Data, x, data_err must have the same size " // "and par_out, par_err_out must have size 2"); // } - NDArray par({2}, 0); + NDArray par = pol1_init_par(x, y); - lm_control_struct control = lm_control_double; + lm_status_struct status; + lmcurve(par.size(), par.data(), x.size(), x.data(), y.data(), + 
aare::func::pol1, &lm_control_double, &status); - // Estimate the initial parameters for the fit - std::vector start_par{0, 0}; - - auto y2 = std::max_element(y.begin(), y.end()); - auto x2 = x[std::distance(y.begin(), y2)]; - auto y1 = std::min_element(y.begin(), y.end()); - auto x1 = x[std::distance(y.begin(), y1)]; - - start_par[0] = (*y2 - *y1) / (x2 - x1); - start_par[1] = *y1 - ((*y2 - *y1) / (x2 - x1)) * x1; - - lmfit::result_t res(start_par); - - lmcurve(res.par.size(), res.par.data(), x.size(), x.data(), y.data(), - aare::func::pol1, &control, &res.status); - - par(0) = res.par[0]; - par(1) = res.par[1]; return par; } @@ -287,13 +268,8 @@ NDArray fit_pol1(NDView x, NDView y, }; auto tasks = split_task(0, y.shape(0), n_threads); - std::vector threads; - for (auto &task : tasks) { - threads.push_back(std::thread(process, task.first, task.second)); - } - for (auto &thread : threads) { - thread.join(); - } + + RunInParallel(process, tasks); return result; } diff --git a/src/NDArray.test.cpp b/src/NDArray.test.cpp index 54099fd..942481c 100644 --- a/src/NDArray.test.cpp +++ b/src/NDArray.test.cpp @@ -379,4 +379,32 @@ TEST_CASE("Elementwise operations on images") { REQUIRE(A(i) == a_val); } } +} + +TEST_CASE("Assign an std::array to a 1D NDArray") { + NDArray a{{5}, 0}; + std::array b{1, 2, 3, 4, 5}; + a = b; + for (uint32_t i = 0; i < a.size(); ++i) { + REQUIRE(a(i) == b[i]); + } +} + +TEST_CASE("Assign an std::array to a 1D NDArray of a different size") { + NDArray a{{3}, 0}; + std::array b{1, 2, 3, 4, 5}; + a = b; + + REQUIRE(a.size() == 5); + for (uint32_t i = 0; i < a.size(); ++i) { + REQUIRE(a(i) == b[i]); + } +} + +TEST_CASE("Construct an NDArray from an std::array") { + std::array b{1, 2, 3, 4, 5}; + NDArray a(b); + for (uint32_t i = 0; i < a.size(); ++i) { + REQUIRE(a(i) == b[i]); + } } \ No newline at end of file From 1d2c38c1d4d48ed03f79fe072f64521c2ee54738 Mon Sep 17 00:00:00 2001 From: Xiangyu Xie <45243914+xiangyuxie@users.noreply.github.com> Date: 
Wed, 19 Feb 2025 16:11:24 +0100 Subject: [PATCH 02/23] Enable VarClusterFinder (#134) Co-authored-by: xiangyu.xie --- include/aare/VarClusterFinder.hpp | 2 +- python/src/var_cluster.hpp | 35 ++++++++++++++++++++++++++++++- 2 files changed, 35 insertions(+), 2 deletions(-) diff --git a/include/aare/VarClusterFinder.hpp b/include/aare/VarClusterFinder.hpp index d4d51cc..ea62a9d 100644 --- a/include/aare/VarClusterFinder.hpp +++ b/include/aare/VarClusterFinder.hpp @@ -7,7 +7,7 @@ #include "aare/NDArray.hpp" -const int MAX_CLUSTER_SIZE = 200; +const int MAX_CLUSTER_SIZE = 50; namespace aare { template class VarClusterFinder { diff --git a/python/src/var_cluster.hpp b/python/src/var_cluster.hpp index f3a5741..0819a44 100644 --- a/python/src/var_cluster.hpp +++ b/python/src/var_cluster.hpp @@ -19,7 +19,7 @@ using namespace::aare; void define_var_cluster_finder_bindings(py::module &m) { PYBIND11_NUMPY_DTYPE(VarClusterFinder::Hit, size, row, col, - reserved, energy, max); + reserved, energy, max, rows, cols, enes); py::class_>(m, "VarClusterFinder") .def(py::init, double>()) @@ -28,6 +28,15 @@ void define_var_cluster_finder_bindings(py::module &m) { auto ptr = new NDArray(self.labeled()); return return_image_data(ptr); }) + .def("set_noiseMap", + [](VarClusterFinder &self, + py::array_t + noise_map) { + auto noise_map_span = make_view_2d(noise_map); + self.set_noiseMap(noise_map_span); + }) + .def("set_peripheralThresholdFactor", + &VarClusterFinder::set_peripheralThresholdFactor) .def("find_clusters", [](VarClusterFinder &self, py::array_t @@ -35,6 +44,30 @@ void define_var_cluster_finder_bindings(py::module &m) { auto view = make_view_2d(img); self.find_clusters(view); }) + .def("find_clusters_X", + [](VarClusterFinder &self, + py::array_t + img) { + auto img_span = make_view_2d(img); + self.find_clusters_X(img_span); + }) + .def("single_pass", + [](VarClusterFinder &self, + py::array_t + img) { + auto img_span = make_view_2d(img); + self.single_pass(img_span); + }) + 
.def("hits", + [](VarClusterFinder &self) { + auto ptr = new std::vector::Hit>( + self.steal_hits()); + return return_vector(ptr); + }) + .def("clear_hits", + [](VarClusterFinder &self) { + self.clear_hits(); + }) .def("steal_hits", [](VarClusterFinder &self) { auto ptr = new std::vector::Hit>( From 8ae6bb76f83b6481bf7e5b1db192d984ea51f577 Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Fri, 21 Feb 2025 11:18:39 +0100 Subject: [PATCH 03/23] removed warnings added clang-tidy --- .clang-tidy | 42 +++++++++++++++++++++++++++++++++++++ CMakeLists.txt | 3 +++ include/aare/NDArray.hpp | 12 +++++------ include/aare/RawSubFile.hpp | 2 +- include/aare/defs.hpp | 2 ++ python/src/ctb_raw_file.hpp | 4 ++-- python/src/np_helper.hpp | 12 +++++------ python/src/var_cluster.hpp | 2 +- src/Dtype.cpp | 2 +- src/File.cpp | 2 +- src/RawFile.cpp | 3 +-- 11 files changed, 66 insertions(+), 20 deletions(-) create mode 100644 .clang-tidy diff --git a/.clang-tidy b/.clang-tidy new file mode 100644 index 0000000..a2ab6c1 --- /dev/null +++ b/.clang-tidy @@ -0,0 +1,42 @@ + +--- +Checks: '*, + -altera-*, + -android-cloexec-fopen, + -cppcoreguidelines-pro-bounds-array-to-pointer-decay, + -cppcoreguidelines-pro-bounds-pointer-arithmetic, + -fuchsia*, + -readability-else-after-return, + -readability-avoid-const-params-in-decls, + -readability-identifier-length, + -cppcoreguidelines-pro-bounds-constant-array-index, + -cppcoreguidelines-pro-type-reinterpret-cast, + -llvm-header-guard, + -modernize-use-nodiscard, + -misc-non-private-member-variables-in-classes, + -readability-static-accessed-through-instance, + -readability-braces-around-statements, + -readability-isolate-declaration, + -readability-implicit-bool-conversion, + -readability-identifier-length, + -readability-identifier-naming, + -hicpp-signed-bitwise, + -hicpp-no-array-decay, + -hicpp-braces-around-statements, + -google-runtime-references, + -google-readability-todo, + -google-readability-braces-around-statements, + 
-modernize-use-trailing-return-type, + -llvmlibc-*' + +HeaderFilterRegex: \.hpp +FormatStyle: none +CheckOptions: + - { key: readability-identifier-naming.NamespaceCase, value: lower_case } + # - { key: readability-identifier-naming.FunctionCase, value: lower_case } + - { key: readability-identifier-naming.ClassCase, value: CamelCase } + # - { key: readability-identifier-naming.MethodCase, value: CamelCase } + # - { key: readability-identifier-naming.StructCase, value: CamelCase } + # - { key: readability-identifier-naming.VariableCase, value: lower_case } + - { key: readability-identifier-naming.GlobalConstantCase, value: UPPER_CASE } +... diff --git a/CMakeLists.txt b/CMakeLists.txt index b93b513..cff4c75 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -60,6 +60,8 @@ if(AARE_SYSTEM_LIBRARIES) set(AARE_FETCH_CATCH OFF CACHE BOOL "Disabled FetchContent for catch2" FORCE) set(AARE_FETCH_JSON OFF CACHE BOOL "Disabled FetchContent for nlohmann::json" FORCE) set(AARE_FETCH_ZMQ OFF CACHE BOOL "Disabled FetchContent for libzmq" FORCE) + # Still fetch lmfit when setting AARE_SYSTEM_LIBRARIES since this is not available + # on conda-forge endif() if(AARE_VERBOSE) @@ -78,6 +80,7 @@ endif() set(CMAKE_EXPORT_COMPILE_COMMANDS ON) if(AARE_FETCH_LMFIT) + #TODO! Should we fetch lmfit from the web or inlcude a tar.gz in the repo? 
set(lmfit_patch git apply ${CMAKE_CURRENT_SOURCE_DIR}/patches/lmfit.patch) FetchContent_Declare( lmfit diff --git a/include/aare/NDArray.hpp b/include/aare/NDArray.hpp index cfa5b5c..310d070 100644 --- a/include/aare/NDArray.hpp +++ b/include/aare/NDArray.hpp @@ -388,12 +388,12 @@ NDArray NDArray::operator*(const T &value) { result *= value; return result; } -template void NDArray::Print() { - if (shape_[0] < 20 && shape_[1] < 20) - Print_all(); - else - Print_some(); -} +// template void NDArray::Print() { +// if (shape_[0] < 20 && shape_[1] < 20) +// Print_all(); +// else +// Print_some(); +// } template std::ostream &operator<<(std::ostream &os, const NDArray &arr) { diff --git a/include/aare/RawSubFile.hpp b/include/aare/RawSubFile.hpp index 89c278e..1d554e8 100644 --- a/include/aare/RawSubFile.hpp +++ b/include/aare/RawSubFile.hpp @@ -64,7 +64,7 @@ class RawSubFile { size_t bytes_per_frame() const { return m_bytes_per_frame; } size_t pixels_per_frame() const { return m_rows * m_cols; } - size_t bytes_per_pixel() const { return m_bitdepth / 8; } + size_t bytes_per_pixel() const { return m_bitdepth / bits_per_byte; } private: template diff --git a/include/aare/defs.hpp b/include/aare/defs.hpp index db1a47b..4559882 100644 --- a/include/aare/defs.hpp +++ b/include/aare/defs.hpp @@ -38,6 +38,8 @@ namespace aare { +inline constexpr size_t bits_per_byte = 8; + void assert_failed(const std::string &msg); diff --git a/python/src/ctb_raw_file.hpp b/python/src/ctb_raw_file.hpp index 9ce656d..56e571b 100644 --- a/python/src/ctb_raw_file.hpp +++ b/python/src/ctb_raw_file.hpp @@ -32,7 +32,7 @@ m.def("adc_sar_05_decode64to16", [](py::array_t input) { } //Create a 2D output array with the same shape as the input - std::vector shape{input.shape(0), input.shape(1)/8}; + std::vector shape{input.shape(0), input.shape(1)/static_cast(bits_per_byte)}; py::array_t output(shape); //Create a view of the input and output arrays @@ -53,7 +53,7 @@ m.def("adc_sar_04_decode64to16", 
[](py::array_t input) { } //Create a 2D output array with the same shape as the input - std::vector shape{input.shape(0), input.shape(1)/8}; + std::vector shape{input.shape(0), input.shape(1)/static_cast(bits_per_byte)}; py::array_t output(shape); //Create a view of the input and output arrays diff --git a/python/src/np_helper.hpp b/python/src/np_helper.hpp index 6e92830..1845196 100644 --- a/python/src/np_helper.hpp +++ b/python/src/np_helper.hpp @@ -40,25 +40,25 @@ template py::array return_vector(std::vector *vec) { } // todo rewrite generic -template auto get_shape_3d(py::array_t arr) { +template auto get_shape_3d(const py::array_t& arr) { return aare::Shape<3>{arr.shape(0), arr.shape(1), arr.shape(2)}; } -template auto make_view_3d(py::array_t arr) { +template auto make_view_3d(py::array_t& arr) { return aare::NDView(arr.mutable_data(), get_shape_3d(arr)); } -template auto get_shape_2d(py::array_t arr) { +template auto get_shape_2d(const py::array_t& arr) { return aare::Shape<2>{arr.shape(0), arr.shape(1)}; } -template auto get_shape_1d(py::array_t arr) { +template auto get_shape_1d(const py::array_t& arr) { return aare::Shape<1>{arr.shape(0)}; } -template auto make_view_2d(py::array_t arr) { +template auto make_view_2d(py::array_t& arr) { return aare::NDView(arr.mutable_data(), get_shape_2d(arr)); } -template auto make_view_1d(py::array_t arr) { +template auto make_view_1d(py::array_t& arr) { return aare::NDView(arr.mutable_data(), get_shape_1d(arr)); } \ No newline at end of file diff --git a/python/src/var_cluster.hpp b/python/src/var_cluster.hpp index 0819a44..f7b373f 100644 --- a/python/src/var_cluster.hpp +++ b/python/src/var_cluster.hpp @@ -25,7 +25,7 @@ void define_var_cluster_finder_bindings(py::module &m) { .def(py::init, double>()) .def("labeled", [](VarClusterFinder &self) { - auto ptr = new NDArray(self.labeled()); + auto *ptr = new NDArray(self.labeled()); return return_image_data(ptr); }) .def("set_noiseMap", diff --git a/src/Dtype.cpp 
b/src/Dtype.cpp index 565d509..b818ea3 100644 --- a/src/Dtype.cpp +++ b/src/Dtype.cpp @@ -70,7 +70,7 @@ uint8_t Dtype::bitdepth() const { /** * @brief Get the number of bytes of the data type */ -size_t Dtype::bytes() const { return bitdepth() / 8; } +size_t Dtype::bytes() const { return bitdepth() / bits_per_byte; } /** * @brief Construct a DType object from a TypeIndex diff --git a/src/File.cpp b/src/File.cpp index 1180967..3c68eff 100644 --- a/src/File.cpp +++ b/src/File.cpp @@ -73,7 +73,7 @@ size_t File::tell() const { return file_impl->tell(); } size_t File::rows() const { return file_impl->rows(); } size_t File::cols() const { return file_impl->cols(); } size_t File::bitdepth() const { return file_impl->bitdepth(); } -size_t File::bytes_per_pixel() const { return file_impl->bitdepth() / 8; } +size_t File::bytes_per_pixel() const { return file_impl->bitdepth() / bits_per_byte; } DetectorType File::detector_type() const { return file_impl->detector_type(); } diff --git a/src/RawFile.cpp b/src/RawFile.cpp index e704add..78cb6c5 100644 --- a/src/RawFile.cpp +++ b/src/RawFile.cpp @@ -76,8 +76,7 @@ size_t RawFile::n_mod() const { return n_subfile_parts; } size_t RawFile::bytes_per_frame() { - // return m_rows * m_cols * m_master.bitdepth() / 8; - return m_geometry.pixels_x * m_geometry.pixels_y * m_master.bitdepth() / 8; + return m_geometry.pixels_x * m_geometry.pixels_y * m_master.bitdepth() / bits_per_byte; } size_t RawFile::pixels_per_frame() { // return m_rows * m_cols; From 1ad362ccfc1679a3c42328deab4804162095b3d2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Mon, 17 Mar 2025 15:21:59 +0100 Subject: [PATCH 04/23] added action for gitea (#136) --- .gitea/workflows/cmake_build.yml | 58 ++++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100644 .gitea/workflows/cmake_build.yml diff --git a/.gitea/workflows/cmake_build.yml b/.gitea/workflows/cmake_build.yml new file mode 100644 index 0000000..43a0181 --- /dev/null 
+++ b/.gitea/workflows/cmake_build.yml @@ -0,0 +1,58 @@ +name: Build the package using cmake then documentation + +on: + workflow_dispatch: + push: + + + +permissions: + contents: read + pages: write + id-token: write + +jobs: + build: + strategy: + fail-fast: false + matrix: + platform: [ubuntu-latest, ] # macos-12, windows-2019] + python-version: ["3.12",] + + runs-on: ${{ matrix.platform }} + + # The setup-miniconda action needs this to activate miniconda + defaults: + run: + shell: "bash -l {0}" + + steps: + - uses: actions/checkout@v4 + + - name: Setup dev env + run: | + sudo apt-get update + sudo apt-get -y install cmake gcc g++ + + - name: Get conda + uses: conda-incubator/setup-miniconda@v3.0.4 + with: + python-version: ${{ matrix.python-version }} + channels: conda-forge + + - name: Prepare + run: conda install doxygen sphinx=7.1.2 breathe pybind11 sphinx_rtd_theme furo nlohmann_json zeromq fmt numpy + + - name: Build library + run: | + mkdir build + cd build + cmake .. -DAARE_SYSTEM_LIBRARIES=ON -DAARE_DOCS=ON + make -j 2 + make docs + + + + + + From 11cd2ec654c7aaf3ff3c110ab815625e5594adda Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Tue, 18 Mar 2025 17:45:38 +0100 Subject: [PATCH 05/23] Interpolate (#137) - added eta based interpolation --- CMakeLists.txt | 2 + include/aare/ClusterFile.hpp | 10 +++ include/aare/ClusterVector.hpp | 4 + include/aare/Interpolator.hpp | 29 +++++++ include/aare/NDArray.hpp | 3 + include/aare/algorithm.hpp | 55 +++++++++++++ python/aare/__init__.py | 3 +- python/examples/play.py | 91 +++++++++++++-------- python/src/cluster.hpp | 10 ++- python/src/cluster_file.hpp | 5 ++ python/src/file.hpp | 2 + python/src/interpolation.hpp | 58 +++++++++++++ python/src/module.cpp | 2 + src/ClusterFile.cpp | 109 +++++++++++++++++++++++-- src/Interpolator.cpp | 144 +++++++++++++++++++++++++++++++++ src/NDArray.test.cpp | 19 +++++ src/algorithm.test.cpp | 73 +++++++++++++++++ 17 files changed, 580 insertions(+), 39 
deletions(-) create mode 100644 include/aare/Interpolator.hpp create mode 100644 include/aare/algorithm.hpp create mode 100644 python/src/interpolation.hpp create mode 100644 src/Interpolator.cpp create mode 100644 src/algorithm.test.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index cff4c75..4772f0b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -346,6 +346,7 @@ set(SourceFiles ${CMAKE_CURRENT_SOURCE_DIR}/src/geo_helpers.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyHelpers.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/Interpolator.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/PixelMap.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/RawFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/RawSubFile.cpp @@ -385,6 +386,7 @@ endif() if(AARE_TESTS) set(TestSources + ${CMAKE_CURRENT_SOURCE_DIR}/src/algorithm.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/defs.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Dtype.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Frame.test.cpp diff --git a/include/aare/ClusterFile.hpp b/include/aare/ClusterFile.hpp index b796763..5bea342 100644 --- a/include/aare/ClusterFile.hpp +++ b/include/aare/ClusterFile.hpp @@ -8,11 +8,17 @@ namespace aare { +//TODO! Template this? struct Cluster3x3 { int16_t x; int16_t y; int32_t data[9]; }; +struct Cluster2x2 { + int16_t x; + int16_t y; + int32_t data[4]; +}; typedef enum { cBottomLeft = 0, @@ -37,6 +43,7 @@ struct Eta2 { double x; double y; corner c; + int32_t sum; }; struct ClusterAnalysis { @@ -97,6 +104,8 @@ class ClusterFile { */ ClusterVector read_clusters(size_t n_clusters); + ClusterVector read_clusters(size_t n_clusters, ROI roi); + /** * @brief Read a single frame from the file and return the clusters. The * cluster vector will have the frame number set. 
@@ -131,5 +140,6 @@ int analyze_cluster(Cluster3x3 &cl, int32_t *t2, int32_t *t3, char *quad, NDArray calculate_eta2(ClusterVector &clusters); Eta2 calculate_eta2(Cluster3x3 &cl); +Eta2 calculate_eta2(Cluster2x2 &cl); } // namespace aare diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp index febf06c..1c15a22 100644 --- a/include/aare/ClusterVector.hpp +++ b/include/aare/ClusterVector.hpp @@ -231,6 +231,10 @@ template class ClusterVector { return *reinterpret_cast(element_ptr(i)); } + template const V &at(size_t i) const { + return *reinterpret_cast(element_ptr(i)); + } + const std::string_view fmt_base() const { // TODO! how do we match on coord_t? return m_fmt_base; diff --git a/include/aare/Interpolator.hpp b/include/aare/Interpolator.hpp new file mode 100644 index 0000000..4905bce --- /dev/null +++ b/include/aare/Interpolator.hpp @@ -0,0 +1,29 @@ +#pragma once +#include "aare/NDArray.hpp" +#include "aare/NDView.hpp" +#include "aare/ClusterVector.hpp" +#include "aare/ClusterFile.hpp" //Cluster_3x3 +namespace aare{ + +struct Photon{ + double x; + double y; + double energy; +}; + +class Interpolator{ + NDArray m_ietax; + NDArray m_ietay; + + NDArray m_etabinsx; + NDArray m_etabinsy; + NDArray m_energy_bins; + public: + Interpolator(NDView etacube, NDView xbins, NDView ybins, NDView ebins); + NDArray get_ietax(){return m_ietax;} + NDArray get_ietay(){return m_ietay;} + + std::vector interpolate(const ClusterVector& clusters); +}; + +} // namespace aare \ No newline at end of file diff --git a/include/aare/NDArray.hpp b/include/aare/NDArray.hpp index 310d070..45d3a83 100644 --- a/include/aare/NDArray.hpp +++ b/include/aare/NDArray.hpp @@ -102,6 +102,9 @@ class NDArray : public ArrayExpr, Ndim> { auto begin() { return data_; } auto end() { return data_ + size_; } + auto begin() const { return data_; } + auto end() const { return data_ + size_; } + using value_type = T; NDArray &operator=(NDArray &&other) noexcept; // Move assign diff --git 
a/include/aare/algorithm.hpp b/include/aare/algorithm.hpp new file mode 100644 index 0000000..5d6dc57 --- /dev/null +++ b/include/aare/algorithm.hpp @@ -0,0 +1,55 @@ + +#pragma once +#include +#include +#include +#include + +namespace aare { +/** + * @brief Find the index of the last element smaller than val + * assume a sorted array + */ +template +size_t last_smaller(const T* first, const T* last, T val) { + for (auto iter = first+1; iter != last; ++iter) { + if (*iter > val) { + return std::distance(first, iter-1); + } + } + return std::distance(first, last-1); +} + +template +size_t last_smaller(const NDArray& arr, T val) { + return last_smaller(arr.begin(), arr.end(), val); +} + + +template +size_t nearest_index(const T* first, const T* last, T val) { + auto iter = std::min_element(first, last, + [val](T a, T b) { + return std::abs(a - val) < std::abs(b - val); + }); + return std::distance(first, iter); +} + +template +size_t nearest_index(const NDArray& arr, T val) { + return nearest_index(arr.begin(), arr.end(), val); +} + +template +size_t nearest_index(const std::vector& vec, T val) { + return nearest_index(vec.data(), vec.data()+vec.size(), val); +} + +template +size_t nearest_index(const std::array& arr, T val) { + return nearest_index(arr.data(), arr.data()+arr.size(), val); +} + + + +} // namespace aare \ No newline at end of file diff --git a/python/aare/__init__.py b/python/aare/__init__.py index f4c19cc..058d7cf 100644 --- a/python/aare/__init__.py +++ b/python/aare/__init__.py @@ -7,11 +7,12 @@ from ._aare import Pedestal_d, Pedestal_f, ClusterFinder, VarClusterFinder from ._aare import DetectorType from ._aare import ClusterFile from ._aare import hitmap +from ._aare import ROI from ._aare import ClusterFinderMT, ClusterCollector, ClusterFileSink, ClusterVector_i from ._aare import fit_gaus, fit_pol1 - +from ._aare import Interpolator from .CtbRawFile import CtbRawFile from .RawFile import RawFile from .ScanParameters import ScanParameters diff 
--git a/python/examples/play.py b/python/examples/play.py index 37754df..b2c368b 100644 --- a/python/examples/play.py +++ b/python/examples/play.py @@ -1,50 +1,77 @@ import sys sys.path.append('/home/l_msdetect/erik/aare/build') -#Our normal python imports -from pathlib import Path -import matplotlib.pyplot as plt +from aare._aare import ClusterVector_i, Interpolator + +import pickle import numpy as np +import matplotlib.pyplot as plt import boost_histogram as bh +import torch +import math import time -import aare +def gaussian_2d(mx, my, sigma = 1, res=100, grid_size = 2): + """ + Generate a 2D gaussian as position mx, my, with sigma=sigma. + The gaussian is placed on a 2x2 pixel matrix with resolution + res in one dimesion. + """ + x = torch.linspace(0, pixel_size*grid_size, res) + x,y = torch.meshgrid(x,x, indexing="ij") + return 1 / (2*math.pi*sigma**2) * \ + torch.exp(-((x - my)**2 / (2*sigma**2) + (y - mx)**2 / (2*sigma**2))) -data = np.random.normal(10, 1, 1000) +scale = 1000 #Scale factor when converting to integer +pixel_size = 25 #um +grid = 2 +resolution = 100 +sigma_um = 10 +xa = np.linspace(0,grid*pixel_size,resolution) +ticks = [0, 25, 50] -hist = bh.Histogram(bh.axis.Regular(10, 0, 20)) -hist.fill(data) +hit = np.array((20,20)) +etahist_fname = "/home/l_msdetect/erik/tmp/test_hist.pkl" + +local_resolution = 99 +grid_size = 3 +xaxis = np.linspace(0,grid_size*pixel_size, local_resolution) +t = gaussian_2d(hit[0],hit[1], grid_size = grid_size, sigma = 10, res = local_resolution) +pixels = t.reshape(grid_size, t.shape[0] // grid_size, grid_size, t.shape[1] // grid_size).sum(axis = 3).sum(axis = 1) +pixels = pixels.numpy() +pixels = (pixels*scale).astype(np.int32) +v = ClusterVector_i(3,3) +v.push_back(1,1, pixels) + +with open(etahist_fname, "rb") as f: + hist = pickle.load(f) +eta = hist.view().copy() +etabinsx = np.array(hist.axes.edges.T[0].flat) +etabinsy = np.array(hist.axes.edges.T[1].flat) +ebins = np.array(hist.axes.edges.T[2].flat) +p = 
Interpolator(eta, etabinsx[0:-1], etabinsy[0:-1], ebins[0:-1]) -x = hist.axes[0].centers -y = hist.values() -y_err = np.sqrt(y)+1 -res = aare.fit_gaus(x, y, y_err, chi2 = True) + +#Generate the hit - -t_elapsed = time.perf_counter()-t0 -print(f'Histogram filling took: {t_elapsed:.3f}s {total_clusters/t_elapsed/1e6:.3f}M clusters/s') -histogram_data = hist3d.counts() -x = hist3d.axes[2].edges[:-1] -y = histogram_data[100,100,:] -xx = np.linspace(x[0], x[-1]) -# fig, ax = plt.subplots() -# ax.step(x, y, where = 'post') +tmp = p.interpolate(v) +print(f'tmp:{tmp}') +pos = np.array((tmp['x'], tmp['y']))*25 -y_err = np.sqrt(y) -y_err = np.zeros(y.size) -y_err += 1 - -# par = fit_gaus2(y,x, y_err) -# ax.plot(xx, gaus(xx,par)) -# print(par) - -res = fit_gaus(y,x) -res2 = fit_gaus(y,x, y_err) -print(res) -print(res2) +print(pixels) +fig, ax = plt.subplots(figsize = (7,7)) +ax.pcolormesh(xaxis, xaxis, t) +ax.plot(*pos, 'o') +ax.set_xticks([0,25,50,75]) +ax.set_yticks([0,25,50,75]) +ax.set_xlim(0,75) +ax.set_ylim(0,75) +ax.grid() +print(f'{hit=}') +print(f'{pos=}') \ No newline at end of file diff --git a/python/src/cluster.hpp b/python/src/cluster.hpp index 792b7e6..3db816a 100644 --- a/python/src/cluster.hpp +++ b/python/src/cluster.hpp @@ -20,7 +20,13 @@ template void define_cluster_vector(py::module &m, const std::string &typestr) { auto class_name = fmt::format("ClusterVector_{}", typestr); py::class_>(m, class_name.c_str(), py::buffer_protocol()) - .def(py::init()) + .def(py::init(), + py::arg("cluster_size_x") = 3, py::arg("cluster_size_y") = 3) + .def("push_back", + [](ClusterVector &self, int x, int y, py::array_t data) { + // auto view = make_view_2d(data); + self.push_back(x, y, reinterpret_cast(data.data())); + }) .def_property_readonly("size", &ClusterVector::size) .def("item_size", &ClusterVector::item_size) .def_property_readonly("fmt", @@ -38,6 +44,8 @@ void define_cluster_vector(py::module &m, const std::string &typestr) { auto *vec = new 
std::vector(self.sum_2x2()); return return_vector(vec); }) + .def_property_readonly("cluster_size_x", &ClusterVector::cluster_size_x) + .def_property_readonly("cluster_size_y", &ClusterVector::cluster_size_y) .def_property_readonly("capacity", &ClusterVector::capacity) .def_property("frame_number", &ClusterVector::frame_number, &ClusterVector::set_frame_number) diff --git a/python/src/cluster_file.hpp b/python/src/cluster_file.hpp index 8a431b5..f587443 100644 --- a/python/src/cluster_file.hpp +++ b/python/src/cluster_file.hpp @@ -31,6 +31,11 @@ void define_cluster_file_io_bindings(py::module &m) { auto v = new ClusterVector(self.read_clusters(n_clusters)); return v; },py::return_value_policy::take_ownership) + .def("read_clusters", + [](ClusterFile &self, size_t n_clusters, ROI roi) { + auto v = new ClusterVector(self.read_clusters(n_clusters, roi)); + return v; + },py::return_value_policy::take_ownership) .def("read_frame", [](ClusterFile &self) { auto v = new ClusterVector(self.read_frame()); diff --git a/python/src/file.hpp b/python/src/file.hpp index c3c800c..0d64e16 100644 --- a/python/src/file.hpp +++ b/python/src/file.hpp @@ -195,6 +195,8 @@ void define_file_io_bindings(py::module &m) { py::class_(m, "ROI") .def(py::init<>()) + .def(py::init(), py::arg("xmin"), + py::arg("xmax"), py::arg("ymin"), py::arg("ymax")) .def_readwrite("xmin", &ROI::xmin) .def_readwrite("xmax", &ROI::xmax) .def_readwrite("ymin", &ROI::ymin) diff --git a/python/src/interpolation.hpp b/python/src/interpolation.hpp new file mode 100644 index 0000000..02742e1 --- /dev/null +++ b/python/src/interpolation.hpp @@ -0,0 +1,58 @@ +#include "aare/Interpolator.hpp" +#include "aare/NDArray.hpp" +#include "aare/NDView.hpp" +#include "np_helper.hpp" +#include +#include +#include +#include + +namespace py = pybind11; +void define_interpolation_bindings(py::module &m) { + + PYBIND11_NUMPY_DTYPE(aare::Photon, x,y,energy); + + py::class_(m, "Interpolator") + .def(py::init([](py::array_t etacube, 
py::array_t xbins, + py::array_t ybins, py::array_t ebins) { + return Interpolator(make_view_3d(etacube), make_view_1d(xbins), + make_view_1d(ybins), make_view_1d(ebins)); + })) + .def("get_ietax", [](Interpolator& self){ + auto*ptr = new NDArray{}; + *ptr = self.get_ietax(); + return return_image_data(ptr); + }) + .def("get_ietay", [](Interpolator& self){ + auto*ptr = new NDArray{}; + *ptr = self.get_ietay(); + return return_image_data(ptr); + }) + .def("interpolate", [](Interpolator& self, const ClusterVector& clusters){ + auto photons = self.interpolate(clusters); + auto* ptr = new std::vector{photons}; + return return_vector(ptr); + }); + + // TODO! Evaluate without converting to double + m.def( + "hej", + []() { + // auto boost_histogram = py::module_::import("boost_histogram"); + // py::object axis = + // boost_histogram.attr("axis").attr("Regular")(10, 0.0, 10.0); + // py::object histogram = boost_histogram.attr("Histogram")(axis); + // return histogram; + // return h; + }, + R"( + Evaluate a 1D Gaussian function for all points in x using parameters par. + + Parameters + ---------- + x : array_like + The points at which to evaluate the Gaussian function. + par : array_like + The parameters of the Gaussian function. The first element is the amplitude, the second element is the mean, and the third element is the standard deviation. 
+ )"); +} \ No newline at end of file diff --git a/python/src/module.cpp b/python/src/module.cpp index 70d143f..43f48ba 100644 --- a/python/src/module.cpp +++ b/python/src/module.cpp @@ -9,6 +9,7 @@ #include "cluster.hpp" #include "cluster_file.hpp" #include "fit.hpp" +#include "interpolation.hpp" //Pybind stuff #include @@ -31,5 +32,6 @@ PYBIND11_MODULE(_aare, m) { define_cluster_collector_bindings(m); define_cluster_file_sink_bindings(m); define_fit_bindings(m); + define_interpolation_bindings(m); } \ No newline at end of file diff --git a/src/ClusterFile.cpp b/src/ClusterFile.cpp index 2928d26..2e23e09 100644 --- a/src/ClusterFile.cpp +++ b/src/ClusterFile.cpp @@ -108,6 +108,79 @@ ClusterVector ClusterFile::read_clusters(size_t n_clusters) { return clusters; } +ClusterVector ClusterFile::read_clusters(size_t n_clusters, ROI roi) { + if (m_mode != "r") { + throw std::runtime_error("File not opened for reading"); + } + + ClusterVector clusters(3,3); + clusters.reserve(n_clusters); + + int32_t iframe = 0; // frame number needs to be 4 bytes! 
+ size_t nph_read = 0; + uint32_t nn = m_num_left; + uint32_t nph = m_num_left; // number of clusters in frame needs to be 4 + + // auto buf = reinterpret_cast(clusters.data()); + // auto buf = clusters.data(); + + Cluster3x3 tmp; //this would break if the cluster size changes + + // if there are photons left from previous frame read them first + if (nph) { + if (nph > n_clusters) { + // if we have more photons left in the frame then photons to read we + // read directly the requested number + nn = n_clusters; + } else { + nn = nph; + } + //Read one cluster, in the ROI push back + // nph_read += fread((buf + nph_read*clusters.item_size()), + // clusters.item_size(), nn, fp); + for(size_t i = 0; i < nn; i++){ + fread(&tmp, sizeof(tmp), 1, fp); + if(tmp.x >= roi.xmin && tmp.x <= roi.xmax && tmp.y >= roi.ymin && tmp.y <= roi.ymax){ + clusters.push_back(tmp.x, tmp.y, reinterpret_cast(tmp.data)); + nph_read++; + } + } + + m_num_left = nph - nn; // write back the number of photons left + } + + if (nph_read < n_clusters) { + // keep on reading frames and photons until reaching n_clusters + while (fread(&iframe, sizeof(iframe), 1, fp)) { + // read number of clusters in frame + if (fread(&nph, sizeof(nph), 1, fp)) { + if (nph > (n_clusters - nph_read)) + nn = n_clusters - nph_read; + else + nn = nph; + + // nph_read += fread((buf + nph_read*clusters.item_size()), + // clusters.item_size(), nn, fp); + for(size_t i = 0; i < nn; i++){ + fread(&tmp, sizeof(tmp), 1, fp); + if(tmp.x >= roi.xmin && tmp.x <= roi.xmax && tmp.y >= roi.ymin && tmp.y <= roi.ymax){ + clusters.push_back(tmp.x, tmp.y, reinterpret_cast(tmp.data)); + nph_read++; + } + } + m_num_left = nph - nn; + } + if (nph_read >= n_clusters) + break; + } + } + + // Resize the vector to the number of clusters. + // No new allocation, only change bounds. 
+ clusters.resize(nph_read); + return clusters; +} + ClusterVector ClusterFile::read_frame() { if (m_mode != "r") { throw std::runtime_error("File not opened for reading"); @@ -268,11 +341,23 @@ ClusterVector ClusterFile::read_frame() { NDArray calculate_eta2(ClusterVector &clusters) { //TOTO! make work with 2x2 clusters NDArray eta2({static_cast(clusters.size()), 2}); - for (size_t i = 0; i < clusters.size(); i++) { - auto e = calculate_eta2(clusters.at(i)); - eta2(i, 0) = e.x; - eta2(i, 1) = e.y; + + if (clusters.cluster_size_x() == 3 || clusters.cluster_size_y() == 3) { + for (size_t i = 0; i < clusters.size(); i++) { + auto e = calculate_eta2(clusters.at(i)); + eta2(i, 0) = e.x; + eta2(i, 1) = e.y; + } + }else if(clusters.cluster_size_x() == 2 || clusters.cluster_size_y() == 2){ + for (size_t i = 0; i < clusters.size(); i++) { + auto e = calculate_eta2(clusters.at(i)); + eta2(i, 0) = e.x; + eta2(i, 1) = e.y; + } + }else{ + throw std::runtime_error("Only 3x3 and 2x2 clusters are supported"); } + return eta2; } @@ -290,7 +375,7 @@ Eta2 calculate_eta2(Cluster3x3 &cl) { tot2[3] = cl.data[4] + cl.data[5] + cl.data[7] + cl.data[8]; auto c = std::max_element(tot2.begin(), tot2.end()) - tot2.begin(); - + eta.sum = tot2[c]; switch (c) { case cBottomLeft: if ((cl.data[3] + cl.data[4]) != 0) @@ -333,6 +418,20 @@ Eta2 calculate_eta2(Cluster3x3 &cl) { return eta; } + +Eta2 calculate_eta2(Cluster2x2 &cl) { + Eta2 eta{}; + if ((cl.data[0] + cl.data[1]) != 0) + eta.x = static_cast(cl.data[1]) / (cl.data[0] + cl.data[1]); + if ((cl.data[0] + cl.data[2]) != 0) + eta.y = static_cast(cl.data[2]) / (cl.data[0] + cl.data[2]); + eta.sum = cl.data[0] + cl.data[1] + cl.data[2]+ cl.data[3]; + eta.c = cBottomLeft; //TODO! 
This is not correct, but need to put something + return eta; +} + + + int analyze_cluster(Cluster3x3 &cl, int32_t *t2, int32_t *t3, char *quad, double *eta2x, double *eta2y, double *eta3x, double *eta3y) { diff --git a/src/Interpolator.cpp b/src/Interpolator.cpp new file mode 100644 index 0000000..7f82533 --- /dev/null +++ b/src/Interpolator.cpp @@ -0,0 +1,144 @@ +#include "aare/Interpolator.hpp" +#include "aare/algorithm.hpp" + +namespace aare { + +Interpolator::Interpolator(NDView etacube, NDView xbins, + NDView ybins, NDView ebins) + : m_ietax(etacube), m_ietay(etacube), m_etabinsx(xbins), m_etabinsy(ybins), m_energy_bins(ebins) { + if (etacube.shape(0) != xbins.size() || etacube.shape(1) != ybins.size() || + etacube.shape(2) != ebins.size()) { + throw std::invalid_argument( + "The shape of the etacube does not match the shape of the bins"); + } + + // Cumulative sum in the x direction + for (ssize_t i = 1; i < m_ietax.shape(0); i++) { + for (ssize_t j = 0; j < m_ietax.shape(1); j++) { + for (ssize_t k = 0; k < m_ietax.shape(2); k++) { + m_ietax(i, j, k) += m_ietax(i - 1, j, k); + } + } + } + + // Normalize by the highest row, if norm less than 1 don't do anything + for (ssize_t i = 0; i < m_ietax.shape(0); i++) { + for (ssize_t j = 0; j < m_ietax.shape(1); j++) { + for (ssize_t k = 0; k < m_ietax.shape(2); k++) { + auto val = m_ietax(m_ietax.shape(0) - 1, j, k); + double norm = val < 1 ? 
1 : val; + m_ietax(i, j, k) /= norm; + } + } + } + + // Cumulative sum in the y direction + for (ssize_t i = 0; i < m_ietay.shape(0); i++) { + for (ssize_t j = 1; j < m_ietay.shape(1); j++) { + for (ssize_t k = 0; k < m_ietay.shape(2); k++) { + m_ietay(i, j, k) += m_ietay(i, j - 1, k); + } + } + } + + // Normalize by the highest column, if norm less than 1 don't do anything + for (ssize_t i = 0; i < m_ietay.shape(0); i++) { + for (ssize_t j = 0; j < m_ietay.shape(1); j++) { + for (ssize_t k = 0; k < m_ietay.shape(2); k++) { + auto val = m_ietay(i, m_ietay.shape(1) - 1, k); + double norm = val < 1 ? 1 : val; + m_ietay(i, j, k) /= norm; + } + } + } +} + +std::vector Interpolator::interpolate(const ClusterVector& clusters) { + std::vector photons; + photons.reserve(clusters.size()); + + if (clusters.cluster_size_x() == 3 || clusters.cluster_size_y() == 3) { + for (size_t i = 0; i(i); + Eta2 eta= calculate_eta2(cluster); + + Photon photon; + photon.x = cluster.x; + photon.y = cluster.y; + photon.energy = eta.sum; + + // auto ie = nearest_index(m_energy_bins, photon.energy)-1; + // auto ix = nearest_index(m_etabinsx, eta.x)-1; + // auto iy = nearest_index(m_etabinsy, eta.y)-1; + //Finding the index of the last element that is smaller + //should work fine as long as we have many bins + auto ie = last_smaller(m_energy_bins, photon.energy); + auto ix = last_smaller(m_etabinsx, eta.x); + auto iy = last_smaller(m_etabinsy, eta.y); + + // fmt::print("ex: {}, ix: {}, iy: {}\n", ie, ix, iy); + + double dX, dY; + int ex, ey; + // cBottomLeft = 0, + // cBottomRight = 1, + // cTopLeft = 2, + // cTopRight = 3 + switch (eta.c) { + case cTopLeft: + dX = -1.; + dY = 0.; + break; + case cTopRight:; + dX = 0.; + dY = 0.; + break; + case cBottomLeft: + dX = -1.; + dY = -1.; + break; + case cBottomRight: + dX = 0.; + dY = -1.; + break; + } + photon.x += m_ietax(ix, iy, ie)*2 + dX; + photon.y += m_ietay(ix, iy, ie)*2 + dY; + photons.push_back(photon); + } + }else 
if(clusters.cluster_size_x() == 2 || clusters.cluster_size_y() == 2){ + for (size_t i = 0; i(i); + Eta2 eta= calculate_eta2(cluster); + + Photon photon; + photon.x = cluster.x; + photon.y = cluster.y; + photon.energy = eta.sum; + + //Now do some actual interpolation. + //Find which energy bin the cluster is in + // auto ie = nearest_index(m_energy_bins, photon.energy)-1; + // auto ix = nearest_index(m_etabinsx, eta.x)-1; + // auto iy = nearest_index(m_etabinsy, eta.y)-1; + //Finding the index of the last element that is smaller + //should work fine as long as we have many bins + auto ie = last_smaller(m_energy_bins, photon.energy); + auto ix = last_smaller(m_etabinsx, eta.x); + auto iy = last_smaller(m_etabinsy, eta.y); + + photon.x += m_ietax(ix, iy, ie)*2; //eta goes between 0 and 1 but we could move the hit anywhere in the 2x2 + photon.y += m_ietay(ix, iy, ie)*2; + photons.push_back(photon); + } + + }else{ + throw std::runtime_error("Only 3x3 and 2x2 clusters are supported for interpolation"); + } + + + return photons; +} + +} // namespace aare \ No newline at end of file diff --git a/src/NDArray.test.cpp b/src/NDArray.test.cpp index 942481c..eff3e2c 100644 --- a/src/NDArray.test.cpp +++ b/src/NDArray.test.cpp @@ -2,6 +2,7 @@ #include #include #include +#include using aare::NDArray; using aare::NDView; @@ -34,6 +35,24 @@ TEST_CASE("Construct from an NDView") { } } +TEST_CASE("3D NDArray from NDView"){ + std::vector data(27); + std::iota(data.begin(), data.end(), 0); + NDView view(data.data(), Shape<3>{3, 3, 3}); + NDArray image(view); + REQUIRE(image.shape() == view.shape()); + REQUIRE(image.size() == view.size()); + REQUIRE(image.data() != view.data()); + + for(int64_t i=0; i shape{{20}}; NDArray img(shape, 3); diff --git a/src/algorithm.test.cpp b/src/algorithm.test.cpp new file mode 100644 index 0000000..fcfa8d2 --- /dev/null +++ b/src/algorithm.test.cpp @@ -0,0 +1,73 @@ + + +#include +#include + + +TEST_CASE("Find the closed index in a 1D array", 
"[algorithm]") { + aare::NDArray arr({5}); + for (size_t i = 0; i < arr.size(); i++) { + arr[i] = i; + } + // arr 0, 1, 2, 3, 4 + REQUIRE(aare::nearest_index(arr, 2.3) == 2); + REQUIRE(aare::nearest_index(arr, 2.6) == 3); + REQUIRE(aare::nearest_index(arr, 45.0) == 4); + REQUIRE(aare::nearest_index(arr, 0.0) == 0); + REQUIRE(aare::nearest_index(arr, -1.0) == 0); +} + +TEST_CASE("Passing integers to nearest_index works", "[algorithm]"){ + aare::NDArray arr({5}); + for (size_t i = 0; i < arr.size(); i++) { + arr[i] = i; + } + // arr 0, 1, 2, 3, 4 + REQUIRE(aare::nearest_index(arr, 2) == 2); + REQUIRE(aare::nearest_index(arr, 3) == 3); + REQUIRE(aare::nearest_index(arr, 45) == 4); + REQUIRE(aare::nearest_index(arr, 0) == 0); + REQUIRE(aare::nearest_index(arr, -1) == 0); +} + + +TEST_CASE("nearest_index works with std::vector", "[algorithm]"){ + std::vector vec = {0, 1, 2, 3, 4}; + REQUIRE(aare::nearest_index(vec, 2.123) == 2); + REQUIRE(aare::nearest_index(vec, 2.66) == 3); + REQUIRE(aare::nearest_index(vec, 4555555.0) == 4); + REQUIRE(aare::nearest_index(vec, 0.0) == 0); + REQUIRE(aare::nearest_index(vec, -10.0) == 0); +} + +TEST_CASE("nearest index works with std::array", "[algorithm]"){ + std::array arr = {0, 1, 2, 3, 4}; + REQUIRE(aare::nearest_index(arr, 2.123) == 2); + REQUIRE(aare::nearest_index(arr, 2.501) == 3); + REQUIRE(aare::nearest_index(arr, 4555555.0) == 4); + REQUIRE(aare::nearest_index(arr, 0.0) == 0); + REQUIRE(aare::nearest_index(arr, -10.0) == 0); +} + + +TEST_CASE("last smaller", "[algorithm]"){ + aare::NDArray arr({5}); + for (size_t i = 0; i < arr.size(); i++) { + arr[i] = i; + } + // arr 0, 1, 2, 3, 4 + REQUIRE(aare::last_smaller(arr, -10.0) == 0); + REQUIRE(aare::last_smaller(arr, 0.0) == 0); + REQUIRE(aare::last_smaller(arr, 2.3) == 2); + REQUIRE(aare::last_smaller(arr, 253.) 
== 4); +} + +TEST_CASE("returns last bin strictly smaller", "[algorithm]"){ + aare::NDArray arr({5}); + for (size_t i = 0; i < arr.size(); i++) { + arr[i] = i; + } + // arr 0, 1, 2, 3, 4 + REQUIRE(aare::last_smaller(arr, 2.0) == 2); + +} \ No newline at end of file From 602b04e49fd61beea1a9a3c4f0942b4632b64b64 Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Tue, 18 Mar 2025 17:47:05 +0100 Subject: [PATCH 06/23] bumped version number --- conda-recipe/meta.yaml | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/conda-recipe/meta.yaml b/conda-recipe/meta.yaml index ffa95a7..93c1219 100644 --- a/conda-recipe/meta.yaml +++ b/conda-recipe/meta.yaml @@ -1,6 +1,6 @@ package: name: aare - version: 2025.2.18 #TODO! how to not duplicate this? + version: 2025.3.18 #TODO! how to not duplicate this? diff --git a/pyproject.toml b/pyproject.toml index 6dc941e..8b0b789 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "scikit_build_core.build" [project] name = "aare" -version = "2025.2.18" +version = "2025.3.18" [tool.scikit-build] From 5d8ad27b21af0d6b22390523263b2d48c442eb9d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Thu, 20 Mar 2025 12:52:04 +0100 Subject: [PATCH 07/23] Developer (#138) - Fully functioning variable size cluster finder - Added interpolation - Bit reordering for ADC SAR 05 --------- Co-authored-by: Patrick Co-authored-by: JulianHeymes Co-authored-by: Dhanya Thattil Co-authored-by: xiangyu.xie --- .clang-tidy | 42 +++++++++ .gitea/workflows/cmake_build.yml | 58 ++++++++++++ CMakeLists.txt | 5 ++ conda-recipe/meta.yaml | 3 +- include/aare/ClusterFile.hpp | 10 +++ include/aare/ClusterVector.hpp | 4 + include/aare/Interpolator.hpp | 29 ++++++ include/aare/NDArray.hpp | 15 ++-- include/aare/RawSubFile.hpp | 2 +- include/aare/VarClusterFinder.hpp | 2 +- include/aare/algorithm.hpp | 55 ++++++++++++ include/aare/defs.hpp | 2 + pyproject.toml | 3 +- python/aare/__init__.py | 3 +- 
python/examples/play.py | 89 +++++++++++------- python/src/cluster.hpp | 10 ++- python/src/cluster_file.hpp | 5 ++ python/src/ctb_raw_file.hpp | 4 +- python/src/file.hpp | 2 + python/src/interpolation.hpp | 58 ++++++++++++ python/src/module.cpp | 2 + python/src/np_helper.hpp | 12 +-- python/src/var_cluster.hpp | 37 +++++++- src/ClusterFile.cpp | 109 ++++++++++++++++++++-- src/Dtype.cpp | 2 +- src/File.cpp | 2 +- src/Interpolator.cpp | 144 ++++++++++++++++++++++++++++++ src/NDArray.test.cpp | 19 ++++ src/RawFile.cpp | 3 +- src/algorithm.test.cpp | 73 +++++++++++++++ 30 files changed, 743 insertions(+), 61 deletions(-) create mode 100644 .clang-tidy create mode 100644 .gitea/workflows/cmake_build.yml create mode 100644 include/aare/Interpolator.hpp create mode 100644 include/aare/algorithm.hpp create mode 100644 python/src/interpolation.hpp create mode 100644 src/Interpolator.cpp create mode 100644 src/algorithm.test.cpp diff --git a/.clang-tidy b/.clang-tidy new file mode 100644 index 0000000..a2ab6c1 --- /dev/null +++ b/.clang-tidy @@ -0,0 +1,42 @@ + +--- +Checks: '*, + -altera-*, + -android-cloexec-fopen, + -cppcoreguidelines-pro-bounds-array-to-pointer-decay, + -cppcoreguidelines-pro-bounds-pointer-arithmetic, + -fuchsia*, + -readability-else-after-return, + -readability-avoid-const-params-in-decls, + -readability-identifier-length, + -cppcoreguidelines-pro-bounds-constant-array-index, + -cppcoreguidelines-pro-type-reinterpret-cast, + -llvm-header-guard, + -modernize-use-nodiscard, + -misc-non-private-member-variables-in-classes, + -readability-static-accessed-through-instance, + -readability-braces-around-statements, + -readability-isolate-declaration, + -readability-implicit-bool-conversion, + -readability-identifier-length, + -readability-identifier-naming, + -hicpp-signed-bitwise, + -hicpp-no-array-decay, + -hicpp-braces-around-statements, + -google-runtime-references, + -google-readability-todo, + -google-readability-braces-around-statements, + 
-modernize-use-trailing-return-type, + -llvmlibc-*' + +HeaderFilterRegex: \.hpp +FormatStyle: none +CheckOptions: + - { key: readability-identifier-naming.NamespaceCase, value: lower_case } + # - { key: readability-identifier-naming.FunctionCase, value: lower_case } + - { key: readability-identifier-naming.ClassCase, value: CamelCase } + # - { key: readability-identifier-naming.MethodCase, value: CamelCase } + # - { key: readability-identifier-naming.StructCase, value: CamelCase } + # - { key: readability-identifier-naming.VariableCase, value: lower_case } + - { key: readability-identifier-naming.GlobalConstantCase, value: UPPER_CASE } +... diff --git a/.gitea/workflows/cmake_build.yml b/.gitea/workflows/cmake_build.yml new file mode 100644 index 0000000..43a0181 --- /dev/null +++ b/.gitea/workflows/cmake_build.yml @@ -0,0 +1,58 @@ +name: Build the package using cmake then documentation + +on: + workflow_dispatch: + push: + + + +permissions: + contents: read + pages: write + id-token: write + +jobs: + build: + strategy: + fail-fast: false + matrix: + platform: [ubuntu-latest, ] # macos-12, windows-2019] + python-version: ["3.12",] + + runs-on: ${{ matrix.platform }} + + # The setup-miniconda action needs this to activate miniconda + defaults: + run: + shell: "bash -l {0}" + + steps: + - uses: actions/checkout@v4 + + - name: Setup dev env + run: | + sudo apt-get update + sudo apt-get -y install cmake gcc g++ + + - name: Get conda + uses: conda-incubator/setup-miniconda@v3.0.4 + with: + python-version: ${{ matrix.python-version }} + channels: conda-forge + + - name: Prepare + run: conda install doxygen sphinx=7.1.2 breathe pybind11 sphinx_rtd_theme furo nlohmann_json zeromq fmt numpy + + - name: Build library + run: | + mkdir build + cd build + cmake .. 
-DAARE_SYSTEM_LIBRARIES=ON -DAARE_DOCS=ON + make -j 2 + make docs + + + + + + diff --git a/CMakeLists.txt b/CMakeLists.txt index b93b513..4772f0b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -60,6 +60,8 @@ if(AARE_SYSTEM_LIBRARIES) set(AARE_FETCH_CATCH OFF CACHE BOOL "Disabled FetchContent for catch2" FORCE) set(AARE_FETCH_JSON OFF CACHE BOOL "Disabled FetchContent for nlohmann::json" FORCE) set(AARE_FETCH_ZMQ OFF CACHE BOOL "Disabled FetchContent for libzmq" FORCE) + # Still fetch lmfit when setting AARE_SYSTEM_LIBRARIES since this is not available + # on conda-forge endif() if(AARE_VERBOSE) @@ -78,6 +80,7 @@ endif() set(CMAKE_EXPORT_COMPILE_COMMANDS ON) if(AARE_FETCH_LMFIT) + #TODO! Should we fetch lmfit from the web or inlcude a tar.gz in the repo? set(lmfit_patch git apply ${CMAKE_CURRENT_SOURCE_DIR}/patches/lmfit.patch) FetchContent_Declare( lmfit @@ -343,6 +346,7 @@ set(SourceFiles ${CMAKE_CURRENT_SOURCE_DIR}/src/geo_helpers.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyHelpers.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/Interpolator.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/PixelMap.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/RawFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/RawSubFile.cpp @@ -382,6 +386,7 @@ endif() if(AARE_TESTS) set(TestSources + ${CMAKE_CURRENT_SOURCE_DIR}/src/algorithm.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/defs.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Dtype.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Frame.test.cpp diff --git a/conda-recipe/meta.yaml b/conda-recipe/meta.yaml index ffa95a7..120854b 100644 --- a/conda-recipe/meta.yaml +++ b/conda-recipe/meta.yaml @@ -1,6 +1,7 @@ package: name: aare - version: 2025.2.18 #TODO! how to not duplicate this? + version: 2025.3.18 #TODO! how to not duplicate this? 
+ diff --git a/include/aare/ClusterFile.hpp b/include/aare/ClusterFile.hpp index b796763..5bea342 100644 --- a/include/aare/ClusterFile.hpp +++ b/include/aare/ClusterFile.hpp @@ -8,11 +8,17 @@ namespace aare { +//TODO! Template this? struct Cluster3x3 { int16_t x; int16_t y; int32_t data[9]; }; +struct Cluster2x2 { + int16_t x; + int16_t y; + int32_t data[4]; +}; typedef enum { cBottomLeft = 0, @@ -37,6 +43,7 @@ struct Eta2 { double x; double y; corner c; + int32_t sum; }; struct ClusterAnalysis { @@ -97,6 +104,8 @@ class ClusterFile { */ ClusterVector read_clusters(size_t n_clusters); + ClusterVector read_clusters(size_t n_clusters, ROI roi); + /** * @brief Read a single frame from the file and return the clusters. The * cluster vector will have the frame number set. @@ -131,5 +140,6 @@ int analyze_cluster(Cluster3x3 &cl, int32_t *t2, int32_t *t3, char *quad, NDArray calculate_eta2(ClusterVector &clusters); Eta2 calculate_eta2(Cluster3x3 &cl); +Eta2 calculate_eta2(Cluster2x2 &cl); } // namespace aare diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp index febf06c..1c15a22 100644 --- a/include/aare/ClusterVector.hpp +++ b/include/aare/ClusterVector.hpp @@ -231,6 +231,10 @@ template class ClusterVector { return *reinterpret_cast(element_ptr(i)); } + template const V &at(size_t i) const { + return *reinterpret_cast(element_ptr(i)); + } + const std::string_view fmt_base() const { // TODO! how do we match on coord_t? 
return m_fmt_base; diff --git a/include/aare/Interpolator.hpp b/include/aare/Interpolator.hpp new file mode 100644 index 0000000..4905bce --- /dev/null +++ b/include/aare/Interpolator.hpp @@ -0,0 +1,29 @@ +#pragma once +#include "aare/NDArray.hpp" +#include "aare/NDView.hpp" +#include "aare/ClusterVector.hpp" +#include "aare/ClusterFile.hpp" //Cluster_3x3 +namespace aare{ + +struct Photon{ + double x; + double y; + double energy; +}; + +class Interpolator{ + NDArray m_ietax; + NDArray m_ietay; + + NDArray m_etabinsx; + NDArray m_etabinsy; + NDArray m_energy_bins; + public: + Interpolator(NDView etacube, NDView xbins, NDView ybins, NDView ebins); + NDArray get_ietax(){return m_ietax;} + NDArray get_ietay(){return m_ietay;} + + std::vector interpolate(const ClusterVector& clusters); +}; + +} // namespace aare \ No newline at end of file diff --git a/include/aare/NDArray.hpp b/include/aare/NDArray.hpp index cfa5b5c..45d3a83 100644 --- a/include/aare/NDArray.hpp +++ b/include/aare/NDArray.hpp @@ -102,6 +102,9 @@ class NDArray : public ArrayExpr, Ndim> { auto begin() { return data_; } auto end() { return data_ + size_; } + auto begin() const { return data_; } + auto end() const { return data_ + size_; } + using value_type = T; NDArray &operator=(NDArray &&other) noexcept; // Move assign @@ -388,12 +391,12 @@ NDArray NDArray::operator*(const T &value) { result *= value; return result; } -template void NDArray::Print() { - if (shape_[0] < 20 && shape_[1] < 20) - Print_all(); - else - Print_some(); -} +// template void NDArray::Print() { +// if (shape_[0] < 20 && shape_[1] < 20) +// Print_all(); +// else +// Print_some(); +// } template std::ostream &operator<<(std::ostream &os, const NDArray &arr) { diff --git a/include/aare/RawSubFile.hpp b/include/aare/RawSubFile.hpp index 89c278e..1d554e8 100644 --- a/include/aare/RawSubFile.hpp +++ b/include/aare/RawSubFile.hpp @@ -64,7 +64,7 @@ class RawSubFile { size_t bytes_per_frame() const { return m_bytes_per_frame; } size_t 
pixels_per_frame() const { return m_rows * m_cols; } - size_t bytes_per_pixel() const { return m_bitdepth / 8; } + size_t bytes_per_pixel() const { return m_bitdepth / bits_per_byte; } private: template diff --git a/include/aare/VarClusterFinder.hpp b/include/aare/VarClusterFinder.hpp index d4d51cc..ea62a9d 100644 --- a/include/aare/VarClusterFinder.hpp +++ b/include/aare/VarClusterFinder.hpp @@ -7,7 +7,7 @@ #include "aare/NDArray.hpp" -const int MAX_CLUSTER_SIZE = 200; +const int MAX_CLUSTER_SIZE = 50; namespace aare { template class VarClusterFinder { diff --git a/include/aare/algorithm.hpp b/include/aare/algorithm.hpp new file mode 100644 index 0000000..5d6dc57 --- /dev/null +++ b/include/aare/algorithm.hpp @@ -0,0 +1,55 @@ + +#pragma once +#include +#include +#include +#include + +namespace aare { +/** + * @brief Find the index of the last element smaller than val + * assume a sorted array + */ +template +size_t last_smaller(const T* first, const T* last, T val) { + for (auto iter = first+1; iter != last; ++iter) { + if (*iter > val) { + return std::distance(first, iter-1); + } + } + return std::distance(first, last-1); +} + +template +size_t last_smaller(const NDArray& arr, T val) { + return last_smaller(arr.begin(), arr.end(), val); +} + + +template +size_t nearest_index(const T* first, const T* last, T val) { + auto iter = std::min_element(first, last, + [val](T a, T b) { + return std::abs(a - val) < std::abs(b - val); + }); + return std::distance(first, iter); +} + +template +size_t nearest_index(const NDArray& arr, T val) { + return nearest_index(arr.begin(), arr.end(), val); +} + +template +size_t nearest_index(const std::vector& vec, T val) { + return nearest_index(vec.data(), vec.data()+vec.size(), val); +} + +template +size_t nearest_index(const std::array& arr, T val) { + return nearest_index(arr.data(), arr.data()+arr.size(), val); +} + + + +} // namespace aare \ No newline at end of file diff --git a/include/aare/defs.hpp b/include/aare/defs.hpp 
index db1a47b..4559882 100644 --- a/include/aare/defs.hpp +++ b/include/aare/defs.hpp @@ -38,6 +38,8 @@ namespace aare { +inline constexpr size_t bits_per_byte = 8; + void assert_failed(const std::string &msg); diff --git a/pyproject.toml b/pyproject.toml index 6dc941e..b9bf7d2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,8 @@ build-backend = "scikit_build_core.build" [project] name = "aare" -version = "2025.2.18" +version = "2025.3.18" + [tool.scikit-build] diff --git a/python/aare/__init__.py b/python/aare/__init__.py index f4c19cc..058d7cf 100644 --- a/python/aare/__init__.py +++ b/python/aare/__init__.py @@ -7,11 +7,12 @@ from ._aare import Pedestal_d, Pedestal_f, ClusterFinder, VarClusterFinder from ._aare import DetectorType from ._aare import ClusterFile from ._aare import hitmap +from ._aare import ROI from ._aare import ClusterFinderMT, ClusterCollector, ClusterFileSink, ClusterVector_i from ._aare import fit_gaus, fit_pol1 - +from ._aare import Interpolator from .CtbRawFile import CtbRawFile from .RawFile import RawFile from .ScanParameters import ScanParameters diff --git a/python/examples/play.py b/python/examples/play.py index 37754df..da469dc 100644 --- a/python/examples/play.py +++ b/python/examples/play.py @@ -1,50 +1,79 @@ import sys sys.path.append('/home/l_msdetect/erik/aare/build') -#Our normal python imports -from pathlib import Path -import matplotlib.pyplot as plt +from aare._aare import ClusterVector_i, Interpolator + +import pickle import numpy as np +import matplotlib.pyplot as plt import boost_histogram as bh +import torch +import math import time -import aare -data = np.random.normal(10, 1, 1000) +def gaussian_2d(mx, my, sigma = 1, res=100, grid_size = 2): + """ + Generate a 2D gaussian as position mx, my, with sigma=sigma. + The gaussian is placed on a 2x2 pixel matrix with resolution + res in one dimesion. 
+ """ + x = torch.linspace(0, pixel_size*grid_size, res) + x,y = torch.meshgrid(x,x, indexing="ij") + return 1 / (2*math.pi*sigma**2) * \ + torch.exp(-((x - my)**2 / (2*sigma**2) + (y - mx)**2 / (2*sigma**2))) -hist = bh.Histogram(bh.axis.Regular(10, 0, 20)) -hist.fill(data) +scale = 1000 #Scale factor when converting to integer +pixel_size = 25 #um +grid = 2 +resolution = 100 +sigma_um = 10 +xa = np.linspace(0,grid*pixel_size,resolution) +ticks = [0, 25, 50] + +hit = np.array((20,20)) +etahist_fname = "/home/l_msdetect/erik/tmp/test_hist.pkl" + +local_resolution = 99 +grid_size = 3 +xaxis = np.linspace(0,grid_size*pixel_size, local_resolution) +t = gaussian_2d(hit[0],hit[1], grid_size = grid_size, sigma = 10, res = local_resolution) +pixels = t.reshape(grid_size, t.shape[0] // grid_size, grid_size, t.shape[1] // grid_size).sum(axis = 3).sum(axis = 1) +pixels = pixels.numpy() +pixels = (pixels*scale).astype(np.int32) +v = ClusterVector_i(3,3) +v.push_back(1,1, pixels) + +with open(etahist_fname, "rb") as f: + hist = pickle.load(f) +eta = hist.view().copy() +etabinsx = np.array(hist.axes.edges.T[0].flat) +etabinsy = np.array(hist.axes.edges.T[1].flat) +ebins = np.array(hist.axes.edges.T[2].flat) +p = Interpolator(eta, etabinsx[0:-1], etabinsy[0:-1], ebins[0:-1]) -x = hist.axes[0].centers -y = hist.values() -y_err = np.sqrt(y)+1 -res = aare.fit_gaus(x, y, y_err, chi2 = True) - -t_elapsed = time.perf_counter()-t0 -print(f'Histogram filling took: {t_elapsed:.3f}s {total_clusters/t_elapsed/1e6:.3f}M clusters/s') +#Generate the hit -histogram_data = hist3d.counts() -x = hist3d.axes[2].edges[:-1] -y = histogram_data[100,100,:] -xx = np.linspace(x[0], x[-1]) -# fig, ax = plt.subplots() -# ax.step(x, y, where = 'post') -y_err = np.sqrt(y) -y_err = np.zeros(y.size) -y_err += 1 -# par = fit_gaus2(y,x, y_err) -# ax.plot(xx, gaus(xx,par)) -# print(par) +tmp = p.interpolate(v) +print(f'tmp:{tmp}') +pos = np.array((tmp['x'], tmp['y']))*25 -res = fit_gaus(y,x) -res2 = 
fit_gaus(y,x, y_err) -print(res) -print(res2) +print(pixels) +fig, ax = plt.subplots(figsize = (7,7)) +ax.pcolormesh(xaxis, xaxis, t) +ax.plot(*pos, 'o') +ax.set_xticks([0,25,50,75]) +ax.set_yticks([0,25,50,75]) +ax.set_xlim(0,75) +ax.set_ylim(0,75) +ax.grid() +print(f'{hit=}') +print(f'{pos=}') \ No newline at end of file diff --git a/python/src/cluster.hpp b/python/src/cluster.hpp index 792b7e6..3db816a 100644 --- a/python/src/cluster.hpp +++ b/python/src/cluster.hpp @@ -20,7 +20,13 @@ template void define_cluster_vector(py::module &m, const std::string &typestr) { auto class_name = fmt::format("ClusterVector_{}", typestr); py::class_>(m, class_name.c_str(), py::buffer_protocol()) - .def(py::init()) + .def(py::init(), + py::arg("cluster_size_x") = 3, py::arg("cluster_size_y") = 3) + .def("push_back", + [](ClusterVector &self, int x, int y, py::array_t data) { + // auto view = make_view_2d(data); + self.push_back(x, y, reinterpret_cast(data.data())); + }) .def_property_readonly("size", &ClusterVector::size) .def("item_size", &ClusterVector::item_size) .def_property_readonly("fmt", @@ -38,6 +44,8 @@ void define_cluster_vector(py::module &m, const std::string &typestr) { auto *vec = new std::vector(self.sum_2x2()); return return_vector(vec); }) + .def_property_readonly("cluster_size_x", &ClusterVector::cluster_size_x) + .def_property_readonly("cluster_size_y", &ClusterVector::cluster_size_y) .def_property_readonly("capacity", &ClusterVector::capacity) .def_property("frame_number", &ClusterVector::frame_number, &ClusterVector::set_frame_number) diff --git a/python/src/cluster_file.hpp b/python/src/cluster_file.hpp index 8a431b5..f587443 100644 --- a/python/src/cluster_file.hpp +++ b/python/src/cluster_file.hpp @@ -31,6 +31,11 @@ void define_cluster_file_io_bindings(py::module &m) { auto v = new ClusterVector(self.read_clusters(n_clusters)); return v; },py::return_value_policy::take_ownership) + .def("read_clusters", + [](ClusterFile &self, size_t n_clusters, ROI roi) 
{ + auto v = new ClusterVector(self.read_clusters(n_clusters, roi)); + return v; + },py::return_value_policy::take_ownership) .def("read_frame", [](ClusterFile &self) { auto v = new ClusterVector(self.read_frame()); diff --git a/python/src/ctb_raw_file.hpp b/python/src/ctb_raw_file.hpp index 9ce656d..56e571b 100644 --- a/python/src/ctb_raw_file.hpp +++ b/python/src/ctb_raw_file.hpp @@ -32,7 +32,7 @@ m.def("adc_sar_05_decode64to16", [](py::array_t input) { } //Create a 2D output array with the same shape as the input - std::vector shape{input.shape(0), input.shape(1)/8}; + std::vector shape{input.shape(0), input.shape(1)/static_cast(bits_per_byte)}; py::array_t output(shape); //Create a view of the input and output arrays @@ -53,7 +53,7 @@ m.def("adc_sar_04_decode64to16", [](py::array_t input) { } //Create a 2D output array with the same shape as the input - std::vector shape{input.shape(0), input.shape(1)/8}; + std::vector shape{input.shape(0), input.shape(1)/static_cast(bits_per_byte)}; py::array_t output(shape); //Create a view of the input and output arrays diff --git a/python/src/file.hpp b/python/src/file.hpp index c3c800c..0d64e16 100644 --- a/python/src/file.hpp +++ b/python/src/file.hpp @@ -195,6 +195,8 @@ void define_file_io_bindings(py::module &m) { py::class_(m, "ROI") .def(py::init<>()) + .def(py::init(), py::arg("xmin"), + py::arg("xmax"), py::arg("ymin"), py::arg("ymax")) .def_readwrite("xmin", &ROI::xmin) .def_readwrite("xmax", &ROI::xmax) .def_readwrite("ymin", &ROI::ymin) diff --git a/python/src/interpolation.hpp b/python/src/interpolation.hpp new file mode 100644 index 0000000..02742e1 --- /dev/null +++ b/python/src/interpolation.hpp @@ -0,0 +1,58 @@ +#include "aare/Interpolator.hpp" +#include "aare/NDArray.hpp" +#include "aare/NDView.hpp" +#include "np_helper.hpp" +#include +#include +#include +#include + +namespace py = pybind11; +void define_interpolation_bindings(py::module &m) { + + PYBIND11_NUMPY_DTYPE(aare::Photon, x,y,energy); + + 
py::class_(m, "Interpolator") + .def(py::init([](py::array_t etacube, py::array_t xbins, + py::array_t ybins, py::array_t ebins) { + return Interpolator(make_view_3d(etacube), make_view_1d(xbins), + make_view_1d(ybins), make_view_1d(ebins)); + })) + .def("get_ietax", [](Interpolator& self){ + auto*ptr = new NDArray{}; + *ptr = self.get_ietax(); + return return_image_data(ptr); + }) + .def("get_ietay", [](Interpolator& self){ + auto*ptr = new NDArray{}; + *ptr = self.get_ietay(); + return return_image_data(ptr); + }) + .def("interpolate", [](Interpolator& self, const ClusterVector& clusters){ + auto photons = self.interpolate(clusters); + auto* ptr = new std::vector{photons}; + return return_vector(ptr); + }); + + // TODO! Evaluate without converting to double + m.def( + "hej", + []() { + // auto boost_histogram = py::module_::import("boost_histogram"); + // py::object axis = + // boost_histogram.attr("axis").attr("Regular")(10, 0.0, 10.0); + // py::object histogram = boost_histogram.attr("Histogram")(axis); + // return histogram; + // return h; + }, + R"( + Evaluate a 1D Gaussian function for all points in x using parameters par. + + Parameters + ---------- + x : array_like + The points at which to evaluate the Gaussian function. + par : array_like + The parameters of the Gaussian function. The first element is the amplitude, the second element is the mean, and the third element is the standard deviation. 
+ )"); +} \ No newline at end of file diff --git a/python/src/module.cpp b/python/src/module.cpp index 70d143f..43f48ba 100644 --- a/python/src/module.cpp +++ b/python/src/module.cpp @@ -9,6 +9,7 @@ #include "cluster.hpp" #include "cluster_file.hpp" #include "fit.hpp" +#include "interpolation.hpp" //Pybind stuff #include @@ -31,5 +32,6 @@ PYBIND11_MODULE(_aare, m) { define_cluster_collector_bindings(m); define_cluster_file_sink_bindings(m); define_fit_bindings(m); + define_interpolation_bindings(m); } \ No newline at end of file diff --git a/python/src/np_helper.hpp b/python/src/np_helper.hpp index 6e92830..1845196 100644 --- a/python/src/np_helper.hpp +++ b/python/src/np_helper.hpp @@ -40,25 +40,25 @@ template py::array return_vector(std::vector *vec) { } // todo rewrite generic -template auto get_shape_3d(py::array_t arr) { +template auto get_shape_3d(const py::array_t& arr) { return aare::Shape<3>{arr.shape(0), arr.shape(1), arr.shape(2)}; } -template auto make_view_3d(py::array_t arr) { +template auto make_view_3d(py::array_t& arr) { return aare::NDView(arr.mutable_data(), get_shape_3d(arr)); } -template auto get_shape_2d(py::array_t arr) { +template auto get_shape_2d(const py::array_t& arr) { return aare::Shape<2>{arr.shape(0), arr.shape(1)}; } -template auto get_shape_1d(py::array_t arr) { +template auto get_shape_1d(const py::array_t& arr) { return aare::Shape<1>{arr.shape(0)}; } -template auto make_view_2d(py::array_t arr) { +template auto make_view_2d(py::array_t& arr) { return aare::NDView(arr.mutable_data(), get_shape_2d(arr)); } -template auto make_view_1d(py::array_t arr) { +template auto make_view_1d(py::array_t& arr) { return aare::NDView(arr.mutable_data(), get_shape_1d(arr)); } \ No newline at end of file diff --git a/python/src/var_cluster.hpp b/python/src/var_cluster.hpp index f3a5741..f7b373f 100644 --- a/python/src/var_cluster.hpp +++ b/python/src/var_cluster.hpp @@ -19,15 +19,24 @@ using namespace::aare; void 
define_var_cluster_finder_bindings(py::module &m) { PYBIND11_NUMPY_DTYPE(VarClusterFinder::Hit, size, row, col, - reserved, energy, max); + reserved, energy, max, rows, cols, enes); py::class_>(m, "VarClusterFinder") .def(py::init, double>()) .def("labeled", [](VarClusterFinder &self) { - auto ptr = new NDArray(self.labeled()); + auto *ptr = new NDArray(self.labeled()); return return_image_data(ptr); }) + .def("set_noiseMap", + [](VarClusterFinder &self, + py::array_t + noise_map) { + auto noise_map_span = make_view_2d(noise_map); + self.set_noiseMap(noise_map_span); + }) + .def("set_peripheralThresholdFactor", + &VarClusterFinder::set_peripheralThresholdFactor) .def("find_clusters", [](VarClusterFinder &self, py::array_t @@ -35,6 +44,30 @@ void define_var_cluster_finder_bindings(py::module &m) { auto view = make_view_2d(img); self.find_clusters(view); }) + .def("find_clusters_X", + [](VarClusterFinder &self, + py::array_t + img) { + auto img_span = make_view_2d(img); + self.find_clusters_X(img_span); + }) + .def("single_pass", + [](VarClusterFinder &self, + py::array_t + img) { + auto img_span = make_view_2d(img); + self.single_pass(img_span); + }) + .def("hits", + [](VarClusterFinder &self) { + auto ptr = new std::vector::Hit>( + self.steal_hits()); + return return_vector(ptr); + }) + .def("clear_hits", + [](VarClusterFinder &self) { + self.clear_hits(); + }) .def("steal_hits", [](VarClusterFinder &self) { auto ptr = new std::vector::Hit>( diff --git a/src/ClusterFile.cpp b/src/ClusterFile.cpp index 2928d26..2e23e09 100644 --- a/src/ClusterFile.cpp +++ b/src/ClusterFile.cpp @@ -108,6 +108,79 @@ ClusterVector ClusterFile::read_clusters(size_t n_clusters) { return clusters; } +ClusterVector ClusterFile::read_clusters(size_t n_clusters, ROI roi) { + if (m_mode != "r") { + throw std::runtime_error("File not opened for reading"); + } + + ClusterVector clusters(3,3); + clusters.reserve(n_clusters); + + int32_t iframe = 0; // frame number needs to be 4 bytes! 
+ size_t nph_read = 0; + uint32_t nn = m_num_left; + uint32_t nph = m_num_left; // number of clusters in frame needs to be 4 + + // auto buf = reinterpret_cast(clusters.data()); + // auto buf = clusters.data(); + + Cluster3x3 tmp; //this would break if the cluster size changes + + // if there are photons left from previous frame read them first + if (nph) { + if (nph > n_clusters) { + // if we have more photons left in the frame then photons to read we + // read directly the requested number + nn = n_clusters; + } else { + nn = nph; + } + //Read one cluster, in the ROI push back + // nph_read += fread((buf + nph_read*clusters.item_size()), + // clusters.item_size(), nn, fp); + for(size_t i = 0; i < nn; i++){ + fread(&tmp, sizeof(tmp), 1, fp); + if(tmp.x >= roi.xmin && tmp.x <= roi.xmax && tmp.y >= roi.ymin && tmp.y <= roi.ymax){ + clusters.push_back(tmp.x, tmp.y, reinterpret_cast(tmp.data)); + nph_read++; + } + } + + m_num_left = nph - nn; // write back the number of photons left + } + + if (nph_read < n_clusters) { + // keep on reading frames and photons until reaching n_clusters + while (fread(&iframe, sizeof(iframe), 1, fp)) { + // read number of clusters in frame + if (fread(&nph, sizeof(nph), 1, fp)) { + if (nph > (n_clusters - nph_read)) + nn = n_clusters - nph_read; + else + nn = nph; + + // nph_read += fread((buf + nph_read*clusters.item_size()), + // clusters.item_size(), nn, fp); + for(size_t i = 0; i < nn; i++){ + fread(&tmp, sizeof(tmp), 1, fp); + if(tmp.x >= roi.xmin && tmp.x <= roi.xmax && tmp.y >= roi.ymin && tmp.y <= roi.ymax){ + clusters.push_back(tmp.x, tmp.y, reinterpret_cast(tmp.data)); + nph_read++; + } + } + m_num_left = nph - nn; + } + if (nph_read >= n_clusters) + break; + } + } + + // Resize the vector to the number of clusters. + // No new allocation, only change bounds. 
+ clusters.resize(nph_read); + return clusters; +} + ClusterVector ClusterFile::read_frame() { if (m_mode != "r") { throw std::runtime_error("File not opened for reading"); @@ -268,11 +341,23 @@ ClusterVector ClusterFile::read_frame() { NDArray calculate_eta2(ClusterVector &clusters) { //TOTO! make work with 2x2 clusters NDArray eta2({static_cast(clusters.size()), 2}); - for (size_t i = 0; i < clusters.size(); i++) { - auto e = calculate_eta2(clusters.at(i)); - eta2(i, 0) = e.x; - eta2(i, 1) = e.y; + + if (clusters.cluster_size_x() == 3 || clusters.cluster_size_y() == 3) { + for (size_t i = 0; i < clusters.size(); i++) { + auto e = calculate_eta2(clusters.at(i)); + eta2(i, 0) = e.x; + eta2(i, 1) = e.y; + } + }else if(clusters.cluster_size_x() == 2 || clusters.cluster_size_y() == 2){ + for (size_t i = 0; i < clusters.size(); i++) { + auto e = calculate_eta2(clusters.at(i)); + eta2(i, 0) = e.x; + eta2(i, 1) = e.y; + } + }else{ + throw std::runtime_error("Only 3x3 and 2x2 clusters are supported"); } + return eta2; } @@ -290,7 +375,7 @@ Eta2 calculate_eta2(Cluster3x3 &cl) { tot2[3] = cl.data[4] + cl.data[5] + cl.data[7] + cl.data[8]; auto c = std::max_element(tot2.begin(), tot2.end()) - tot2.begin(); - + eta.sum = tot2[c]; switch (c) { case cBottomLeft: if ((cl.data[3] + cl.data[4]) != 0) @@ -333,6 +418,20 @@ Eta2 calculate_eta2(Cluster3x3 &cl) { return eta; } + +Eta2 calculate_eta2(Cluster2x2 &cl) { + Eta2 eta{}; + if ((cl.data[0] + cl.data[1]) != 0) + eta.x = static_cast(cl.data[1]) / (cl.data[0] + cl.data[1]); + if ((cl.data[0] + cl.data[2]) != 0) + eta.y = static_cast(cl.data[2]) / (cl.data[0] + cl.data[2]); + eta.sum = cl.data[0] + cl.data[1] + cl.data[2]+ cl.data[3]; + eta.c = cBottomLeft; //TODO! 
This is not correct, but need to put something + return eta; +} + + + int analyze_cluster(Cluster3x3 &cl, int32_t *t2, int32_t *t3, char *quad, double *eta2x, double *eta2y, double *eta3x, double *eta3y) { diff --git a/src/Dtype.cpp b/src/Dtype.cpp index 565d509..b818ea3 100644 --- a/src/Dtype.cpp +++ b/src/Dtype.cpp @@ -70,7 +70,7 @@ uint8_t Dtype::bitdepth() const { /** * @brief Get the number of bytes of the data type */ -size_t Dtype::bytes() const { return bitdepth() / 8; } +size_t Dtype::bytes() const { return bitdepth() / bits_per_byte; } /** * @brief Construct a DType object from a TypeIndex diff --git a/src/File.cpp b/src/File.cpp index 1180967..3c68eff 100644 --- a/src/File.cpp +++ b/src/File.cpp @@ -73,7 +73,7 @@ size_t File::tell() const { return file_impl->tell(); } size_t File::rows() const { return file_impl->rows(); } size_t File::cols() const { return file_impl->cols(); } size_t File::bitdepth() const { return file_impl->bitdepth(); } -size_t File::bytes_per_pixel() const { return file_impl->bitdepth() / 8; } +size_t File::bytes_per_pixel() const { return file_impl->bitdepth() / bits_per_byte; } DetectorType File::detector_type() const { return file_impl->detector_type(); } diff --git a/src/Interpolator.cpp b/src/Interpolator.cpp new file mode 100644 index 0000000..7f82533 --- /dev/null +++ b/src/Interpolator.cpp @@ -0,0 +1,144 @@ +#include "aare/Interpolator.hpp" +#include "aare/algorithm.hpp" + +namespace aare { + +Interpolator::Interpolator(NDView etacube, NDView xbins, + NDView ybins, NDView ebins) + : m_ietax(etacube), m_ietay(etacube), m_etabinsx(xbins), m_etabinsy(ybins), m_energy_bins(ebins) { + if (etacube.shape(0) != xbins.size() || etacube.shape(1) != ybins.size() || + etacube.shape(2) != ebins.size()) { + throw std::invalid_argument( + "The shape of the etacube does not match the shape of the bins"); + } + + // Cumulative sum in the x direction + for (ssize_t i = 1; i < m_ietax.shape(0); i++) { + for (ssize_t j = 0; j < 
m_ietax.shape(1); j++) { + for (ssize_t k = 0; k < m_ietax.shape(2); k++) { + m_ietax(i, j, k) += m_ietax(i - 1, j, k); + } + } + } + + // Normalize by the highest row, if norm less than 1 don't do anything + for (ssize_t i = 0; i < m_ietax.shape(0); i++) { + for (ssize_t j = 0; j < m_ietax.shape(1); j++) { + for (ssize_t k = 0; k < m_ietax.shape(2); k++) { + auto val = m_ietax(m_ietax.shape(0) - 1, j, k); + double norm = val < 1 ? 1 : val; + m_ietax(i, j, k) /= norm; + } + } + } + + // Cumulative sum in the y direction + for (ssize_t i = 0; i < m_ietay.shape(0); i++) { + for (ssize_t j = 1; j < m_ietay.shape(1); j++) { + for (ssize_t k = 0; k < m_ietay.shape(2); k++) { + m_ietay(i, j, k) += m_ietay(i, j - 1, k); + } + } + } + + // Normalize by the highest column, if norm less than 1 don't do anything + for (ssize_t i = 0; i < m_ietay.shape(0); i++) { + for (ssize_t j = 0; j < m_ietay.shape(1); j++) { + for (ssize_t k = 0; k < m_ietay.shape(2); k++) { + auto val = m_ietay(i, m_ietay.shape(1) - 1, k); + double norm = val < 1 ? 
1 : val; + m_ietay(i, j, k) /= norm; + } + } + } +} + +std::vector Interpolator::interpolate(const ClusterVector& clusters) { + std::vector photons; + photons.reserve(clusters.size()); + + if (clusters.cluster_size_x() == 3 || clusters.cluster_size_y() == 3) { + for (size_t i = 0; i(i); + Eta2 eta= calculate_eta2(cluster); + + Photon photon; + photon.x = cluster.x; + photon.y = cluster.y; + photon.energy = eta.sum; + + // auto ie = nearest_index(m_energy_bins, photon.energy)-1; + // auto ix = nearest_index(m_etabinsx, eta.x)-1; + // auto iy = nearest_index(m_etabinsy, eta.y)-1; + //Finding the index of the last element that is smaller + //should work fine as long as we have many bins + auto ie = last_smaller(m_energy_bins, photon.energy); + auto ix = last_smaller(m_etabinsx, eta.x); + auto iy = last_smaller(m_etabinsy, eta.y); + + // fmt::print("ex: {}, ix: {}, iy: {}\n", ie, ix, iy); + + double dX, dY; + int ex, ey; + // cBottomLeft = 0, + // cBottomRight = 1, + // cTopLeft = 2, + // cTopRight = 3 + switch (eta.c) { + case cTopLeft: + dX = -1.; + dY = 0.; + break; + case cTopRight:; + dX = 0.; + dY = 0.; + break; + case cBottomLeft: + dX = -1.; + dY = -1.; + break; + case cBottomRight: + dX = 0.; + dY = -1.; + break; + } + photon.x += m_ietax(ix, iy, ie)*2 + dX; + photon.y += m_ietay(ix, iy, ie)*2 + dY; + photons.push_back(photon); + } + }else if(clusters.cluster_size_x() == 2 || clusters.cluster_size_y() == 2){ + for (size_t i = 0; i(i); + Eta2 eta= calculate_eta2(cluster); + + Photon photon; + photon.x = cluster.x; + photon.y = cluster.y; + photon.energy = eta.sum; + + //Now do some actual interpolation. 
+ //Find which energy bin the cluster is in + // auto ie = nearest_index(m_energy_bins, photon.energy)-1; + // auto ix = nearest_index(m_etabinsx, eta.x)-1; + // auto iy = nearest_index(m_etabinsy, eta.y)-1; + //Finding the index of the last element that is smaller + //should work fine as long as we have many bins + auto ie = last_smaller(m_energy_bins, photon.energy); + auto ix = last_smaller(m_etabinsx, eta.x); + auto iy = last_smaller(m_etabinsy, eta.y); + + photon.x += m_ietax(ix, iy, ie)*2; //eta goes between 0 and 1 but we could move the hit anywhere in the 2x2 + photon.y += m_ietay(ix, iy, ie)*2; + photons.push_back(photon); + } + + }else{ + throw std::runtime_error("Only 3x3 and 2x2 clusters are supported for interpolation"); + } + + + return photons; +} + +} // namespace aare \ No newline at end of file diff --git a/src/NDArray.test.cpp b/src/NDArray.test.cpp index 942481c..eff3e2c 100644 --- a/src/NDArray.test.cpp +++ b/src/NDArray.test.cpp @@ -2,6 +2,7 @@ #include #include #include +#include using aare::NDArray; using aare::NDView; @@ -34,6 +35,24 @@ TEST_CASE("Construct from an NDView") { } } +TEST_CASE("3D NDArray from NDView"){ + std::vector data(27); + std::iota(data.begin(), data.end(), 0); + NDView view(data.data(), Shape<3>{3, 3, 3}); + NDArray image(view); + REQUIRE(image.shape() == view.shape()); + REQUIRE(image.size() == view.size()); + REQUIRE(image.data() != view.data()); + + for(int64_t i=0; i shape{{20}}; NDArray img(shape, 3); diff --git a/src/RawFile.cpp b/src/RawFile.cpp index e704add..78cb6c5 100644 --- a/src/RawFile.cpp +++ b/src/RawFile.cpp @@ -76,8 +76,7 @@ size_t RawFile::n_mod() const { return n_subfile_parts; } size_t RawFile::bytes_per_frame() { - // return m_rows * m_cols * m_master.bitdepth() / 8; - return m_geometry.pixels_x * m_geometry.pixels_y * m_master.bitdepth() / 8; + return m_geometry.pixels_x * m_geometry.pixels_y * m_master.bitdepth() / bits_per_byte; } size_t RawFile::pixels_per_frame() { // return m_rows * m_cols; 
diff --git a/src/algorithm.test.cpp b/src/algorithm.test.cpp new file mode 100644 index 0000000..fcfa8d2 --- /dev/null +++ b/src/algorithm.test.cpp @@ -0,0 +1,73 @@ + + +#include +#include + + +TEST_CASE("Find the closed index in a 1D array", "[algorithm]") { + aare::NDArray arr({5}); + for (size_t i = 0; i < arr.size(); i++) { + arr[i] = i; + } + // arr 0, 1, 2, 3, 4 + REQUIRE(aare::nearest_index(arr, 2.3) == 2); + REQUIRE(aare::nearest_index(arr, 2.6) == 3); + REQUIRE(aare::nearest_index(arr, 45.0) == 4); + REQUIRE(aare::nearest_index(arr, 0.0) == 0); + REQUIRE(aare::nearest_index(arr, -1.0) == 0); +} + +TEST_CASE("Passing integers to nearest_index works", "[algorithm]"){ + aare::NDArray arr({5}); + for (size_t i = 0; i < arr.size(); i++) { + arr[i] = i; + } + // arr 0, 1, 2, 3, 4 + REQUIRE(aare::nearest_index(arr, 2) == 2); + REQUIRE(aare::nearest_index(arr, 3) == 3); + REQUIRE(aare::nearest_index(arr, 45) == 4); + REQUIRE(aare::nearest_index(arr, 0) == 0); + REQUIRE(aare::nearest_index(arr, -1) == 0); +} + + +TEST_CASE("nearest_index works with std::vector", "[algorithm]"){ + std::vector vec = {0, 1, 2, 3, 4}; + REQUIRE(aare::nearest_index(vec, 2.123) == 2); + REQUIRE(aare::nearest_index(vec, 2.66) == 3); + REQUIRE(aare::nearest_index(vec, 4555555.0) == 4); + REQUIRE(aare::nearest_index(vec, 0.0) == 0); + REQUIRE(aare::nearest_index(vec, -10.0) == 0); +} + +TEST_CASE("nearest index works with std::array", "[algorithm]"){ + std::array arr = {0, 1, 2, 3, 4}; + REQUIRE(aare::nearest_index(arr, 2.123) == 2); + REQUIRE(aare::nearest_index(arr, 2.501) == 3); + REQUIRE(aare::nearest_index(arr, 4555555.0) == 4); + REQUIRE(aare::nearest_index(arr, 0.0) == 0); + REQUIRE(aare::nearest_index(arr, -10.0) == 0); +} + + +TEST_CASE("last smaller", "[algorithm]"){ + aare::NDArray arr({5}); + for (size_t i = 0; i < arr.size(); i++) { + arr[i] = i; + } + // arr 0, 1, 2, 3, 4 + REQUIRE(aare::last_smaller(arr, -10.0) == 0); + REQUIRE(aare::last_smaller(arr, 0.0) == 0); + 
REQUIRE(aare::last_smaller(arr, 2.3) == 2); + REQUIRE(aare::last_smaller(arr, 253.) == 4); +} + +TEST_CASE("returns last bin strictly smaller", "[algorithm]"){ + aare::NDArray arr({5}); + for (size_t i = 0; i < arr.size(); i++) { + arr[i] = i; + } + // arr 0, 1, 2, 3, 4 + REQUIRE(aare::last_smaller(arr, 2.0) == 2); + +} \ No newline at end of file From 6ad76f63c11754444c049c652f54b3d16d3f0586 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Mon, 24 Mar 2025 14:28:10 +0100 Subject: [PATCH 08/23] Fixed reading clusters with ROI (#142) Fixed incorrect reading of clusters with ROI closes #141 --- src/ClusterFile.cpp | 78 +++++++++++++++++++-------------------------- 1 file changed, 33 insertions(+), 45 deletions(-) diff --git a/src/ClusterFile.cpp b/src/ClusterFile.cpp index 2e23e09..59b8bb8 100644 --- a/src/ClusterFile.cpp +++ b/src/ClusterFile.cpp @@ -115,69 +115,57 @@ ClusterVector ClusterFile::read_clusters(size_t n_clusters, ROI roi) { ClusterVector clusters(3,3); clusters.reserve(n_clusters); - - int32_t iframe = 0; // frame number needs to be 4 bytes! 
- size_t nph_read = 0; - uint32_t nn = m_num_left; - uint32_t nph = m_num_left; // number of clusters in frame needs to be 4 - - // auto buf = reinterpret_cast(clusters.data()); - // auto buf = clusters.data(); - + Cluster3x3 tmp; //this would break if the cluster size changes + // if there are photons left from previous frame read them first - if (nph) { - if (nph > n_clusters) { - // if we have more photons left in the frame then photons to read we - // read directly the requested number - nn = n_clusters; - } else { - nn = nph; - } - //Read one cluster, in the ROI push back - // nph_read += fread((buf + nph_read*clusters.item_size()), - // clusters.item_size(), nn, fp); - for(size_t i = 0; i < nn; i++){ + if (m_num_left) { + size_t nph_read = 0; + while(nph_read < m_num_left && clusters.size() < n_clusters){ fread(&tmp, sizeof(tmp), 1, fp); + nph_read++; if(tmp.x >= roi.xmin && tmp.x <= roi.xmax && tmp.y >= roi.ymin && tmp.y <= roi.ymax){ clusters.push_back(tmp.x, tmp.y, reinterpret_cast(tmp.data)); - nph_read++; } } - - m_num_left = nph - nn; // write back the number of photons left + m_num_left -= nph_read; } - if (nph_read < n_clusters) { - // keep on reading frames and photons until reaching n_clusters - while (fread(&iframe, sizeof(iframe), 1, fp)) { - // read number of clusters in frame - if (fread(&nph, sizeof(nph), 1, fp)) { - if (nph > (n_clusters - nph_read)) - nn = n_clusters - nph_read; - else - nn = nph; - // nph_read += fread((buf + nph_read*clusters.item_size()), - // clusters.item_size(), nn, fp); - for(size_t i = 0; i < nn; i++){ + if (clusters.size() < n_clusters) { + if (m_num_left) { + throw std::runtime_error(LOCATION + "Entered second loop with clusters left\n"); + } + // we did not have enough clusters left in the previous frame + // keep on reading frames until reaching n_clusters + + int32_t frame_number = 0; // frame number needs to be 4 bytes! 
+ while (fread(&frame_number, sizeof(frame_number), 1, fp)) { + uint32_t nph_in_frame = 0; //number of photons we can read until next frame number + size_t nph_read = 0; //number of photons read in this frame + + if (fread(&nph_in_frame, sizeof(nph_in_frame), 1, fp)) { + if(frame_number != 1){ + throw std::runtime_error("Frame number is not 1"); + } + + while(nph_read < nph_in_frame && clusters.size() < n_clusters){ fread(&tmp, sizeof(tmp), 1, fp); + nph_read++; if(tmp.x >= roi.xmin && tmp.x <= roi.xmax && tmp.y >= roi.ymin && tmp.y <= roi.ymax){ clusters.push_back(tmp.x, tmp.y, reinterpret_cast(tmp.data)); - nph_read++; } } - m_num_left = nph - nn; + m_num_left = nph_in_frame - nph_read; } - if (nph_read >= n_clusters) - break; - } - } - // Resize the vector to the number of clusters. - // No new allocation, only change bounds. - clusters.resize(nph_read); + if (clusters.size() >= n_clusters){ + break; + } + } + + } return clusters; } From a42c0d645bc91f9ced8cee98e03010505a742d81 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Tue, 1 Apr 2025 14:31:25 +0200 Subject: [PATCH 09/23] added roi, noise and gain (#143) - Moved definitions of Cluster_2x2 and Cluster_3x3 to it's own file - Added optional members for ROI, noise_map and gain_map in ClusterFile **API:** After creating the ClusterFile the user can set one or all of: roi, noise_map, gain_map ```python f = ClusterFile(fname) f.set_roi(roi) #aare.ROI f.set_noise_map(noise_map) #numpy array f.set_gain_map(gain_map) #numpy array ``` **When reading clusters they are evaluated in the order:** 1. If ROI is enabled check that the cluster is within the ROI 1. If noise_map is enabled check that the cluster meets one of the conditions - Center pixel above noise - Highest 2x2 sum above 2x noise - 3x3 sum above 3x noise 1. If gain_map is set apply the gain map before returning the clusters (not used for noise cut) **Open questions:** 1. Check for out of bounds access in noise and gain map? 
closes #139 closes #135 closes #90 --- CMakeLists.txt | 45 ++- conda-recipe/meta.yaml | 2 +- include/aare/Cluster.hpp | 36 +++ include/aare/ClusterFile.hpp | 77 +++--- include/aare/ClusterVector.hpp | 25 ++ include/aare/defs.hpp | 6 +- patches/libzmq_cmake_version.patch | 18 ++ pyproject.toml | 2 +- python/src/cluster_file.hpp | 24 +- src/ClusterFile.cpp | 428 ++++++++++------------------- src/ClusterFile.test.cpp | 80 ++++++ tests/test_config.hpp.in | 2 +- 12 files changed, 400 insertions(+), 345 deletions(-) create mode 100644 include/aare/Cluster.hpp create mode 100644 patches/libzmq_cmake_version.patch create mode 100644 src/ClusterFile.test.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 4772f0b..4a12fe6 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -81,15 +81,29 @@ set(CMAKE_EXPORT_COMPILE_COMMANDS ON) if(AARE_FETCH_LMFIT) #TODO! Should we fetch lmfit from the web or inlcude a tar.gz in the repo? - set(lmfit_patch git apply ${CMAKE_CURRENT_SOURCE_DIR}/patches/lmfit.patch) - FetchContent_Declare( - lmfit - GIT_REPOSITORY https://jugit.fz-juelich.de/mlz/lmfit.git - GIT_TAG main - PATCH_COMMAND ${lmfit_patch} - UPDATE_DISCONNECTED 1 - EXCLUDE_FROM_ALL 1 - ) + set(LMFIT_PATCH_COMMAND git apply ${CMAKE_CURRENT_SOURCE_DIR}/patches/lmfit.patch) + + # For cmake < 3.28 we can't supply EXCLUDE_FROM_ALL to FetchContent_Declare + # so we need this workaround + if (${CMAKE_VERSION} VERSION_LESS "3.28") + FetchContent_Declare( + lmfit + GIT_REPOSITORY https://jugit.fz-juelich.de/mlz/lmfit.git + GIT_TAG main + PATCH_COMMAND ${LMFIT_PATCH_COMMAND} + UPDATE_DISCONNECTED 1 + ) + else() + FetchContent_Declare( + lmfit + GIT_REPOSITORY https://jugit.fz-juelich.de/mlz/lmfit.git + GIT_TAG main + PATCH_COMMAND ${LMFIT_PATCH_COMMAND} + UPDATE_DISCONNECTED 1 + EXCLUDE_FROM_ALL 1 + ) + endif() + #Disable what we don't need from lmfit set(BUILD_TESTING OFF CACHE BOOL "") set(LMFIT_CPPTEST OFF CACHE BOOL "") @@ -97,8 +111,15 @@ if(AARE_FETCH_LMFIT) set(LMFIT_CPPTEST OFF 
CACHE BOOL "") set(BUILD_SHARED_LIBS OFF CACHE BOOL "") + if (${CMAKE_VERSION} VERSION_LESS "3.28") + if(NOT lmfit_POPULATED) + FetchContent_Populate(lmfit) + add_subdirectory(${lmfit_SOURCE_DIR} ${lmfit_BINARY_DIR} EXCLUDE_FROM_ALL) + endif() + else() + FetchContent_MakeAvailable(lmfit) + endif() - FetchContent_MakeAvailable(lmfit) set_property(TARGET lmfit PROPERTY POSITION_INDEPENDENT_CODE ON) else() find_package(lmfit REQUIRED) @@ -111,10 +132,13 @@ if(AARE_FETCH_ZMQ) if (${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.30") cmake_policy(SET CMP0169 OLD) endif() + set(ZMQ_PATCH_COMMAND git apply ${CMAKE_CURRENT_SOURCE_DIR}/patches/libzmq_cmake_version.patch) FetchContent_Declare( libzmq GIT_REPOSITORY https://github.com/zeromq/libzmq.git GIT_TAG v4.3.4 + PATCH_COMMAND ${ZMQ_PATCH_COMMAND} + UPDATE_DISCONNECTED 1 ) # Disable unwanted options from libzmq set(BUILD_TESTS OFF CACHE BOOL "Switch off libzmq test build") @@ -396,6 +420,7 @@ if(AARE_TESTS) ${CMAKE_CURRENT_SOURCE_DIR}/src/NDView.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFinder.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterVector.test.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFile.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Pedestal.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyHelpers.test.cpp diff --git a/conda-recipe/meta.yaml b/conda-recipe/meta.yaml index 120854b..3630b29 100644 --- a/conda-recipe/meta.yaml +++ b/conda-recipe/meta.yaml @@ -1,6 +1,6 @@ package: name: aare - version: 2025.3.18 #TODO! how to not duplicate this? + version: 2025.4.1 #TODO! how to not duplicate this? diff --git a/include/aare/Cluster.hpp b/include/aare/Cluster.hpp new file mode 100644 index 0000000..48f9ef0 --- /dev/null +++ b/include/aare/Cluster.hpp @@ -0,0 +1,36 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace aare { + +//TODO! Template this? 
+struct Cluster3x3 { + int16_t x; + int16_t y; + int32_t data[9]; + + int32_t sum_2x2() const{ + std::array total; + total[0] = data[0] + data[1] + data[3] + data[4]; + total[1] = data[1] + data[2] + data[4] + data[5]; + total[2] = data[3] + data[4] + data[6] + data[7]; + total[3] = data[4] + data[5] + data[7] + data[8]; + return *std::max_element(total.begin(), total.end()); + } + + int32_t sum() const{ + return std::accumulate(data, data + 9, 0); + } +}; +struct Cluster2x2 { + int16_t x; + int16_t y; + int32_t data[4]; +}; + +} // namespace aare \ No newline at end of file diff --git a/include/aare/ClusterFile.hpp b/include/aare/ClusterFile.hpp index 5bea342..22f4183 100644 --- a/include/aare/ClusterFile.hpp +++ b/include/aare/ClusterFile.hpp @@ -1,25 +1,16 @@ #pragma once +#include "aare/Cluster.hpp" #include "aare/ClusterVector.hpp" #include "aare/NDArray.hpp" #include "aare/defs.hpp" #include #include +#include namespace aare { -//TODO! Template this? -struct Cluster3x3 { - int16_t x; - int16_t y; - int32_t data[9]; -}; -struct Cluster2x2 { - int16_t x; - int16_t y; - int32_t data[4]; -}; - +//TODO! Legacy enums, migrate to enum class typedef enum { cBottomLeft = 0, cBottomRight = 1, @@ -53,15 +44,7 @@ struct ClusterAnalysis { double etay; }; -/* -Binary cluster file. Expects data to be layed out as: -int32_t frame_number -uint32_t number_of_clusters -int16_t x, int16_t y, int32_t data[9] x number_of_clusters -int32_t frame_number -uint32_t number_of_clusters -.... -*/ + /** * @brief Class to read and write cluster files @@ -70,16 +53,19 @@ uint32_t number_of_clusters * * int32_t frame_number * uint32_t number_of_clusters - * int16_t x, int16_t y, int32_t data[9] x number_of_clusters + * int16_t x, int16_t y, int32_t data[9] * number_of_clusters * int32_t frame_number * uint32_t number_of_clusters * etc. 
*/ class ClusterFile { FILE *fp{}; - uint32_t m_num_left{}; - size_t m_chunk_size{}; - const std::string m_mode; + uint32_t m_num_left{}; /*Number of photons left in frame*/ + size_t m_chunk_size{}; /*Number of clusters to read at a time*/ + const std::string m_mode; /*Mode to open the file in*/ + std::optional m_roi; /*Region of interest, will be applied if set*/ + std::optional> m_noise_map; /*Noise map to cut photons, will be applied if set*/ + std::optional> m_gain_map; /*Gain map to apply to the clusters, will be applied if set*/ public: /** @@ -104,8 +90,6 @@ class ClusterFile { */ ClusterVector read_clusters(size_t n_clusters); - ClusterVector read_clusters(size_t n_clusters, ROI roi); - /** * @brief Read a single frame from the file and return the clusters. The * cluster vector will have the frame number set. @@ -117,29 +101,50 @@ class ClusterFile { void write_frame(const ClusterVector &clusters); - // Need to be migrated to support NDArray and return a ClusterVector - // std::vector - // read_cluster_with_cut(size_t n_clusters, double *noise_map, int nx, int ny); - /** * @brief Return the chunk size */ size_t chunk_size() const { return m_chunk_size; } + + /** + * @brief Set the region of interest to use when reading clusters. If set only clusters within + * the ROI will be read. + */ + void set_roi(ROI roi); + + /** + * @brief Set the noise map to use when reading clusters. If set clusters below the noise + * level will be discarded. Selection criteria one of: Central pixel above noise, highest + * 2x2 sum above 2 * noise, total sum above 3 * noise. + */ + void set_noise_map(const NDView noise_map); + + /** + * @brief Set the gain map to use when reading clusters. If set the gain map will be applied + * to the clusters that pass ROI and noise_map selection. + */ + void set_gain_map(const NDView gain_map); /** * @brief Close the file. 
If not closed the file will be closed in the destructor */ void close(); + + private: + ClusterVector read_clusters_with_cut(size_t n_clusters); + ClusterVector read_clusters_without_cut(size_t n_clusters); + ClusterVector read_frame_with_cut(); + ClusterVector read_frame_without_cut(); + bool is_selected(Cluster3x3 &cl); + Cluster3x3 read_one_cluster(); }; -int analyze_data(int32_t *data, int32_t *t2, int32_t *t3, char *quad, - double *eta2x, double *eta2y, double *eta3x, double *eta3y); -int analyze_cluster(Cluster3x3 &cl, int32_t *t2, int32_t *t3, char *quad, - double *eta2x, double *eta2y, double *eta3x, double *eta3y); - +//TODO! helper functions that doesn't really belong here NDArray calculate_eta2(ClusterVector &clusters); Eta2 calculate_eta2(Cluster3x3 &cl); Eta2 calculate_eta2(Cluster2x2 &cl); + + } // namespace aare diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp index 1c15a22..b91278c 100644 --- a/include/aare/ClusterVector.hpp +++ b/include/aare/ClusterVector.hpp @@ -8,6 +8,9 @@ #include +#include "aare/Cluster.hpp" +#include "aare/NDView.hpp" + namespace aare { /** @@ -265,6 +268,28 @@ template class ClusterVector { m_size = new_size; } + void apply_gain_map(const NDView gain_map){ + //in principle we need to know the size of the image for this lookup + //TODO! 
check orientations + std::array xcorr = {-1, 0, 1, -1, 0, 1, -1, 0, 1}; + std::array ycorr = {-1, -1, -1, 0, 0, 0, 1, 1, 1}; + for (size_t i=0; i(i); + + if (cl.x > 0 && cl.y > 0 && cl.x < gain_map.shape(1)-1 && cl.y < gain_map.shape(0)-1){ + for (size_t j=0; j<9; j++){ + size_t x = cl.x + xcorr[j]; + size_t y = cl.y + ycorr[j]; + cl.data[j] = static_cast(cl.data[j] * gain_map(y, x)); + } + }else{ + memset(cl.data, 0, 9*sizeof(T)); //clear edge clusters + } + + + } + } + private: void allocate_buffer(size_t new_capacity) { size_t num_bytes = item_size() * new_capacity; diff --git a/include/aare/defs.hpp b/include/aare/defs.hpp index 4559882..4d22bd4 100644 --- a/include/aare/defs.hpp +++ b/include/aare/defs.hpp @@ -1,11 +1,9 @@ #pragma once #include "aare/Dtype.hpp" -// #include "aare/utils/logger.hpp" #include #include - #include #include #include @@ -43,6 +41,7 @@ inline constexpr size_t bits_per_byte = 8; void assert_failed(const std::string &msg); + class DynamicCluster { public: int cluster_sizeX; @@ -215,6 +214,9 @@ struct ROI{ int64_t height() const { return ymax - ymin; } int64_t width() const { return xmax - xmin; } + bool contains(int64_t x, int64_t y) const { + return x >= xmin && x < xmax && y >= ymin && y < ymax; + } }; diff --git a/patches/libzmq_cmake_version.patch b/patches/libzmq_cmake_version.patch new file mode 100644 index 0000000..4e421d3 --- /dev/null +++ b/patches/libzmq_cmake_version.patch @@ -0,0 +1,18 @@ +diff --git a/CMakeLists.txt b/CMakeLists.txt +index dd3d8eb9..c0187747 100644 +--- a/CMakeLists.txt ++++ b/CMakeLists.txt +@@ -1,11 +1,8 @@ + # CMake build script for ZeroMQ + project(ZeroMQ) + +-if(${CMAKE_SYSTEM_NAME} STREQUAL Darwin) +- cmake_minimum_required(VERSION 3.0.2) +-else() +- cmake_minimum_required(VERSION 2.8.12) +-endif() ++cmake_minimum_required(VERSION 3.15) ++message(STATUS "Patched cmake version") + + include(CheckIncludeFiles) + include(CheckCCompilerFlag) diff --git a/pyproject.toml b/pyproject.toml index 
b9bf7d2..0b6d2af 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "scikit_build_core.build" [project] name = "aare" -version = "2025.3.18" +version = "2025.4.1" diff --git a/python/src/cluster_file.hpp b/python/src/cluster_file.hpp index f587443..ff46043 100644 --- a/python/src/cluster_file.hpp +++ b/python/src/cluster_file.hpp @@ -31,26 +31,22 @@ void define_cluster_file_io_bindings(py::module &m) { auto v = new ClusterVector(self.read_clusters(n_clusters)); return v; },py::return_value_policy::take_ownership) - .def("read_clusters", - [](ClusterFile &self, size_t n_clusters, ROI roi) { - auto v = new ClusterVector(self.read_clusters(n_clusters, roi)); - return v; - },py::return_value_policy::take_ownership) .def("read_frame", [](ClusterFile &self) { auto v = new ClusterVector(self.read_frame()); return v; }) + .def("set_roi", &ClusterFile::set_roi) + .def("set_noise_map", [](ClusterFile &self, py::array_t noise_map) { + auto view = make_view_2d(noise_map); + self.set_noise_map(view); + }) + .def("set_gain_map", [](ClusterFile &self, py::array_t gain_map) { + auto view = make_view_2d(gain_map); + self.set_gain_map(view); + }) + .def("close", &ClusterFile::close) .def("write_frame", &ClusterFile::write_frame) - // .def("read_cluster_with_cut", - // [](ClusterFile &self, size_t n_clusters, - // py::array_t noise_map, int nx, int ny) { - // auto view = make_view_2d(noise_map); - // auto *vec = - // new std::vector(self.read_cluster_with_cut( - // n_clusters, view.data(), nx, ny)); - // return return_vector(vec); - // }) .def("__enter__", [](ClusterFile &self) { return &self; }) .def("__exit__", [](ClusterFile &self, diff --git a/src/ClusterFile.cpp b/src/ClusterFile.cpp index 59b8bb8..f4ef0ae 100644 --- a/src/ClusterFile.cpp +++ b/src/ClusterFile.cpp @@ -31,6 +31,18 @@ ClusterFile::ClusterFile(const std::filesystem::path &fname, size_t chunk_size, } } +void ClusterFile::set_roi(ROI roi){ + m_roi = roi; +} + +void 
ClusterFile::set_noise_map(const NDView noise_map){ + m_noise_map = NDArray(noise_map); +} + +void ClusterFile::set_gain_map(const NDView gain_map){ + m_gain_map = NDArray(gain_map); +} + ClusterFile::~ClusterFile() { close(); } void ClusterFile::close() { @@ -48,14 +60,37 @@ void ClusterFile::write_frame(const ClusterVector &clusters) { !(clusters.cluster_size_y() == 3)) { throw std::runtime_error("Only 3x3 clusters are supported"); } + //First write the frame number - 4 bytes int32_t frame_number = clusters.frame_number(); - fwrite(&frame_number, sizeof(frame_number), 1, fp); + if(fwrite(&frame_number, sizeof(frame_number), 1, fp)!=1){ + throw std::runtime_error(LOCATION + "Could not write frame number"); + } + + //Then write the number of clusters - 4 bytes uint32_t n_clusters = clusters.size(); - fwrite(&n_clusters, sizeof(n_clusters), 1, fp); - fwrite(clusters.data(), clusters.item_size(), clusters.size(), fp); + if(fwrite(&n_clusters, sizeof(n_clusters), 1, fp)!=1){ + throw std::runtime_error(LOCATION + "Could not write number of clusters"); + } + + //Now write the clusters in the frame + if(fwrite(clusters.data(), clusters.item_size(), clusters.size(), fp)!=clusters.size()){ + throw std::runtime_error(LOCATION + "Could not write clusters"); + } } -ClusterVector ClusterFile::read_clusters(size_t n_clusters) { + +ClusterVector ClusterFile::read_clusters(size_t n_clusters){ + if (m_mode != "r") { + throw std::runtime_error("File not opened for reading"); + } + if (m_noise_map || m_roi){ + return read_clusters_with_cut(n_clusters); + }else{ + return read_clusters_without_cut(n_clusters); + } +} + +ClusterVector ClusterFile::read_clusters_without_cut(size_t n_clusters) { if (m_mode != "r") { throw std::runtime_error("File not opened for reading"); } @@ -86,6 +121,7 @@ ClusterVector ClusterFile::read_clusters(size_t n_clusters) { if (nph_read < n_clusters) { // keep on reading frames and photons until reaching n_clusters while (fread(&iframe, sizeof(iframe), 1, 
fp)) { + clusters.set_frame_number(iframe); // read number of clusters in frame if (fread(&nph, sizeof(nph), 1, fp)) { if (nph > (n_clusters - nph_read)) @@ -105,71 +141,112 @@ ClusterVector ClusterFile::read_clusters(size_t n_clusters) { // Resize the vector to the number of clusters. // No new allocation, only change bounds. clusters.resize(nph_read); + if(m_gain_map) + clusters.apply_gain_map(m_gain_map->view()); return clusters; } -ClusterVector ClusterFile::read_clusters(size_t n_clusters, ROI roi) { - if (m_mode != "r") { - throw std::runtime_error("File not opened for reading"); - } - + + +ClusterVector ClusterFile::read_clusters_with_cut(size_t n_clusters) { ClusterVector clusters(3,3); clusters.reserve(n_clusters); - - Cluster3x3 tmp; //this would break if the cluster size changes - // if there are photons left from previous frame read them first if (m_num_left) { - size_t nph_read = 0; - while(nph_read < m_num_left && clusters.size() < n_clusters){ - fread(&tmp, sizeof(tmp), 1, fp); - nph_read++; - if(tmp.x >= roi.xmin && tmp.x <= roi.xmax && tmp.y >= roi.ymin && tmp.y <= roi.ymax){ - clusters.push_back(tmp.x, tmp.y, reinterpret_cast(tmp.data)); + while(m_num_left && clusters.size() < n_clusters){ + Cluster3x3 c = read_one_cluster(); + if(is_selected(c)){ + clusters.push_back(c.x, c.y, reinterpret_cast(c.data)); } } - m_num_left -= nph_read; } - + // we did not have enough clusters left in the previous frame + // keep on reading frames until reaching n_clusters if (clusters.size() < n_clusters) { + // sanity check if (m_num_left) { throw std::runtime_error(LOCATION + "Entered second loop with clusters left\n"); } - // we did not have enough clusters left in the previous frame - // keep on reading frames until reaching n_clusters - + int32_t frame_number = 0; // frame number needs to be 4 bytes! 
while (fread(&frame_number, sizeof(frame_number), 1, fp)) { - uint32_t nph_in_frame = 0; //number of photons we can read until next frame number - size_t nph_read = 0; //number of photons read in this frame - - if (fread(&nph_in_frame, sizeof(nph_in_frame), 1, fp)) { - if(frame_number != 1){ - throw std::runtime_error("Frame number is not 1"); - } - - while(nph_read < nph_in_frame && clusters.size() < n_clusters){ - fread(&tmp, sizeof(tmp), 1, fp); - nph_read++; - if(tmp.x >= roi.xmin && tmp.x <= roi.xmax && tmp.y >= roi.ymin && tmp.y <= roi.ymax){ - clusters.push_back(tmp.x, tmp.y, reinterpret_cast(tmp.data)); + if (fread(&m_num_left, sizeof(m_num_left), 1, fp)) { + clusters.set_frame_number(frame_number); //cluster vector will hold the last frame number + while(m_num_left && clusters.size() < n_clusters){ + Cluster3x3 c = read_one_cluster(); + if(is_selected(c)){ + clusters.push_back(c.x, c.y, reinterpret_cast(c.data)); } } - m_num_left = nph_in_frame - nph_read; } - if (clusters.size() >= n_clusters){ + // we have enough clusters, break out of the outer while loop + if (clusters.size() >= n_clusters) break; - } } } + if(m_gain_map) + clusters.apply_gain_map(m_gain_map->view()); + return clusters; } -ClusterVector ClusterFile::read_frame() { +Cluster3x3 ClusterFile::read_one_cluster(){ + Cluster3x3 c; + auto rc = fread(&c, sizeof(c), 1, fp); + if (rc != 1) { + throw std::runtime_error(LOCATION + "Could not read cluster"); + } + --m_num_left; + return c; +} + +ClusterVector ClusterFile::read_frame(){ + if (m_mode != "r") { + throw std::runtime_error(LOCATION + "File not opened for reading"); + } + if (m_noise_map || m_roi){ + return read_frame_with_cut(); + }else{ + return read_frame_without_cut(); + } +} + +ClusterVector ClusterFile::read_frame_without_cut() { + if (m_mode != "r") { + throw std::runtime_error("File not opened for reading"); + } + if (m_num_left) { + throw std::runtime_error( + "There are still photons left in the last frame"); + } + int32_t 
frame_number; + if (fread(&frame_number, sizeof(frame_number), 1, fp) != 1) { + throw std::runtime_error(LOCATION + "Could not read frame number"); + } + + int32_t n_clusters; // Saved as 32bit integer in the cluster file + if (fread(&n_clusters, sizeof(n_clusters), 1, fp) != 1) { + throw std::runtime_error(LOCATION + "Could not read number of clusters"); + } + + ClusterVector clusters(3, 3, n_clusters); + clusters.set_frame_number(frame_number); + + if (fread(clusters.data(), clusters.item_size(), n_clusters, fp) != + static_cast(n_clusters)) { + throw std::runtime_error(LOCATION + "Could not read clusters"); + } + clusters.resize(n_clusters); + if (m_gain_map) + clusters.apply_gain_map(m_gain_map->view()); + return clusters; +} + +ClusterVector ClusterFile::read_frame_with_cut() { if (m_mode != "r") { throw std::runtime_error("File not opened for reading"); } @@ -182,149 +259,47 @@ ClusterVector ClusterFile::read_frame() { throw std::runtime_error("Could not read frame number"); } - int32_t n_clusters; // Saved as 32bit integer in the cluster file - if (fread(&n_clusters, sizeof(n_clusters), 1, fp) != 1) { + + if (fread(&m_num_left, sizeof(m_num_left), 1, fp) != 1) { throw std::runtime_error("Could not read number of clusters"); } - // std::vector clusters(n_clusters); - ClusterVector clusters(3, 3, n_clusters); + + ClusterVector clusters(3, 3); + clusters.reserve(m_num_left); clusters.set_frame_number(frame_number); - - if (fread(clusters.data(), clusters.item_size(), n_clusters, fp) != - static_cast(n_clusters)) { - throw std::runtime_error("Could not read clusters"); + while(m_num_left){ + Cluster3x3 c = read_one_cluster(); + if(is_selected(c)){ + clusters.push_back(c.x, c.y, reinterpret_cast(c.data)); + } } - clusters.resize(n_clusters); + if (m_gain_map) + clusters.apply_gain_map(m_gain_map->view()); return clusters; } -// std::vector ClusterFile::read_cluster_with_cut(size_t n_clusters, -// double *noise_map, -// int nx, int ny) { -// if (m_mode != "r") { 
-// throw std::runtime_error("File not opened for reading"); -// } -// std::vector clusters(n_clusters); -// // size_t read_clusters_with_cut(FILE *fp, size_t n_clusters, Cluster *buf, -// // uint32_t *n_left, double *noise_map, int -// // nx, int ny) { -// int iframe = 0; -// // uint32_t nph = *n_left; -// uint32_t nph = m_num_left; -// // uint32_t nn = *n_left; -// uint32_t nn = m_num_left; -// size_t nph_read = 0; -// int32_t t2max, tot1; -// int32_t tot3; -// // Cluster *ptr = buf; -// Cluster3x3 *ptr = clusters.data(); -// int good = 1; -// double noise; -// // read photons left from previous frame -// if (noise_map) -// printf("Using noise map\n"); +bool ClusterFile::is_selected(Cluster3x3 &cl) { + //Should fail fast + if (m_roi) { + if (!(m_roi->contains(cl.x, cl.y))) { + return false; + } + } + if (m_noise_map){ + int32_t sum_1x1 = cl.data[4]; // central pixel + int32_t sum_2x2 = cl.sum_2x2(); // highest sum of 2x2 subclusters + int32_t sum_3x3 = cl.sum(); // sum of all pixels -// if (nph) { -// if (nph > n_clusters) { -// // if we have more photons left in the frame then photons to -// // read we read directly the requested number -// nn = n_clusters; -// } else { -// nn = nph; -// } -// for (size_t iph = 0; iph < nn; iph++) { -// // read photons 1 by 1 -// size_t n_read = -// fread(reinterpret_cast(ptr), sizeof(Cluster3x3), 1, fp); -// if (n_read != 1) { -// clusters.resize(nph_read); -// return clusters; -// } -// // TODO! 
error handling on read -// good = 1; -// if (noise_map) { -// if (ptr->x >= 0 && ptr->x < nx && ptr->y >= 0 && ptr->y < ny) { -// tot1 = ptr->data[4]; -// analyze_cluster(*ptr, &t2max, &tot3, NULL, NULL, NULL, NULL, -// NULL); -// noise = noise_map[ptr->y * nx + ptr->x]; -// if (tot1 > noise || t2max > 2 * noise || tot3 > 3 * noise) { -// ; -// } else { -// good = 0; -// printf("%d %d %f %d %d %d\n", ptr->x, ptr->y, noise, -// tot1, t2max, tot3); -// } -// } else { -// printf("Bad pixel number %d %d\n", ptr->x, ptr->y); -// good = 0; -// } -// } -// if (good) { -// ptr++; -// nph_read++; -// } -// (m_num_left)--; -// if (nph_read >= n_clusters) -// break; -// } -// } -// if (nph_read < n_clusters) { -// // // keep on reading frames and photons until reaching -// // n_clusters -// while (fread(&iframe, sizeof(iframe), 1, fp)) { -// // // printf("%d\n",nph_read); - -// if (fread(&nph, sizeof(nph), 1, fp)) { -// // // printf("** %d\n",nph); -// m_num_left = nph; -// for (size_t iph = 0; iph < nph; iph++) { -// // // read photons 1 by 1 -// size_t n_read = fread(reinterpret_cast(ptr), -// sizeof(Cluster3x3), 1, fp); -// if (n_read != 1) { -// clusters.resize(nph_read); -// return clusters; -// // return nph_read; -// } -// good = 1; -// if (noise_map) { -// if (ptr->x >= 0 && ptr->x < nx && ptr->y >= 0 && -// ptr->y < ny) { -// tot1 = ptr->data[4]; -// analyze_cluster(*ptr, &t2max, &tot3, NULL, NULL, -// NULL, NULL, NULL); -// // noise = noise_map[ptr->y * nx + ptr->x]; -// noise = noise_map[ptr->y + ny * ptr->x]; -// if (tot1 > noise || t2max > 2 * noise || -// tot3 > 3 * noise) { -// ; -// } else -// good = 0; -// } else { -// printf("Bad pixel number %d %d\n", ptr->x, ptr->y); -// good = 0; -// } -// } -// if (good) { -// ptr++; -// nph_read++; -// } -// (m_num_left)--; -// if (nph_read >= n_clusters) -// break; -// } -// } -// if (nph_read >= n_clusters) -// break; -// } -// } -// // printf("%d\n",nph_read); -// clusters.resize(nph_read); -// return clusters; -// } 
+ auto noise = (*m_noise_map)(cl.y, cl.x); //TODO! check if this is correct + if (sum_1x1 <= noise || sum_2x2 <= 2 * noise || sum_3x3 <= 3 * noise) { + return false; + } + } + //we passed all checks + return true; +} NDArray calculate_eta2(ClusterVector &clusters) { //TOTO! make work with 2x2 clusters @@ -419,111 +394,4 @@ Eta2 calculate_eta2(Cluster2x2 &cl) { } - -int analyze_cluster(Cluster3x3 &cl, int32_t *t2, int32_t *t3, char *quad, - double *eta2x, double *eta2y, double *eta3x, - double *eta3y) { - - return analyze_data(cl.data, t2, t3, quad, eta2x, eta2y, eta3x, eta3y); -} - -int analyze_data(int32_t *data, int32_t *t2, int32_t *t3, char *quad, - double *eta2x, double *eta2y, double *eta3x, double *eta3y) { - - int ok = 1; - - int32_t tot2[4]; - int32_t t2max = 0; - char c = 0; - int32_t val, tot3; - - tot3 = 0; - for (int i = 0; i < 4; i++) - tot2[i] = 0; - - for (int ix = 0; ix < 3; ix++) { - for (int iy = 0; iy < 3; iy++) { - val = data[iy * 3 + ix]; - // printf ("%d ",data[iy * 3 + ix]); - tot3 += val; - if (ix <= 1 && iy <= 1) - tot2[cBottomLeft] += val; - if (ix >= 1 && iy <= 1) - tot2[cBottomRight] += val; - if (ix <= 1 && iy >= 1) - tot2[cTopLeft] += val; - if (ix >= 1 && iy >= 1) - tot2[cTopRight] += val; - } - // printf ("\n"); - } - // printf ("\n"); - - if (t2 || quad) { - - t2max = tot2[0]; - c = cBottomLeft; - for (int i = 1; i < 4; i++) { - if (tot2[i] > t2max) { - t2max = tot2[i]; - c = i; - } - } - // printf("*** %d %d %d %d -- - // %d\n",tot2[0],tot2[1],tot2[2],tot2[3],t2max); - if (quad) - *quad = c; - if (t2) - *t2 = t2max; - } - - if (t3) - *t3 = tot3; - - if (eta2x || eta2y) { - if (eta2x) - *eta2x = 0; - if (eta2y) - *eta2y = 0; - switch (c) { - case cBottomLeft: - if (eta2x && (data[3] + data[4]) != 0) - *eta2x = static_cast(data[4]) / (data[3] + data[4]); - if (eta2y && (data[1] + data[4]) != 0) - *eta2y = static_cast(data[4]) / (data[1] + data[4]); - break; - case cBottomRight: - if (eta2x && (data[2] + data[5]) != 0) - *eta2x = 
static_cast(data[5]) / (data[4] + data[5]); - if (eta2y && (data[1] + data[4]) != 0) - *eta2y = static_cast(data[4]) / (data[1] + data[4]); - break; - case cTopLeft: - if (eta2x && (data[7] + data[4]) != 0) - *eta2x = static_cast(data[4]) / (data[3] + data[4]); - if (eta2y && (data[7] + data[4]) != 0) - *eta2y = static_cast(data[7]) / (data[7] + data[4]); - break; - case cTopRight: - if (eta2x && t2max != 0) - *eta2x = static_cast(data[5]) / (data[5] + data[4]); - if (eta2y && t2max != 0) - *eta2y = static_cast(data[7]) / (data[7] + data[4]); - break; - default:; - } - } - - if (eta3x || eta3y) { - if (eta3x && (data[3] + data[4] + data[5]) != 0) - *eta3x = static_cast(-data[3] + data[3 + 2]) / - (data[3] + data[4] + data[5]); - if (eta3y && (data[1] + data[4] + data[7]) != 0) - *eta3y = static_cast(-data[1] + data[2 * 3 + 1]) / - (data[1] + data[4] + data[7]); - } - - return ok; -} - } // namespace aare \ No newline at end of file diff --git a/src/ClusterFile.test.cpp b/src/ClusterFile.test.cpp new file mode 100644 index 0000000..a0eed04 --- /dev/null +++ b/src/ClusterFile.test.cpp @@ -0,0 +1,80 @@ +#include "aare/ClusterFile.hpp" +#include "test_config.hpp" + + +#include "aare/defs.hpp" +#include +#include + + + + +using aare::ClusterFile; + +TEST_CASE("Read one frame from a a cluster file", "[.integration]") { + //We know that the frame has 97 clusters + auto fpath = test_data_path() / "clusters" / "single_frame_97_clustrers.clust"; + REQUIRE(std::filesystem::exists(fpath)); + + ClusterFile f(fpath); + auto clusters = f.read_frame(); + REQUIRE(clusters.size() == 97); + REQUIRE(clusters.frame_number() == 135); +} + +TEST_CASE("Read one frame using ROI", "[.integration]") { + //We know that the frame has 97 clusters + auto fpath = test_data_path() / "clusters" / "single_frame_97_clustrers.clust"; + REQUIRE(std::filesystem::exists(fpath)); + + ClusterFile f(fpath); + aare::ROI roi; + roi.xmin = 0; + roi.xmax = 50; + roi.ymin = 200; + roi.ymax = 249; + 
f.set_roi(roi); + auto clusters = f.read_frame(); + REQUIRE(clusters.size() == 49); + REQUIRE(clusters.frame_number() == 135); + + //Check that all clusters are within the ROI + for (size_t i = 0; i < clusters.size(); i++) { + auto c = clusters.at(i); + REQUIRE(c.x >= roi.xmin); + REQUIRE(c.x <= roi.xmax); + REQUIRE(c.y >= roi.ymin); + REQUIRE(c.y <= roi.ymax); + } + +} + + +TEST_CASE("Read clusters from single frame file", "[.integration]") { + + auto fpath = test_data_path() / "clusters" / "single_frame_97_clustrers.clust"; + REQUIRE(std::filesystem::exists(fpath)); + + SECTION("Read fewer clusters than available") { + ClusterFile f(fpath); + auto clusters = f.read_clusters(50); + REQUIRE(clusters.size() == 50); + REQUIRE(clusters.frame_number() == 135); + } + SECTION("Read more clusters than available") { + ClusterFile f(fpath); + // 100 is the maximum number of clusters read + auto clusters = f.read_clusters(100); + REQUIRE(clusters.size() == 97); + REQUIRE(clusters.frame_number() == 135); + } + SECTION("Read all clusters") { + ClusterFile f(fpath); + auto clusters = f.read_clusters(97); + REQUIRE(clusters.size() == 97); + REQUIRE(clusters.frame_number() == 135); + } + + + +} diff --git a/tests/test_config.hpp.in b/tests/test_config.hpp.in index 62993b7..e314b8f 100644 --- a/tests/test_config.hpp.in +++ b/tests/test_config.hpp.in @@ -7,6 +7,6 @@ inline auto test_data_path(){ if(const char* env_p = std::getenv("AARE_TEST_DATA")){ return std::filesystem::path(env_p); }else{ - throw std::runtime_error("AARE_TEST_DATA_PATH not set"); + throw std::runtime_error("Path to test data: $AARE_TEST_DATA not set"); } } \ No newline at end of file From 8cad7a50a6ea69ffa0058c3134afcbc1753b2274 Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Tue, 1 Apr 2025 15:00:03 +0200 Subject: [PATCH 10/23] fixed py --- python/src/cluster_file.hpp | 5 ----- 1 file changed, 5 deletions(-) diff --git a/python/src/cluster_file.hpp b/python/src/cluster_file.hpp index b807712..ff46043 100644 --- 
a/python/src/cluster_file.hpp +++ b/python/src/cluster_file.hpp @@ -31,11 +31,6 @@ void define_cluster_file_io_bindings(py::module &m) { auto v = new ClusterVector(self.read_clusters(n_clusters)); return v; },py::return_value_policy::take_ownership) - .def("read_clusters", - [](ClusterFile &self, size_t n_clusters, ROI roi) { - auto v = new ClusterVector(self.read_clusters(n_clusters, roi)); - return v; - },py::return_value_policy::take_ownership) .def("read_frame", [](ClusterFile &self) { auto v = new ClusterVector(self.read_frame()); From e1533282f1103f24c427d89bc94dea083ec6b776 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Tue, 1 Apr 2025 15:15:54 +0200 Subject: [PATCH 11/23] Cluster cuts (#146) Co-authored-by: Patrick Co-authored-by: JulianHeymes Co-authored-by: Dhanya Thattil Co-authored-by: Xiangyu Xie <45243914+xiangyuxie@users.noreply.github.com> Co-authored-by: xiangyu.xie --- CMakeLists.txt | 46 ++- conda-recipe/meta.yaml | 3 +- include/aare/Cluster.hpp | 36 +++ include/aare/ClusterFile.hpp | 73 ++--- include/aare/ClusterVector.hpp | 25 ++ include/aare/defs.hpp | 6 +- patches/libzmq_cmake_version.patch | 18 ++ pyproject.toml | 3 +- python/src/cluster_file.hpp | 24 +- src/ClusterFile.cpp | 457 ++++++++++------------------- src/ClusterFile.test.cpp | 80 +++++ tests/test_config.hpp.in | 2 +- 12 files changed, 410 insertions(+), 363 deletions(-) create mode 100644 include/aare/Cluster.hpp create mode 100644 patches/libzmq_cmake_version.patch create mode 100644 src/ClusterFile.test.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 4772f0b..804b2f6 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -81,15 +81,30 @@ set(CMAKE_EXPORT_COMPILE_COMMANDS ON) if(AARE_FETCH_LMFIT) #TODO! Should we fetch lmfit from the web or inlcude a tar.gz in the repo? 
- set(lmfit_patch git apply ${CMAKE_CURRENT_SOURCE_DIR}/patches/lmfit.patch) - FetchContent_Declare( - lmfit - GIT_REPOSITORY https://jugit.fz-juelich.de/mlz/lmfit.git - GIT_TAG main - PATCH_COMMAND ${lmfit_patch} - UPDATE_DISCONNECTED 1 - EXCLUDE_FROM_ALL 1 - ) + set(LMFIT_PATCH_COMMAND git apply ${CMAKE_CURRENT_SOURCE_DIR}/patches/lmfit.patch) + + # For cmake < 3.28 we can't supply EXCLUDE_FROM_ALL to FetchContent_Declare + # so we need this workaround + if (${CMAKE_VERSION} VERSION_LESS "3.28") + FetchContent_Declare( + lmfit + GIT_REPOSITORY https://jugit.fz-juelich.de/mlz/lmfit.git + GIT_TAG main + PATCH_COMMAND ${LMFIT_PATCH_COMMAND} + UPDATE_DISCONNECTED 1 + ) + else() + FetchContent_Declare( + lmfit + GIT_REPOSITORY https://jugit.fz-juelich.de/mlz/lmfit.git + GIT_TAG main + PATCH_COMMAND ${LMFIT_PATCH_COMMAND} + UPDATE_DISCONNECTED 1 + EXCLUDE_FROM_ALL 1 + ) + endif() + + #Disable what we don't need from lmfit set(BUILD_TESTING OFF CACHE BOOL "") set(LMFIT_CPPTEST OFF CACHE BOOL "") @@ -97,8 +112,15 @@ if(AARE_FETCH_LMFIT) set(LMFIT_CPPTEST OFF CACHE BOOL "") set(BUILD_SHARED_LIBS OFF CACHE BOOL "") + if (${CMAKE_VERSION} VERSION_LESS "3.28") + if(NOT lmfit_POPULATED) + FetchContent_Populate(lmfit) + add_subdirectory(${lmfit_SOURCE_DIR} ${lmfit_BINARY_DIR} EXCLUDE_FROM_ALL) + endif() + else() + FetchContent_MakeAvailable(lmfit) + endif() - FetchContent_MakeAvailable(lmfit) set_property(TARGET lmfit PROPERTY POSITION_INDEPENDENT_CODE ON) else() find_package(lmfit REQUIRED) @@ -111,10 +133,13 @@ if(AARE_FETCH_ZMQ) if (${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.30") cmake_policy(SET CMP0169 OLD) endif() + set(ZMQ_PATCH_COMMAND git apply ${CMAKE_CURRENT_SOURCE_DIR}/patches/libzmq_cmake_version.patch) FetchContent_Declare( libzmq GIT_REPOSITORY https://github.com/zeromq/libzmq.git GIT_TAG v4.3.4 + PATCH_COMMAND ${ZMQ_PATCH_COMMAND} + UPDATE_DISCONNECTED 1 ) # Disable unwanted options from libzmq set(BUILD_TESTS OFF CACHE BOOL "Switch off libzmq test build") @@ 
-396,6 +421,7 @@ if(AARE_TESTS) ${CMAKE_CURRENT_SOURCE_DIR}/src/NDView.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFinder.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterVector.test.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFile.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Pedestal.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyHelpers.test.cpp diff --git a/conda-recipe/meta.yaml b/conda-recipe/meta.yaml index 120854b..560e831 100644 --- a/conda-recipe/meta.yaml +++ b/conda-recipe/meta.yaml @@ -1,6 +1,7 @@ package: name: aare - version: 2025.3.18 #TODO! how to not duplicate this? + version: 2025.4.1 #TODO! how to not duplicate this? + diff --git a/include/aare/Cluster.hpp b/include/aare/Cluster.hpp new file mode 100644 index 0000000..48f9ef0 --- /dev/null +++ b/include/aare/Cluster.hpp @@ -0,0 +1,36 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace aare { + +//TODO! Template this? +struct Cluster3x3 { + int16_t x; + int16_t y; + int32_t data[9]; + + int32_t sum_2x2() const{ + std::array total; + total[0] = data[0] + data[1] + data[3] + data[4]; + total[1] = data[1] + data[2] + data[4] + data[5]; + total[2] = data[3] + data[4] + data[6] + data[7]; + total[3] = data[4] + data[5] + data[7] + data[8]; + return *std::max_element(total.begin(), total.end()); + } + + int32_t sum() const{ + return std::accumulate(data, data + 9, 0); + } +}; +struct Cluster2x2 { + int16_t x; + int16_t y; + int32_t data[4]; +}; + +} // namespace aare \ No newline at end of file diff --git a/include/aare/ClusterFile.hpp b/include/aare/ClusterFile.hpp index 5bea342..bea9f48 100644 --- a/include/aare/ClusterFile.hpp +++ b/include/aare/ClusterFile.hpp @@ -1,25 +1,17 @@ #pragma once +#include "aare/Cluster.hpp" #include "aare/ClusterVector.hpp" #include "aare/NDArray.hpp" #include "aare/defs.hpp" #include #include +#include namespace aare { -//TODO! Template this? 
-struct Cluster3x3 { - int16_t x; - int16_t y; - int32_t data[9]; -}; -struct Cluster2x2 { - int16_t x; - int16_t y; - int32_t data[4]; -}; +//TODO! Legacy enums, migrate to enum class typedef enum { cBottomLeft = 0, cBottomRight = 1, @@ -53,15 +45,7 @@ struct ClusterAnalysis { double etay; }; -/* -Binary cluster file. Expects data to be layed out as: -int32_t frame_number -uint32_t number_of_clusters -int16_t x, int16_t y, int32_t data[9] x number_of_clusters -int32_t frame_number -uint32_t number_of_clusters -.... -*/ + /** * @brief Class to read and write cluster files @@ -70,16 +54,19 @@ uint32_t number_of_clusters * * int32_t frame_number * uint32_t number_of_clusters - * int16_t x, int16_t y, int32_t data[9] x number_of_clusters + * int16_t x, int16_t y, int32_t data[9] * number_of_clusters * int32_t frame_number * uint32_t number_of_clusters * etc. */ class ClusterFile { FILE *fp{}; - uint32_t m_num_left{}; - size_t m_chunk_size{}; - const std::string m_mode; + uint32_t m_num_left{}; /*Number of photons left in frame*/ + size_t m_chunk_size{}; /*Number of clusters to read at a time*/ + const std::string m_mode; /*Mode to open the file in*/ + std::optional m_roi; /*Region of interest, will be applied if set*/ + std::optional> m_noise_map; /*Noise map to cut photons, will be applied if set*/ + std::optional> m_gain_map; /*Gain map to apply to the clusters, will be applied if set*/ public: /** @@ -117,29 +104,49 @@ class ClusterFile { void write_frame(const ClusterVector &clusters); - // Need to be migrated to support NDArray and return a ClusterVector - // std::vector - // read_cluster_with_cut(size_t n_clusters, double *noise_map, int nx, int ny); - /** * @brief Return the chunk size */ size_t chunk_size() const { return m_chunk_size; } + + /** + * @brief Set the region of interest to use when reading clusters. If set only clusters within + * the ROI will be read. + */ + void set_roi(ROI roi); + + /** + * @brief Set the noise map to use when reading clusters. 
If set clusters below the noise + * level will be discarded. Selection criteria one of: Central pixel above noise, highest + * 2x2 sum above 2 * noise, total sum above 3 * noise. + */ + void set_noise_map(const NDView noise_map); + + /** + * @brief Set the gain map to use when reading clusters. If set the gain map will be applied + * to the clusters that pass ROI and noise_map selection. + */ + void set_gain_map(const NDView gain_map); /** * @brief Close the file. If not closed the file will be closed in the destructor */ void close(); + + private: + ClusterVector read_clusters_with_cut(size_t n_clusters); + ClusterVector read_clusters_without_cut(size_t n_clusters); + ClusterVector read_frame_with_cut(); + ClusterVector read_frame_without_cut(); + bool is_selected(Cluster3x3 &cl); + Cluster3x3 read_one_cluster(); }; -int analyze_data(int32_t *data, int32_t *t2, int32_t *t3, char *quad, - double *eta2x, double *eta2y, double *eta3x, double *eta3y); -int analyze_cluster(Cluster3x3 &cl, int32_t *t2, int32_t *t3, char *quad, - double *eta2x, double *eta2y, double *eta3x, double *eta3y); - +//TODO! helper functions that doesn't really belong here NDArray calculate_eta2(ClusterVector &clusters); Eta2 calculate_eta2(Cluster3x3 &cl); Eta2 calculate_eta2(Cluster2x2 &cl); + } // namespace aare diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp index 1c15a22..b91278c 100644 --- a/include/aare/ClusterVector.hpp +++ b/include/aare/ClusterVector.hpp @@ -8,6 +8,9 @@ #include +#include "aare/Cluster.hpp" +#include "aare/NDView.hpp" + namespace aare { /** @@ -265,6 +268,28 @@ template class ClusterVector { m_size = new_size; } + void apply_gain_map(const NDView gain_map){ + //in principle we need to know the size of the image for this lookup + //TODO! 
check orientations + std::array xcorr = {-1, 0, 1, -1, 0, 1, -1, 0, 1}; + std::array ycorr = {-1, -1, -1, 0, 0, 0, 1, 1, 1}; + for (size_t i=0; i(i); + + if (cl.x > 0 && cl.y > 0 && cl.x < gain_map.shape(1)-1 && cl.y < gain_map.shape(0)-1){ + for (size_t j=0; j<9; j++){ + size_t x = cl.x + xcorr[j]; + size_t y = cl.y + ycorr[j]; + cl.data[j] = static_cast(cl.data[j] * gain_map(y, x)); + } + }else{ + memset(cl.data, 0, 9*sizeof(T)); //clear edge clusters + } + + + } + } + private: void allocate_buffer(size_t new_capacity) { size_t num_bytes = item_size() * new_capacity; diff --git a/include/aare/defs.hpp b/include/aare/defs.hpp index 4559882..4d22bd4 100644 --- a/include/aare/defs.hpp +++ b/include/aare/defs.hpp @@ -1,11 +1,9 @@ #pragma once #include "aare/Dtype.hpp" -// #include "aare/utils/logger.hpp" #include #include - #include #include #include @@ -43,6 +41,7 @@ inline constexpr size_t bits_per_byte = 8; void assert_failed(const std::string &msg); + class DynamicCluster { public: int cluster_sizeX; @@ -215,6 +214,9 @@ struct ROI{ int64_t height() const { return ymax - ymin; } int64_t width() const { return xmax - xmin; } + bool contains(int64_t x, int64_t y) const { + return x >= xmin && x < xmax && y >= ymin && y < ymax; + } }; diff --git a/patches/libzmq_cmake_version.patch b/patches/libzmq_cmake_version.patch new file mode 100644 index 0000000..4e421d3 --- /dev/null +++ b/patches/libzmq_cmake_version.patch @@ -0,0 +1,18 @@ +diff --git a/CMakeLists.txt b/CMakeLists.txt +index dd3d8eb9..c0187747 100644 +--- a/CMakeLists.txt ++++ b/CMakeLists.txt +@@ -1,11 +1,8 @@ + # CMake build script for ZeroMQ + project(ZeroMQ) + +-if(${CMAKE_SYSTEM_NAME} STREQUAL Darwin) +- cmake_minimum_required(VERSION 3.0.2) +-else() +- cmake_minimum_required(VERSION 2.8.12) +-endif() ++cmake_minimum_required(VERSION 3.15) ++message(STATUS "Patched cmake version") + + include(CheckIncludeFiles) + include(CheckCCompilerFlag) diff --git a/pyproject.toml b/pyproject.toml index 
b9bf7d2..60128c9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,8 @@ build-backend = "scikit_build_core.build" [project] name = "aare" -version = "2025.3.18" +version = "2025.4.1" + diff --git a/python/src/cluster_file.hpp b/python/src/cluster_file.hpp index f587443..ff46043 100644 --- a/python/src/cluster_file.hpp +++ b/python/src/cluster_file.hpp @@ -31,26 +31,22 @@ void define_cluster_file_io_bindings(py::module &m) { auto v = new ClusterVector(self.read_clusters(n_clusters)); return v; },py::return_value_policy::take_ownership) - .def("read_clusters", - [](ClusterFile &self, size_t n_clusters, ROI roi) { - auto v = new ClusterVector(self.read_clusters(n_clusters, roi)); - return v; - },py::return_value_policy::take_ownership) .def("read_frame", [](ClusterFile &self) { auto v = new ClusterVector(self.read_frame()); return v; }) + .def("set_roi", &ClusterFile::set_roi) + .def("set_noise_map", [](ClusterFile &self, py::array_t noise_map) { + auto view = make_view_2d(noise_map); + self.set_noise_map(view); + }) + .def("set_gain_map", [](ClusterFile &self, py::array_t gain_map) { + auto view = make_view_2d(gain_map); + self.set_gain_map(view); + }) + .def("close", &ClusterFile::close) .def("write_frame", &ClusterFile::write_frame) - // .def("read_cluster_with_cut", - // [](ClusterFile &self, size_t n_clusters, - // py::array_t noise_map, int nx, int ny) { - // auto view = make_view_2d(noise_map); - // auto *vec = - // new std::vector(self.read_cluster_with_cut( - // n_clusters, view.data(), nx, ny)); - // return return_vector(vec); - // }) .def("__enter__", [](ClusterFile &self) { return &self; }) .def("__exit__", [](ClusterFile &self, diff --git a/src/ClusterFile.cpp b/src/ClusterFile.cpp index 2e23e09..f77ac92 100644 --- a/src/ClusterFile.cpp +++ b/src/ClusterFile.cpp @@ -31,6 +31,18 @@ ClusterFile::ClusterFile(const std::filesystem::path &fname, size_t chunk_size, } } +void ClusterFile::set_roi(ROI roi){ + m_roi = roi; +} + +void 
ClusterFile::set_noise_map(const NDView noise_map){ + m_noise_map = NDArray(noise_map); +} + +void ClusterFile::set_gain_map(const NDView gain_map){ + m_gain_map = NDArray(gain_map); +} + ClusterFile::~ClusterFile() { close(); } void ClusterFile::close() { @@ -48,14 +60,37 @@ void ClusterFile::write_frame(const ClusterVector &clusters) { !(clusters.cluster_size_y() == 3)) { throw std::runtime_error("Only 3x3 clusters are supported"); } + //First write the frame number - 4 bytes int32_t frame_number = clusters.frame_number(); - fwrite(&frame_number, sizeof(frame_number), 1, fp); + if(fwrite(&frame_number, sizeof(frame_number), 1, fp)!=1){ + throw std::runtime_error(LOCATION + "Could not write frame number"); + } + + //Then write the number of clusters - 4 bytes uint32_t n_clusters = clusters.size(); - fwrite(&n_clusters, sizeof(n_clusters), 1, fp); - fwrite(clusters.data(), clusters.item_size(), clusters.size(), fp); + if(fwrite(&n_clusters, sizeof(n_clusters), 1, fp)!=1){ + throw std::runtime_error(LOCATION + "Could not write number of clusters"); + } + + //Now write the clusters in the frame + if(fwrite(clusters.data(), clusters.item_size(), clusters.size(), fp)!=clusters.size()){ + throw std::runtime_error(LOCATION + "Could not write clusters"); + } } -ClusterVector ClusterFile::read_clusters(size_t n_clusters) { + +ClusterVector ClusterFile::read_clusters(size_t n_clusters){ + if (m_mode != "r") { + throw std::runtime_error("File not opened for reading"); + } + if (m_noise_map || m_roi){ + return read_clusters_with_cut(n_clusters); + }else{ + return read_clusters_without_cut(n_clusters); + } +} + +ClusterVector ClusterFile::read_clusters_without_cut(size_t n_clusters) { if (m_mode != "r") { throw std::runtime_error("File not opened for reading"); } @@ -86,6 +121,7 @@ ClusterVector ClusterFile::read_clusters(size_t n_clusters) { if (nph_read < n_clusters) { // keep on reading frames and photons until reaching n_clusters while (fread(&iframe, sizeof(iframe), 1, 
fp)) { + clusters.set_frame_number(iframe); // read number of clusters in frame if (fread(&nph, sizeof(nph), 1, fp)) { if (nph > (n_clusters - nph_read)) @@ -105,83 +141,111 @@ ClusterVector ClusterFile::read_clusters(size_t n_clusters) { // Resize the vector to the number of clusters. // No new allocation, only change bounds. clusters.resize(nph_read); + if(m_gain_map) + clusters.apply_gain_map(m_gain_map->view()); return clusters; } -ClusterVector ClusterFile::read_clusters(size_t n_clusters, ROI roi) { - if (m_mode != "r") { - throw std::runtime_error("File not opened for reading"); - } - + +ClusterVector ClusterFile::read_clusters_with_cut(size_t n_clusters) { ClusterVector clusters(3,3); clusters.reserve(n_clusters); - int32_t iframe = 0; // frame number needs to be 4 bytes! - size_t nph_read = 0; - uint32_t nn = m_num_left; - uint32_t nph = m_num_left; // number of clusters in frame needs to be 4 - - // auto buf = reinterpret_cast(clusters.data()); - // auto buf = clusters.data(); - - Cluster3x3 tmp; //this would break if the cluster size changes - // if there are photons left from previous frame read them first - if (nph) { - if (nph > n_clusters) { - // if we have more photons left in the frame then photons to read we - // read directly the requested number - nn = n_clusters; - } else { - nn = nph; - } - //Read one cluster, in the ROI push back - // nph_read += fread((buf + nph_read*clusters.item_size()), - // clusters.item_size(), nn, fp); - for(size_t i = 0; i < nn; i++){ - fread(&tmp, sizeof(tmp), 1, fp); - if(tmp.x >= roi.xmin && tmp.x <= roi.xmax && tmp.y >= roi.ymin && tmp.y <= roi.ymax){ - clusters.push_back(tmp.x, tmp.y, reinterpret_cast(tmp.data)); - nph_read++; + if (m_num_left) { + while(m_num_left && clusters.size() < n_clusters){ + Cluster3x3 c = read_one_cluster(); + if(is_selected(c)){ + clusters.push_back(c.x, c.y, reinterpret_cast(c.data)); } } - - m_num_left = nph - nn; // write back the number of photons left } - if (nph_read < 
n_clusters) { - // keep on reading frames and photons until reaching n_clusters - while (fread(&iframe, sizeof(iframe), 1, fp)) { - // read number of clusters in frame - if (fread(&nph, sizeof(nph), 1, fp)) { - if (nph > (n_clusters - nph_read)) - nn = n_clusters - nph_read; - else - nn = nph; - - // nph_read += fread((buf + nph_read*clusters.item_size()), - // clusters.item_size(), nn, fp); - for(size_t i = 0; i < nn; i++){ - fread(&tmp, sizeof(tmp), 1, fp); - if(tmp.x >= roi.xmin && tmp.x <= roi.xmax && tmp.y >= roi.ymin && tmp.y <= roi.ymax){ - clusters.push_back(tmp.x, tmp.y, reinterpret_cast(tmp.data)); - nph_read++; + // we did not have enough clusters left in the previous frame + // keep on reading frames until reaching n_clusters + if (clusters.size() < n_clusters) { + // sanity check + if (m_num_left) { + throw std::runtime_error(LOCATION + "Entered second loop with clusters left\n"); + } + + int32_t frame_number = 0; // frame number needs to be 4 bytes! + while (fread(&frame_number, sizeof(frame_number), 1, fp)) { + if (fread(&m_num_left, sizeof(m_num_left), 1, fp)) { + clusters.set_frame_number(frame_number); //cluster vector will hold the last frame number + while(m_num_left && clusters.size() < n_clusters){ + Cluster3x3 c = read_one_cluster(); + if(is_selected(c)){ + clusters.push_back(c.x, c.y, reinterpret_cast(c.data)); } } - m_num_left = nph - nn; } - if (nph_read >= n_clusters) + + // we have enough clusters, break out of the outer while loop + if (clusters.size() >= n_clusters) break; } - } - // Resize the vector to the number of clusters. - // No new allocation, only change bounds. 
- clusters.resize(nph_read); + } + if(m_gain_map) + clusters.apply_gain_map(m_gain_map->view()); + return clusters; } -ClusterVector ClusterFile::read_frame() { +Cluster3x3 ClusterFile::read_one_cluster(){ + Cluster3x3 c; + auto rc = fread(&c, sizeof(c), 1, fp); + if (rc != 1) { + throw std::runtime_error(LOCATION + "Could not read cluster"); + } + --m_num_left; + return c; +} + +ClusterVector ClusterFile::read_frame(){ + if (m_mode != "r") { + throw std::runtime_error(LOCATION + "File not opened for reading"); + } + if (m_noise_map || m_roi){ + return read_frame_with_cut(); + }else{ + return read_frame_without_cut(); + } +} + +ClusterVector ClusterFile::read_frame_without_cut() { + if (m_mode != "r") { + throw std::runtime_error("File not opened for reading"); + } + if (m_num_left) { + throw std::runtime_error( + "There are still photons left in the last frame"); + } + int32_t frame_number; + if (fread(&frame_number, sizeof(frame_number), 1, fp) != 1) { + throw std::runtime_error(LOCATION + "Could not read frame number"); + } + + int32_t n_clusters; // Saved as 32bit integer in the cluster file + if (fread(&n_clusters, sizeof(n_clusters), 1, fp) != 1) { + throw std::runtime_error(LOCATION + "Could not read number of clusters"); + } + + ClusterVector clusters(3, 3, n_clusters); + clusters.set_frame_number(frame_number); + + if (fread(clusters.data(), clusters.item_size(), n_clusters, fp) != + static_cast(n_clusters)) { + throw std::runtime_error(LOCATION + "Could not read clusters"); + } + clusters.resize(n_clusters); + if (m_gain_map) + clusters.apply_gain_map(m_gain_map->view()); + return clusters; +} + +ClusterVector ClusterFile::read_frame_with_cut() { if (m_mode != "r") { throw std::runtime_error("File not opened for reading"); } @@ -194,149 +258,47 @@ ClusterVector ClusterFile::read_frame() { throw std::runtime_error("Could not read frame number"); } - int32_t n_clusters; // Saved as 32bit integer in the cluster file - if (fread(&n_clusters, 
sizeof(n_clusters), 1, fp) != 1) { + + if (fread(&m_num_left, sizeof(m_num_left), 1, fp) != 1) { throw std::runtime_error("Could not read number of clusters"); } - // std::vector clusters(n_clusters); - ClusterVector clusters(3, 3, n_clusters); + + ClusterVector clusters(3, 3); + clusters.reserve(m_num_left); clusters.set_frame_number(frame_number); - - if (fread(clusters.data(), clusters.item_size(), n_clusters, fp) != - static_cast(n_clusters)) { - throw std::runtime_error("Could not read clusters"); + while(m_num_left){ + Cluster3x3 c = read_one_cluster(); + if(is_selected(c)){ + clusters.push_back(c.x, c.y, reinterpret_cast(c.data)); + } } - clusters.resize(n_clusters); + if (m_gain_map) + clusters.apply_gain_map(m_gain_map->view()); return clusters; } -// std::vector ClusterFile::read_cluster_with_cut(size_t n_clusters, -// double *noise_map, -// int nx, int ny) { -// if (m_mode != "r") { -// throw std::runtime_error("File not opened for reading"); -// } -// std::vector clusters(n_clusters); -// // size_t read_clusters_with_cut(FILE *fp, size_t n_clusters, Cluster *buf, -// // uint32_t *n_left, double *noise_map, int -// // nx, int ny) { -// int iframe = 0; -// // uint32_t nph = *n_left; -// uint32_t nph = m_num_left; -// // uint32_t nn = *n_left; -// uint32_t nn = m_num_left; -// size_t nph_read = 0; -// int32_t t2max, tot1; -// int32_t tot3; -// // Cluster *ptr = buf; -// Cluster3x3 *ptr = clusters.data(); -// int good = 1; -// double noise; -// // read photons left from previous frame -// if (noise_map) -// printf("Using noise map\n"); +bool ClusterFile::is_selected(Cluster3x3 &cl) { + //Should fail fast + if (m_roi) { + if (!(m_roi->contains(cl.x, cl.y))) { + return false; + } + } + if (m_noise_map){ + int32_t sum_1x1 = cl.data[4]; // central pixel + int32_t sum_2x2 = cl.sum_2x2(); // highest sum of 2x2 subclusters + int32_t sum_3x3 = cl.sum(); // sum of all pixels -// if (nph) { -// if (nph > n_clusters) { -// // if we have more photons left in the frame 
then photons to -// // read we read directly the requested number -// nn = n_clusters; -// } else { -// nn = nph; -// } -// for (size_t iph = 0; iph < nn; iph++) { -// // read photons 1 by 1 -// size_t n_read = -// fread(reinterpret_cast(ptr), sizeof(Cluster3x3), 1, fp); -// if (n_read != 1) { -// clusters.resize(nph_read); -// return clusters; -// } -// // TODO! error handling on read -// good = 1; -// if (noise_map) { -// if (ptr->x >= 0 && ptr->x < nx && ptr->y >= 0 && ptr->y < ny) { -// tot1 = ptr->data[4]; -// analyze_cluster(*ptr, &t2max, &tot3, NULL, NULL, NULL, NULL, -// NULL); -// noise = noise_map[ptr->y * nx + ptr->x]; -// if (tot1 > noise || t2max > 2 * noise || tot3 > 3 * noise) { -// ; -// } else { -// good = 0; -// printf("%d %d %f %d %d %d\n", ptr->x, ptr->y, noise, -// tot1, t2max, tot3); -// } -// } else { -// printf("Bad pixel number %d %d\n", ptr->x, ptr->y); -// good = 0; -// } -// } -// if (good) { -// ptr++; -// nph_read++; -// } -// (m_num_left)--; -// if (nph_read >= n_clusters) -// break; -// } -// } -// if (nph_read < n_clusters) { -// // // keep on reading frames and photons until reaching -// // n_clusters -// while (fread(&iframe, sizeof(iframe), 1, fp)) { -// // // printf("%d\n",nph_read); - -// if (fread(&nph, sizeof(nph), 1, fp)) { -// // // printf("** %d\n",nph); -// m_num_left = nph; -// for (size_t iph = 0; iph < nph; iph++) { -// // // read photons 1 by 1 -// size_t n_read = fread(reinterpret_cast(ptr), -// sizeof(Cluster3x3), 1, fp); -// if (n_read != 1) { -// clusters.resize(nph_read); -// return clusters; -// // return nph_read; -// } -// good = 1; -// if (noise_map) { -// if (ptr->x >= 0 && ptr->x < nx && ptr->y >= 0 && -// ptr->y < ny) { -// tot1 = ptr->data[4]; -// analyze_cluster(*ptr, &t2max, &tot3, NULL, NULL, -// NULL, NULL, NULL); -// // noise = noise_map[ptr->y * nx + ptr->x]; -// noise = noise_map[ptr->y + ny * ptr->x]; -// if (tot1 > noise || t2max > 2 * noise || -// tot3 > 3 * noise) { -// ; -// } else -// good = 
0; -// } else { -// printf("Bad pixel number %d %d\n", ptr->x, ptr->y); -// good = 0; -// } -// } -// if (good) { -// ptr++; -// nph_read++; -// } -// (m_num_left)--; -// if (nph_read >= n_clusters) -// break; -// } -// } -// if (nph_read >= n_clusters) -// break; -// } -// } -// // printf("%d\n",nph_read); -// clusters.resize(nph_read); -// return clusters; -// } + auto noise = (*m_noise_map)(cl.y, cl.x); //TODO! check if this is correct + if (sum_1x1 <= noise || sum_2x2 <= 2 * noise || sum_3x3 <= 3 * noise) { + return false; + } + } + //we passed all checks + return true; +} NDArray calculate_eta2(ClusterVector &clusters) { //TOTO! make work with 2x2 clusters @@ -431,111 +393,4 @@ Eta2 calculate_eta2(Cluster2x2 &cl) { } - -int analyze_cluster(Cluster3x3 &cl, int32_t *t2, int32_t *t3, char *quad, - double *eta2x, double *eta2y, double *eta3x, - double *eta3y) { - - return analyze_data(cl.data, t2, t3, quad, eta2x, eta2y, eta3x, eta3y); -} - -int analyze_data(int32_t *data, int32_t *t2, int32_t *t3, char *quad, - double *eta2x, double *eta2y, double *eta3x, double *eta3y) { - - int ok = 1; - - int32_t tot2[4]; - int32_t t2max = 0; - char c = 0; - int32_t val, tot3; - - tot3 = 0; - for (int i = 0; i < 4; i++) - tot2[i] = 0; - - for (int ix = 0; ix < 3; ix++) { - for (int iy = 0; iy < 3; iy++) { - val = data[iy * 3 + ix]; - // printf ("%d ",data[iy * 3 + ix]); - tot3 += val; - if (ix <= 1 && iy <= 1) - tot2[cBottomLeft] += val; - if (ix >= 1 && iy <= 1) - tot2[cBottomRight] += val; - if (ix <= 1 && iy >= 1) - tot2[cTopLeft] += val; - if (ix >= 1 && iy >= 1) - tot2[cTopRight] += val; - } - // printf ("\n"); - } - // printf ("\n"); - - if (t2 || quad) { - - t2max = tot2[0]; - c = cBottomLeft; - for (int i = 1; i < 4; i++) { - if (tot2[i] > t2max) { - t2max = tot2[i]; - c = i; - } - } - // printf("*** %d %d %d %d -- - // %d\n",tot2[0],tot2[1],tot2[2],tot2[3],t2max); - if (quad) - *quad = c; - if (t2) - *t2 = t2max; - } - - if (t3) - *t3 = tot3; - - if (eta2x || eta2y) { 
- if (eta2x) - *eta2x = 0; - if (eta2y) - *eta2y = 0; - switch (c) { - case cBottomLeft: - if (eta2x && (data[3] + data[4]) != 0) - *eta2x = static_cast(data[4]) / (data[3] + data[4]); - if (eta2y && (data[1] + data[4]) != 0) - *eta2y = static_cast(data[4]) / (data[1] + data[4]); - break; - case cBottomRight: - if (eta2x && (data[2] + data[5]) != 0) - *eta2x = static_cast(data[5]) / (data[4] + data[5]); - if (eta2y && (data[1] + data[4]) != 0) - *eta2y = static_cast(data[4]) / (data[1] + data[4]); - break; - case cTopLeft: - if (eta2x && (data[7] + data[4]) != 0) - *eta2x = static_cast(data[4]) / (data[3] + data[4]); - if (eta2y && (data[7] + data[4]) != 0) - *eta2y = static_cast(data[7]) / (data[7] + data[4]); - break; - case cTopRight: - if (eta2x && t2max != 0) - *eta2x = static_cast(data[5]) / (data[5] + data[4]); - if (eta2y && t2max != 0) - *eta2y = static_cast(data[7]) / (data[7] + data[4]); - break; - default:; - } - } - - if (eta3x || eta3y) { - if (eta3x && (data[3] + data[4] + data[5]) != 0) - *eta3x = static_cast(-data[3] + data[3 + 2]) / - (data[3] + data[4] + data[5]); - if (eta3y && (data[1] + data[4] + data[7]) != 0) - *eta3y = static_cast(-data[1] + data[2 * 3 + 1]) / - (data[1] + data[4] + data[7]); - } - - return ok; -} - } // namespace aare \ No newline at end of file diff --git a/src/ClusterFile.test.cpp b/src/ClusterFile.test.cpp new file mode 100644 index 0000000..a0eed04 --- /dev/null +++ b/src/ClusterFile.test.cpp @@ -0,0 +1,80 @@ +#include "aare/ClusterFile.hpp" +#include "test_config.hpp" + + +#include "aare/defs.hpp" +#include +#include + + + + +using aare::ClusterFile; + +TEST_CASE("Read one frame from a a cluster file", "[.integration]") { + //We know that the frame has 97 clusters + auto fpath = test_data_path() / "clusters" / "single_frame_97_clustrers.clust"; + REQUIRE(std::filesystem::exists(fpath)); + + ClusterFile f(fpath); + auto clusters = f.read_frame(); + REQUIRE(clusters.size() == 97); + REQUIRE(clusters.frame_number() == 
135); +} + +TEST_CASE("Read one frame using ROI", "[.integration]") { + //We know that the frame has 97 clusters + auto fpath = test_data_path() / "clusters" / "single_frame_97_clustrers.clust"; + REQUIRE(std::filesystem::exists(fpath)); + + ClusterFile f(fpath); + aare::ROI roi; + roi.xmin = 0; + roi.xmax = 50; + roi.ymin = 200; + roi.ymax = 249; + f.set_roi(roi); + auto clusters = f.read_frame(); + REQUIRE(clusters.size() == 49); + REQUIRE(clusters.frame_number() == 135); + + //Check that all clusters are within the ROI + for (size_t i = 0; i < clusters.size(); i++) { + auto c = clusters.at(i); + REQUIRE(c.x >= roi.xmin); + REQUIRE(c.x <= roi.xmax); + REQUIRE(c.y >= roi.ymin); + REQUIRE(c.y <= roi.ymax); + } + +} + + +TEST_CASE("Read clusters from single frame file", "[.integration]") { + + auto fpath = test_data_path() / "clusters" / "single_frame_97_clustrers.clust"; + REQUIRE(std::filesystem::exists(fpath)); + + SECTION("Read fewer clusters than available") { + ClusterFile f(fpath); + auto clusters = f.read_clusters(50); + REQUIRE(clusters.size() == 50); + REQUIRE(clusters.frame_number() == 135); + } + SECTION("Read more clusters than available") { + ClusterFile f(fpath); + // 100 is the maximum number of clusters read + auto clusters = f.read_clusters(100); + REQUIRE(clusters.size() == 97); + REQUIRE(clusters.frame_number() == 135); + } + SECTION("Read all clusters") { + ClusterFile f(fpath); + auto clusters = f.read_clusters(97); + REQUIRE(clusters.size() == 97); + REQUIRE(clusters.frame_number() == 135); + } + + + +} diff --git a/tests/test_config.hpp.in b/tests/test_config.hpp.in index 62993b7..e314b8f 100644 --- a/tests/test_config.hpp.in +++ b/tests/test_config.hpp.in @@ -7,6 +7,6 @@ inline auto test_data_path(){ if(const char* env_p = std::getenv("AARE_TEST_DATA")){ return std::filesystem::path(env_p); }else{ - throw std::runtime_error("AARE_TEST_DATA_PATH not set"); + throw std::runtime_error("Path to test data: $AARE_TEST_DATA not set"); } } \ No 
newline at end of file From 7db1ae4d942e3ea4a947ca8b081b1e0f90b79b75 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Thu, 3 Apr 2025 13:18:55 +0200 Subject: [PATCH 12/23] Dev/gitea ci (#151) Build and test on internal PSI gitea --- .gitea/workflows/cmake_build.yml | 18 +++++++++--------- .gitea/workflows/rh8-native.yml | 30 ++++++++++++++++++++++++++++++ .gitea/workflows/rh9-native.yml | 31 +++++++++++++++++++++++++++++++ .github/workflows/build_docs.yml | 12 +++++------- etc/dev-env.yml | 15 +++++++++++++++ 5 files changed, 90 insertions(+), 16 deletions(-) create mode 100644 .gitea/workflows/rh8-native.yml create mode 100644 .gitea/workflows/rh9-native.yml create mode 100644 etc/dev-env.yml diff --git a/.gitea/workflows/cmake_build.yml b/.gitea/workflows/cmake_build.yml index 43a0181..aa7a297 100644 --- a/.gitea/workflows/cmake_build.yml +++ b/.gitea/workflows/cmake_build.yml @@ -2,9 +2,8 @@ name: Build the package using cmake then documentation on: workflow_dispatch: - push: - + permissions: contents: read @@ -16,12 +15,12 @@ jobs: strategy: fail-fast: false matrix: - platform: [ubuntu-latest, ] # macos-12, windows-2019] - python-version: ["3.12",] + platform: [ubuntu-latest, ] + python-version: ["3.12", ] runs-on: ${{ matrix.platform }} - # The setup-miniconda action needs this to activate miniconda + defaults: run: shell: "bash -l {0}" @@ -35,13 +34,13 @@ jobs: sudo apt-get -y install cmake gcc g++ - name: Get conda - uses: conda-incubator/setup-miniconda@v3.0.4 + uses: conda-incubator/setup-miniconda@v3 with: python-version: ${{ matrix.python-version }} + environment-file: etc/dev-env.yml + miniforge-version: latest channels: conda-forge - - - name: Prepare - run: conda install doxygen sphinx=7.1.2 breathe pybind11 sphinx_rtd_theme furo nlohmann_json zeromq fmt numpy + conda-remove-defaults: "true" - name: Build library run: | @@ -56,3 +55,4 @@ jobs: + diff --git a/.gitea/workflows/rh8-native.yml b/.gitea/workflows/rh8-native.yml new file 
mode 100644 index 0000000..02d3dc0 --- /dev/null +++ b/.gitea/workflows/rh8-native.yml @@ -0,0 +1,30 @@ +name: Build on RHEL8 + +on: + workflow_dispatch: + +permissions: + contents: read + +jobs: + buildh: + runs-on: "ubuntu-latest" + container: + image: gitea.psi.ch/images/rhel8-developer-gitea-actions + steps: + - uses: actions/checkout@v4 + + + - name: Install dependencies + run: | + dnf install -y cmake python3.12 python3.12-devel python3.12-pip + + - name: Build library + run: | + mkdir build && cd build + cmake .. -DAARE_PYTHON_BINDINGS=ON -DAARE_TESTS=ON + make -j 2 + + - name: C++ unit tests + working-directory: ${{gitea.workspace}}/build + run: ctest \ No newline at end of file diff --git a/.gitea/workflows/rh9-native.yml b/.gitea/workflows/rh9-native.yml new file mode 100644 index 0000000..c1f10ac --- /dev/null +++ b/.gitea/workflows/rh9-native.yml @@ -0,0 +1,31 @@ +name: Build on RHEL9 + +on: + push: + workflow_dispatch: + +permissions: + contents: read + +jobs: + buildh: + runs-on: "ubuntu-latest" + container: + image: gitea.psi.ch/images/rhel9-developer-gitea-actions + steps: + - uses: actions/checkout@v4 + + + - name: Install dependencies + run: | + dnf install -y cmake python3.12 python3.12-devel python3.12-pip + + - name: Build library + run: | + mkdir build && cd build + cmake .. 
-DAARE_PYTHON_BINDINGS=ON -DAARE_TESTS=ON + make -j 2 + + - name: C++ unit tests + working-directory: ${{gitea.workspace}}/build + run: ctest \ No newline at end of file diff --git a/.github/workflows/build_docs.yml b/.github/workflows/build_docs.yml index 959ab70..24050a3 100644 --- a/.github/workflows/build_docs.yml +++ b/.github/workflows/build_docs.yml @@ -5,7 +5,6 @@ on: push: - permissions: contents: read pages: write @@ -16,12 +15,11 @@ jobs: strategy: fail-fast: false matrix: - platform: [ubuntu-latest, ] # macos-12, windows-2019] + platform: [ubuntu-latest, ] python-version: ["3.12",] runs-on: ${{ matrix.platform }} - # The setup-miniconda action needs this to activate miniconda defaults: run: shell: "bash -l {0}" @@ -30,13 +28,13 @@ jobs: - uses: actions/checkout@v4 - name: Get conda - uses: conda-incubator/setup-miniconda@v3.0.4 + uses: conda-incubator/setup-miniconda@v3 with: python-version: ${{ matrix.python-version }} + environment-file: etc/dev-env.yml + miniforge-version: latest channels: conda-forge - - - name: Prepare - run: conda install doxygen sphinx=7.1.2 breathe pybind11 sphinx_rtd_theme furo nlohmann_json zeromq fmt numpy + conda-remove-defaults: "true" - name: Build library run: | diff --git a/etc/dev-env.yml b/etc/dev-env.yml new file mode 100644 index 0000000..25038ee --- /dev/null +++ b/etc/dev-env.yml @@ -0,0 +1,15 @@ +name: dev-environment +channels: + - conda-forge +dependencies: + - anaconda-client + - doxygen + - sphinx=7.1.2 + - breathe + - pybind11 + - sphinx_rtd_theme + - furo + - nlohmann_json + - zeromq + - fmt + - numpy From f16273a566a6cfa0a0ce906f0a0a8462a615ff9a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Tue, 8 Apr 2025 15:31:04 +0200 Subject: [PATCH 13/23] Adding support for Jungfrau .dat files (#152) closes #150 **Not addressed in this PR:** - pixels_per_frame, bytes_per_frame and tell should be made cost in FileInterface --- CMakeLists.txt | 5 + docs/src/JungfrauDataFile.rst | 25 +++ 
docs/src/Tests.rst | 47 +++++ docs/src/algorithm.rst | 5 + docs/src/index.rst | 12 +- docs/src/pyJungfrauDataFile.rst | 10 + include/aare/FilePtr.hpp | 30 +++ include/aare/JungfrauDataFile.hpp | 112 +++++++++++ include/aare/algorithm.hpp | 62 +++++- pyproject.toml | 7 +- python/aare/__init__.py | 2 +- python/src/jungfrau_data_file.hpp | 116 ++++++++++++ python/src/module.cpp | 3 + python/tests/conftest.py | 29 +++ python/tests/test_jungfrau_dat_files.py | 92 +++++++++ src/ClusterFile.test.cpp | 12 +- src/File.cpp | 3 + src/FilePtr.cpp | 44 +++++ src/JungfrauDataFile.cpp | 242 ++++++++++++++++++++++++ src/JungfrauDataFile.test.cpp | 94 +++++++++ src/algorithm.test.cpp | 90 ++++++++- 21 files changed, 1025 insertions(+), 17 deletions(-) create mode 100644 docs/src/JungfrauDataFile.rst create mode 100644 docs/src/Tests.rst create mode 100644 docs/src/algorithm.rst create mode 100644 docs/src/pyJungfrauDataFile.rst create mode 100644 include/aare/FilePtr.hpp create mode 100644 include/aare/JungfrauDataFile.hpp create mode 100644 python/src/jungfrau_data_file.hpp create mode 100644 python/tests/conftest.py create mode 100644 python/tests/test_jungfrau_dat_files.py create mode 100644 src/FilePtr.cpp create mode 100644 src/JungfrauDataFile.cpp create mode 100644 src/JungfrauDataFile.test.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 804b2f6..6db9314 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -342,8 +342,10 @@ set(PUBLICHEADERS include/aare/File.hpp include/aare/Fit.hpp include/aare/FileInterface.hpp + include/aare/FilePtr.hpp include/aare/Frame.hpp include/aare/geo_helpers.hpp + include/aare/JungfrauDataFile.hpp include/aare/NDArray.hpp include/aare/NDView.hpp include/aare/NumpyFile.hpp @@ -367,8 +369,10 @@ set(SourceFiles ${CMAKE_CURRENT_SOURCE_DIR}/src/decode.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Frame.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/File.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/FilePtr.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Fit.cpp 
${CMAKE_CURRENT_SOURCE_DIR}/src/geo_helpers.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/JungfrauDataFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyHelpers.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Interpolator.cpp @@ -423,6 +427,7 @@ if(AARE_TESTS) ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterVector.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFile.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Pedestal.test.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/JungfrauDataFile.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyHelpers.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/RawFile.test.cpp diff --git a/docs/src/JungfrauDataFile.rst b/docs/src/JungfrauDataFile.rst new file mode 100644 index 0000000..78d473f --- /dev/null +++ b/docs/src/JungfrauDataFile.rst @@ -0,0 +1,25 @@ +JungfrauDataFile +================== + +JungfrauDataFile is a class to read the .dat files that are produced by Aldo's receiver. +It is mostly used for calibration. + +The structure of the file is: + +* JungfrauDataHeader +* Binary data (256x256, 256x1024 or 512x1024) +* JungfrauDataHeader +* ... + +There is no metadata indicating number of frames or the size of the image, but this +will be infered by this reader. + +.. doxygenstruct:: aare::JungfrauDataHeader + :members: + :undoc-members: + :private-members: + +.. doxygenclass:: aare::JungfrauDataFile + :members: + :undoc-members: + :private-members: \ No newline at end of file diff --git a/docs/src/Tests.rst b/docs/src/Tests.rst new file mode 100644 index 0000000..da98001 --- /dev/null +++ b/docs/src/Tests.rst @@ -0,0 +1,47 @@ +**************** +Tests +**************** + +We test the code both from the C++ and Python API. By default only tests that does not require image data is run. + +C++ +~~~~~~~~~~~~~~~~~~ + +.. code-block:: bash + + mkdir build + cd build + cmake .. 
-DAARE_TESTS=ON + make -j 4 + + export AARE_TEST_DATA=/path/to/test/data + ./run_test [.files] #or using ctest, [.files] is the option to include tests needing data + + + +Python +~~~~~~~~~~~~~~~~~~ + +.. code-block:: bash + + #From the root dir of the library + python -m pytest python/tests --files # passing --files will run the tests needing data + + + +Getting the test data +~~~~~~~~~~~~~~~~~~~~~~~~ + +.. attention :: + + The tests needing the test data are not run by default. To make the data available, you need to set the environment variable + AARE_TEST_DATA to the path of the test data directory. Then pass either [.files] for the C++ tests or --files for Python + +The image files needed for the test are large and are not included in the repository. They are stored +using GIT LFS in a separate repository. To get the test data, you need to clone the repository. +To do this, you need to have GIT LFS installed. You can find instructions on how to install it here: https://git-lfs.github.com/ +Once you have GIT LFS installed, you can clone the repository like any normal repo using: + +.. code-block:: bash + + git clone https://gitea.psi.ch/detectors/aare-test-data.git diff --git a/docs/src/algorithm.rst b/docs/src/algorithm.rst new file mode 100644 index 0000000..9b11857 --- /dev/null +++ b/docs/src/algorithm.rst @@ -0,0 +1,5 @@ +algorithm +============= + +.. doxygenfile:: algorithm.hpp + diff --git a/docs/src/index.rst b/docs/src/index.rst index 905caea..af5e99a 100644 --- a/docs/src/index.rst +++ b/docs/src/index.rst @@ -20,9 +20,6 @@ AARE Requirements Consume - - - .. 
toctree:: :caption: Python API :maxdepth: 1 @@ -31,6 +28,7 @@ AARE pyCtbRawFile pyClusterFile pyClusterVector + pyJungfrauDataFile pyRawFile pyRawMasterFile pyVarClusterFinder @@ -42,6 +40,7 @@ AARE :caption: C++ API :maxdepth: 1 + algorithm NDArray NDView Frame @@ -51,6 +50,7 @@ AARE ClusterFinderMT ClusterFile ClusterVector + JungfrauDataFile Pedestal RawFile RawSubFile @@ -59,4 +59,8 @@ AARE - +.. toctree:: + :caption: Developer + :maxdepth: 3 + + Tests \ No newline at end of file diff --git a/docs/src/pyJungfrauDataFile.rst b/docs/src/pyJungfrauDataFile.rst new file mode 100644 index 0000000..2173adf --- /dev/null +++ b/docs/src/pyJungfrauDataFile.rst @@ -0,0 +1,10 @@ +JungfrauDataFile +=================== + +.. py:currentmodule:: aare + +.. autoclass:: JungfrauDataFile + :members: + :undoc-members: + :show-inheritance: + :inherited-members: \ No newline at end of file diff --git a/include/aare/FilePtr.hpp b/include/aare/FilePtr.hpp new file mode 100644 index 0000000..4c88ecb --- /dev/null +++ b/include/aare/FilePtr.hpp @@ -0,0 +1,30 @@ +#pragma once +#include +#include + +namespace aare { + +/** + * \brief RAII wrapper for FILE pointer + */ +class FilePtr { + FILE *fp_{nullptr}; + + public: + FilePtr() = default; + FilePtr(const std::filesystem::path& fname, const std::string& mode); + FilePtr(const FilePtr &) = delete; // we don't want a copy + FilePtr &operator=(const FilePtr &) = delete; // since we handle a resource + FilePtr(FilePtr &&other); + FilePtr &operator=(FilePtr &&other); + FILE *get(); + int64_t tell(); + void seek(int64_t offset, int whence = SEEK_SET) { + if (fseek(fp_, offset, whence) != 0) + throw std::runtime_error("Error seeking in file"); + } + std::string error_msg(); + ~FilePtr(); +}; + +} // namespace aare \ No newline at end of file diff --git a/include/aare/JungfrauDataFile.hpp b/include/aare/JungfrauDataFile.hpp new file mode 100644 index 0000000..bba5403 --- /dev/null +++ b/include/aare/JungfrauDataFile.hpp @@ -0,0 +1,112 @@ 
+#pragma once +#include +#include +#include + +#include "aare/FilePtr.hpp" +#include "aare/defs.hpp" +#include "aare/NDArray.hpp" +#include "aare/FileInterface.hpp" +namespace aare { + + +struct JungfrauDataHeader{ + uint64_t framenum; + uint64_t bunchid; +}; + +class JungfrauDataFile : public FileInterface { + + size_t m_rows{}; //!< number of rows in the image, from find_frame_size(); + size_t m_cols{}; //!< number of columns in the image, from find_frame_size(); + size_t m_bytes_per_frame{}; //!< number of bytes per frame excluding header + size_t m_total_frames{}; //!< total number of frames in the series of files + size_t m_offset{}; //!< file index of the first file, allow starting at non zero file + size_t m_current_file_index{}; //!< The index of the open file + size_t m_current_frame_index{}; //!< The index of the current frame (with reference to all files) + + std::vector m_last_frame_in_file{}; //!< Used for seeking to the correct file + std::filesystem::path m_path; //!< path to the files + std::string m_base_name; //!< base name used for formatting file names + + FilePtr m_fp; //!< RAII wrapper for a FILE* + + + using pixel_type = uint16_t; + static constexpr size_t header_size = sizeof(JungfrauDataHeader); + static constexpr size_t n_digits_in_file_index = 6; //!< to format file names + + public: + JungfrauDataFile(const std::filesystem::path &fname); + + std::string base_name() const; //!< get the base name of the file (without path and extension) + size_t bytes_per_frame() override; + size_t pixels_per_frame() override; + size_t bytes_per_pixel() const; + size_t bitdepth() const override; + void seek(size_t frame_index) override; //!< seek to the given frame index (note not byte offset) + size_t tell() override; //!< get the frame index of the file pointer + size_t total_frames() const override; + size_t rows() const override; + size_t cols() const override; + size_t n_files() const; //!< get the number of files in the series. 
+ + // Extra functions needed for FileInterface + Frame read_frame() override; + Frame read_frame(size_t frame_number) override; + std::vector read_n(size_t n_frames=0) override; + void read_into(std::byte *image_buf) override; + void read_into(std::byte *image_buf, size_t n_frames) override; + size_t frame_number(size_t frame_index) override; + DetectorType detector_type() const override; + + /** + * @brief Read a single frame from the file into the given buffer. + * @param image_buf buffer to read the frame into. (Note the caller is responsible for allocating the buffer) + * @param header pointer to a JungfrauDataHeader or nullptr to skip header) + */ + void read_into(std::byte *image_buf, JungfrauDataHeader *header = nullptr); + + /** + * @brief Read a multiple frames from the file into the given buffer. + * @param image_buf buffer to read the frame into. (Note the caller is responsible for allocating the buffer) + * @param n_frames number of frames to read + * @param header pointer to a JungfrauDataHeader or nullptr to skip header) + */ + void read_into(std::byte *image_buf, size_t n_frames, JungfrauDataHeader *header = nullptr); + + /** + * @brief Read a single frame from the file into the given NDArray + * @param image NDArray to read the frame into. + */ + void read_into(NDArray* image, JungfrauDataHeader* header = nullptr); + + /** + * @brief Read a single frame from the file. Allocated a new NDArray for the output data + * @param header pointer to a JungfrauDataHeader or nullptr to skip header) + * @return NDArray with the image data + */ + NDArray read_frame(JungfrauDataHeader* header = nullptr); + + JungfrauDataHeader read_header(); + std::filesystem::path current_file() const { return fpath(m_current_file_index+m_offset); } + + + private: + /** + * @brief Find the size of the frame in the file. 
(256x256, 256x1024, 512x1024) + * @param fname path to the file + * @throws std::runtime_error if the file is empty or the size cannot be determined + */ + void find_frame_size(const std::filesystem::path &fname); + + + void parse_fname(const std::filesystem::path &fname); + void scan_files(); + void open_file(size_t file_index); + std::filesystem::path fpath(size_t frame_index) const; + + + }; + +} // namespace aare \ No newline at end of file diff --git a/include/aare/algorithm.hpp b/include/aare/algorithm.hpp index 5d6dc57..fc7d51f 100644 --- a/include/aare/algorithm.hpp +++ b/include/aare/algorithm.hpp @@ -7,13 +7,20 @@ namespace aare { /** - * @brief Find the index of the last element smaller than val - * assume a sorted array + * @brief Index of the last element that is smaller than val. + * Requires a sorted array. Uses >= for ordering. If all elements + * are smaller it returns the last element and if all elements are + * larger it returns the first element. + * @param first iterator to the first element + * @param last iterator to the last element + * @param val value to compare + * @return index of the last element that is smaller than val + * */ template size_t last_smaller(const T* first, const T* last, T val) { for (auto iter = first+1; iter != last; ++iter) { - if (*iter > val) { + if (*iter >= val) { return std::distance(first, iter-1); } } @@ -25,7 +32,49 @@ size_t last_smaller(const NDArray& arr, T val) { return last_smaller(arr.begin(), arr.end(), val); } +template +size_t last_smaller(const std::vector& vec, T val) { + return last_smaller(vec.data(), vec.data()+vec.size(), val); +} +/** + * @brief Index of the first element that is larger than val. + * Requires a sorted array. Uses > for ordering. If all elements + * are larger it returns the first element and if all elements are + * smaller it returns the last element. 
+ * @param first iterator to the first element + * @param last iterator to the last element + * @param val value to compare + * @return index of the first element that is larger than val + */ +template +size_t first_larger(const T* first, const T* last, T val) { + for (auto iter = first; iter != last; ++iter) { + if (*iter > val) { + return std::distance(first, iter); + } + } + return std::distance(first, last-1); +} + +template +size_t first_larger(const NDArray& arr, T val) { + return first_larger(arr.begin(), arr.end(), val); +} + +template +size_t first_larger(const std::vector& vec, T val) { + return first_larger(vec.data(), vec.data()+vec.size(), val); +} + +/** + * @brief Index of the nearest element to val. + * Requires a sorted array. If there is no difference it takes the first element. + * @param first iterator to the first element + * @param last iterator to the last element + * @param val value to compare + * @return index of the nearest element + */ template size_t nearest_index(const T* first, const T* last, T val) { auto iter = std::min_element(first, last, @@ -50,6 +99,13 @@ size_t nearest_index(const std::array& arr, T val) { return nearest_index(arr.data(), arr.data()+arr.size(), val); } +template +std::vector cumsum(const std::vector& vec) { + std::vector result(vec.size()); + std::partial_sum(vec.begin(), vec.end(), result.begin()); + return result; +} + } // namespace aare \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 60128c9..470d158 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -15,4 +15,9 @@ cmake.verbose = true [tool.scikit-build.cmake.define] AARE_PYTHON_BINDINGS = "ON" AARE_SYSTEM_LIBRARIES = "ON" -AARE_INSTALL_PYTHONEXT = "ON" \ No newline at end of file +AARE_INSTALL_PYTHONEXT = "ON" + +[tool.pytest.ini_options] +markers = [ + "files: marks tests that need additional data (deselect with '-m \"not files\"')", +] \ No newline at end of file diff --git a/python/aare/__init__.py 
b/python/aare/__init__.py index 058d7cf..606f958 100644 --- a/python/aare/__init__.py +++ b/python/aare/__init__.py @@ -2,7 +2,7 @@ from . import _aare -from ._aare import File, RawMasterFile, RawSubFile +from ._aare import File, RawMasterFile, RawSubFile, JungfrauDataFile from ._aare import Pedestal_d, Pedestal_f, ClusterFinder, VarClusterFinder from ._aare import DetectorType from ._aare import ClusterFile diff --git a/python/src/jungfrau_data_file.hpp b/python/src/jungfrau_data_file.hpp new file mode 100644 index 0000000..942f6a6 --- /dev/null +++ b/python/src/jungfrau_data_file.hpp @@ -0,0 +1,116 @@ + +#include "aare/JungfrauDataFile.hpp" +#include "aare/defs.hpp" + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace py = pybind11; +using namespace ::aare; + +// Disable warnings for unused parameters, as we ignore some +// in the __exit__ method +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" + +auto read_dat_frame(JungfrauDataFile &self) { + py::array_t header(1); + py::array_t image({ + self.rows(), + self.cols() + }); + + self.read_into(reinterpret_cast(image.mutable_data()), + header.mutable_data()); + + return py::make_tuple(header, image); +} + +auto read_n_dat_frames(JungfrauDataFile &self, size_t n_frames) { + // adjust for actual frames left in the file + n_frames = std::min(n_frames, self.total_frames() - self.tell()); + if (n_frames == 0) { + throw std::runtime_error("No frames left in file"); + } + + py::array_t header(n_frames); + py::array_t image({ + n_frames, self.rows(), + self.cols()}); + + self.read_into(reinterpret_cast(image.mutable_data()), + n_frames, header.mutable_data()); + + return py::make_tuple(header, image); +} + +void define_jungfrau_data_file_io_bindings(py::module &m) { + // Make the JungfrauDataHeader usable from numpy + PYBIND11_NUMPY_DTYPE(JungfrauDataHeader, framenum, bunchid); + + py::class_(m, "JungfrauDataFile") + .def(py::init()) + .def("seek", 
&JungfrauDataFile::seek, + R"( + Seek to the given frame index. + )") + .def("tell", &JungfrauDataFile::tell, + R"( + Get the current frame index. + )") + .def_property_readonly("rows", &JungfrauDataFile::rows) + .def_property_readonly("cols", &JungfrauDataFile::cols) + .def_property_readonly("base_name", &JungfrauDataFile::base_name) + .def_property_readonly("bytes_per_frame", + &JungfrauDataFile::bytes_per_frame) + .def_property_readonly("pixels_per_frame", + &JungfrauDataFile::pixels_per_frame) + .def_property_readonly("bytes_per_pixel", + &JungfrauDataFile::bytes_per_pixel) + .def_property_readonly("bitdepth", &JungfrauDataFile::bitdepth) + .def_property_readonly("current_file", &JungfrauDataFile::current_file) + .def_property_readonly("total_frames", &JungfrauDataFile::total_frames) + .def_property_readonly("n_files", &JungfrauDataFile::n_files) + .def("read_frame", &read_dat_frame, + R"( + Read a single frame from the file. + )") + .def("read_n", &read_n_dat_frames, + R"( + Read maximum n_frames frames from the file. + )") + .def( + "read", + [](JungfrauDataFile &self) { + self.seek(0); + auto n_frames = self.total_frames(); + return read_n_dat_frames(self, n_frames); + }, + R"( + Read all frames from the file. Seeks to the beginning before reading. 
+ )") + .def("__enter__", [](JungfrauDataFile &self) { return &self; }) + .def("__exit__", + [](JungfrauDataFile &self, + const std::optional &exc_type, + const std::optional &exc_value, + const std::optional &traceback) { + // self.close(); + }) + .def("__iter__", [](JungfrauDataFile &self) { return &self; }) + .def("__next__", [](JungfrauDataFile &self) { + try { + return read_dat_frame(self); + } catch (std::runtime_error &e) { + throw py::stop_iteration(); + } + }); +} + +#pragma GCC diagnostic pop \ No newline at end of file diff --git a/python/src/module.cpp b/python/src/module.cpp index 43f48ba..7a17e78 100644 --- a/python/src/module.cpp +++ b/python/src/module.cpp @@ -11,6 +11,8 @@ #include "fit.hpp" #include "interpolation.hpp" +#include "jungfrau_data_file.hpp" + //Pybind stuff #include #include @@ -33,5 +35,6 @@ PYBIND11_MODULE(_aare, m) { define_cluster_file_sink_bindings(m); define_fit_bindings(m); define_interpolation_bindings(m); + define_jungfrau_data_file_io_bindings(m); } \ No newline at end of file diff --git a/python/tests/conftest.py b/python/tests/conftest.py new file mode 100644 index 0000000..5badf13 --- /dev/null +++ b/python/tests/conftest.py @@ -0,0 +1,29 @@ +import os +from pathlib import Path +import pytest + + + +def pytest_addoption(parser): + parser.addoption( + "--files", action="store_true", default=False, help="run slow tests" + ) + + +def pytest_configure(config): + config.addinivalue_line("markers", "files: mark test as needing image files to run") + + +def pytest_collection_modifyitems(config, items): + if config.getoption("--files"): + return + skip = pytest.mark.skip(reason="need --files option to run") + for item in items: + if "files" in item.keywords: + item.add_marker(skip) + + +@pytest.fixture +def test_data_path(): + return Path(os.environ["AARE_TEST_DATA"]) + diff --git a/python/tests/test_jungfrau_dat_files.py b/python/tests/test_jungfrau_dat_files.py new file mode 100644 index 0000000..5d3fdf8 --- /dev/null +++ 
b/python/tests/test_jungfrau_dat_files.py @@ -0,0 +1,92 @@ +import pytest +import numpy as np +from aare import JungfrauDataFile + +@pytest.mark.files +def test_jfungfrau_dat_read_number_of_frames(test_data_path): + with JungfrauDataFile(test_data_path / "dat/AldoJF500k_000000.dat") as dat_file: + assert dat_file.total_frames == 24 + + with JungfrauDataFile(test_data_path / "dat/AldoJF250k_000000.dat") as dat_file: + assert dat_file.total_frames == 53 + + with JungfrauDataFile(test_data_path / "dat/AldoJF65k_000000.dat") as dat_file: + assert dat_file.total_frames == 113 + + +@pytest.mark.files +def test_jfungfrau_dat_read_number_of_file(test_data_path): + with JungfrauDataFile(test_data_path / "dat/AldoJF500k_000000.dat") as dat_file: + assert dat_file.n_files == 4 + + with JungfrauDataFile(test_data_path / "dat/AldoJF250k_000000.dat") as dat_file: + assert dat_file.n_files == 7 + + with JungfrauDataFile(test_data_path / "dat/AldoJF65k_000000.dat") as dat_file: + assert dat_file.n_files == 7 + + +@pytest.mark.files +def test_read_module(test_data_path): + """ + Read all frames from the series of .dat files. Compare to canned data in npz format. 
+ """ + + # Read all frames from the .dat file + with JungfrauDataFile(test_data_path / "dat/AldoJF500k_000000.dat") as f: + header, data = f.read() + + #Sanity check + n_frames = 24 + assert header.size == n_frames + assert data.shape == (n_frames, 512, 1024) + + # Read reference data using numpy + with np.load(test_data_path / "dat/AldoJF500k.npz") as f: + ref_header = f["headers"] + ref_data = f["frames"] + + # Check that the data is the same + assert np.all(ref_header == header) + assert np.all(ref_data == data) + +@pytest.mark.files +def test_read_half_module(test_data_path): + + # Read all frames from the .dat file + with JungfrauDataFile(test_data_path / "dat/AldoJF250k_000000.dat") as f: + header, data = f.read() + + n_frames = 53 + assert header.size == n_frames + assert data.shape == (n_frames, 256, 1024) + + # Read reference data using numpy + with np.load(test_data_path / "dat/AldoJF250k.npz") as f: + ref_header = f["headers"] + ref_data = f["frames"] + + # Check that the data is the same + assert np.all(ref_header == header) + assert np.all(ref_data == data) + + +@pytest.mark.files +def test_read_single_chip(test_data_path): + + # Read all frames from the .dat file + with JungfrauDataFile(test_data_path / "dat/AldoJF65k_000000.dat") as f: + header, data = f.read() + + n_frames = 113 + assert header.size == n_frames + assert data.shape == (n_frames, 256, 256) + + # Read reference data using numpy + with np.load(test_data_path / "dat/AldoJF65k.npz") as f: + ref_header = f["headers"] + ref_data = f["frames"] + + # Check that the data is the same + assert np.all(ref_header == header) + assert np.all(ref_data == data) \ No newline at end of file diff --git a/src/ClusterFile.test.cpp b/src/ClusterFile.test.cpp index a0eed04..a7fc044 100644 --- a/src/ClusterFile.test.cpp +++ b/src/ClusterFile.test.cpp @@ -11,9 +11,9 @@ using aare::ClusterFile; -TEST_CASE("Read one frame from a a cluster file", "[.integration]") { +TEST_CASE("Read one frame from a a cluster 
file", "[.files]") { //We know that the frame has 97 clusters - auto fpath = test_data_path() / "clusters" / "single_frame_97_clustrers.clust"; + auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust"; REQUIRE(std::filesystem::exists(fpath)); ClusterFile f(fpath); @@ -22,9 +22,9 @@ TEST_CASE("Read one frame from a a cluster file", "[.integration]") { REQUIRE(clusters.frame_number() == 135); } -TEST_CASE("Read one frame using ROI", "[.integration]") { +TEST_CASE("Read one frame using ROI", "[.files]") { //We know that the frame has 97 clusters - auto fpath = test_data_path() / "clusters" / "single_frame_97_clustrers.clust"; + auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust"; REQUIRE(std::filesystem::exists(fpath)); ClusterFile f(fpath); @@ -50,9 +50,9 @@ TEST_CASE("Read one frame using ROI", "[.integration]") { } -TEST_CASE("Read clusters from single frame file", "[.integration]") { +TEST_CASE("Read clusters from single frame file", "[.files]") { - auto fpath = test_data_path() / "clusters" / "single_frame_97_clustrers.clust"; + auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust"; REQUIRE(std::filesystem::exists(fpath)); SECTION("Read fewer clusters than available") { diff --git a/src/File.cpp b/src/File.cpp index 3c68eff..eb04893 100644 --- a/src/File.cpp +++ b/src/File.cpp @@ -1,4 +1,5 @@ #include "aare/File.hpp" +#include "aare/JungfrauDataFile.hpp" #include "aare/NumpyFile.hpp" #include "aare/RawFile.hpp" @@ -27,6 +28,8 @@ File::File(const std::filesystem::path &fname, const std::string &mode, else if (fname.extension() == ".npy") { // file_impl = new NumpyFile(fname, mode, cfg); file_impl = std::make_unique(fname, mode, cfg); + }else if(fname.extension() == ".dat"){ + file_impl = std::make_unique(fname); } else { throw std::runtime_error("Unsupported file type"); } diff --git a/src/FilePtr.cpp b/src/FilePtr.cpp new file mode 100644 index 0000000..4fed3d7 --- /dev/null +++ b/src/FilePtr.cpp 
@@ -0,0 +1,44 @@ + +#include "aare/FilePtr.hpp" +#include +#include +#include + +namespace aare { + +FilePtr::FilePtr(const std::filesystem::path& fname, const std::string& mode = "rb") { + fp_ = fopen(fname.c_str(), mode.c_str()); + if (!fp_) + throw std::runtime_error(fmt::format("Could not open: {}", fname.c_str())); +} + +FilePtr::FilePtr(FilePtr &&other) { std::swap(fp_, other.fp_); } + +FilePtr &FilePtr::operator=(FilePtr &&other) { + std::swap(fp_, other.fp_); + return *this; +} + +FILE *FilePtr::get() { return fp_; } + +int64_t FilePtr::tell() { + auto pos = ftell(fp_); + if (pos == -1) + throw std::runtime_error(fmt::format("Error getting file position: {}", error_msg())); + return pos; +} +FilePtr::~FilePtr() { + if (fp_) + fclose(fp_); // check? +} + +std::string FilePtr::error_msg(){ + if (feof(fp_)) { + return "End of file reached"; + } + if (ferror(fp_)) { + return fmt::format("Error reading file: {}", std::strerror(errno)); + } + return ""; +} +} // namespace aare diff --git a/src/JungfrauDataFile.cpp b/src/JungfrauDataFile.cpp new file mode 100644 index 0000000..6e1ccd6 --- /dev/null +++ b/src/JungfrauDataFile.cpp @@ -0,0 +1,242 @@ +#include "aare/JungfrauDataFile.hpp" +#include "aare/algorithm.hpp" +#include "aare/defs.hpp" + +#include +#include + +namespace aare { + +JungfrauDataFile::JungfrauDataFile(const std::filesystem::path &fname) { + + if (!std::filesystem::exists(fname)) { + throw std::runtime_error(LOCATION + + "File does not exist: " + fname.string()); + } + find_frame_size(fname); + parse_fname(fname); + scan_files(); + open_file(m_current_file_index); +} + + +// FileInterface + +Frame JungfrauDataFile::read_frame(){ + Frame f(rows(), cols(), Dtype::UINT16); + read_into(reinterpret_cast(f.data()), nullptr); + return f; +} + +Frame JungfrauDataFile::read_frame(size_t frame_number){ + seek(frame_number); + Frame f(rows(), cols(), Dtype::UINT16); + read_into(reinterpret_cast(f.data()), nullptr); + return f; +} + +std::vector 
JungfrauDataFile::read_n(size_t n_frames) { + std::vector frames; + throw std::runtime_error(LOCATION + + "Not implemented yet"); + return frames; +} + +void JungfrauDataFile::read_into(std::byte *image_buf) { + read_into(image_buf, nullptr); +} +void JungfrauDataFile::read_into(std::byte *image_buf, size_t n_frames) { + read_into(image_buf, n_frames, nullptr); +} + +size_t JungfrauDataFile::frame_number(size_t frame_index) { + seek(frame_index); + return read_header().framenum; +} + +DetectorType JungfrauDataFile::detector_type() const { return DetectorType::Jungfrau; } + +std::string JungfrauDataFile::base_name() const { return m_base_name; } + +size_t JungfrauDataFile::bytes_per_frame() { return m_bytes_per_frame; } + +size_t JungfrauDataFile::pixels_per_frame() { return m_rows * m_cols; } + +size_t JungfrauDataFile::bytes_per_pixel() const { return sizeof(pixel_type); } + +size_t JungfrauDataFile::bitdepth() const { + return bytes_per_pixel() * bits_per_byte; +} + +void JungfrauDataFile::seek(size_t frame_index) { + if (frame_index >= m_total_frames) { + throw std::runtime_error(LOCATION + "Frame index out of range: " + + std::to_string(frame_index)); + } + m_current_frame_index = frame_index; + auto file_index = first_larger(m_last_frame_in_file, frame_index); + + if (file_index != m_current_file_index) + open_file(file_index); + + auto frame_offset = (file_index) + ? 
frame_index - m_last_frame_in_file[file_index - 1] + : frame_index; + auto byte_offset = frame_offset * (m_bytes_per_frame + header_size); + m_fp.seek(byte_offset); +}; + +size_t JungfrauDataFile::tell() { return m_current_frame_index; } +size_t JungfrauDataFile::total_frames() const { return m_total_frames; } +size_t JungfrauDataFile::rows() const { return m_rows; } +size_t JungfrauDataFile::cols() const { return m_cols; } + +size_t JungfrauDataFile::n_files() const { return m_last_frame_in_file.size(); } + +void JungfrauDataFile::find_frame_size(const std::filesystem::path &fname) { + + static constexpr size_t module_data_size = + header_size + sizeof(pixel_type) * 512 * 1024; + static constexpr size_t half_data_size = + header_size + sizeof(pixel_type) * 256 * 1024; + static constexpr size_t chip_data_size = + header_size + sizeof(pixel_type) * 256 * 256; + + auto file_size = std::filesystem::file_size(fname); + if (file_size == 0) { + throw std::runtime_error(LOCATION + + "Cannot guess frame size: file is empty"); + } + + if (file_size % module_data_size == 0) { + m_rows = 512; + m_cols = 1024; + m_bytes_per_frame = module_data_size - header_size; + } else if (file_size % half_data_size == 0) { + m_rows = 256; + m_cols = 1024; + m_bytes_per_frame = half_data_size - header_size; + } else if (file_size % chip_data_size == 0) { + m_rows = 256; + m_cols = 256; + m_bytes_per_frame = chip_data_size - header_size; + } else { + throw std::runtime_error(LOCATION + + "Cannot find frame size: file size is not a " + "multiple of any known frame size"); + } +} + +void JungfrauDataFile::parse_fname(const std::filesystem::path &fname) { + m_path = fname.parent_path(); + m_base_name = fname.stem(); + + // find file index, then remove if from the base name + if (auto pos = m_base_name.find_last_of('_'); pos != std::string::npos) { + m_offset = std::stoul(m_base_name.substr(pos + 1)); + m_base_name.erase(pos); + } +} + +void JungfrauDataFile::scan_files() { + // find how many 
files we have and the number of frames in each file + m_last_frame_in_file.clear(); + size_t file_index = m_offset; + while (std::filesystem::exists(fpath(file_index))) { + auto n_frames = std::filesystem::file_size(fpath(file_index)) / + (m_bytes_per_frame + header_size); + m_last_frame_in_file.push_back(n_frames); + ++file_index; + } + + // find where we need to open the next file and total number of frames + m_last_frame_in_file = cumsum(m_last_frame_in_file); + m_total_frames = m_last_frame_in_file.back(); +} + +void JungfrauDataFile::read_into(std::byte *image_buf, + JungfrauDataHeader *header) { + + // read header if not passed nullptr + if (header) { + if (auto rc = fread(header, sizeof(JungfrauDataHeader), 1, m_fp.get()); + rc != 1) { + throw std::runtime_error( + LOCATION + + "Could not read header from file:" + m_fp.error_msg()); + } + } else { + m_fp.seek(header_size, SEEK_CUR); + } + + // read data + if (auto rc = fread(image_buf, 1, m_bytes_per_frame, m_fp.get()); + rc != m_bytes_per_frame) { + throw std::runtime_error(LOCATION + "Could not read image from file" + + m_fp.error_msg()); + } + + // prepare for next read + // if we are at the end of the file, open the next file + ++m_current_frame_index; + if (m_current_frame_index >= m_last_frame_in_file[m_current_file_index] && + (m_current_frame_index < m_total_frames)) { + ++m_current_file_index; + open_file(m_current_file_index); + } +} + +void JungfrauDataFile::read_into(std::byte *image_buf, size_t n_frames, + JungfrauDataHeader *header) { + if (header) { + for (size_t i = 0; i < n_frames; ++i) + read_into(image_buf + i * m_bytes_per_frame, header + i); + }else{ + for (size_t i = 0; i < n_frames; ++i) + read_into(image_buf + i * m_bytes_per_frame, nullptr); + } +} + +void JungfrauDataFile::read_into(NDArray* image, JungfrauDataHeader* header) { + if(!(rows() == image->shape(0) && cols() == image->shape(1))){ + throw std::runtime_error(LOCATION + + "Image shape does not match file size: " + 
std::to_string(rows()) + "x" + std::to_string(cols())); + } + read_into(reinterpret_cast(image->data()), header); +} + +NDArray JungfrauDataFile::read_frame(JungfrauDataHeader* header) { + Shape<2> shape{rows(), cols()}; + NDArray image(shape); + + read_into(reinterpret_cast(image.data()), + header); + + return image; +} + +JungfrauDataHeader JungfrauDataFile::read_header() { + JungfrauDataHeader header; + if (auto rc = fread(&header, 1, sizeof(header), m_fp.get()); + rc != sizeof(header)) { + throw std::runtime_error(LOCATION + "Could not read header from file" + + m_fp.error_msg()); + } + m_fp.seek(-header_size, SEEK_CUR); + return header; +} + +void JungfrauDataFile::open_file(size_t file_index) { + // fmt::print(stderr, "Opening file: {}\n", + // fpath(file_index+m_offset).string()); + m_fp = FilePtr(fpath(file_index + m_offset), "rb"); + m_current_file_index = file_index; +} + +std::filesystem::path JungfrauDataFile::fpath(size_t file_index) const { + auto fname = fmt::format("{}_{:0{}}.dat", m_base_name, file_index, + n_digits_in_file_index); + return m_path / fname; +} + +} // namespace aare \ No newline at end of file diff --git a/src/JungfrauDataFile.test.cpp b/src/JungfrauDataFile.test.cpp new file mode 100644 index 0000000..626a318 --- /dev/null +++ b/src/JungfrauDataFile.test.cpp @@ -0,0 +1,94 @@ +#include "aare/JungfrauDataFile.hpp" + +#include +#include "test_config.hpp" + +using aare::JungfrauDataFile; +using aare::JungfrauDataHeader; +TEST_CASE("Open a Jungfrau data file", "[.files]") { + //we know we have 4 files with 7, 7, 7, and 3 frames + //firs frame number if 1 and the bunch id is frame_number**2 + //so we can check the header + auto fpath = test_data_path() / "dat" / "AldoJF500k_000000.dat"; + REQUIRE(std::filesystem::exists(fpath)); + + JungfrauDataFile f(fpath); + REQUIRE(f.rows() == 512); + REQUIRE(f.cols() == 1024); + REQUIRE(f.bytes_per_frame() == 1048576); + REQUIRE(f.pixels_per_frame() == 524288); + REQUIRE(f.bytes_per_pixel() == 2); + 
REQUIRE(f.bitdepth() == 16); + REQUIRE(f.base_name() == "AldoJF500k"); + REQUIRE(f.n_files() == 4); + REQUIRE(f.tell() == 0); + REQUIRE(f.total_frames() == 24); + REQUIRE(f.current_file() == fpath); + + //Check that the frame number and buch id is read correctly + for (size_t i = 0; i < 24; ++i) { + JungfrauDataHeader header; + auto image = f.read_frame(&header); + REQUIRE(header.framenum == i + 1); + REQUIRE(header.bunchid == (i + 1) * (i + 1)); + REQUIRE(image.shape(0) == 512); + REQUIRE(image.shape(1) == 1024); + } +} + +TEST_CASE("Seek in a JungfrauDataFile", "[.files]"){ + auto fpath = test_data_path() / "dat" / "AldoJF65k_000000.dat"; + REQUIRE(std::filesystem::exists(fpath)); + + JungfrauDataFile f(fpath); + + //The file should have 113 frames + f.seek(19); + REQUIRE(f.tell() == 19); + auto h = f.read_header(); + REQUIRE(h.framenum == 19+1); + + //Reading again does not change the file pointer + auto h2 = f.read_header(); + REQUIRE(h2.framenum == 19+1); + + f.seek(59); + REQUIRE(f.tell() == 59); + auto h3 = f.read_header(); + REQUIRE(h3.framenum == 59+1); + + JungfrauDataHeader h4; + auto image = f.read_frame(&h4); + REQUIRE(h4.framenum == 59+1); + + //now we should be on the next frame + REQUIRE(f.tell() == 60); + REQUIRE(f.read_header().framenum == 60+1); + + REQUIRE_THROWS(f.seek(86356)); //out of range +} + +TEST_CASE("Open a Jungfrau data file with non zero file index", "[.files]"){ + + auto fpath = test_data_path() / "dat" / "AldoJF65k_000003.dat"; + REQUIRE(std::filesystem::exists(fpath)); + + JungfrauDataFile f(fpath); + + //18 files per data file, opening the 3rd file we ignore the first 3 + REQUIRE(f.total_frames() == 113-18*3); + REQUIRE(f.tell() == 0); + + //Frame numbers start at 1 in the first file + REQUIRE(f.read_header().framenum == 18*3+1); + + // moving relative to the third file + f.seek(5); + REQUIRE(f.read_header().framenum == 18*3+1+5); + + // ignoring the first 3 files + REQUIRE(f.n_files() == 4); + + REQUIRE(f.current_file().stem() 
== "AldoJF65k_000003"); + +} \ No newline at end of file diff --git a/src/algorithm.test.cpp b/src/algorithm.test.cpp index fcfa8d2..e2ae8fa 100644 --- a/src/algorithm.test.cpp +++ b/src/algorithm.test.cpp @@ -49,6 +49,16 @@ TEST_CASE("nearest index works with std::array", "[algorithm]"){ REQUIRE(aare::nearest_index(arr, -10.0) == 0); } +TEST_CASE("nearest index when there is no different uses the first element", "[algorithm]"){ + std::vector vec = {5, 5, 5, 5, 5}; + REQUIRE(aare::nearest_index(vec, 5) == 0); +} + +TEST_CASE("nearest index when there is no different uses the first element also when all smaller", "[algorithm]"){ + std::vector vec = {5, 5, 5, 5, 5}; + REQUIRE(aare::nearest_index(vec, 10) == 0); +} + TEST_CASE("last smaller", "[algorithm]"){ aare::NDArray arr({5}); @@ -68,6 +78,82 @@ TEST_CASE("returns last bin strictly smaller", "[algorithm]"){ arr[i] = i; } // arr 0, 1, 2, 3, 4 - REQUIRE(aare::last_smaller(arr, 2.0) == 2); + REQUIRE(aare::last_smaller(arr, 2.0) == 1); + +} + +TEST_CASE("last_smaller with all elements smaller returns last element", "[algorithm]"){ + aare::NDArray arr({5}); + for (size_t i = 0; i < arr.size(); i++) { + arr[i] = i; + } + // arr 0, 1, 2, 3, 4 + REQUIRE(aare::last_smaller(arr, 50.) == 4); +} + +TEST_CASE("last_smaller with all elements bigger returns first element", "[algorithm]"){ + aare::NDArray arr({5}); + for (size_t i = 0; i < arr.size(); i++) { + arr[i] = i; + } + // arr 0, 1, 2, 3, 4 + REQUIRE(aare::last_smaller(arr, -50.) == 0); +} + +TEST_CASE("last smaller with all elements equal returns the first element", "[algorithm]"){ + std::vector vec = {5,5,5,5,5,5,5}; + REQUIRE(aare::last_smaller(vec, 5) == 0); +} + + +TEST_CASE("first_lager with vector", "[algorithm]"){ + std::vector vec = {0, 1, 2, 3, 4}; + REQUIRE(aare::first_larger(vec, 2.5) == 3); +} + +TEST_CASE("first_lager with all elements smaller returns last element", "[algorithm]"){ + std::vector vec = {0, 1, 2, 3, 4}; + REQUIRE(aare::first_larger(vec, 50.) 
== 4); +} + +TEST_CASE("first_lager with all elements bigger returns first element", "[algorithm]"){ + std::vector vec = {0, 1, 2, 3, 4}; + REQUIRE(aare::first_larger(vec, -50.) == 0); +} + +TEST_CASE("first_lager with all elements the same as the check returns last", "[algorithm]"){ + std::vector vec = {14, 14, 14, 14, 14}; + REQUIRE(aare::first_larger(vec, 14) == 4); +} + +TEST_CASE("first larger with the same element", "[algorithm]"){ + std::vector vec = {7,8,9,10,11}; + REQUIRE(aare::first_larger(vec, 9) == 3); +} + +TEST_CASE("cumsum works", "[algorithm]"){ + std::vector vec = {0, 1, 2, 3, 4}; + auto result = aare::cumsum(vec); + REQUIRE(result.size() == vec.size()); + REQUIRE(result[0] == 0); + REQUIRE(result[1] == 1); + REQUIRE(result[2] == 3); + REQUIRE(result[3] == 6); + REQUIRE(result[4] == 10); +} +TEST_CASE("cumsum works with empty vector", "[algorithm]"){ + std::vector vec = {}; + auto result = aare::cumsum(vec); + REQUIRE(result.size() == 0); +} +TEST_CASE("cumsum works with negative numbers", "[algorithm]"){ + std::vector vec = {0, -1, -2, -3, -4}; + auto result = aare::cumsum(vec); + REQUIRE(result.size() == vec.size()); + REQUIRE(result[0] == 0); + REQUIRE(result[1] == -1); + REQUIRE(result[2] == -3); + REQUIRE(result[3] == -6); + REQUIRE(result[4] == -10); +} -} \ No newline at end of file From 894065fe9ccc0dd4b63d44af4dcfa21839135a37 Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Wed, 9 Apr 2025 12:19:14 +0200 Subject: [PATCH 14/23] added utility plot --- python/aare/__init__.py | 2 +- python/aare/utils.py | 11 ++++++++++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/python/aare/__init__.py b/python/aare/__init__.py index 606f958..98e8c72 100644 --- a/python/aare/__init__.py +++ b/python/aare/__init__.py @@ -17,7 +17,7 @@ from .CtbRawFile import CtbRawFile from .RawFile import RawFile from .ScanParameters import ScanParameters -from .utils import random_pixels, random_pixel, flat_list +from .utils import random_pixels, 
random_pixel, flat_list, add_colorbar #make functions available in the top level API diff --git a/python/aare/utils.py b/python/aare/utils.py index 4708921..a10f54c 100644 --- a/python/aare/utils.py +++ b/python/aare/utils.py @@ -1,4 +1,6 @@ import numpy as np +import matplotlib.pyplot as plt +from mpl_toolkits.axes_grid1 import make_axes_locatable def random_pixels(n_pixels, xmin=0, xmax=512, ymin=0, ymax=1024): """Return a list of random pixels. @@ -24,4 +26,11 @@ def random_pixel(xmin=0, xmax=512, ymin=0, ymax=1024): def flat_list(xss): """Flatten a list of lists.""" - return [x for xs in xss for x in xs] \ No newline at end of file + return [x for xs in xss for x in xs] + +def add_colorbar(ax, im, size="5%", pad=0.05): + """Add a colorbar with the same height as the image.""" + divider = make_axes_locatable(ax) + cax = divider.append_axes("right", size=size, pad=pad) + plt.colorbar(im, cax=cax) + return ax, im, cax \ No newline at end of file From 8b0eee1e66dd7f6273bb4c7ceb7cd3d67a15f52a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Wed, 9 Apr 2025 17:54:55 +0200 Subject: [PATCH 15/23] fixed warnings and removed ambiguous read_frame (#154) Fixed warnings: - unused variable in Interpolator - Narrowing conversions uint64-->int64 Removed an ambiguous function from JungfrauDataFile - NDarry read_frame(header&=nullptr) - Frame read_frame() NDArray and NDView size() is now signed --- include/aare/JungfrauDataFile.hpp | 8 +------- include/aare/NDArray.hpp | 2 +- include/aare/NDView.hpp | 4 ++-- include/aare/VarClusterFinder.hpp | 4 ++-- src/Fit.cpp | 8 ++++---- src/Interpolator.cpp | 9 ++------- src/JungfrauDataFile.cpp | 20 ++++++++------------ src/JungfrauDataFile.test.cpp | 24 ++++++++++++++++++++++-- src/NDArray.test.cpp | 4 ++-- src/algorithm.test.cpp | 12 ++++++------ 10 files changed, 50 insertions(+), 45 deletions(-) diff --git a/include/aare/JungfrauDataFile.hpp b/include/aare/JungfrauDataFile.hpp index bba5403..9b1bc48 100644 --- 
a/include/aare/JungfrauDataFile.hpp +++ b/include/aare/JungfrauDataFile.hpp @@ -49,6 +49,7 @@ class JungfrauDataFile : public FileInterface { size_t total_frames() const override; size_t rows() const override; size_t cols() const override; + std::array shape() const; size_t n_files() const; //!< get the number of files in the series. // Extra functions needed for FileInterface @@ -81,13 +82,6 @@ class JungfrauDataFile : public FileInterface { */ void read_into(NDArray* image, JungfrauDataHeader* header = nullptr); - /** - * @brief Read a single frame from the file. Allocated a new NDArray for the output data - * @param header pointer to a JungfrauDataHeader or nullptr to skip header) - * @return NDArray with the image data - */ - NDArray read_frame(JungfrauDataHeader* header = nullptr); - JungfrauDataHeader read_header(); std::filesystem::path current_file() const { return fpath(m_current_file_index+m_offset); } diff --git a/include/aare/NDArray.hpp b/include/aare/NDArray.hpp index 45d3a83..ceb1e0b 100644 --- a/include/aare/NDArray.hpp +++ b/include/aare/NDArray.hpp @@ -194,7 +194,7 @@ class NDArray : public ArrayExpr, Ndim> { T *data() { return data_; } std::byte *buffer() { return reinterpret_cast(data_); } - size_t size() const { return size_; } + ssize_t size() const { return static_cast(size_); } size_t total_bytes() const { return size_ * sizeof(T); } std::array shape() const noexcept { return shape_; } int64_t shape(int64_t i) const noexcept { return shape_[i]; } diff --git a/include/aare/NDView.hpp b/include/aare/NDView.hpp index f53f758..55b442b 100644 --- a/include/aare/NDView.hpp +++ b/include/aare/NDView.hpp @@ -71,7 +71,7 @@ template class NDView : public ArrayExpr(size_); } size_t total_bytes() const { return size_ * sizeof(T); } std::array strides() const noexcept { return strides_; } @@ -102,7 +102,7 @@ template class NDView : public ArrayExpr NDView& operator=(const std::array &arr) { - if(size() != arr.size()) + if(size() != 
static_cast(arr.size())) throw std::runtime_error(LOCATION + "Array and NDView size mismatch"); std::copy(arr.begin(), arr.end(), begin()); return *this; diff --git a/include/aare/VarClusterFinder.hpp b/include/aare/VarClusterFinder.hpp index ea62a9d..161941a 100644 --- a/include/aare/VarClusterFinder.hpp +++ b/include/aare/VarClusterFinder.hpp @@ -226,7 +226,7 @@ template void VarClusterFinder::single_pass(NDView img) { template void VarClusterFinder::first_pass() { - for (size_t i = 0; i < original_.size(); ++i) { + for (ssize_t i = 0; i < original_.size(); ++i) { if (use_noise_map) threshold_ = 5 * noiseMap(i); binary_(i) = (original_(i) > threshold_); @@ -250,7 +250,7 @@ template void VarClusterFinder::first_pass() { template void VarClusterFinder::second_pass() { - for (size_t i = 0; i != labeled_.size(); ++i) { + for (ssize_t i = 0; i != labeled_.size(); ++i) { auto cl = labeled_(i); if (cl != 0) { auto it = child.find(cl); diff --git a/src/Fit.cpp b/src/Fit.cpp index 3001efd..9126109 100644 --- a/src/Fit.cpp +++ b/src/Fit.cpp @@ -18,7 +18,7 @@ double gaus(const double x, const double *par) { NDArray gaus(NDView x, NDView par) { NDArray y({x.shape(0)}, 0); - for (size_t i = 0; i < x.size(); i++) { + for (ssize_t i = 0; i < x.size(); i++) { y(i) = gaus(x(i), par.data()); } return y; @@ -28,7 +28,7 @@ double pol1(const double x, const double *par) { return par[0] * x + par[1]; } NDArray pol1(NDView x, NDView par) { NDArray y({x.shape()}, 0); - for (size_t i = 0; i < x.size(); i++) { + for (ssize_t i = 0; i < x.size(); i++) { y(i) = pol1(x(i), par.data()); } return y; @@ -153,7 +153,7 @@ void fit_gaus(NDView x, NDView y, NDView y_err, // Calculate chi2 chi2 = 0; - for (size_t i = 0; i < y.size(); i++) { + for (ssize_t i = 0; i < y.size(); i++) { chi2 += std::pow((y(i) - func::gaus(x(i), par_out.data())) / y_err(i), 2); } } @@ -205,7 +205,7 @@ void fit_pol1(NDView x, NDView y, NDView y_err, // Calculate chi2 chi2 = 0; - for (size_t i = 0; i < y.size(); i++) { + 
for (ssize_t i = 0; i < y.size(); i++) { chi2 += std::pow((y(i) - func::pol1(x(i), par_out.data())) / y_err(i), 2); } } diff --git a/src/Interpolator.cpp b/src/Interpolator.cpp index 7f82533..7034a83 100644 --- a/src/Interpolator.cpp +++ b/src/Interpolator.cpp @@ -68,19 +68,14 @@ std::vector Interpolator::interpolate(const ClusterVector& clus photon.y = cluster.y; photon.energy = eta.sum; - // auto ie = nearest_index(m_energy_bins, photon.energy)-1; - // auto ix = nearest_index(m_etabinsx, eta.x)-1; - // auto iy = nearest_index(m_etabinsy, eta.y)-1; + //Finding the index of the last element that is smaller //should work fine as long as we have many bins auto ie = last_smaller(m_energy_bins, photon.energy); auto ix = last_smaller(m_etabinsx, eta.x); auto iy = last_smaller(m_etabinsy, eta.y); - - // fmt::print("ex: {}, ix: {}, iy: {}\n", ie, ix, iy); - double dX, dY; - int ex, ey; + double dX{}, dY{}; // cBottomLeft = 0, // cBottomRight = 1, // cTopLeft = 2, diff --git a/src/JungfrauDataFile.cpp b/src/JungfrauDataFile.cpp index 6e1ccd6..8f1f904 100644 --- a/src/JungfrauDataFile.cpp +++ b/src/JungfrauDataFile.cpp @@ -37,8 +37,9 @@ Frame JungfrauDataFile::read_frame(size_t frame_number){ std::vector JungfrauDataFile::read_n(size_t n_frames) { std::vector frames; - throw std::runtime_error(LOCATION + - "Not implemented yet"); + for(size_t i = 0; i < n_frames; ++i){ + frames.push_back(read_frame()); + } return frames; } @@ -54,6 +55,10 @@ size_t JungfrauDataFile::frame_number(size_t frame_index) { return read_header().framenum; } +std::array JungfrauDataFile::shape() const { + return {static_cast(rows()), static_cast(cols())}; +} + DetectorType JungfrauDataFile::detector_type() const { return DetectorType::Jungfrau; } std::string JungfrauDataFile::base_name() const { return m_base_name; } @@ -198,22 +203,13 @@ void JungfrauDataFile::read_into(std::byte *image_buf, size_t n_frames, } void JungfrauDataFile::read_into(NDArray* image, JungfrauDataHeader* header) { - 
if(!(rows() == image->shape(0) && cols() == image->shape(1))){ + if(image->shape()!=shape()){ throw std::runtime_error(LOCATION + "Image shape does not match file size: " + std::to_string(rows()) + "x" + std::to_string(cols())); } read_into(reinterpret_cast(image->data()), header); } -NDArray JungfrauDataFile::read_frame(JungfrauDataHeader* header) { - Shape<2> shape{rows(), cols()}; - NDArray image(shape); - - read_into(reinterpret_cast(image.data()), - header); - - return image; -} JungfrauDataHeader JungfrauDataFile::read_header() { JungfrauDataHeader header; diff --git a/src/JungfrauDataFile.test.cpp b/src/JungfrauDataFile.test.cpp index 626a318..ce51168 100644 --- a/src/JungfrauDataFile.test.cpp +++ b/src/JungfrauDataFile.test.cpp @@ -28,7 +28,8 @@ TEST_CASE("Open a Jungfrau data file", "[.files]") { //Check that the frame number and buch id is read correctly for (size_t i = 0; i < 24; ++i) { JungfrauDataHeader header; - auto image = f.read_frame(&header); + aare::NDArray image(f.shape()); + f.read_into(&image, &header); REQUIRE(header.framenum == i + 1); REQUIRE(header.bunchid == (i + 1) * (i + 1)); REQUIRE(image.shape(0) == 512); @@ -58,7 +59,8 @@ TEST_CASE("Seek in a JungfrauDataFile", "[.files]"){ REQUIRE(h3.framenum == 59+1); JungfrauDataHeader h4; - auto image = f.read_frame(&h4); + aare::NDArray image(f.shape()); + f.read_into(&image, &h4); REQUIRE(h4.framenum == 59+1); //now we should be on the next frame @@ -91,4 +93,22 @@ TEST_CASE("Open a Jungfrau data file with non zero file index", "[.files]"){ REQUIRE(f.current_file().stem() == "AldoJF65k_000003"); +} + +TEST_CASE("Read into throws if size doesn't match", "[.files]"){ + auto fpath = test_data_path() / "dat" / "AldoJF65k_000000.dat"; + REQUIRE(std::filesystem::exists(fpath)); + + JungfrauDataFile f(fpath); + + aare::NDArray image({39, 85}); + JungfrauDataHeader header; + + REQUIRE_THROWS(f.read_into(&image, &header)); + REQUIRE_THROWS(f.read_into(&image, nullptr)); + 
REQUIRE_THROWS(f.read_into(&image)); + + REQUIRE(f.tell() == 0); + + } \ No newline at end of file diff --git a/src/NDArray.test.cpp b/src/NDArray.test.cpp index eff3e2c..c37a285 100644 --- a/src/NDArray.test.cpp +++ b/src/NDArray.test.cpp @@ -183,14 +183,14 @@ TEST_CASE("Size and shape matches") { int64_t h = 75; std::array shape{w, h}; NDArray a{shape}; - REQUIRE(a.size() == static_cast(w * h)); + REQUIRE(a.size() == w * h); REQUIRE(a.shape() == shape); } TEST_CASE("Initial value matches for all elements") { double v = 4.35; NDArray a{{5, 5}, v}; - for (uint32_t i = 0; i < a.size(); ++i) { + for (int i = 0; i < a.size(); ++i) { REQUIRE(a(i) == v); } } diff --git a/src/algorithm.test.cpp b/src/algorithm.test.cpp index e2ae8fa..79541a1 100644 --- a/src/algorithm.test.cpp +++ b/src/algorithm.test.cpp @@ -6,7 +6,7 @@ TEST_CASE("Find the closed index in a 1D array", "[algorithm]") { aare::NDArray arr({5}); - for (size_t i = 0; i < arr.size(); i++) { + for (ssize_t i = 0; i < arr.size(); i++) { arr[i] = i; } // arr 0, 1, 2, 3, 4 @@ -19,7 +19,7 @@ TEST_CASE("Find the closed index in a 1D array", "[algorithm]") { TEST_CASE("Passing integers to nearest_index works", "[algorithm]"){ aare::NDArray arr({5}); - for (size_t i = 0; i < arr.size(); i++) { + for (ssize_t i = 0; i < arr.size(); i++) { arr[i] = i; } // arr 0, 1, 2, 3, 4 @@ -62,7 +62,7 @@ TEST_CASE("nearest index when there is no different uses the first element also TEST_CASE("last smaller", "[algorithm]"){ aare::NDArray arr({5}); - for (size_t i = 0; i < arr.size(); i++) { + for (ssize_t i = 0; i < arr.size(); i++) { arr[i] = i; } // arr 0, 1, 2, 3, 4 @@ -74,7 +74,7 @@ TEST_CASE("last smaller", "[algorithm]"){ TEST_CASE("returns last bin strictly smaller", "[algorithm]"){ aare::NDArray arr({5}); - for (size_t i = 0; i < arr.size(); i++) { + for (ssize_t i = 0; i < arr.size(); i++) { arr[i] = i; } // arr 0, 1, 2, 3, 4 @@ -84,7 +84,7 @@ TEST_CASE("returns last bin strictly smaller", "[algorithm]"){ 
TEST_CASE("last_smaller with all elements smaller returns last element", "[algorithm]"){ aare::NDArray arr({5}); - for (size_t i = 0; i < arr.size(); i++) { + for (ssize_t i = 0; i < arr.size(); i++) { arr[i] = i; } // arr 0, 1, 2, 3, 4 @@ -93,7 +93,7 @@ TEST_CASE("last_smaller with all elements smaller returns last element", "[algor TEST_CASE("last_smaller with all elements bigger returns first element", "[algorithm]"){ aare::NDArray arr({5}); - for (size_t i = 0; i < arr.size(); i++) { + for (ssize_t i = 0; i < arr.size(); i++) { arr[i] = i; } // arr 0, 1, 2, 3, 4 From 6e4db45b578c7e87c577783ee639ffc89c2d11d8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Thu, 10 Apr 2025 10:17:16 +0200 Subject: [PATCH 16/23] Activated RH8 build on PSI gitea (#155) --- .gitea/workflows/rh8-native.yml | 12 +++++++++--- .gitea/workflows/rh9-native.yml | 2 +- CMakeLists.txt | 8 ++++++++ python/CMakeLists.txt | 3 ++- 4 files changed, 20 insertions(+), 5 deletions(-) diff --git a/.gitea/workflows/rh8-native.yml b/.gitea/workflows/rh8-native.yml index 02d3dc0..1c64161 100644 --- a/.gitea/workflows/rh8-native.yml +++ b/.gitea/workflows/rh8-native.yml @@ -1,18 +1,24 @@ name: Build on RHEL8 on: + push: workflow_dispatch: permissions: contents: read jobs: - buildh: + build: runs-on: "ubuntu-latest" container: image: gitea.psi.ch/images/rhel8-developer-gitea-actions steps: - - uses: actions/checkout@v4 + # workaround until actions/checkout@v4 is available for RH8 + # - uses: actions/checkout@v4 + - name: Clone repository + run: | + echo Cloning ${{ github.ref_name }} + git clone https://${{secrets.GITHUB_TOKEN}}@gitea.psi.ch/${{ github.repository }}.git --branch=${{ github.ref_name }} . - name: Install dependencies @@ -22,7 +28,7 @@ jobs: - name: Build library run: | mkdir build && cd build - cmake .. -DAARE_PYTHON_BINDINGS=ON -DAARE_TESTS=ON + cmake .. 
-DAARE_PYTHON_BINDINGS=ON -DAARE_TESTS=ON -DPython_FIND_VIRTUALENV=FIRST make -j 2 - name: C++ unit tests diff --git a/.gitea/workflows/rh9-native.yml b/.gitea/workflows/rh9-native.yml index c1f10ac..5027365 100644 --- a/.gitea/workflows/rh9-native.yml +++ b/.gitea/workflows/rh9-native.yml @@ -8,7 +8,7 @@ permissions: contents: read jobs: - buildh: + build: runs-on: "ubuntu-latest" container: image: gitea.psi.ch/images/rhel9-developer-gitea-actions diff --git a/CMakeLists.txt b/CMakeLists.txt index 6db9314..039545e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -11,6 +11,14 @@ set(CMAKE_CXX_STANDARD 17) set(CMAKE_CXX_STANDARD_REQUIRED ON) set(CMAKE_CXX_EXTENSIONS OFF) +execute_process( + COMMAND git log -1 --format=%h + WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} + OUTPUT_VARIABLE GIT_HASH + OUTPUT_STRIP_TRAILING_WHITESPACE + ) +message(STATUS "Building from git hash: ${GIT_HASH}") + if (${CMAKE_VERSION} VERSION_GREATER "3.24") cmake_policy(SET CMP0135 NEW) #Fetch content download timestamp endif() diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index 09de736..75847a7 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -1,12 +1,13 @@ find_package (Python 3.10 COMPONENTS Interpreter Development REQUIRED) +set(PYBIND11_FINDPYTHON ON) # Needed for RH8 # Download or find pybind11 depending on configuration if(AARE_FETCH_PYBIND11) FetchContent_Declare( pybind11 GIT_REPOSITORY https://github.com/pybind/pybind11 - GIT_TAG v2.13.0 + GIT_TAG v2.13.6 ) FetchContent_MakeAvailable(pybind11) else() From a59e9656be7b68428f7af210d1e913e7906ac0ca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Fri, 11 Apr 2025 16:54:21 +0200 Subject: [PATCH 17/23] Making RawSubFile usable from Python (#158) - Removed a printout left from debugging - return also header when reading - added read_n - check for error in ifstream --- CMakeLists.txt | 2 + include/aare/RawSubFile.hpp | 5 +- include/aare/utils/ifstream_helpers.hpp | 12 +++ 
python/src/file.hpp | 36 ++------ python/src/module.cpp | 2 + python/src/raw_sub_file.hpp | 110 ++++++++++++++++++++++++ python/tests/test_RawSubFile.py | 36 ++++++++ src/RawSubFile.cpp | 31 ++++++- src/utils/ifstream_helpers.cpp | 18 ++++ 9 files changed, 217 insertions(+), 35 deletions(-) create mode 100644 include/aare/utils/ifstream_helpers.hpp create mode 100644 python/src/raw_sub_file.hpp create mode 100644 python/tests/test_RawSubFile.py create mode 100644 src/utils/ifstream_helpers.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 039545e..2f2a7b5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -388,7 +388,9 @@ set(SourceFiles ${CMAKE_CURRENT_SOURCE_DIR}/src/RawFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/RawSubFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/RawMasterFile.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/utils/task.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/utils/ifstream_helpers.cpp ) diff --git a/include/aare/RawSubFile.hpp b/include/aare/RawSubFile.hpp index 1d554e8..350a475 100644 --- a/include/aare/RawSubFile.hpp +++ b/include/aare/RawSubFile.hpp @@ -22,7 +22,7 @@ class RawSubFile { size_t m_rows{}; size_t m_cols{}; size_t m_bytes_per_frame{}; - size_t n_frames{}; + size_t m_num_frames{}; uint32_t m_pos_row{}; uint32_t m_pos_col{}; @@ -53,6 +53,7 @@ class RawSubFile { size_t tell(); void read_into(std::byte *image_buf, DetectorHeader *header = nullptr); + void read_into(std::byte *image_buf, size_t n_frames, DetectorHeader *header= nullptr); void get_part(std::byte *buffer, size_t frame_index); void read_header(DetectorHeader *header); @@ -66,6 +67,8 @@ class RawSubFile { size_t pixels_per_frame() const { return m_rows * m_cols; } size_t bytes_per_pixel() const { return m_bitdepth / bits_per_byte; } + size_t frames_in_file() const { return m_num_frames; } + private: template void read_with_map(std::byte *image_buf); diff --git a/include/aare/utils/ifstream_helpers.hpp b/include/aare/utils/ifstream_helpers.hpp new file mode 100644 index 0000000..0a842ed 
--- /dev/null +++ b/include/aare/utils/ifstream_helpers.hpp @@ -0,0 +1,12 @@ +#pragma once + +#include +#include +namespace aare { + +/** + * @brief Get the error message from an ifstream object +*/ +std::string ifstream_error_msg(std::ifstream &ifs); + +} // namespace aare \ No newline at end of file diff --git a/python/src/file.hpp b/python/src/file.hpp index 0d64e16..2d0f53e 100644 --- a/python/src/file.hpp +++ b/python/src/file.hpp @@ -20,6 +20,9 @@ namespace py = pybind11; using namespace ::aare; + + + //Disable warnings for unused parameters, as we ignore some //in the __exit__ method #pragma GCC diagnostic push @@ -214,36 +217,9 @@ void define_file_io_bindings(py::module &m) { - py::class_(m, "RawSubFile") - .def(py::init()) - .def_property_readonly("bytes_per_frame", &RawSubFile::bytes_per_frame) - .def_property_readonly("pixels_per_frame", - &RawSubFile::pixels_per_frame) - .def("seek", &RawSubFile::seek) - .def("tell", &RawSubFile::tell) - .def_property_readonly("rows", &RawSubFile::rows) - .def_property_readonly("cols", &RawSubFile::cols) - .def("read_frame", - [](RawSubFile &self) { - const uint8_t item_size = self.bytes_per_pixel(); - py::array image; - std::vector shape; - shape.reserve(2); - shape.push_back(self.rows()); - shape.push_back(self.cols()); - if (item_size == 1) { - image = py::array_t(shape); - } else if (item_size == 2) { - image = py::array_t(shape); - } else if (item_size == 4) { - image = py::array_t(shape); - } - fmt::print("item_size: {} rows: {} cols: {}\n", item_size, self.rows(), self.cols()); - self.read_into( - reinterpret_cast(image.mutable_data())); - return image; - }); + + + #pragma GCC diagnostic pop // py::class_(m, "ClusterHeader") diff --git a/python/src/module.cpp b/python/src/module.cpp index 7a17e78..75fe237 100644 --- a/python/src/module.cpp +++ b/python/src/module.cpp @@ -10,6 +10,7 @@ #include "cluster_file.hpp" #include "fit.hpp" #include "interpolation.hpp" +#include "raw_sub_file.hpp" #include 
"jungfrau_data_file.hpp" @@ -22,6 +23,7 @@ namespace py = pybind11; PYBIND11_MODULE(_aare, m) { define_file_io_bindings(m); define_raw_file_io_bindings(m); + define_raw_sub_file_io_bindings(m); define_ctb_raw_file_io_bindings(m); define_raw_master_file_bindings(m); define_var_cluster_finder_bindings(m); diff --git a/python/src/raw_sub_file.hpp b/python/src/raw_sub_file.hpp new file mode 100644 index 0000000..2cb83fc --- /dev/null +++ b/python/src/raw_sub_file.hpp @@ -0,0 +1,110 @@ +#include "aare/CtbRawFile.hpp" +#include "aare/File.hpp" +#include "aare/Frame.hpp" +#include "aare/RawFile.hpp" +#include "aare/RawMasterFile.hpp" +#include "aare/RawSubFile.hpp" + +#include "aare/defs.hpp" +// #include "aare/fClusterFileV2.hpp" + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace py = pybind11; +using namespace ::aare; + +auto read_frame_from_RawSubFile(RawSubFile &self) { + py::array_t header(1); + const uint8_t item_size = self.bytes_per_pixel(); + std::vector shape{static_cast(self.rows()), + static_cast(self.cols())}; + + py::array image; + if (item_size == 1) { + image = py::array_t(shape); + } else if (item_size == 2) { + image = py::array_t(shape); + } else if (item_size == 4) { + image = py::array_t(shape); + } + self.read_into(reinterpret_cast(image.mutable_data()), + header.mutable_data()); + + return py::make_tuple(header, image); +} + +auto read_n_frames_from_RawSubFile(RawSubFile &self, size_t n_frames) { + py::array_t header(n_frames); + const uint8_t item_size = self.bytes_per_pixel(); + std::vector shape{ + static_cast(n_frames), + static_cast(self.rows()), + static_cast(self.cols()) + }; + + py::array image; + if (item_size == 1) { + image = py::array_t(shape); + } else if (item_size == 2) { + image = py::array_t(shape); + } else if (item_size == 4) { + image = py::array_t(shape); + } + self.read_into(reinterpret_cast(image.mutable_data()), n_frames, + header.mutable_data()); + + return py::make_tuple(header, 
image); +} + + +//Disable warnings for unused parameters, as we ignore some +//in the __exit__ method +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" + +void define_raw_sub_file_io_bindings(py::module &m) { + py::class_(m, "RawSubFile") + .def(py::init()) + .def_property_readonly("bytes_per_frame", &RawSubFile::bytes_per_frame) + .def_property_readonly("pixels_per_frame", + &RawSubFile::pixels_per_frame) + .def_property_readonly("bytes_per_pixel", &RawSubFile::bytes_per_pixel) + .def("seek", &RawSubFile::seek) + .def("tell", &RawSubFile::tell) + .def_property_readonly("rows", &RawSubFile::rows) + .def_property_readonly("cols", &RawSubFile::cols) + .def_property_readonly("frames_in_file", &RawSubFile::frames_in_file) + .def("read_frame", &read_frame_from_RawSubFile) + .def("read_n", &read_n_frames_from_RawSubFile) + .def("read", [](RawSubFile &self){ + self.seek(0); + auto n_frames = self.frames_in_file(); + return read_n_frames_from_RawSubFile(self, n_frames); + }) + .def("__enter__", [](RawSubFile &self) { return &self; }) + .def("__exit__", + [](RawSubFile &self, + const std::optional &exc_type, + const std::optional &exc_value, + const std::optional &traceback) { + }) + .def("__iter__", [](RawSubFile &self) { return &self; }) + .def("__next__", [](RawSubFile &self) { + try { + return read_frame_from_RawSubFile(self); + } catch (std::runtime_error &e) { + throw py::stop_iteration(); + } + }); + +} + +#pragma GCC diagnostic pop \ No newline at end of file diff --git a/python/tests/test_RawSubFile.py b/python/tests/test_RawSubFile.py new file mode 100644 index 0000000..a5eea91 --- /dev/null +++ b/python/tests/test_RawSubFile.py @@ -0,0 +1,36 @@ +import pytest +import numpy as np +from aare import RawSubFile, DetectorType + + +@pytest.mark.files +def test_read_a_jungfrau_RawSubFile(test_data_path): + with RawSubFile(test_data_path / "raw/jungfrau/jungfrau_single_d0_f1_0.raw", DetectorType.Jungfrau, 512, 1024, 16) as f: + assert 
f.frames_in_file == 3 + + headers, frames = f.read() + + assert headers.size == 3 + assert frames.shape == (3, 512, 1024) + + # Frame numbers in this file should be 4, 5, 6 + for i,h in zip(range(4,7,1), headers): + assert h["frameNumber"] == i + + # Compare to canned data using numpy + data = np.load(test_data_path / "raw/jungfrau/jungfrau_single_0.npy") + assert np.all(data[3:6] == frames) + +@pytest.mark.files +def test_iterate_over_a_jungfrau_RawSubFile(test_data_path): + + data = np.load(test_data_path / "raw/jungfrau/jungfrau_single_0.npy") + + with RawSubFile(test_data_path / "raw/jungfrau/jungfrau_single_d0_f0_0.raw", DetectorType.Jungfrau, 512, 1024, 16) as f: + i = 0 + for header, frame in f: + assert header["frameNumber"] == i+1 + assert np.all(frame == data[i]) + i += 1 + assert i == 3 + assert header["frameNumber"] == 3 \ No newline at end of file diff --git a/src/RawSubFile.cpp b/src/RawSubFile.cpp index a3bb79c..9e7a421 100644 --- a/src/RawSubFile.cpp +++ b/src/RawSubFile.cpp @@ -1,9 +1,12 @@ #include "aare/RawSubFile.hpp" #include "aare/PixelMap.hpp" +#include "aare/utils/ifstream_helpers.hpp" #include // memcpy #include #include + + namespace aare { RawSubFile::RawSubFile(const std::filesystem::path &fname, @@ -20,7 +23,7 @@ RawSubFile::RawSubFile(const std::filesystem::path &fname, } if (std::filesystem::exists(fname)) { - n_frames = std::filesystem::file_size(fname) / + m_num_frames = std::filesystem::file_size(fname) / (sizeof(DetectorHeader) + rows * cols * bitdepth / 8); } else { throw std::runtime_error( @@ -35,7 +38,7 @@ RawSubFile::RawSubFile(const std::filesystem::path &fname, } #ifdef AARE_VERBOSE - fmt::print("Opened file: {} with {} frames\n", m_fname.string(), n_frames); + fmt::print("Opened file: {} with {} frames\n", m_fname.string(), m_num_frames); fmt::print("m_rows: {}, m_cols: {}, m_bitdepth: {}\n", m_rows, m_cols, m_bitdepth); fmt::print("file size: {}\n", std::filesystem::file_size(fname)); @@ -43,8 +46,8 @@ 
RawSubFile::RawSubFile(const std::filesystem::path &fname, } void RawSubFile::seek(size_t frame_index) { - if (frame_index >= n_frames) { - throw std::runtime_error(LOCATION + fmt::format("Frame index {} out of range in a file with {} frames", frame_index, n_frames)); + if (frame_index >= m_num_frames) { + throw std::runtime_error(LOCATION + fmt::format("Frame index {} out of range in a file with {} frames", frame_index, m_num_frames)); } m_file.seekg((sizeof(DetectorHeader) + bytes_per_frame()) * frame_index); } @@ -60,6 +63,10 @@ void RawSubFile::read_into(std::byte *image_buf, DetectorHeader *header) { m_file.seekg(sizeof(DetectorHeader), std::ios::cur); } + if (m_file.fail()){ + throw std::runtime_error(LOCATION + ifstream_error_msg(m_file)); + } + // TODO! expand support for different bitdepths if (m_pixel_map) { // read into a temporary buffer and then copy the data to the buffer @@ -79,8 +86,24 @@ void RawSubFile::read_into(std::byte *image_buf, DetectorHeader *header) { // read directly into the buffer m_file.read(reinterpret_cast(image_buf), bytes_per_frame()); } + + if (m_file.fail()){ + throw std::runtime_error(LOCATION + ifstream_error_msg(m_file)); + } } +void RawSubFile::read_into(std::byte *image_buf, size_t n_frames, DetectorHeader *header) { + for (size_t i = 0; i < n_frames; i++) { + read_into(image_buf, header); + image_buf += bytes_per_frame(); + if (header) { + ++header; + } + } +} + + + template void RawSubFile::read_with_map(std::byte *image_buf) { auto part_buffer = new std::byte[bytes_per_frame()]; diff --git a/src/utils/ifstream_helpers.cpp b/src/utils/ifstream_helpers.cpp new file mode 100644 index 0000000..74c56f3 --- /dev/null +++ b/src/utils/ifstream_helpers.cpp @@ -0,0 +1,18 @@ +#include "aare/utils/ifstream_helpers.hpp" + +namespace aare { + +std::string ifstream_error_msg(std::ifstream &ifs) { + std::ios_base::iostate state = ifs.rdstate(); + if (state & std::ios_base::eofbit) { + return " End of file reached"; + } else if (state & 
std::ios_base::badbit) { + return " Bad file stream"; + } else if (state & std::ios_base::failbit) { + return " File read failed"; + }else{ + return " Unknown/no error"; + } +} + +} // namespace aare From 84aafa75f6b5f0097ed1b7b5b03102f2e305a1e8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Tue, 22 Apr 2025 08:36:34 +0200 Subject: [PATCH 18/23] Building wheels and uploading to pypi (#160) Still to be resolved in another PR: - Consistent versioning across compiled code, conda and pypi --- .github/workflows/build_wheel.yml | 64 +++++++++++++++++++++++++++++++ .gitignore | 3 +- CMakeLists.txt | 2 +- pyproject.toml | 17 ++++++-- python/CMakeLists.txt | 12 ++++-- 5 files changed, 90 insertions(+), 8 deletions(-) create mode 100644 .github/workflows/build_wheel.yml diff --git a/.github/workflows/build_wheel.yml b/.github/workflows/build_wheel.yml new file mode 100644 index 0000000..f131e77 --- /dev/null +++ b/.github/workflows/build_wheel.yml @@ -0,0 +1,64 @@ +name: Build wheel + +on: + workflow_dispatch: + pull_request: + push: + branches: + - main + release: + types: + - published + + +jobs: + build_wheels: + name: Build wheels on ${{ matrix.os }} + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest,] + + steps: + - uses: actions/checkout@v4 + + - name: Build wheels + run: pipx run cibuildwheel==2.23.0 + + - uses: actions/upload-artifact@v4 + with: + name: cibw-wheels-${{ matrix.os }}-${{ strategy.job-index }} + path: ./wheelhouse/*.whl + + build_sdist: + name: Build source distribution + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Build sdist + run: pipx run build --sdist + + - uses: actions/upload-artifact@v4 + with: + name: cibw-sdist + path: dist/*.tar.gz + + upload_pypi: + needs: [build_wheels, build_sdist] + runs-on: ubuntu-latest + environment: pypi + permissions: + id-token: write + if: github.event_name == 'release' && github.event.action == 'published' + # or, alternatively, upload to PyPI 
on every tag starting with 'v' (remove on: release above to use this) + # if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v') + steps: + - uses: actions/download-artifact@v4 + with: + # unpacks all CIBW artifacts into dist/ + pattern: cibw-* + path: dist + merge-multiple: true + + - uses: pypa/gh-action-pypi-publish@release/v1 diff --git a/.gitignore b/.gitignore index af3e3b7..5982f7f 100644 --- a/.gitignore +++ b/.gitignore @@ -17,7 +17,8 @@ Testing/ ctbDict.cpp ctbDict.h - +wheelhouse/ +dist/ *.pyc */__pycache__/* diff --git a/CMakeLists.txt b/CMakeLists.txt index 2f2a7b5..236e323 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.14) +cmake_minimum_required(VERSION 3.15) project(aare VERSION 1.0.0 diff --git a/pyproject.toml b/pyproject.toml index 470d158..4a477a3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,19 +4,30 @@ build-backend = "scikit_build_core.build" [project] name = "aare" -version = "2025.4.1" +version = "2025.4.2" +requires-python = ">=3.11" +dependencies = [ + "numpy", + "matplotlib", +] +[tool.cibuildwheel] + +build = "cp{311,312,313}-manylinux_x86_64" + [tool.scikit-build] -cmake.verbose = true +build.verbose = true +cmake.build-type = "Release" +install.components = ["python"] [tool.scikit-build.cmake.define] AARE_PYTHON_BINDINGS = "ON" -AARE_SYSTEM_LIBRARIES = "ON" AARE_INSTALL_PYTHONEXT = "ON" + [tool.pytest.ini_options] markers = [ "files: marks tests that need additional data (deselect with '-m \"not files\"')", diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index 75847a7..549205a 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -1,5 +1,5 @@ -find_package (Python 3.10 COMPONENTS Interpreter Development REQUIRED) +find_package (Python 3.10 COMPONENTS Interpreter Development.Module REQUIRED) set(PYBIND11_FINDPYTHON ON) # Needed for RH8 # Download or find pybind11 depending on configuration @@ -59,10 +59,16 @@ endforeach(FILE 
${PYTHON_EXAMPLES}) if(AARE_INSTALL_PYTHONEXT) - install(TARGETS _aare + install( + TARGETS _aare EXPORT "${TARGETS_EXPORT_NAME}" LIBRARY DESTINATION aare + COMPONENT python ) - install(FILES ${PYTHON_FILES} DESTINATION aare) + install( + FILES ${PYTHON_FILES} + DESTINATION aare + COMPONENT python + ) endif() \ No newline at end of file From 326941e2b4ef69f98bf2f773cf6b3281b9ff78bd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Tue, 22 Apr 2025 15:20:46 +0200 Subject: [PATCH 19/23] Custom base for decoding ADC data (#163) New function apply_custom_weights (can we find a better name) that takes a uint16 and a NDView of bases for the conversion. For each supplied weight it is used as base (instead of 2) to convert from bits to a double. --------- Co-authored-by: siebsi --- CMakeLists.txt | 1 + include/aare/NDView.hpp | 5 +++ include/aare/decode.hpp | 15 ++++++- python/aare/__init__.py | 4 ++ python/src/ctb_raw_file.hpp | 71 ++++++++++++++++++++------------ src/NDView.test.cpp | 12 ++++++ src/decode.cpp | 43 +++++++++++++++++++- src/decode.test.cpp | 80 +++++++++++++++++++++++++++++++++++++ 8 files changed, 204 insertions(+), 27 deletions(-) create mode 100644 src/decode.test.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 236e323..b3d7377 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -427,6 +427,7 @@ if(AARE_TESTS) set(TestSources ${CMAKE_CURRENT_SOURCE_DIR}/src/algorithm.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/defs.test.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/decode.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Dtype.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Frame.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/geo_helpers.test.cpp diff --git a/include/aare/NDView.hpp b/include/aare/NDView.hpp index 55b442b..ddb5d1c 100644 --- a/include/aare/NDView.hpp +++ b/include/aare/NDView.hpp @@ -184,4 +184,9 @@ std::ostream& operator <<(std::ostream& os, const NDView& arr){ } +template +NDView make_view(std::vector& vec){ + return NDView(vec.data(), 
{static_cast(vec.size())}); +} + } // namespace aare \ No newline at end of file diff --git a/include/aare/decode.hpp b/include/aare/decode.hpp index 1c3c479..e784c4a 100644 --- a/include/aare/decode.hpp +++ b/include/aare/decode.hpp @@ -1,6 +1,7 @@ #pragma once #include +#include #include namespace aare { @@ -10,4 +11,16 @@ uint16_t adc_sar_04_decode64to16(uint64_t input); void adc_sar_05_decode64to16(NDView input, NDView output); void adc_sar_04_decode64to16(NDView input, NDView output); -} // namespace aare \ No newline at end of file + +/** + * @brief Apply custom weights to a 16-bit input value. Will sum up weights[i]**i + * for each bit i that is set in the input value. + * @throws std::out_of_range if weights.size() < 16 + * @param input 16-bit input value + * @param weights vector of weights, size must be less than or equal to 16 + */ +double apply_custom_weights(uint16_t input, const NDView weights); + +void apply_custom_weights(NDView input, NDView output, const NDView weights); + +} // namespace aare diff --git a/python/aare/__init__.py b/python/aare/__init__.py index 98e8c72..db9672f 100644 --- a/python/aare/__init__.py +++ b/python/aare/__init__.py @@ -13,6 +13,10 @@ from ._aare import ClusterFinderMT, ClusterCollector, ClusterFileSink, ClusterVe from ._aare import fit_gaus, fit_pol1 from ._aare import Interpolator + + +from ._aare import apply_custom_weights + from .CtbRawFile import CtbRawFile from .RawFile import RawFile from .ScanParameters import ScanParameters diff --git a/python/src/ctb_raw_file.hpp b/python/src/ctb_raw_file.hpp index 56e571b..a88a9d1 100644 --- a/python/src/ctb_raw_file.hpp +++ b/python/src/ctb_raw_file.hpp @@ -10,6 +10,8 @@ #include "aare/decode.hpp" // #include "aare/fClusterFileV2.hpp" +#include "np_helper.hpp" + #include #include #include @@ -65,35 +67,54 @@ m.def("adc_sar_04_decode64to16", [](py::array_t input) { return output; }); - py::class_(m, "CtbRawFile") - .def(py::init()) - .def("read_frame", - [](CtbRawFile &self) 
{ - size_t image_size = self.image_size_in_bytes(); - py::array image; - std::vector shape; - shape.reserve(2); - shape.push_back(1); - shape.push_back(image_size); +m.def( + "apply_custom_weights", + [](py::array_t &input, + py::array_t + &weights) { + - py::array_t header(1); + // Create new array with same shape as the input array (uninitialized values) + py::buffer_info buf = input.request(); + py::array_t output(buf.shape); - // always read bytes - image = py::array_t(shape); + // Use NDViews to call into the C++ library + auto weights_view = make_view_1d(weights); + NDView input_view(input.mutable_data(), {input.size()}); + NDView output_view(output.mutable_data(), {output.size()}); - self.read_into( - reinterpret_cast(image.mutable_data()), - header.mutable_data()); + apply_custom_weights(input_view, output_view, weights_view); + return output; + }); - return py::make_tuple(header, image); - }) - .def("seek", &CtbRawFile::seek) - .def("tell", &CtbRawFile::tell) - .def("master", &CtbRawFile::master) +py::class_(m, "CtbRawFile") + .def(py::init()) + .def("read_frame", + [](CtbRawFile &self) { + size_t image_size = self.image_size_in_bytes(); + py::array image; + std::vector shape; + shape.reserve(2); + shape.push_back(1); + shape.push_back(image_size); - .def_property_readonly("image_size_in_bytes", - &CtbRawFile::image_size_in_bytes) + py::array_t header(1); - .def_property_readonly("frames_in_file", &CtbRawFile::frames_in_file); + // always read bytes + image = py::array_t(shape); -} \ No newline at end of file + self.read_into(reinterpret_cast(image.mutable_data()), + header.mutable_data()); + + return py::make_tuple(header, image); + }) + .def("seek", &CtbRawFile::seek) + .def("tell", &CtbRawFile::tell) + .def("master", &CtbRawFile::master) + + .def_property_readonly("image_size_in_bytes", + &CtbRawFile::image_size_in_bytes) + + .def_property_readonly("frames_in_file", &CtbRawFile::frames_in_file); + +} diff --git a/src/NDView.test.cpp 
b/src/NDView.test.cpp index 3070de6..6bc8eef 100644 --- a/src/NDView.test.cpp +++ b/src/NDView.test.cpp @@ -190,4 +190,16 @@ TEST_CASE("compare two views") { NDView view2(vec2.data(), Shape<2>{3, 4}); REQUIRE((view1 == view2)); +} + + +TEST_CASE("Create a view over a vector"){ + std::vector vec; + for (int i = 0; i != 12; ++i) { + vec.push_back(i); + } + auto v = aare::make_view(vec); + REQUIRE(v.shape()[0] == 12); + REQUIRE(v[0] == 0); + REQUIRE(v[11] == 11); } \ No newline at end of file diff --git a/src/decode.cpp b/src/decode.cpp index 17c033d..8ac7bc0 100644 --- a/src/decode.cpp +++ b/src/decode.cpp @@ -1,5 +1,5 @@ #include "aare/decode.hpp" - +#include namespace aare { uint16_t adc_sar_05_decode64to16(uint64_t input){ @@ -22,6 +22,10 @@ uint16_t adc_sar_05_decode64to16(uint64_t input){ } void adc_sar_05_decode64to16(NDView input, NDView output){ + if(input.shape() != output.shape()){ + throw std::invalid_argument(LOCATION + " input and output shapes must match"); + } + for(int64_t i = 0; i < input.shape(0); i++){ for(int64_t j = 0; j < input.shape(1); j++){ output(i,j) = adc_sar_05_decode64to16(input(i,j)); @@ -49,6 +53,9 @@ uint16_t adc_sar_04_decode64to16(uint64_t input){ } void adc_sar_04_decode64to16(NDView input, NDView output){ + if(input.shape() != output.shape()){ + throw std::invalid_argument(LOCATION + " input and output shapes must match"); + } for(int64_t i = 0; i < input.shape(0); i++){ for(int64_t j = 0; j < input.shape(1); j++){ output(i,j) = adc_sar_04_decode64to16(input(i,j)); @@ -56,6 +63,40 @@ void adc_sar_04_decode64to16(NDView input, NDView outpu } } +double apply_custom_weights(uint16_t input, const NDView weights) { + if(weights.size() > 16){ + throw std::invalid_argument("weights size must be less than or equal to 16"); + } + + double result = 0.0; + for (ssize_t i = 0; i < weights.size(); ++i) { + result += ((input >> i) & 1) * std::pow(weights[i], i); + } + return result; + +} + +void apply_custom_weights(NDView input, NDView output, 
const NDView weights) { + if(input.shape() != output.shape()){ + throw std::invalid_argument(LOCATION + " input and output shapes must match"); + } + + //Calculate weights to avoid repeatedly calling std::pow + std::vector weights_powers(weights.size()); + for (ssize_t i = 0; i < weights.size(); ++i) { + weights_powers[i] = std::pow(weights[i], i); + } + + // Apply custom weights to each element in the input array + for (ssize_t i = 0; i < input.shape(0); i++) { + double result = 0.0; + for (size_t bit_index = 0; bit_index < weights_powers.size(); ++bit_index) { + result += ((input(i) >> bit_index) & 1) * weights_powers[bit_index]; + } + output(i) = result; + } +} + } // namespace aare diff --git a/src/decode.test.cpp b/src/decode.test.cpp new file mode 100644 index 0000000..a90213c --- /dev/null +++ b/src/decode.test.cpp @@ -0,0 +1,80 @@ +#include "aare/decode.hpp" + +#include +#include +#include "aare/NDArray.hpp" +using Catch::Matchers::WithinAbs; +#include + +TEST_CASE("test_adc_sar_05_decode64to16"){ + uint64_t input = 0; + uint16_t output = aare::adc_sar_05_decode64to16(input); + CHECK(output == 0); + + + // bit 29 on th input is bit 0 on the output + input = 1UL << 29; + output = aare::adc_sar_05_decode64to16(input); + CHECK(output == 1); + + // test all bits by iteratting through the bitlist + std::vector bitlist = {29, 19, 28, 18, 31, 21, 27, 20, 24, 23, 25, 22}; + for (size_t i = 0; i < bitlist.size(); i++) { + input = 1UL << bitlist[i]; + output = aare::adc_sar_05_decode64to16(input); + CHECK(output == (1 << i)); + } + + + // test a few "random" values + input = 0; + input |= (1UL << 29); + input |= (1UL << 19); + input |= (1UL << 28); + output = aare::adc_sar_05_decode64to16(input); + CHECK(output == 7UL); + + + input = 0; + input |= (1UL << 18); + input |= (1UL << 27); + input |= (1UL << 25); + output = aare::adc_sar_05_decode64to16(input); + CHECK(output == 1096UL); + + input = 0; + input |= (1UL << 25); + input |= (1UL << 22); + output = 
aare::adc_sar_05_decode64to16(input); + CHECK(output == 3072UL); + } + + + TEST_CASE("test_apply_custom_weights") { + + uint16_t input = 1; + aare::NDArray weights_data({3}, 0.0); + weights_data(0) = 1.7; + weights_data(1) = 2.1; + weights_data(2) = 1.8; + + auto weights = weights_data.view(); + + + double output = aare::apply_custom_weights(input, weights); + CHECK_THAT(output, WithinAbs(1.0, 0.001)); + + input = 1UL << 1; + output = aare::apply_custom_weights(input, weights); + CHECK_THAT(output, WithinAbs(2.1, 0.001)); + + + input = 1UL << 2; + output = aare::apply_custom_weights(input, weights); + CHECK_THAT(output, WithinAbs(3.24, 0.001)); + + input = 0b111; + output = aare::apply_custom_weights(input, weights); + CHECK_THAT(output, WithinAbs(6.34, 0.001)); + + } \ No newline at end of file From b501c31e389e1b8374578ebe1b34a6a74dd9395d Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Tue, 22 Apr 2025 15:22:47 +0200 Subject: [PATCH 20/23] added missed commit --- src/NDView.test.cpp | 43 +++++++++++++++---------------------------- src/decode.test.cpp | 4 ++-- 2 files changed, 17 insertions(+), 30 deletions(-) diff --git a/src/NDView.test.cpp b/src/NDView.test.cpp index 6bc8eef..8750f3a 100644 --- a/src/NDView.test.cpp +++ b/src/NDView.test.cpp @@ -3,6 +3,7 @@ #include #include +#include using aare::NDView; using aare::Shape; @@ -21,10 +22,8 @@ TEST_CASE("Element reference 1D") { } TEST_CASE("Element reference 2D") { - std::vector vec; - for (int i = 0; i != 12; ++i) { - vec.push_back(i); - } + std::vector vec(12); + std::iota(vec.begin(), vec.end(), 0); NDView data(vec.data(), Shape<2>{3, 4}); REQUIRE(vec.size() == static_cast(data.size())); @@ -58,10 +57,8 @@ TEST_CASE("Element reference 3D") { } TEST_CASE("Plus and miuns with single value") { - std::vector vec; - for (int i = 0; i != 12; ++i) { - vec.push_back(i); - } + std::vector vec(12); + std::iota(vec.begin(), vec.end(), 0); NDView data(vec.data(), Shape<2>{3, 4}); data += 5; int i = 0; @@ -116,10 +113,8 @@ 
TEST_CASE("elementwise assign") { } TEST_CASE("iterators") { - std::vector vec; - for (int i = 0; i != 12; ++i) { - vec.push_back(i); - } + std::vector vec(12); + std::iota(vec.begin(), vec.end(), 0); NDView data(vec.data(), Shape<1>{12}); int i = 0; for (const auto item : data) { @@ -167,26 +162,20 @@ TEST_CASE("divide with another span") { } TEST_CASE("Retrieve shape") { - std::vector vec; - for (int i = 0; i != 12; ++i) { - vec.push_back(i); - } + std::vector vec(12); + std::iota(vec.begin(), vec.end(), 0); NDView data(vec.data(), Shape<2>{3, 4}); REQUIRE(data.shape()[0] == 3); REQUIRE(data.shape()[1] == 4); } TEST_CASE("compare two views") { - std::vector vec1; - for (int i = 0; i != 12; ++i) { - vec1.push_back(i); - } + std::vector vec1(12); + std::iota(vec1.begin(), vec1.end(), 0); NDView view1(vec1.data(), Shape<2>{3, 4}); - std::vector vec2; - for (int i = 0; i != 12; ++i) { - vec2.push_back(i); - } + std::vector vec2(12); + std::iota(vec2.begin(), vec2.end(), 0); NDView view2(vec2.data(), Shape<2>{3, 4}); REQUIRE((view1 == view2)); @@ -194,10 +183,8 @@ TEST_CASE("compare two views") { TEST_CASE("Create a view over a vector"){ - std::vector vec; - for (int i = 0; i != 12; ++i) { - vec.push_back(i); - } + std::vector vec(12); + std::iota(vec.begin(), vec.end(), 0); auto v = aare::make_view(vec); REQUIRE(v.shape()[0] == 12); REQUIRE(v[0] == 0); diff --git a/src/decode.test.cpp b/src/decode.test.cpp index a90213c..1e4b2fc 100644 --- a/src/decode.test.cpp +++ b/src/decode.test.cpp @@ -64,12 +64,12 @@ TEST_CASE("test_adc_sar_05_decode64to16"){ double output = aare::apply_custom_weights(input, weights); CHECK_THAT(output, WithinAbs(1.0, 0.001)); - input = 1UL << 1; + input = 1 << 1; output = aare::apply_custom_weights(input, weights); CHECK_THAT(output, WithinAbs(2.1, 0.001)); - input = 1UL << 2; + input = 1 << 2; output = aare::apply_custom_weights(input, weights); CHECK_THAT(output, WithinAbs(3.24, 0.001)); From c6e8e5f6a1f6754bdbf6a8ff9bb90d4c36747368 Mon Sep 
17 00:00:00 2001 From: froejdh_e Date: Tue, 22 Apr 2025 16:16:27 +0200 Subject: [PATCH 21/23] inverted gain map --- conda-recipe/meta.yaml | 2 +- include/aare/ClusterFile.hpp | 2 +- pyproject.toml | 2 +- src/ClusterFile.cpp | 6 ++++++ 4 files changed, 9 insertions(+), 3 deletions(-) diff --git a/conda-recipe/meta.yaml b/conda-recipe/meta.yaml index 560e831..0d3b532 100644 --- a/conda-recipe/meta.yaml +++ b/conda-recipe/meta.yaml @@ -1,6 +1,6 @@ package: name: aare - version: 2025.4.1 #TODO! how to not duplicate this? + version: 2025.4.22 #TODO! how to not duplicate this? diff --git a/include/aare/ClusterFile.hpp b/include/aare/ClusterFile.hpp index bea9f48..b47a1d5 100644 --- a/include/aare/ClusterFile.hpp +++ b/include/aare/ClusterFile.hpp @@ -124,7 +124,7 @@ class ClusterFile { /** * @brief Set the gain map to use when reading clusters. If set the gain map will be applied - * to the clusters that pass ROI and noise_map selection. + * to the clusters that pass ROI and noise_map selection. The gain map is expected to be in ADU/energy. 
*/ void set_gain_map(const NDView gain_map); diff --git a/pyproject.toml b/pyproject.toml index 4a477a3..6451f39 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "scikit_build_core.build" [project] name = "aare" -version = "2025.4.2" +version = "2025.4.22" requires-python = ">=3.11" dependencies = [ "numpy", diff --git a/src/ClusterFile.cpp b/src/ClusterFile.cpp index f77ac92..d24e803 100644 --- a/src/ClusterFile.cpp +++ b/src/ClusterFile.cpp @@ -41,6 +41,12 @@ void ClusterFile::set_noise_map(const NDView noise_map){ void ClusterFile::set_gain_map(const NDView gain_map){ m_gain_map = NDArray(gain_map); + + // Gain map is passed as ADU/keV to avoid dividing when applying the gain + // map we invert it here + for (auto &item : m_gain_map->view()) { + item = 1.0 / item; + } } ClusterFile::~ClusterFile() { close(); } From 58c934d9cf39dd7b27e55d3e67bb8c20cf622680 Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Tue, 22 Apr 2025 16:24:15 +0200 Subject: [PATCH 22/23] added mpl to conda specs --- conda-recipe/meta.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/conda-recipe/meta.yaml b/conda-recipe/meta.yaml index 0d3b532..46aee34 100644 --- a/conda-recipe/meta.yaml +++ b/conda-recipe/meta.yaml @@ -38,6 +38,7 @@ requirements: run: - python {{python}} - numpy {{ numpy }} + - matplotlib test: From 2e0424254cf92b4b2bb08152eea6c42685920ad3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Fri, 25 Apr 2025 10:31:40 +0200 Subject: [PATCH 23/23] removed unnecessary conda numpy variants (#165) With numpy 2.0 we no longer need to build against every supported numpy version. This way we can save up to 6 builds.
- https://numpy.org/doc/stable/dev/depending_on_numpy.html - https://conda-forge.org/docs/maintainer/knowledge_base/#building-against-numpy --- conda-recipe/conda_build_config.yaml | 23 --------------- conda-recipe/meta.yaml | 44 ++++++++++------------------ 2 files changed, 16 insertions(+), 51 deletions(-) diff --git a/conda-recipe/conda_build_config.yaml b/conda-recipe/conda_build_config.yaml index 36a7465..6d3d479 100644 --- a/conda-recipe/conda_build_config.yaml +++ b/conda-recipe/conda_build_config.yaml @@ -1,28 +1,5 @@ python: - 3.11 - - 3.11 - - 3.11 - - 3.12 - - 3.12 - 3.12 - 3.13 - - -numpy: - - 1.26 - - 2.0 - - 2.1 - - 1.26 - - 2.0 - - 2.1 - - 2.1 - - -zip_keys: - - python - - numpy - -pin_run_as_build: - numpy: x.x - python: x.x \ No newline at end of file diff --git a/conda-recipe/meta.yaml b/conda-recipe/meta.yaml index 12c6ca0..5b7eb48 100644 --- a/conda-recipe/meta.yaml +++ b/conda-recipe/meta.yaml @@ -2,56 +2,44 @@ package: name: aare version: 2025.4.22 #TODO! how to not duplicate this? - - - - - source: path: .. build: number: 0 script: - - unset CMAKE_GENERATOR && {{ PYTHON }} -m pip install . -vv # [not win] - - {{ PYTHON }} -m pip install . -vv # [win] + - unset CMAKE_GENERATOR && {{ PYTHON }} -m pip install . 
-vv requirements: build: - - python {{python}} - - numpy {{ numpy }} - {{ compiler('cxx') }} - - - host: - cmake - ninja - - python {{python}} - - numpy {{ numpy }} + + host: + - python - pip + - numpy=2.1 - scikit-build-core - pybind11 >=2.13.0 - - fmt - - zeromq - - nlohmann_json - - catch2 + - matplotlib # needed in host to solve the environment for run run: - - python {{python}} - - numpy {{ numpy }} + - python + - {{ pin_compatible('numpy') }} - matplotlib + test: imports: - aare - # requires: - # - pytest - # source_files: - # - tests - # commands: - # - pytest tests + requires: + - pytest + source_files: + - python/tests + commands: + - python -m pytest python/tests about: - summary: An example project built with pybind11 and scikit-build. - # license_file: LICENSE \ No newline at end of file + summary: Data analysis library for hybrid pixel detectors from PSI