Merge branch 'main' into dev/license

2025-11-21 14:52:54 +01:00
committed by GitHub
49 changed files with 3253 additions and 1171 deletions

View File

@@ -443,6 +443,7 @@ if(AARE_TESTS)
     ${CMAKE_CURRENT_SOURCE_DIR}/src/Dtype.test.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/Frame.test.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/DetectorGeometry.test.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/src/Interpolation.test.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/RawMasterFile.test.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/NDArray.test.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/NDView.test.cpp

View File

@@ -1,58 +1,64 @@
 # Release notes
+This document describes the difference between Release 2025.8.22 and RELEASE_DATE.
+
+## Changes:
-Features:
+### New Features:
 - Added SPDX-License-Identifier: MPL-2.0 to source files
-- max_sum_2x2 including index of subcluster with highest energy is now available from Python API
-- eta stores corner as enum class cTopLeft, cTopRight, BottomLeft, cBottomRight indicating 2x2 subcluster with largest energy relative to cluster center
-- max_sum_2x2 returns corner as index
-Bugfixes:
-- File supports reading new master json file format (multiple ROI's not supported yet)
+- Calculate Eta3 supports all cluster types
+- interpolation class supports using cross eta3x3 and eta3x3 on full cluster as well as eta2x2 on full cluster
+- interpolation class has option to calculate the rosenblatt transform
+- reduction operations to reduce Clusters of general size to 2x2 or 3x3 clusters
+- `max_sum_2x2` including index of subcluster with highest energy is now available from Python API
+- interpolation supports bilinear interpolation of eta values for more fine grained transformed uniform coordinates
+- Interpolation is documented
+- File supports reading new master json file format (multiple ROI's not supported yet)
 - Added tell to ClusterFile. Returns position in bytes for debugging
-### 2025.8.22
-Features:
-- Apply calibration works in G0 if passes a 2D calibration and pedestal
-- count pixels that switch
-- calculate pedestal (also g0 version)
-- NDArray::view() needs an lvalue to reduce issues with the view outliving the array
-Bugfixes:
-- Now using glibc 2.17 in conda builds (was using the host)
-- Fixed shifted pixels in clusters close to the edge of a frame
+### Resolved Features:
+- calculate_eta coincides with theoretical definition
+### Bugfixes:
+- eta calculation assumes correct photon center
+- eta transformation to uniform coordinates starts at 0
+- Bug in interpolation
+### API Changes:
+- ClusterFinder for 2x2 Cluster disabled
+- eta stores corner as enum class cTopLeft, cTopRight, BottomLeft, cBottomRight indicating 2x2 subcluster with largest energy relative to cluster center
+- max_sum_2x2 returns corner as index
-### 2025.7.18
-Features:
-- Cluster finder now works with 5x5, 7x7 and 9x9 clusters
-- Added ClusterVector::empty() member
-- Added apply_calibration function for Jungfrau data
-Bugfixes:
-- Fixed reading RawFiles with ROI fully excluding some sub files.
-- Decoding of MH02 files placed the pixels in wrong position
-- Removed unused file: ClusterFile.cpp
-### 2025.5.22
-Features:
-- Added scurve fitting
-Bugfixes:
-- Fixed crash when opening raw files with large number of data files
+
+## Download, Documentation & Support
+### Download
+The Source Code:
+https://github.com/slsdetectorgroup/aare
+### Documentation
+Documentation including installation details:
+https://github.com/slsdetectorgroup/aare
+### Support
+erik.frojdh@psi.ch \
+alice.mazzoleni@psi.ch \
+dhanya.thattil@psi.ch
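For orientation, a rough C++ sketch of how the pieces listed above fit together (the include paths and the prepared Interpolator are assumptions, not taken from this change set):

#include <vector>
#include <aare/CalculateEta.hpp>  // assumed header layout
#include <aare/Cluster.hpp>       // assumed header layout
#include <aare/Interpolation.hpp> // assumed header layout

using namespace aare;

// Hypothetical helper: reduce, compute eta3 and interpolate a batch of clusters.
std::vector<Photon> sketch(const ClusterVector<Cluster<int32_t, 5, 5>> &clusters,
                           Interpolator &interpolator) {
    // new reduction operations: shrink general-size clusters to 3x3
    auto reduced = reduce_to_3x3(clusters);
    // Eta3 now supports all cluster types (larger clusters are reduced internally)
    auto etas = calculate_eta3(reduced);
    (void)etas;
    // interpolate() defaults to calculate_eta2; use the eta function that matches
    // the joint eta distribution the Interpolator was built from
    return interpolator.interpolate(reduced);
}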

View File

@@ -9,6 +9,7 @@ class ClusterFixture : public benchmark::Fixture {
   public:
     Cluster<int, 2, 2> cluster_2x2{};
     Cluster<int, 3, 3> cluster_3x3{};
+    Cluster<int, 4, 4> cluster_4x4{};
   private:
     using benchmark::Fixture::SetUp;
@@ -27,6 +28,13 @@ class ClusterFixture : public benchmark::Fixture {
         cluster_3x3.x = 0;
         cluster_3x3.y = 0;
+        int temp_data3[16] = {1, 2,  3,  4,  5,  6,  7,  8,
+                              9, 10, 11, 12, 13, 14, 15, 16};
+        std::copy(std::begin(temp_data3), std::end(temp_data3),
+                  std::begin(cluster_4x4.data));
+        cluster_4x4.x = 0;
+        cluster_4x4.y = 0;
     }
     // void TearDown(::benchmark::State& state) {
@@ -68,4 +76,29 @@ BENCHMARK_F(ClusterFixture, CalculateGeneralEtaFor3x3Cluster)
         benchmark::DoNotOptimize(eta);
     }
 }
+BENCHMARK_F(ClusterFixture, Calculate2x2Etawithreduction)
+(benchmark::State &st) {
+    for (auto _ : st) {
+        // This code gets timed
+        auto reduced_cluster = reduce_to_2x2(cluster_4x4);
+        Eta2 eta = calculate_eta2(reduced_cluster);
+        auto reduced_cluster_from_3x3 = reduce_to_2x2(cluster_3x3);
+        Eta2 eta2 = calculate_eta2(reduced_cluster_from_3x3);
+        benchmark::DoNotOptimize(eta);
+        benchmark::DoNotOptimize(eta2);
+    }
+}
+BENCHMARK_F(ClusterFixture, Calculate2x2Etawithoutreduction)
+(benchmark::State &st) {
+    for (auto _ : st) {
+        // This code gets timed
+        Eta2 eta = calculate_eta2(cluster_4x4);
+        Eta2 eta2 = calculate_eta2(cluster_3x3);
+        benchmark::DoNotOptimize(eta);
+        benchmark::DoNotOptimize(eta2);
+    }
+}
 // BENCHMARK_MAIN();

View File

@@ -34,8 +34,8 @@ class ClustersForReduceFixture : public benchmark::Fixture {
 };
 template <typename T>
-Cluster<T, 3, 3, int16_t> reduce_to_3x3(const Cluster<T, 5, 5, int16_t> &c) {
-    Cluster<T, 3, 3, int16_t> result;
+Cluster<T, 3, 3, uint16_t> reduce_to_3x3(const Cluster<T, 5, 5, uint16_t> &c) {
+    Cluster<T, 3, 3, uint16_t> result;
     // Write out the sums in the hope that the compiler can optimize this
     std::array<T, 9> sum_3x3_subclusters;
@@ -141,7 +141,7 @@ Cluster<T, 3, 3, int16_t> reduce_to_3x3(const Cluster<T, 5, 5, int16_t> &c) {
 BENCHMARK_F(ClustersForReduceFixture, Reduce2x2)(benchmark::State &st) {
     for (auto _ : st) {
         // This code gets timed
-        benchmark::DoNotOptimize(reduce_to_2x2<int, 3, 3, int16_t>(
+        benchmark::DoNotOptimize(reduce_to_2x2<int, 3, 3, uint16_t>(
             cluster_3x3)); // make sure compiler evaluates the expression
     }
 }
@@ -157,7 +157,7 @@ BENCHMARK_F(ClustersForReduceFixture, Reduce3x3)(benchmark::State &st) {
     for (auto _ : st) {
         // This code gets timed
         benchmark::DoNotOptimize(
-            reduce_to_3x3<int, 5, 5, int16_t>(cluster_5x5));
+            reduce_to_3x3<int, 5, 5, uint16_t>(cluster_5x5));
     }
 }

View File

@@ -28,6 +28,8 @@ configure_file(
     @ONLY
 )
+file(COPY "${CMAKE_CURRENT_SOURCE_DIR}/figures"
+     DESTINATION "${SPHINX_BUILD}")
 configure_file(
     "${CMAKE_CURRENT_SOURCE_DIR}/static/extra.css"

BIN  docs/figures/Eta2x2.pdf (new file)
BIN  docs/figures/Eta2x2.png (new file, 6.7 KiB)
BIN  docs/figures/Eta2x2Full.pdf (new file)
BIN  docs/figures/Eta2x2Full.png (new file, 10 KiB)
BIN  docs/figures/Eta3x3.pdf (new file)
BIN  docs/figures/Eta3x3.png (new file, 13 KiB)
BIN  two further binary figure files (one 9.5 KiB; filenames not shown in this view)

docs/src/Cluster.rst (new file, 15 lines)
View File

@@ -0,0 +1,15 @@
Cluster
========
.. doxygenstruct:: aare::Cluster
:members:
:undoc-members:
:private-members:
**Free Functions:**
.. doxygenfunction:: aare::reduce_to_3x3(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>&)
.. doxygenfunction:: aare::reduce_to_2x2(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>&)
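A minimal usage sketch of the free functions documented above (the include path is an assumption and the values are only illustrative):

#include <aare/Cluster.hpp> // assumed header name

int example() {
    aare::Cluster<int, 3, 3> c{};
    c.x = 10;
    c.y = 20;
    // row-major data, top-left first; the brightest 2x2 block is bottom-right
    c.data = {1, 1, 1,
              1, 5, 6,
              1, 7, 9};
    auto r = aare::reduce_to_2x2(c); // keeps the 2x2 block with the highest sum
    // r.data == {5, 6, 7, 9}; r.x and r.y stay at the original cluster centre
    return r.data[3];
}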

docs/src/Interpolation.rst (new file, 102 lines)
View File

@@ -0,0 +1,102 @@
Interpolation
==============
The Interpolator class provides methods to interpolate the positions of photons based on their :math:`\eta` values.
.. warning::
The interpolation might lead to erroneous photon positions for clusters at the borders of a frame. Make sure to filter out such clusters.
:math:`\eta`-Functions:
---------------------------
.. doxygenstruct:: aare::Eta2
:members:
:undoc-members:
:private-members:
.. note::
The corner value ``c`` is only relevant when using ``calculate_eta2`` or ``calculate_full_eta2``. Otherwise it keeps its default value ``cTopLeft``.
The following :math:`\eta`-functions are supported:
.. image:: ../figures/Eta2x2.png
:target: ../figures/Eta2x2.png
:width: 650px
:align: center
:alt: Eta2x2
.. math::
\begin{equation*}
{\color{blue}{\eta_x}} = \frac{Q_{1,1}}{Q_{1,0} + Q_{1,1}} \quad \quad
{\color{green}{\eta_y}} = \frac{Q_{1,1}}{Q_{0,1} + Q_{1,1}}
\end{equation*}
.. doxygenfunction:: aare::calculate_eta2(const ClusterVector<ClusterType>&)
.. doxygenfunction:: aare::calculate_eta2(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>&)
.. image:: ../figures/Eta2x2Full.png
:target: ../figures/Eta2x2Full.png
:width: 650px
:align: center
:alt: Eta2x2 Full
.. math::
\begin{equation*}
{\color{blue}{\eta_x}} = \frac{Q_{0,1} + Q_{1,1}}{\sum_i^{2}\sum_j^{2}Q_{i,j}} \quad \quad
{\color{green}{\eta_y}} = \frac{Q_{1,0} + Q_{1,1}}{\sum_i^{2}\sum_j^{2}Q_{i,j}}
\end{equation*}
.. doxygenfunction:: aare::calculate_full_eta2(const ClusterVector<ClusterType>&)
.. doxygenfunction:: aare::calculate_full_eta2(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>&)
.. image:: ../figures/Eta3x3.png
:target: ../figures/Eta3x3.png
:width: 650px
:align: center
:alt: Eta3x3
.. math::
\begin{equation*}
{\color{blue}{\eta_x}} = \frac{\sum_{i}^{3} Q_{i,2} - \sum_{i}^{3} Q_{i,0}}{\sum_{i}^{3}\sum_{j}^{3} Q_{i,j}} \quad \quad
{\color{green}{\eta_y}} = \frac{\sum_{j}^{3} Q_{2,j} - \sum_{j}^{3} Q_{0,j}}{\sum_{i}^{3}\sum_{j}^{3} Q_{i,j}}
\end{equation*}
.. doxygenfunction:: aare::calculate_eta3(const ClusterVector<Cluster<T, 3,3, CoordType>>&)
.. doxygenfunction:: aare::calculate_eta3(const Cluster<T, 3, 3, CoordType>&)
.. image:: ../figures/Eta3x3Cross.png
:target: ../figures/Eta3x3Cross.png
:width: 650px
:align: center
:alt: Cross Eta3x3
.. math::
\begin{equation*}
{\color{blue}{\eta_x}} = \frac{Q_{1,2} - Q_{1,0}}{Q_{1,0} + Q_{1,1} + Q_{1,2}} \quad \quad
{\color{green}{\eta_y}} = \frac{Q_{2,1} - Q_{0,1}}{Q_{0,1} + Q_{1,1} + Q_{2,1}}
\end{equation*}
.. doxygenfunction:: aare::calculate_cross_eta3(const ClusterVector<Cluster<T, 3,3, CoordType>>&)
.. doxygenfunction:: aare::calculate_cross_eta3(const Cluster<T, 3, 3, CoordType>&)
Interpolation class:
---------------------
.. Warning::
Make sure to use the same :math:`\eta`-function during interpolation as given by the joint :math:`\eta`-distribution passed to the constructor.
.. doxygenclass:: aare::Interpolator
:members:
:undoc-members:
:private-members:
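A hedged sketch of the warning above, using the same :math:`\eta`-function both for the joint distribution and for the interpolation (include paths and the histogramming of the joint distribution are assumptions, not part of this page):

#include <vector>
#include <aare/CalculateEta.hpp>  // assumed header name
#include <aare/Interpolation.hpp> // assumed header name

using namespace aare;

std::vector<Photon>
interpolate_with_eta2(const ClusterVector<Cluster<int32_t, 3, 3>> &clusters,
                      NDView<double, 3> eta2_joint_distribution,
                      NDView<double, 1> xbins, NDView<double, 1> ybins,
                      NDView<double, 1> ebins) {
    // the joint distribution must have been filled from calculate_eta2 values
    Interpolator interp(eta2_joint_distribution, xbins, ybins, ebins);
    // interpolate() defaults to calculate_eta2, matching the distribution above
    return interp.interpolate(clusters);
}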

View File

@@ -29,6 +29,8 @@ AARE
    pyCtbRawFile
    pyClusterFile
    pyClusterVector
+   pyCluster
+   pyInterpolation
    pyJungfrauDataFile
    pyRawFile
    pyRawMasterFile
@@ -47,10 +49,12 @@ AARE
    Frame
    File
    Dtype
+   Cluster
    ClusterFinder
    ClusterFinderMT
    ClusterFile
    ClusterVector
+   Interpolation
    JungfrauDataFile
    Pedestal
    RawFile

docs/src/pyCluster.rst (new file, 23 lines)
View File

@@ -0,0 +1,23 @@
Cluster
========
.. py:currentmodule:: aare
.. autoclass:: Cluster
:members:
:undoc-members:
:inherited-members:
Below is the API of a cluster of size :math:`3\times 3` and type ``int``, but all variants share the same API.
.. autoclass:: aare._aare.Cluster3x3i
:special-members: __init__
:members:
:undoc-members:
:show-inheritance:
:inherited-members:
.. note::
More functions can be found in the :ref:`ClusterVector <py_clustervector>` documentation. In general, apply functions directly on the ``ClusterVector`` instead of looping over individual clusters.

View File

@@ -1,3 +1,5 @@
+.. _py_clustervector:
+
 ClusterVector
 ================
@@ -28,6 +30,13 @@ C++ functions that support the ClusterVector or to view it as a numpy array.
 .. py:currentmodule:: aare
+.. autoclass:: ClusterVector
+    :members:
+    :undoc-members:
+    :inherited-members:
+
+Below is the API of ``ClusterVector_Cluster3x3i``, but all variants share the same API.
+
 .. autoclass:: aare._aare.ClusterVector_Cluster3x3i
     :special-members: __init__
     :members:

View File

@@ -0,0 +1,94 @@
Interpolation
==============
The Interpolator class provides methods to interpolate the positions of photons based on their :math:`\eta` values.
.. warning::
The interpolation might lead to erroneous photon positions for clusters at the borders of a frame. Make sure to filter out such clusters.
Below is the API of the Eta class of type ``double`` (``Etad``); the variants ``Etaf`` of type ``float`` and ``Etai`` of type ``int`` are also supported.
.. autoclass:: aare._aare.Etad
:members:
:private-members:
.. note::
The corner value ``c`` is only relevant when using ``calculate_eta2`` or ``calculate_full_eta2``. Otherwise it keeps its default value ``cTopLeft``.
The following :math:`\eta`-functions are supported:
.. py:currentmodule:: aare
.. image:: ../figures/Eta2x2.png
:target: ../figures/Eta2x2.png
:width: 650px
:align: center
:alt: Eta2x2
.. math::
\begin{equation*}
{\color{blue}{\eta_x}} = \frac{Q_{1,1}}{Q_{1,0} + Q_{1,1}} \quad \quad
{\color{green}{\eta_y}} = \frac{Q_{1,1}}{Q_{0,1} + Q_{1,1}}
\end{equation*}
.. autofunction:: calculate_eta2
.. image:: ../figures/Eta2x2Full.png
:target: ../figures/Eta2x2Full.png
:width: 650px
:align: center
:alt: Eta2x2 Full
.. math::
\begin{equation*}
{\color{blue}{\eta_x}} = \frac{Q_{0,1} + Q_{1,1}}{\sum_i^{2}\sum_j^{2}Q_{i,j}} \quad \quad
{\color{green}{\eta_y}} = \frac{Q_{1,0} + Q_{1,1}}{\sum_i^{2}\sum_j^{2}Q_{i,j}}
\end{equation*}
.. autofunction:: calculate_full_eta2
.. image:: ../figures/Eta3x3.png
:target: ../figures/Eta3x3.png
:width: 650px
:align: center
:alt: Eta3x3
.. math::
\begin{equation*}
{\color{blue}{\eta_x}} = \frac{\sum_{i}^{3} Q_{i,2} - \sum_{i}^{3} Q_{i,0}}{\sum_{i}^{3}\sum_{j}^{3} Q_{i,j}} \quad \quad
{\color{green}{\eta_y}} = \frac{\sum_{j}^{3} Q_{2,j} - \sum_{j}^{3} Q_{0,j}}{\sum_{i}^{3}\sum_{j}^{3} Q_{i,j}}
\end{equation*}
.. autofunction:: calculate_eta3
.. image:: ../figures/Eta3x3Cross.png
:target: ../figures/Eta3x3Cross.png
:width: 650px
:align: center
:alt: Cross Eta3x3
.. math::
\begin{equation*}
{\color{blue}{\eta_x}} = \frac{Q_{1,2} - Q_{1,0}}{Q_{1,0} + Q_{1,1} + Q_{1,2}} \quad \quad
{\color{green}{\eta_y}} = \frac{Q_{2,1} - Q_{0,1}}{Q_{0,1} + Q_{1,1} + Q_{2,1}}
\end{equation*}
.. autofunction:: calculate_cross_eta3
Interpolation class for :math:`\eta`-Interpolation
----------------------------------------------------
.. Warning::
Make sure to use the same :math:`\eta`-function during interpolation as given by the joint :math:`\eta`-distribution passed to the constructor.
.. py:currentmodule:: aare
.. autoclass:: Interpolator
:special-members: __init__
:members:
:undoc-members:
:inherited-members:

View File

@@ -20,37 +20,124 @@ enum class pixel : int {
     pTopRight = 8
 };
+// TODO: better to have sum after x,y
+/**
+ * eta struct
+ */
 template <typename T> struct Eta2 {
-    double x;
-    double y;
+    /// @brief eta in x direction
+    double x{};
+    /// @brief eta in y direction
+    double y{};
+    /// @brief index of subcluster given as corner relative to cluster center
     corner c{0};
-    T sum;
+    /// @brief photon energy (cluster sum)
+    T sum{};
 };
 /**
- * @brief Calculate the eta2 values for all clusters in a Clustervector
+ * @brief Calculate the eta2 values for all clusters in a ClusterVector
  */
 template <typename ClusterType,
           typename = std::enable_if_t<is_cluster_v<ClusterType>>>
-NDArray<double, 2> calculate_eta2(const ClusterVector<ClusterType> &clusters) {
-    NDArray<double, 2> eta2({static_cast<int64_t>(clusters.size()), 2});
+std::vector<Eta2<typename ClusterType::value_type>>
+calculate_eta2(const ClusterVector<ClusterType> &clusters) {
+    std::vector<Eta2<typename ClusterType::value_type>> eta2{};
+    eta2.reserve(clusters.size());
     for (size_t i = 0; i < clusters.size(); i++) {
         auto e = calculate_eta2(clusters[i]);
-        eta2(i, 0) = e.x;
-        eta2(i, 1) = e.y;
+        eta2.push_back(e);
     }
     return eta2;
 }
/**
* @brief Calculate the full eta2 values for all clusters in a ClusterVector
*/
template <typename ClusterType,
typename = std::enable_if_t<is_cluster_v<ClusterType>>>
std::vector<Eta2<typename ClusterType::value_type>>
calculate_full_eta2(const ClusterVector<ClusterType> &clusters) {
std::vector<Eta2<typename ClusterType::value_type>> eta2{};
eta2.reserve(clusters.size());
for (size_t i = 0; i < clusters.size(); i++) {
auto e = calculate_full_eta2(clusters[i]);
eta2.push_back(e);
}
return eta2;
}
/**
* @brief Calculate eta3 for all 3x3 clusters in a ClusterVector
*/
template <typename ClusterType,
typename = std::enable_if_t<is_cluster_v<ClusterType>>>
std::vector<Eta2<typename ClusterType::value_type>>
calculate_eta3(const ClusterVector<ClusterType> &clusters) {
std::vector<Eta2<typename ClusterType::value_type>> eta2{};
eta2.reserve(clusters.size());
for (size_t i = 0; i < clusters.size(); i++) {
auto e = calculate_eta3(clusters[i]);
eta2.push_back(e);
}
return eta2;
}
/**
* @brief Calculate cross eta3 for all 3x3 clusters in a ClusterVector
*/
template <typename ClusterType,
typename = std::enable_if_t<is_cluster_v<ClusterType>>>
std::vector<Eta2<typename ClusterType::value_type>>
calculate_cross_eta3(const ClusterVector<ClusterType> &clusters) {
std::vector<Eta2<typename ClusterType::value_type>> eta2{};
eta2.reserve(clusters.size());
for (size_t i = 0; i < clusters.size(); i++) {
auto e = calculate_cross_eta3(clusters[i]);
eta2.push_back(e);
}
return eta2;
}
/**
* @brief helper function to calculate eta2 x and y values
* @param eta reference to the Eta2 object to update
* @param left_x value of the left pixel
* @param right_x value of the right pixel
* @param bottom_y value of the bottom pixel
* @param top_y value of the top pixel
*/
template <typename T>
inline void calculate_eta2(Eta2<T> &eta, const T left_x, const T right_x,
const T bottom_y, const T top_y) {
if ((right_x + left_x) != 0)
eta.x = static_cast<double>(right_x) /
static_cast<double>(right_x + left_x); // between (0,1) the
// closer to zero left
// value probably larger
if ((top_y + bottom_y) != 0)
eta.y = static_cast<double>(top_y) /
static_cast<double>(top_y + bottom_y); // between (0,1) the
// closer to zero bottom
// value probably larger
}
 /**
  * @brief Calculate the eta2 values for a generic sized cluster and return them
- * in a Eta2 struct containing etay, etax and the index of the respective 2x2
- * subcluster.
+ * in a Eta2 struct containing etay, etax and the index (as corner) of the
+ * respective 2x2 subcluster relative to the cluster center.
  */
 template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
-          typename CoordType>
+          typename CoordType = uint16_t>
 Eta2<T>
 calculate_eta2(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cl) {
@@ -67,67 +154,36 @@ calculate_eta2(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cl) {
     // subcluster top right from center
     switch (c) {
     case (corner::cTopLeft):
-        if ((cl.data[cluster_center_index - 1] +
-             cl.data[cluster_center_index]) != 0)
-            eta.x = static_cast<double>(cl.data[cluster_center_index - 1]) /
-                    static_cast<double>(cl.data[cluster_center_index - 1] +
-                                        cl.data[cluster_center_index]);
-        if ((cl.data[cluster_center_index - ClusterSizeX] +
-             cl.data[cluster_center_index]) != 0)
-            eta.y = static_cast<double>(
-                        cl.data[cluster_center_index - ClusterSizeX]) /
-                    static_cast<double>(
-                        cl.data[cluster_center_index - ClusterSizeX] +
-                        cl.data[cluster_center_index]);
-        // dx = 0
-        // dy = 0
+        calculate_eta2(eta, cl.data[cluster_center_index - 1],
+                       cl.data[cluster_center_index],
+                       cl.data[cluster_center_index - ClusterSizeX],
+                       cl.data[cluster_center_index]);
+        // dx = -1
+        // dy = -1
         break;
     case (corner::cTopRight):
-        if (cl.data[cluster_center_index] + cl.data[cluster_center_index + 1] !=
-            0)
-            eta.x = static_cast<double>(cl.data[cluster_center_index]) /
-                    static_cast<double>(cl.data[cluster_center_index] +
-                                        cl.data[cluster_center_index + 1]);
-        if ((cl.data[cluster_center_index - ClusterSizeX] +
-             cl.data[cluster_center_index]) != 0)
-            eta.y = static_cast<double>(
-                        cl.data[cluster_center_index - ClusterSizeX]) /
-                    static_cast<double>(
-                        cl.data[cluster_center_index - ClusterSizeX] +
-                        cl.data[cluster_center_index]);
-        // dx = 1
-        // dy = 0
+        calculate_eta2(eta, cl.data[cluster_center_index],
+                       cl.data[cluster_center_index + 1],
+                       cl.data[cluster_center_index - ClusterSizeX],
+                       cl.data[cluster_center_index]);
+        // dx = 0
+        // dy = -1
         break;
     case (corner::cBottomLeft):
-        if ((cl.data[cluster_center_index - 1] +
-             cl.data[cluster_center_index]) != 0)
-            eta.x = static_cast<double>(cl.data[cluster_center_index - 1]) /
-                    static_cast<double>(cl.data[cluster_center_index - 1] +
-                                        cl.data[cluster_center_index]);
-        if ((cl.data[cluster_center_index] +
-             cl.data[cluster_center_index + ClusterSizeX]) != 0)
-            eta.y = static_cast<double>(cl.data[cluster_center_index]) /
-                    static_cast<double>(
-                        cl.data[cluster_center_index] +
-                        cl.data[cluster_center_index + ClusterSizeX]);
-        // dx = 0
-        // dy = 1
+        calculate_eta2(eta, cl.data[cluster_center_index - 1],
+                       cl.data[cluster_center_index],
+                       cl.data[cluster_center_index],
+                       cl.data[cluster_center_index + ClusterSizeX]);
+        // dx = -1
+        // dy = 0
         break;
     case (corner::cBottomRight):
-        if (cl.data[cluster_center_index] + cl.data[cluster_center_index + 1] !=
-            0)
-            eta.x = static_cast<double>(cl.data[cluster_center_index]) /
-                    static_cast<double>(cl.data[cluster_center_index] +
-                                        cl.data[cluster_center_index + 1]);
-        if ((cl.data[cluster_center_index] +
-             cl.data[cluster_center_index + ClusterSizeX]) != 0)
-            eta.y = static_cast<double>(cl.data[cluster_center_index]) /
-                    static_cast<double>(
-                        cl.data[cluster_center_index] +
-                        cl.data[cluster_center_index + ClusterSizeX]);
-        // dx = 1
-        // dy = 1
+        calculate_eta2(eta, cl.data[cluster_center_index],
+                       cl.data[cluster_center_index + 1],
+                       cl.data[cluster_center_index],
+                       cl.data[cluster_center_index + ClusterSizeX]);
+        // dx = 0
+        // dy = 0
         break;
     }
@@ -136,69 +192,255 @@ calculate_eta2(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cl) {
     return eta;
 }
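// Worked example (illustrative, not from the source): for a 3x3 cluster with
// data = {1, 2, 3,
//         4, 9, 5,
//         6, 7, 8}   (row-major, top-left first, centre index 4)
// max_sum_2x2() selects the bottom-right 2x2 block {9, 5, 7, 8} (sum 29), so
// c == corner::cBottomRight and the helper above is called with
// left_x = data[4] = 9, right_x = data[5] = 5, bottom_y = data[4] = 9,
// top_y = data[4 + ClusterSizeX] = 7, giving
// eta.x = 5 / (5 + 9) ≈ 0.36 and eta.y = 7 / (7 + 9) ≈ 0.44.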
-// TODO! Look up eta2 calculation - photon center should be bottom right corner
-template <typename T>
-Eta2<T> calculate_eta2(const Cluster<T, 2, 2, int16_t> &cl) {
-    Eta2<T> eta{};
-    if ((cl.data[0] + cl.data[1]) != 0)
-        eta.x = static_cast<double>(cl.data[2]) /
-                (cl.data[2] + cl.data[3]); // between (0,1) the closer to zero
-                                           // left value probably larger
-    if ((cl.data[0] + cl.data[2]) != 0)
-        eta.y = static_cast<double>(cl.data[1]) /
-                (cl.data[1] + cl.data[3]); // between (0,1) the closer to zero
-                                           // bottom value probably larger
+/**
+ * @brief Calculate the eta2 values for a generic sized cluster and return them
+ * in a Eta2 struct containing etay, etax and the index (as corner) of the
+ * respective 2x2 subcluster relative to the cluster center.
+ */
+template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
+          typename CoordType>
+Eta2<T> calculate_full_eta2(
+    const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cl) {
+    static_assert(ClusterSizeX > 1 && ClusterSizeY > 1);
+    Eta2<T> eta{};
+    constexpr size_t cluster_center_index =
+        (ClusterSizeX / 2) + (ClusterSizeY / 2) * ClusterSizeX;
+    auto max_sum = cl.max_sum_2x2();
+    eta.sum = max_sum.sum;
+    corner c = max_sum.index;
+    // subcluster top right from center
switch (c) {
case (corner::cTopLeft):
if (eta.sum != 0) {
eta.x = static_cast<double>(
cl.data[cluster_center_index] +
cl.data[cluster_center_index - ClusterSizeX]) /
static_cast<double>(eta.sum);
eta.y = static_cast<double>(cl.data[cluster_center_index - 1] +
cl.data[cluster_center_index]) /
static_cast<double>(eta.sum);
}
// dx = -1
// dy = -1
break;
case (corner::cTopRight):
if (eta.sum != 0) {
eta.x = static_cast<double>(
cl.data[cluster_center_index + 1] +
cl.data[cluster_center_index - ClusterSizeX + 1]) /
static_cast<double>(eta.sum);
eta.y = static_cast<double>(cl.data[cluster_center_index] +
cl.data[cluster_center_index + 1]) /
static_cast<double>(eta.sum);
}
// dx = 0
// dy = -1
break;
case (corner::cBottomLeft):
if (eta.sum != 0) {
eta.x = static_cast<double>(
cl.data[cluster_center_index] +
cl.data[cluster_center_index + ClusterSizeX]) /
static_cast<double>(eta.sum);
eta.y = static_cast<double>(
cl.data[cluster_center_index + ClusterSizeX] +
cl.data[cluster_center_index + ClusterSizeX - 1]) /
static_cast<double>(eta.sum);
}
// dx = -1
// dy = 0
break;
case (corner::cBottomRight):
if (eta.sum != 0) {
eta.x = static_cast<double>(
cl.data[cluster_center_index + 1] +
cl.data[cluster_center_index + ClusterSizeX + 1]) /
static_cast<double>(eta.sum);
eta.y = static_cast<double>(
cl.data[cluster_center_index + ClusterSizeX] +
cl.data[cluster_center_index + ClusterSizeX + 1]) /
static_cast<double>(eta.sum);
}
// dx = 0
// dy = 0
break;
}
eta.c = c;
return eta;
}
template <typename T>
Eta2<T> calculate_eta2(const Cluster<T, 2, 2, uint16_t> &cl) {
Eta2<T> eta{};
// TODO: maybe have as member function of cluster
const uint8_t photon_hit_index =
std::max_element(cl.data.begin(), cl.data.end()) - cl.data.begin();
eta.c = static_cast<corner>(3 - photon_hit_index);
switch (eta.c) {
case corner::cTopLeft:
calculate_eta2(eta, cl.data[2], cl.data[3], cl.data[1], cl.data[3]);
break;
case corner::cTopRight:
calculate_eta2(eta, cl.data[2], cl.data[3], cl.data[0], cl.data[2]);
break;
case corner::cBottomLeft:
calculate_eta2(eta, cl.data[0], cl.data[1], cl.data[1], cl.data[3]);
break;
case corner::cBottomRight:
calculate_eta2(eta, cl.data[0], cl.data[1], cl.data[0], cl.data[2]);
break;
}
     eta.sum = cl.sum();
     return eta;
 }
template <typename T>
Eta2<T> calculate_full_eta2(const Cluster<T, 2, 2, uint16_t> &cl) {
Eta2<T> eta{};
eta.sum = cl.sum();
const uint8_t photon_hit_index =
std::max_element(cl.data.begin(), cl.data.end()) - cl.data.begin();
eta.c = static_cast<corner>(3 - photon_hit_index);
if (eta.sum != 0) {
eta.x = static_cast<double>(cl.data[1] + cl.data[3]) /
static_cast<double>(eta.sum);
eta.y = static_cast<double>(cl.data[2] + cl.data[3]) /
static_cast<double>(eta.sum);
}
return eta;
}
 // TODO generalize
 template <typename T>
-Eta2<T> calculate_eta2(const Cluster<T, 1, 2, int16_t> &cl) {
+Eta2<T> calculate_eta2(const Cluster<T, 1, 2, uint16_t> &cl) {
     Eta2<T> eta{};
     eta.x = 0;
-    eta.y = static_cast<double>(cl.data[0]) / cl.data[1];
+    eta.y = static_cast<double>(cl.data[1]) / cl.data[0];
     eta.sum = cl.sum();
 }
 template <typename T>
-Eta2<T> calculate_eta2(const Cluster<T, 2, 1, int16_t> &cl) {
+Eta2<T> calculate_eta2(const Cluster<T, 2, 1, uint16_t> &cl) {
     Eta2<T> eta{};
-    eta.x = static_cast<double>(cl.data[0]) / cl.data[1];
+    eta.x = static_cast<double>(cl.data[1]) / cl.data[0];
     eta.y = 0;
     eta.sum = cl.sum();
 }
-// calculates Eta3 for 3x3 cluster based on code from analyze_cluster
-// TODO only supported for 3x3 Clusters
-template <typename T> Eta2<T> calculate_eta3(const Cluster<T, 3, 3> &cl) {
+/**
+ * @brief calculates cross Eta3 for 3x3 cluster
+ * cross Eta3 calculates the eta by taking into account only the cross pixels
+ * {top, bottom, left, right, center}
+ */
+template <typename T, typename CoordType = uint16_t>
+Eta2<T> calculate_cross_eta3(const Cluster<T, 3, 3, CoordType> &cl) {
     Eta2<T> eta{};
-    T sum = 0;
-    std::for_each(std::begin(cl.data), std::end(cl.data),
-                  [&sum](T x) { sum += x; });
-    eta.sum = sum;
+    T photon_energy = cl.sum();
+    eta.sum = photon_energy;
     if ((cl.data[3] + cl.data[4] + cl.data[5]) != 0)
-        eta.x = static_cast<double>(-cl.data[3] + cl.data[3 + 2]) /
-                (cl.data[3] + cl.data[4] + cl.data[5]); // (-1,1)
+        eta.x =
+            static_cast<double>(-cl.data[3] + cl.data[3 + 2]) /
+            static_cast<double>(cl.data[3] + cl.data[4] + cl.data[5]); // (-1,1)
     if ((cl.data[1] + cl.data[4] + cl.data[7]) != 0)
         eta.y = static_cast<double>(-cl.data[1] + cl.data[2 * 3 + 1]) /
-                (cl.data[1] + cl.data[4] + cl.data[7]);
+                static_cast<double>(cl.data[1] + cl.data[4] + cl.data[7]);
     return eta;
 }
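// Worked example (illustrative, not from the source): for a 3x3 cluster with
// data = {1, 2, 3,
//         4, 9, 5,
//         6, 7, 8}
// only the cross pixels enter: data[1]=2 (above), data[3]=4 (left),
// data[4]=9 (centre), data[5]=5 (right), data[7]=7 (below), so
// eta.x = (-4 + 5) / (4 + 9 + 5) = 1/18 ≈ 0.06
// eta.y = (-2 + 7) / (2 + 9 + 7) = 5/18 ≈ 0.28
// while eta.sum is the full cluster sum (45), not only the cross.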
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
typename CoordType = uint16_t>
Eta2<T> calculate_cross_eta3(
const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cl) {
static_assert(ClusterSizeX > 2 && ClusterSizeY > 2,
"calculate_eta3 only defined for clusters larger than 2x2");
if constexpr (ClusterSizeX != 3 || ClusterSizeY != 3) {
auto reduced_cluster = reduce_cluster_to_3x3(cl);
return calculate_cross_eta3(reduced_cluster);
} else {
return calculate_cross_eta3(cl);
}
}
/**
* @brief calculates Eta3 for 3x3 cluster
* It calculates the eta by taking into account all pixels in the 3x3 cluster
*/
template <typename T, typename CoordType = uint16_t>
Eta2<T> calculate_eta3(const Cluster<T, 3, 3, CoordType> &cl) {
Eta2<T> eta{};
T photon_energy = cl.sum();
eta.sum = photon_energy;
// TODO: how do we handle potential arithmetic overflows? - T could be
// uint16
if (photon_energy != 0) {
std::array<T, 2> column_sums{
static_cast<T>(cl.data[0] + cl.data[3] + cl.data[6]),
static_cast<T>(cl.data[2] + cl.data[5] + cl.data[8])};
eta.x = static_cast<double>(-column_sums[0] + column_sums[1]) /
static_cast<double>(photon_energy);
std::array<T, 2> row_sums{
static_cast<T>(cl.data[0] + cl.data[1] + cl.data[2]),
static_cast<T>(cl.data[6] + cl.data[7] + cl.data[8])};
eta.y = static_cast<double>(-row_sums[0] + row_sums[1]) /
static_cast<double>(photon_energy);
}
return eta;
}
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
typename CoordType = uint16_t>
Eta2<T>
calculate_eta3(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cl) {
static_assert(ClusterSizeX > 2 && ClusterSizeY > 2,
"calculate_eta3 only defined for clusters larger than 2x2");
if constexpr (ClusterSizeX != 3 || ClusterSizeY != 3) {
auto reduced_cluster = reduce_cluster_to_3x3(cl);
return calculate_eta3(reduced_cluster);
} else {
return calculate_eta3(cl);
}
}
 } // namespace aare

View File

@@ -19,6 +19,10 @@
 namespace aare {
 // requires clause c++20 maybe update
/**
* @brief Cluster struct
*/
 template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
           typename CoordType = uint16_t>
 struct Cluster {
@@ -29,8 +33,11 @@ struct Cluster {
     static_assert(ClusterSizeX > 0 && ClusterSizeY > 0,
                   "Cluster sizes must be bigger than zero");
+    /// @brief Cluster center x coordinate (in pixel coordinates)
     CoordType x;
+    /// @brief Cluster center y coordinate (in pixel coordinates)
     CoordType y;
+    /// @brief Cluster data stored in row-major order starting from top-left
     std::array<T, ClusterSizeX * ClusterSizeY> data;
     static constexpr uint8_t cluster_size_x = ClusterSizeX;
@@ -38,10 +45,12 @@ struct Cluster {
     using value_type = T;
     using coord_type = CoordType;
+    /**
+     * @brief Sum of all elements in the cluster
+     */
     T sum() const { return std::accumulate(data.begin(), data.end(), T{}); }
     // TODO: handle 1 dimensional clusters
+    // TODO: change int to corner
     /**
      * @brief sum of 2x2 subcluster with highest energy
      * @return photon energy of subcluster, 2x2 subcluster index relative to
@@ -112,66 +121,71 @@ struct Cluster {
  * highest sum.
  * @param c Cluster to reduce
  * @return reduced cluster
+ * @note The cluster is filled using row major ordering starting at the top-left
+ * (thus for a max subcluster in the top left cornern the photon hit is at
+ * the fourth position)
  */
 template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
-          typename CoordType = int16_t>
+          typename CoordType = uint16_t>
 Cluster<T, 2, 2, CoordType>
 reduce_to_2x2(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &c) {
     static_assert(ClusterSizeX >= 2 && ClusterSizeY >= 2,
                   "Cluster sizes must be at least 2x2 for reduction to 2x2");
-    // TODO maybe add sanity check and check that center is in max subcluster
-    Cluster<T, 2, 2, CoordType> result;
+    Cluster<T, 2, 2, CoordType> result{};
     auto [sum, index] = c.max_sum_2x2();
-    int16_t cluster_center_index =
+    constexpr int16_t cluster_center_index =
         (ClusterSizeX / 2) + (ClusterSizeY / 2) * ClusterSizeX;
-    int16_t index_bottom_left_max_2x2_subcluster =
-        (int(static_cast<int>(index) / (ClusterSizeX - 1))) * ClusterSizeX +
-        static_cast<int>(index) % (ClusterSizeX - 1);
-    result.x =
-        c.x + (index_bottom_left_max_2x2_subcluster - cluster_center_index) %
-                  ClusterSizeX;
-    result.y =
-        c.y - (index_bottom_left_max_2x2_subcluster - cluster_center_index) /
-                  ClusterSizeX;
+    int16_t index_top_left_max_2x2_subcluster = cluster_center_index;
+    switch (index) {
+    case corner::cTopLeft:
+        index_top_left_max_2x2_subcluster -= (ClusterSizeX + 1);
+        break;
+    case corner::cTopRight:
+        index_top_left_max_2x2_subcluster -= ClusterSizeX;
+        break;
+    case corner::cBottomLeft:
+        index_top_left_max_2x2_subcluster -= 1;
+        break;
+    case corner::cBottomRight:
+        // no change needed
+        break;
+    }
+    result.x = c.x;
+    result.y = c.y;
     result.data = {
-        c.data[index_bottom_left_max_2x2_subcluster],
-        c.data[index_bottom_left_max_2x2_subcluster + 1],
-        c.data[index_bottom_left_max_2x2_subcluster + ClusterSizeX],
-        c.data[index_bottom_left_max_2x2_subcluster + ClusterSizeX + 1]};
+        c.data[index_top_left_max_2x2_subcluster],
+        c.data[index_top_left_max_2x2_subcluster + 1],
+        c.data[index_top_left_max_2x2_subcluster + ClusterSizeX],
+        c.data[index_top_left_max_2x2_subcluster + ClusterSizeX + 1]};
     return result;
 }
 template <typename T>
-Cluster<T, 2, 2, int16_t> reduce_to_2x2(const Cluster<T, 3, 3, int16_t> &c) {
-    Cluster<T, 2, 2, int16_t> result;
+Cluster<T, 2, 2, uint16_t> reduce_to_2x2(const Cluster<T, 3, 3, uint16_t> &c) {
+    Cluster<T, 2, 2, uint16_t> result{};
     auto [s, i] = c.max_sum_2x2();
+    result.x = c.x;
+    result.y = c.y;
     switch (i) {
     case corner::cTopLeft:
-        result.x = c.x - 1;
-        result.y = c.y + 1;
        result.data = {c.data[0], c.data[1], c.data[3], c.data[4]};
        break;
    case corner::cTopRight:
-        result.x = c.x;
-        result.y = c.y + 1;
        result.data = {c.data[1], c.data[2], c.data[4], c.data[5]};
        break;
    case corner::cBottomLeft:
-        result.x = c.x - 1;
-        result.y = c.y;
        result.data = {c.data[3], c.data[4], c.data[6], c.data[7]};
        break;
    case corner::cBottomRight:
-        result.x = c.x;
-        result.y = c.y;
        result.data = {c.data[4], c.data[5], c.data[7], c.data[8]};
        break;
    }
@@ -179,43 +193,8 @@ Cluster<T, 2, 2, int16_t> reduce_to_2x2(const Cluster<T, 3, 3, int16_t> &c) {
     return result;
 }
-template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
-          typename CoordType = int16_t>
-inline std::pair<T, uint16_t>
-max_3x3_sum(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cluster) {
-    if constexpr (ClusterSizeX == 3 && ClusterSizeY == 3) {
-        return std::make_pair(cluster.sum(), 0);
-    } else {
-        size_t index = 0;
-        T max_3x3_subcluster_sum = 0;
-        for (size_t i = 0; i < ClusterSizeY - 2; ++i) {
-            for (size_t j = 0; j < ClusterSizeX - 2; ++j) {
-                T sum = cluster.data[i * ClusterSizeX + j] +
-                        cluster.data[i * ClusterSizeX + j + 1] +
-                        cluster.data[i * ClusterSizeX + j + 2] +
-                        cluster.data[(i + 1) * ClusterSizeX + j] +
-                        cluster.data[(i + 1) * ClusterSizeX + j + 1] +
-                        cluster.data[(i + 1) * ClusterSizeX + j + 2] +
-                        cluster.data[(i + 2) * ClusterSizeX + j] +
-                        cluster.data[(i + 2) * ClusterSizeX + j + 1] +
-                        cluster.data[(i + 2) * ClusterSizeX + j + 2];
-                if (sum > max_3x3_subcluster_sum) {
-                    max_3x3_subcluster_sum = sum;
-                    index = i * (ClusterSizeX - 2) + j;
-                }
-            }
-        }
-        return std::make_pair(max_3x3_subcluster_sum, index);
-    }
-}
 /**
- * @brief Reduce a cluster to a 3x3 cluster by selecting the 3x3 block with the
- * highest sum.
+ * @brief Reduce a cluster to a 3x3 cluster
  * @param c Cluster to reduce
  * @return reduced cluster
  */
@@ -227,40 +206,24 @@ reduce_to_3x3(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &c) {
     static_assert(ClusterSizeX >= 3 && ClusterSizeY >= 3,
                   "Cluster sizes must be at least 3x3 for reduction to 3x3");
-    Cluster<T, 3, 3, CoordType> result;
-    // TODO maybe add sanity check and check that center is in max subcluster
-    auto [sum, index] = max_3x3_sum(c);
+    Cluster<T, 3, 3, CoordType> result{};
     int16_t cluster_center_index =
         (ClusterSizeX / 2) + (ClusterSizeY / 2) * ClusterSizeX;
-    int16_t index_center_max_3x3_subcluster =
-        (int(index / (ClusterSizeX - 2))) * ClusterSizeX + ClusterSizeX +
-        index % (ClusterSizeX - 2) + 1;
-    int16_t index_3x3_subcluster_cluster_center =
-        int((cluster_center_index - 1 - ClusterSizeX) / ClusterSizeX) *
-            (ClusterSizeX - 2) +
-        (cluster_center_index - 1 - ClusterSizeX) % ClusterSizeX;
-    result.x =
-        c.x + (index % (ClusterSizeX - 2) -
-               (index_3x3_subcluster_cluster_center % (ClusterSizeX - 2)));
-    result.y =
-        c.y - (index / (ClusterSizeX - 2) -
-               (index_3x3_subcluster_cluster_center / (ClusterSizeX - 2)));
-    result.data = {c.data[index_center_max_3x3_subcluster - ClusterSizeX - 1],
-                   c.data[index_center_max_3x3_subcluster - ClusterSizeX],
-                   c.data[index_center_max_3x3_subcluster - ClusterSizeX + 1],
-                   c.data[index_center_max_3x3_subcluster - 1],
-                   c.data[index_center_max_3x3_subcluster],
-                   c.data[index_center_max_3x3_subcluster + 1],
-                   c.data[index_center_max_3x3_subcluster + ClusterSizeX - 1],
-                   c.data[index_center_max_3x3_subcluster + ClusterSizeX],
-                   c.data[index_center_max_3x3_subcluster + ClusterSizeX + 1]};
+    result.x = c.x;
+    result.y = c.y;
+    result.data = {c.data[cluster_center_index - ClusterSizeX - 1],
+                   c.data[cluster_center_index - ClusterSizeX],
+                   c.data[cluster_center_index - ClusterSizeX + 1],
+                   c.data[cluster_center_index - 1],
+                   c.data[cluster_center_index],
+                   c.data[cluster_center_index + 1],
+                   c.data[cluster_center_index + ClusterSizeX - 1],
+                   c.data[cluster_center_index + ClusterSizeX],
+                   c.data[cluster_center_index + ClusterSizeX + 1]};
     return result;
 }
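// Illustration (not from the source): for a 5x5 cluster the rewritten
// reduce_to_3x3 keeps the 3x3 block around the cluster centre, i.e. with
// cluster_center_index = 2 + 2 * 5 = 12 the result holds
// c.data[{6, 7, 8, 11, 12, 13, 16, 17, 18}] and x/y are unchanged, whereas the
// removed max_3x3_sum based version searched for the brightest 3x3 block and
// shifted x/y accordingly.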

View File

@@ -11,7 +11,8 @@
 namespace aare {
 template <typename ClusterType,
-          typename = std::enable_if_t<is_cluster_v<ClusterType>>>
+          typename = std::enable_if_t<is_cluster_v<ClusterType>>,
+          typename = std::enable_if_t<no_2x2_cluster<ClusterType>::value>>
 class ClusterFileSink {
     ProducerConsumerQueue<ClusterVector<ClusterType>> *m_source;
     std::atomic<bool> m_stop_requested{false};

View File

@@ -11,8 +11,16 @@
 namespace aare {
+template <typename ClusterType,
+          typename = std::enable_if_t<is_cluster_v<ClusterType>>>
+struct no_2x2_cluster {
+    constexpr static bool value =
+        ClusterType::cluster_size_x > 2 && ClusterType::cluster_size_y > 2;
+};
 template <typename ClusterType = Cluster<int32_t, 3, 3>,
-          typename FRAME_TYPE = uint16_t, typename PEDESTAL_TYPE = double>
+          typename FRAME_TYPE = uint16_t, typename PEDESTAL_TYPE = double,
+          typename = std::enable_if_t<no_2x2_cluster<ClusterType>::value>>
 class ClusterFinder {
     Shape<2> m_image_size;
     const PEDESTAL_TYPE m_nSigma;
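// Illustration (not from the source): no_2x2_cluster is true only for cluster
// types strictly larger than 2 in both dimensions, e.g.
//   static_assert(no_2x2_cluster<Cluster<int32_t, 3, 3>>::value);
//   static_assert(!no_2x2_cluster<Cluster<int32_t, 2, 2>>::value);
// so instantiating ClusterFinder (and ClusterFinderMT / ClusterFileSink) with a
// 2x2 cluster type now fails at compile time.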

View File

@@ -33,7 +33,8 @@ struct FrameWrapper {
  * @tparam CT type of the cluster data
  */
 template <typename ClusterType = Cluster<int32_t, 3, 3>,
-          typename FRAME_TYPE = uint16_t, typename PEDESTAL_TYPE = double>
+          typename FRAME_TYPE = uint16_t, typename PEDESTAL_TYPE = double,
+          typename = std::enable_if_t<no_2x2_cluster<ClusterType>::value>>
 class ClusterFinderMT {
   protected:

View File

@@ -29,7 +29,7 @@ class ClusterVector; // Forward declaration
  * needed.
  * @tparam T data type of the pixels in the cluster
  * @tparam CoordType data type of the x and y coordinates of the cluster
- * (normally int16_t)
+ * (normally uint16_t)
  */
 template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
           typename CoordType>
@@ -177,9 +177,12 @@ class ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> {
  * highest sum.
  * @param cv Clustervector containing clusters to reduce
  * @return Clustervector with reduced clusters
+ * @note The cluster is filled using row major ordering starting at the top-left
+ * (thus for a max subcluster in the top left cornern the photon hit is at
+ * the fourth position)
  */
 template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
-          typename CoordType = uint16_t>
+          typename CoordType>
 ClusterVector<Cluster<T, 2, 2, CoordType>> reduce_to_2x2(
     const ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>>
         &cv) {
@@ -191,13 +194,12 @@ ClusterVector<Cluster<T, 2, 2, CoordType>> reduce_to_2x2(
 }
 /**
- * @brief Reduce a cluster to a 3x3 cluster by selecting the 3x3 block with the
- * highest sum.
+ * @brief Reduce a cluster to a 3x3 cluster
  * @param cv Clustervector containing clusters to reduce
  * @return Clustervector with reduced clusters
  */
 template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
-          typename CoordType = uint16_t>
+          typename CoordType>
 ClusterVector<Cluster<T, 3, 3, CoordType>> reduce_to_3x3(
     const ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>>
         &cv) {

View File

@@ -18,7 +18,10 @@ struct Photon {
 };
 class Interpolator {
+    // marginal CDF of eta_x (if rosenblatt applied), conditional
+    // CDF of eta_x conditioned on eta_y
     NDArray<double, 3> m_ietax;
+    // conditional CDF of eta_y conditioned on eta_x
     NDArray<double, 3> m_ietay;
     NDArray<double, 1> m_etabinsx;
@@ -26,108 +29,210 @@ class Interpolator {
     NDArray<double, 1> m_energy_bins;
   public:
/**
* @brief Constructor for the Interpolator class
* @param etacube joint distribution of etaX, etaY and photon energy
* @param xbins bin edges for etaX
* @param ybins bin edges for etaY
* @param ebins bin edges for photon energy
* @note note first dimension is etaX, second etaY, third photon energy
*/
     Interpolator(NDView<double, 3> etacube, NDView<double, 1> xbins,
                  NDView<double, 1> ybins, NDView<double, 1> ebins);
/**
* @brief Constructor for the Interpolator class
* @param xbins bin edges for etaX
* @param ybins bin edges for etaY
* @param ebins bin edges for photon energy
*/
Interpolator(NDView<double, 1> xbins, NDView<double, 1> ybins,
NDView<double, 1> ebins);
/**
* @brief transforms the joint eta distribution of etaX and etaY to the two
* independant uniform distributions based on the Roseblatt transform for
* each energy level
* @param etacube joint distribution of etaX, etaY and photon energy
* @note note first dimension is etaX, second etaY, third photon energy
*/
void rosenblatttransform(NDView<double, 3> etacube);
     NDArray<double, 3> get_ietax() { return m_ietax; }
     NDArray<double, 3> get_ietay() { return m_ietay; }
-    template <typename ClusterType,
+    /**
+     * @brief interpolates the cluster centers for all clusters to a better
+     * precision
+     * @tparam ClusterType Type of Clusters to interpolate
+     * @tparam Etafunction Function object that calculates desired eta default:
+     * calculate_eta2
+     * @return interpolated photons (photon positions are given as double but
+     * following row column format e.g. x=0, y=0 means top row and first column
+     * of frame)
+     */
+    template <auto EtaFunction = calculate_eta2, typename ClusterType,
               typename Eanble = std::enable_if_t<is_cluster_v<ClusterType>>>
     std::vector<Photon> interpolate(const ClusterVector<ClusterType> &clusters);
private:
/**
* @brief implements underlying interpolation logic based on EtaFunction
* Type
* @tparam EtaFunction Function object that calculates desired eta default:
* @param u: transformed photon position in x between [0,1]
* @param v: transformed photon position in y between [0,1]
* @param c: corner of eta
*/
template <auto EtaFunction, typename ClusterType>
void interpolation_logic(Photon &photon, const double u, const double v,
const corner c = corner::cTopLeft);
/**
* @brief bilinear interpolation of the transformed eta values
* @param ix index of etaX bin
* @param iy index of etaY bin
* @param ie index of energy bin
* @return pair of interpolated transformed eta values (ietax, ietay)
*/
template <typename T>
std::pair<double, double>
bilinear_interpolation(const size_t ix, const size_t iy, const size_t ie,
const Eta2<T> &eta);
 };
-// TODO: generalize to support any clustertype!!! otherwise add std::enable_if_t
-// to only take Cluster2x2 and Cluster3x3
-template <typename ClusterType, typename Enable>
+template <typename T>
+std::pair<double, double>
+Interpolator::bilinear_interpolation(const size_t ix, const size_t iy,
+                                     const size_t ie, const Eta2<T> &eta) {
auto next_index_y = static_cast<ssize_t>(iy + 1) >= m_ietax.shape(1)
? m_ietax.shape(1) - 1
: iy + 1;
auto next_index_x = static_cast<ssize_t>(ix + 1) >= m_ietax.shape(0)
? m_ietax.shape(0) - 1
: ix + 1;
// bilinear interpolation
double ietax_interp_left = linear_interpolation(
{m_etabinsy(iy), m_etabinsy(iy + 1)},
{m_ietax(ix, iy, ie), m_ietax(ix, next_index_y, ie)}, eta.y);
double ietax_interp_right =
linear_interpolation({m_etabinsy(iy), m_etabinsy(iy + 1)},
{m_ietax(next_index_x, iy, ie),
m_ietax(next_index_x, next_index_y, ie)},
eta.y);
// transformed photon position x between [0,1]
double ietax_interpolated =
linear_interpolation({m_etabinsx(ix), m_etabinsx(ix + 1)},
{ietax_interp_left, ietax_interp_right}, eta.x);
double ietay_interp_left = linear_interpolation(
{m_etabinsx(ix), m_etabinsx(ix + 1)},
{m_ietay(ix, iy, ie), m_ietay(next_index_x, iy, ie)}, eta.x);
double ietay_interp_right =
linear_interpolation({m_etabinsx(ix), m_etabinsx(ix + 1)},
{m_ietay(ix, next_index_y, ie),
m_ietay(next_index_x, next_index_y, ie)},
eta.x);
// transformed photon position y between [0,1]
double ietay_interpolated =
linear_interpolation({m_etabinsy(iy), m_etabinsy(iy + 1)},
{ietay_interp_left, ietay_interp_right}, eta.y);
return {ietax_interpolated, ietay_interpolated};
}
template <auto EtaFunction, typename ClusterType, typename Enable>
 std::vector<Photon>
 Interpolator::interpolate(const ClusterVector<ClusterType> &clusters) {
     std::vector<Photon> photons;
     photons.reserve(clusters.size());
-    if (clusters.cluster_size_x() == 3 || clusters.cluster_size_y() == 3) {
-        for (const ClusterType &cluster : clusters) {
-            auto eta = calculate_eta2(cluster);
-            Photon photon;
-            photon.x = cluster.x;
-            photon.y = cluster.y;
-            photon.energy = static_cast<decltype(photon.energy)>(eta.sum);
-            // auto ie = nearest_index(m_energy_bins, photon.energy)-1;
-            // auto ix = nearest_index(m_etabinsx, eta.x)-1;
-            // auto iy = nearest_index(m_etabinsy, eta.y)-1;
-            // Finding the index of the last element that is smaller
-            // should work fine as long as we have many bins
-            auto ie = last_smaller(m_energy_bins, photon.energy);
-            auto ix = last_smaller(m_etabinsx, eta.x);
-            auto iy = last_smaller(m_etabinsy, eta.y);
-            // fmt::print("ex: {}, ix: {}, iy: {}\n", ie, ix, iy);
-            double dX, dY;
-            // cBottomLeft = 0,
-            // cBottomRight = 1,
-            // cTopLeft = 2,
-            // cTopRight = 3
-            // TODO: could also chaneg the sign of the eta calculation
-            switch (static_cast<corner>(eta.c)) {
-            case corner::cTopLeft:
-                dX = 0.0;
-                dY = 0.0;
-                break;
-            case corner::cTopRight:;
-                dX = 1.0;
-                dY = 0.0;
-                break;
-            case corner::cBottomLeft:
-                dX = 0.0;
-                dY = 1.0;
-                break;
-            case corner::cBottomRight:
-                dX = 1.0;
-                dY = 1.0;
-                break;
-            }
-            photon.x -= m_ietax(ix, iy, ie) - dX;
-            photon.y -= m_ietay(ix, iy, ie) - dY;
-            photons.push_back(photon);
-        }
-    } else if (clusters.cluster_size_x() == 2 ||
-               clusters.cluster_size_y() == 2) {
-        for (const ClusterType &cluster : clusters) {
-            auto eta = calculate_eta2(cluster);
-            Photon photon;
-            photon.x = cluster.x;
-            photon.y = cluster.y;
-            photon.energy = static_cast<decltype(photon.energy)>(eta.sum);
-            // Now do some actual interpolation.
-            // Find which energy bin the cluster is in
-            // auto ie = nearest_index(m_energy_bins, photon.energy)-1;
-            // auto ix = nearest_index(m_etabinsx, eta.x)-1;
-            // auto iy = nearest_index(m_etabinsy, eta.y)-1;
-            // Finding the index of the last element that is smaller
-            // should work fine as long as we have many bins
-            auto ie = last_smaller(m_energy_bins, photon.energy);
-            auto ix = last_smaller(m_etabinsx, eta.x);
-            auto iy = last_smaller(m_etabinsy, eta.y);
-            // TODO: why 2?
-            photon.x -=
-                m_ietax(ix, iy, ie); // eta goes between 0 and 1 but we could
-                                     // move the hit anywhere in the 2x2
-            photon.y -= m_ietay(ix, iy, ie);
-            photons.push_back(photon);
-        }
-    } else {
-        throw std::runtime_error(
-            "Only 3x3 and 2x2 clusters are supported for interpolation");
-    }
+    for (const ClusterType &cluster : clusters) {
+        auto eta = EtaFunction(cluster);
+        Photon photon;
+        photon.x = cluster.x;
+        photon.y = cluster.y;
+        photon.energy = static_cast<decltype(photon.energy)>(eta.sum);
+        // std::cout << "eta.x: " << eta.x << " eta.y: " << eta.y << std::endl;
+
+        // Finding the index of the last element that is smaller
+        // should work fine as long as we have many bins
+        auto ie = last_smaller(m_energy_bins, photon.energy);
+        auto ix = last_smaller(m_etabinsx, eta.x);
+        auto iy = last_smaller(m_etabinsy, eta.y);
+        // std::cout << "ix: " << ix << " iy: " << iy << std::endl;
+
+        // TODO: bilinear interpolation only works if all bins have a size > 1 -
+        // otherwise bilinear interpolation with zero values which skew the
+        // results
+        // TODO: maybe trim the bins at the edges with zero values beforehand
+        // auto [ietax_interpolated, ietay_interpolated] =
+        //     bilinear_interpolation(ix, iy, ie, eta);
+        double ietax_interpolated = m_ietax(ix, iy, ie);
+        double ietay_interpolated = m_ietay(ix, iy, ie);
+
+        interpolation_logic<EtaFunction, ClusterType>(
+            photon, ietax_interpolated, ietay_interpolated, eta.c);
+
+        photons.push_back(photon);
+    }
     return photons;
 }
template <auto EtaFunction, typename ClusterType>
void Interpolator::interpolation_logic(Photon &photon, const double u,
const double v, const corner c) {
// std::cout << "u: " << u << " v: " << v << std::endl;
// TODO: try to call this with std::is_same_v and have it constexpr if
// possible
if (EtaFunction == &calculate_eta2<typename ClusterType::value_type,
ClusterType::cluster_size_x,
ClusterType::cluster_size_y,
typename ClusterType::coord_type> ||
EtaFunction == &calculate_full_eta2<typename ClusterType::value_type,
ClusterType::cluster_size_x,
ClusterType::cluster_size_y,
typename ClusterType::coord_type>) {
double dX{}, dY{};
// TODO: could also chaneg the sign of the eta calculation
switch (c) {
case corner::cTopLeft:
dX = -1.0;
dY = -1.0;
break;
case corner::cTopRight:;
dX = 0.0;
dY = -1.0;
break;
case corner::cBottomLeft:
dX = -1.0;
dY = 0.0;
break;
case corner::cBottomRight:
dX = 0.0;
dY = 0.0;
break;
}
photon.x = photon.x + 0.5 + u + dX; // use pixel center + 0.5
photon.y = photon.y + 0.5 + v +
dY; // eta2 calculates the ratio between bottom and sum of
// bottom and top shift by 1 add eta value correctly
} else {
photon.x += u;
photon.y += v;
}
}
 } // namespace aare

View File

@@ -110,4 +110,19 @@ template <typename Container> bool all_equal(const Container &c) {
     return false;
 }
/**
* linear interpolation
* @param bin_edge left and right bin edges
* @param bin_values function values at bin edges
* @param coord coordinate to interpolate at
* @return interpolated value at coord
*/
inline double linear_interpolation(const std::pair<double, double> &bin_edge,
const std::pair<double, double> &bin_values,
const double coord) {
const double bin_width = bin_edge.second - bin_edge.first;
return bin_values.first * (1 - (coord - bin_edge.first) / bin_width) +
bin_values.second * (coord - bin_edge.first) / bin_width;
}
 } // namespace aare
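A quick numerical check of the helper above (illustrative, not from the source): with bin edges {0.0, 2.0} and bin values {1.0, 5.0}, a coordinate of 0.5 lies a quarter of the way across the bin, so the interpolated value is 1.0 * 0.75 + 5.0 * 0.25 = 2.0.

#include <cassert>
#include <cmath>
// assumes the aare header defining linear_interpolation is included
void check_linear_interpolation() {
    double v = aare::linear_interpolation({0.0, 2.0}, {1.0, 5.0}, 0.5);
    assert(std::abs(v - 2.0) < 1e-12);
}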

View File

@@ -32,7 +32,7 @@ set( PYTHON_FILES
     aare/CtbRawFile.py
     aare/ClusterFinder.py
     aare/ClusterVector.py
+    aare/Cluster.py
     aare/calibration.py
     aare/func.py
     aare/RawFile.py

python/aare/Cluster.py (new file, 24 lines)
View File

@@ -0,0 +1,24 @@
from . import _aare
import numpy as np
from .ClusterFinder import _type_to_char
def Cluster(x : int, y : int, data, cluster_size=(3,3), dtype = np.int32):
"""
Factory function to create a Cluster object. Provides a cleaner syntax for
the templated Cluster in C++.
.. code-block:: python
from aare import Cluster
Cluster(cluster_size=(3,3), dtype=np.float64)
"""
try:
class_name = f"Cluster{cluster_size[0]}x{cluster_size[1]}{_type_to_char(dtype)}"
cls = getattr(_aare, class_name)
except AttributeError:
raise ValueError(f"Unsupported combination of type and cluster size: {dtype}/{cluster_size} when requesting {class_name}")
return cls(x, y, data)


@@ -11,6 +11,8 @@ def _type_to_char(dtype):
return 'f' return 'f'
elif dtype == np.float64: elif dtype == np.float64:
return 'd' return 'd'
elif dtype == np.int16:
return 'i16'
else: else:
raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32, np.float32, and np.float64 are supported.") raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32, np.float32, and np.float64 are supported.")
@@ -27,7 +29,7 @@ def _get_class(name, cluster_size, dtype):
def ClusterFinder(image_size, cluster_size, n_sigma=5, dtype = np.int32, capacity = 1024): def ClusterFinder(image_size, cluster_size=(3,3), n_sigma=5, dtype = np.int32, capacity = 1024):
""" """
Factory function to create a ClusterFinder object. Provides a cleaner syntax for Factory function to create a ClusterFinder object. Provides a cleaner syntax for
the templated ClusterFinder in C++. the templated ClusterFinder in C++.
@@ -66,7 +68,7 @@ def ClusterFileSink(clusterfindermt, cluster_file, dtype=np.int32):
return cls(clusterfindermt, cluster_file) return cls(clusterfindermt, cluster_file)
def ClusterFile(fname, cluster_size=(3,3), dtype=np.int32, chunk_size = 1000): def ClusterFile(fname, cluster_size=(3,3), dtype=np.int32, chunk_size = 1000, mode = "r"):
""" """
Factory function to create a ClusterFile object. Provides a cleaner syntax for Factory function to create a ClusterFile object. Provides a cleaner syntax for
the templated ClusterFile in C++. the templated ClusterFile in C++.
@@ -84,4 +86,4 @@ def ClusterFile(fname, cluster_size=(3,3), dtype=np.int32, chunk_size = 1000):
""" """
cls = _get_class("ClusterFile", cluster_size, dtype) cls = _get_class("ClusterFile", cluster_size, dtype)
return cls(fname, chunk_size=chunk_size) return cls(fname, chunk_size=chunk_size, mode=mode)
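The `ClusterFile` factory now also forwards the new `mode` argument to the underlying C++ class. A hedged usage sketch (the file name is a placeholder; `read_frame` is used as in the tests later in this diff):

```python
import numpy as np
from aare import ClusterFile

# Placeholder file name; selects the binding for 3x3 float64 clusters and
# opens the file read-only via the new mode argument.
f = ClusterFile("simulated_clusters.clust", cluster_size=(3, 3),
                dtype=np.float64, chunk_size=1000, mode="r")
cv = f.read_frame()
```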


@@ -1,12 +1,22 @@
# SPDX-License-Identifier: MPL-2.0 # SPDX-License-Identifier: MPL-2.0
from ._aare import ClusterVector_Cluster3x3i from . import _aare
import numpy as np import numpy as np
from .ClusterFinder import _get_class
def ClusterVector(cluster_size, dtype = np.int32): def ClusterVector(cluster_size=(3,3), dtype = np.int32):
"""
Factory function to create a ClusterVector object. Provides a cleaner syntax for
the templated ClusterVector in C++.
.. code-block:: python
from aare import ClusterVector
ClusterVector(cluster_size=(3,3), dtype=np.float64)
"""
cls = _get_class("ClusterVector", cluster_size, dtype)
return cls()
if dtype == np.int32 and cluster_size == (3,3):
return ClusterVector_Cluster3x3i()
else:
raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32 is supported.")


@@ -8,16 +8,18 @@ from ._aare import Pedestal_d, Pedestal_f, ClusterFinder_Cluster3x3i, VarCluster
from ._aare import DetectorType from ._aare import DetectorType
from ._aare import hitmap from ._aare import hitmap
from ._aare import ROI from ._aare import ROI
from ._aare import corner
# from ._aare import ClusterFinderMT, ClusterCollector, ClusterFileSink, ClusterVector_i # from ._aare import ClusterFinderMT, ClusterCollector, ClusterFileSink, ClusterVector_i
from .ClusterFinder import ClusterFinder, ClusterCollector, ClusterFinderMT, ClusterFileSink, ClusterFile from .ClusterFinder import ClusterFinder, ClusterCollector, ClusterFinderMT, ClusterFileSink, ClusterFile
from .ClusterVector import ClusterVector from .ClusterVector import ClusterVector
from .Cluster import Cluster
from ._aare import fit_gaus, fit_pol1, fit_scurve, fit_scurve2 from ._aare import fit_gaus, fit_pol1, fit_scurve, fit_scurve2
from ._aare import Interpolator from ._aare import Interpolator
from ._aare import calculate_eta2 from ._aare import calculate_eta2, calculate_eta3, calculate_cross_eta3, calculate_full_eta2
from ._aare import reduce_to_2x2, reduce_to_3x3 from ._aare import reduce_to_2x2, reduce_to_3x3
from ._aare import apply_custom_weights from ._aare import apply_custom_weights


@@ -81,9 +81,7 @@ void reduce_to_3x3(py::module &m) {
[](const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cl) { [](const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cl) {
return reduce_to_3x3(cl); return reduce_to_3x3(cl);
}, },
py::return_value_policy::move, py::return_value_policy::move, R"(Reduce cluster to 3x3 subcluster)");
"Reduce cluster to 3x3 subcluster by taking the 3x3 subcluster with "
"the highest photon energy.");
} }
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY, template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
@@ -96,8 +94,15 @@ void reduce_to_2x2(py::module &m) {
return reduce_to_2x2(cl); return reduce_to_2x2(cl);
}, },
py::return_value_policy::move, py::return_value_policy::move,
"Reduce cluster to 2x2 subcluster by taking the 2x2 subcluster with " R"(
"the highest photon energy."); Reduce cluster to 2x2 subcluster by taking the 2x2 subcluster with
the highest photon energy.
Returns:
reduced cluster (the data is filled in row-major order starting at the top left, so for a max subcluster in the top-left corner the photon hit is at the fourth position.)
)");
} }
#pragma GCC diagnostic pop #pragma GCC diagnostic pop
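As the updated docstring notes, the reduced cluster is filled in row-major order starting at the top left, so when the brightest 2x2 block is the top-left one the photon hit (the old centre pixel) ends up at the fourth position. A short Python sketch using the same values as the C++ reduction test further down in this diff:

```python
import numpy as np
from aare import _aare

# 3x3 cluster whose brightest 2x2 block is the top-left one.
cluster = _aare.Cluster3x3i(5, 5, np.array([2, 2, 1,
                                            2, 3, 1,
                                            1, 1, 1], dtype=np.int32))
reduced = _aare.reduce_to_2x2(cluster)
# Row-major fill starting at the top left: the centre pixel (3) is the fourth entry.
assert np.asarray(reduced.data).flatten()[3] == 3
```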


@@ -82,23 +82,4 @@ void define_ClusterFile(py::module &m, const std::string &typestr) {
}); });
} }
template <typename Type, uint8_t CoordSizeX, uint8_t CoordSizeY,
typename CoordType = uint16_t>
void register_calculate_eta(py::module &m) {
using ClusterType = Cluster<Type, CoordSizeX, CoordSizeY, CoordType>;
m.def("calculate_eta2",
[](const aare::ClusterVector<ClusterType> &clusters) {
auto eta2 = new NDArray<double, 2>(calculate_eta2(clusters));
return return_image_data(eta2);
});
m.def("calculate_eta2", [](const aare::Cluster<Type, CoordSizeX, CoordSizeY,
CoordType> &cluster) {
auto eta2 = calculate_eta2(cluster);
// TODO return proper eta class
return py::make_tuple(eta2.x, eta2.y, eta2.sum);
});
}
#pragma GCC diagnostic pop #pragma GCC diagnostic pop


@@ -121,12 +121,13 @@ void define_2x2_reduction(py::module &m) {
reduce_to_2x2(cv)); reduce_to_2x2(cv));
}, },
R"( R"(
Reduce cluster to 2x2 subcluster by taking the 2x2 subcluster with Reduce cluster to 2x2 subcluster by taking the 2x2 subcluster with
the highest photon energy." the highest photon energy.
Parameters Parameters
----------
cv : ClusterVector cv : ClusterVector (clusters are filled in row-major ordering starting at the top left. Thus for a max subcluster in the top left corner the photon hit is at the fourth position.)
)", )",
py::arg("clustervector")); py::arg("clustervector"));
} }
@@ -143,11 +144,10 @@ void define_3x3_reduction(py::module &m) {
reduce_to_3x3(cv)); reduce_to_3x3(cv));
}, },
R"( R"(
Reduce cluster to 3x3 subcluster
Reduce cluster to 3x3 subcluster by taking the 3x3 subcluster with
the highest photon energy."
Parameters Parameters
----------
cv : ClusterVector cv : ClusterVector
)", )",
py::arg("clustervector")); py::arg("clustervector"));

104
python/src/bind_Eta.hpp Normal file

@@ -0,0 +1,104 @@
#include "aare/CalculateEta.hpp"
#include <cstdint>
// #include <pybind11/native_enum.h> only for version 3
#include <pybind11/pybind11.h>
namespace py = pybind11;
using namespace ::aare;
template <typename T>
void define_eta(py::module &m, const std::string &typestr) {
auto class_name = fmt::format("Eta{}", typestr);
py::class_<Eta2<T>>(m, class_name.c_str())
.def(py::init<>())
.def_readonly("x", &Eta2<T>::x, "eta x value")
.def_readonly("y", &Eta2<T>::y, "eta y value")
.def_readonly("c", &Eta2<T>::c,
"eta corner value cTopLeft, cTopRight, "
"cBottomLeft, cBottomRight")
.def_readonly("sum", &Eta2<T>::sum, "photon energy of cluster");
}
void define_corner_enum(py::module &m) {
py::enum_<corner>(m, "corner", "enum.Enum")
.value("cTopLeft", corner::cTopLeft)
.value("cTopRight", corner::cTopRight)
.value("cBottomLeft", corner::cBottomLeft)
.value("cBottomRight", corner::cBottomRight)
.export_values();
}
template <typename Type, uint8_t CoordSizeX, uint8_t CoordSizeY,
typename CoordType = uint16_t>
void register_calculate_2x2eta(py::module &m) {
using ClusterType = Cluster<Type, CoordSizeX, CoordSizeY, CoordType>;
m.def(
"calculate_eta2",
[](const aare::ClusterVector<ClusterType> &clusters) {
auto eta2 = new std::vector<Eta2<typename ClusterType::value_type>>(
calculate_eta2(clusters));
return return_vector(eta2);
},
R"(calculates eta2x2)", py::arg("clusters"));
m.def(
"calculate_eta2",
[](const aare::Cluster<Type, CoordSizeX, CoordSizeY, CoordType>
&cluster) { return calculate_eta2(cluster); },
R"(calculates eta2x2)", py::arg("cluster"));
m.def(
"calculate_full_eta2",
[](const aare::Cluster<Type, CoordSizeX, CoordSizeY, CoordType>
&cluster) { return calculate_full_eta2(cluster); },
R"(calculates full eta2x2)", py::arg("cluster"));
m.def(
"calculate_full_eta2",
[](const aare::ClusterVector<ClusterType> &clusters) {
auto eta2 = new std::vector<Eta2<typename ClusterType::value_type>>(
calculate_full_eta2(clusters));
return return_vector(eta2);
},
R"(calculates full eta2x2)", py::arg("clusters"));
}
template <typename Type, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
typename CoordType = uint16_t>
void register_calculate_3x3eta(py::module &m) {
using ClusterType = Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>;
m.def(
"calculate_eta3",
[](const aare::ClusterVector<ClusterType> &clusters) {
auto eta = new std::vector<Eta2<Type>>(calculate_eta3(clusters));
return return_vector(eta);
},
R"(calculates eta3x3 using entire cluster)", py::arg("clusters"));
m.def(
"calculate_cross_eta3",
[](const aare::ClusterVector<ClusterType> &clusters) {
auto eta =
new std::vector<Eta2<Type>>(calculate_cross_eta3(clusters));
return return_vector(eta);
},
R"(calculates eta3x3 taking into account cross pixels in cluster)",
py::arg("clusters"));
m.def(
"calculate_eta3",
[](const ClusterType &cluster) { return calculate_eta3(cluster); },
R"(calculates eta3x3 using entire cluster)", py::arg("cluster"));
m.def(
"calculate_cross_eta3",
[](const ClusterType &cluster) {
return calculate_cross_eta3(cluster);
},
R"(calculates eta3x3 taking into account cross pixels in cluster)",
py::arg("cluster"));
}
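The bindings above expose the eta calculations both for single clusters and for cluster vectors. A short Python usage sketch; the asserted eta2 values follow the Python test later in this diff, while the eta3 results are only computed, not asserted:

```python
import numpy as np
from aare import _aare, corner

cluster = _aare.Cluster3x3i(0, 0, np.ones(9, dtype=np.int32))

eta2 = _aare.calculate_eta2(cluster)          # eta of the brightest 2x2 subcluster
assert (eta2.x, eta2.y) == (0.5, 0.5)
assert eta2.c == corner.cTopLeft and eta2.sum == 4

eta3 = _aare.calculate_eta3(cluster)          # eta3x3 on the full cluster
cross = _aare.calculate_cross_eta3(cluster)   # eta3x3 from the cross pixels
full = _aare.calculate_full_eta2(cluster)     # eta2 on the full cluster
```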


@@ -1,4 +1,5 @@
// SPDX-License-Identifier: MPL-2.0 // SPDX-License-Identifier: MPL-2.0
#include "aare/CalculateEta.hpp"
#include "aare/Interpolator.hpp" #include "aare/Interpolator.hpp"
#include "aare/NDArray.hpp" #include "aare/NDArray.hpp"
#include "aare/NDView.hpp" #include "aare/NDView.hpp"
@@ -10,19 +11,41 @@
namespace py = pybind11; namespace py = pybind11;
#define REGISTER_INTERPOLATOR_ETA2(T, N, M, U) \
register_interpolate<T, N, M, U, aare::calculate_full_eta2<T, N, M, U>>( \
interpolator, "_full_eta2", "full eta2"); \
register_interpolate<T, N, M, U, aare::calculate_eta2<T, N, M, U>>( \
interpolator, "", "eta2");
#define REGISTER_INTERPOLATOR_ETA3(T, N, M, U) \
register_interpolate<T, N, M, U, aare::calculate_eta3<T, N, M, U>>( \
interpolator, "_eta3", "full eta3"); \
register_interpolate<T, N, M, U, aare::calculate_cross_eta3<T, N, M, U>>( \
interpolator, "_cross_eta3", "cross eta3");
template <typename Type, uint8_t CoordSizeX, uint8_t CoordSizeY, template <typename Type, uint8_t CoordSizeX, uint8_t CoordSizeY,
typename CoordType = uint16_t> typename CoordType = uint16_t, auto EtaFunction>
void register_interpolate(py::class_<aare::Interpolator> &interpolator) { void register_interpolate(py::class_<aare::Interpolator> &interpolator,
const std::string &typestr = "",
const std::string &doc_string_etatype = "eta2x2") {
using ClusterType = Cluster<Type, CoordSizeX, CoordSizeY, CoordType>; using ClusterType = Cluster<Type, CoordSizeX, CoordSizeY, CoordType>;
interpolator.def("interpolate", const std::string docstring = "interpolation based on " +
[](aare::Interpolator &self, doc_string_etatype +
const ClusterVector<ClusterType> &clusters) { "\n\nReturns:\n interpolated photons";
auto photons = self.interpolate<ClusterType>(clusters);
auto *ptr = new std::vector<Photon>{photons}; auto function_name = fmt::format("interpolate{}", typestr);
return return_vector(ptr);
}); interpolator.def(
function_name.c_str(),
[](aare::Interpolator &self,
const ClusterVector<ClusterType> &clusters) {
auto photons = self.interpolate<EtaFunction, ClusterType>(clusters);
auto *ptr = new std::vector<Photon>{photons};
return return_vector(ptr);
},
docstring.c_str(), py::arg("cluster_vector"));
} }
void define_interpolation_bindings(py::module &m) { void define_interpolation_bindings(py::module &m) {
@@ -31,33 +54,91 @@ void define_interpolation_bindings(py::module &m) {
auto interpolator = auto interpolator =
py::class_<aare::Interpolator>(m, "Interpolator") py::class_<aare::Interpolator>(m, "Interpolator")
.def(py::init([](py::array_t<double, py::array::c_style | .def(py::init(
py::array::forcecast> [](py::array_t<double,
etacube, py::array::c_style | py::array::forcecast>
py::array_t<double> xbins, etacube,
py::array_t<double> ybins, py::array_t<double> xbins, py::array_t<double> ybins,
py::array_t<double> ebins) { py::array_t<double> ebins) {
return Interpolator(make_view_3d(etacube), make_view_1d(xbins), return Interpolator(
make_view_1d(ybins), make_view_1d(ebins)); make_view_3d(etacube), make_view_1d(xbins),
})) make_view_1d(ybins), make_view_1d(ebins));
}),
R"doc(
Constructor
Args:
etacube:
joint distribution of eta_x, eta_y and photon energy (**Note:** the first dimension is eta_x, the second eta_y, the third the photon energy bins.)
xbins:
bin edges of etax
ybins:
bin edges of etay
ebins:
bin edges of photon energy
)doc",
py::arg("etacube"),
py::arg("xbins"), py::arg("ybins"),
py::arg("ebins"))
.def(py::init(
[](py::array_t<double> xbins, py::array_t<double> ybins,
py::array_t<double> ebins) {
return Interpolator(make_view_1d(xbins),
make_view_1d(ybins),
make_view_1d(ebins));
}),
R"(
Constructor
Args:
xbins:
bin edges of etax
ybins:
bin edges of etay
ebins:
bin edges of photon energy
)", py::arg("xbins"),
py::arg("ybins"), py::arg("ebins"))
.def(
"rosenblatttransform",
[](Interpolator &self,
py::array_t<double,
py::array::c_style | py::array::forcecast>
etacube) {
return self.rosenblatttransform(make_view_3d(etacube));
},
R"(
calculates the Rosenblatt transform for the given distribution
etacube:
joint distribution of eta_x, eta_y and photon energy (**Note:** the first dimension is eta_x, the second eta_y, the third the photon energy bins.)
)",
py::arg("etacube"))
.def("get_ietax", .def("get_ietax",
[](Interpolator &self) { [](Interpolator &self) {
auto *ptr = new NDArray<double, 3>{}; auto *ptr = new NDArray<double, 3>{};
*ptr = self.get_ietax(); *ptr = self.get_ietax();
return return_image_data(ptr); return return_image_data(ptr);
}) }, R"(conditional CDF of etax conditioned on etay, marginal CDF of etax (if rosenblatt transform applied))")
.def("get_ietay", [](Interpolator &self) { .def("get_ietay", [](Interpolator &self) {
auto *ptr = new NDArray<double, 3>{}; auto *ptr = new NDArray<double, 3>{};
*ptr = self.get_ietay(); *ptr = self.get_ietay();
return return_image_data(ptr); return return_image_data(ptr);
}); }, R"(conditional CDF of etay conditioned on etax)");
register_interpolate<int, 3, 3, uint16_t>(interpolator); REGISTER_INTERPOLATOR_ETA3(int, 3, 3, uint16_t);
register_interpolate<float, 3, 3, uint16_t>(interpolator); REGISTER_INTERPOLATOR_ETA3(float, 3, 3, uint16_t);
register_interpolate<double, 3, 3, uint16_t>(interpolator); REGISTER_INTERPOLATOR_ETA3(double, 3, 3, uint16_t);
register_interpolate<int, 2, 2, uint16_t>(interpolator);
register_interpolate<float, 2, 2, uint16_t>(interpolator); REGISTER_INTERPOLATOR_ETA2(int, 3, 3, uint16_t);
register_interpolate<double, 2, 2, uint16_t>(interpolator); REGISTER_INTERPOLATOR_ETA2(float, 3, 3, uint16_t);
REGISTER_INTERPOLATOR_ETA2(double, 3, 3, uint16_t);
REGISTER_INTERPOLATOR_ETA2(int, 2, 2, uint16_t);
REGISTER_INTERPOLATOR_ETA2(float, 2, 2, uint16_t);
REGISTER_INTERPOLATOR_ETA2(double, 2, 2, uint16_t);
// TODO! Evaluate without converting to double // TODO! Evaluate without converting to double
m.def( m.def(
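Putting the new constructor and the eta-specific `interpolate_*` methods together, a hedged end-to-end sketch (the stand-in cluster vector, bin ranges and single energy bin are assumptions, mirroring the simulated-data test later in this diff):

```python
import numpy as np
import boost_histogram as bh
from aare import Interpolator, calculate_eta2, _aare

# Stand-in cluster vector; in practice cv would come from ClusterFile(...).read_frame().
cv = _aare.ClusterVector_Cluster3x3i()
cv.push_back(_aare.Cluster3x3i(10, 10, np.ones(9, dtype=np.int32)))

eta = calculate_eta2(cv)                      # structured array with fields x, y, c, sum

hist = bh.Histogram(bh.axis.Regular(201, -0.1, 1.1),   # eta_x bins (assumed range)
                    bh.axis.Regular(201, -0.1, 1.1),   # eta_y bins (assumed range)
                    bh.axis.Regular(1, 0, 16))         # single energy bin (assumed range)
hist.fill(eta["x"], eta["y"], eta["sum"])

# Pass the full bin edges (nbins + 1 values); the eta cube itself has nbins entries per axis.
interp = Interpolator(hist, hist.axes[0].edges, hist.axes[1].edges, hist.axes[2].edges)

photons = interp.interpolate(cv)              # eta2-based interpolation
photons_full = interp.interpolate_full_eta2(cv)
```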


@@ -9,6 +9,7 @@
#include "bind_ClusterFinder.hpp" #include "bind_ClusterFinder.hpp"
#include "bind_ClusterFinderMT.hpp" #include "bind_ClusterFinderMT.hpp"
#include "bind_ClusterVector.hpp" #include "bind_ClusterVector.hpp"
#include "bind_Eta.hpp"
#include "bind_calibration.hpp" #include "bind_calibration.hpp"
// TODO! migrate the other names // TODO! migrate the other names
@@ -43,14 +44,16 @@ double, 'f' for float)
#define DEFINE_CLUSTER_BINDINGS(T, N, M, U, TYPE_CODE) \ #define DEFINE_CLUSTER_BINDINGS(T, N, M, U, TYPE_CODE) \
define_ClusterFile<T, N, M, U>(m, "Cluster" #N "x" #M #TYPE_CODE); \ define_ClusterFile<T, N, M, U>(m, "Cluster" #N "x" #M #TYPE_CODE); \
define_ClusterVector<T, N, M, U>(m, "Cluster" #N "x" #M #TYPE_CODE); \ define_ClusterVector<T, N, M, U>(m, "Cluster" #N "x" #M #TYPE_CODE); \
define_Cluster<T, N, M, U>(m, #N "x" #M #TYPE_CODE); \
register_calculate_2x2eta<T, N, M, U>(m); \
define_2x2_reduction<T, N, M, U>(m); \
reduce_to_2x2<T, N, M, U>(m);
#define DEFINE_BINDINGS_CLUSTERFINDER(T, N, M, U, TYPE_CODE) \
define_ClusterFinder<T, N, M, U>(m, "Cluster" #N "x" #M #TYPE_CODE); \ define_ClusterFinder<T, N, M, U>(m, "Cluster" #N "x" #M #TYPE_CODE); \
define_ClusterFinderMT<T, N, M, U>(m, "Cluster" #N "x" #M #TYPE_CODE); \ define_ClusterFinderMT<T, N, M, U>(m, "Cluster" #N "x" #M #TYPE_CODE); \
define_ClusterFileSink<T, N, M, U>(m, "Cluster" #N "x" #M #TYPE_CODE); \ define_ClusterFileSink<T, N, M, U>(m, "Cluster" #N "x" #M #TYPE_CODE); \
define_ClusterCollector<T, N, M, U>(m, "Cluster" #N "x" #M #TYPE_CODE); \ define_ClusterCollector<T, N, M, U>(m, "Cluster" #N "x" #M #TYPE_CODE);
define_Cluster<T, N, M, U>(m, #N "x" #M #TYPE_CODE); \
register_calculate_eta<T, N, M, U>(m); \
define_2x2_reduction<T, N, M, U>(m); \
reduce_to_2x2<T, N, M, U>(m);
PYBIND11_MODULE(_aare, m) { PYBIND11_MODULE(_aare, m) {
define_file_io_bindings(m); define_file_io_bindings(m);
@@ -88,7 +91,23 @@ PYBIND11_MODULE(_aare, m) {
DEFINE_CLUSTER_BINDINGS(double, 9, 9, uint16_t, d); DEFINE_CLUSTER_BINDINGS(double, 9, 9, uint16_t, d);
DEFINE_CLUSTER_BINDINGS(float, 9, 9, uint16_t, f); DEFINE_CLUSTER_BINDINGS(float, 9, 9, uint16_t, f);
// DEFINE_CLUSTER_BINDINGS(double, 2, 1, uint16_t, d); DEFINE_CLUSTER_BINDINGS(int16_t, 3, 3, uint16_t, i16);
DEFINE_BINDINGS_CLUSTERFINDER(int, 3, 3, uint16_t, i);
DEFINE_BINDINGS_CLUSTERFINDER(double, 3, 3, uint16_t, d);
DEFINE_BINDINGS_CLUSTERFINDER(float, 3, 3, uint16_t, f);
DEFINE_BINDINGS_CLUSTERFINDER(int, 5, 5, uint16_t, i);
DEFINE_BINDINGS_CLUSTERFINDER(double, 5, 5, uint16_t, d);
DEFINE_BINDINGS_CLUSTERFINDER(float, 5, 5, uint16_t, f);
DEFINE_BINDINGS_CLUSTERFINDER(int, 7, 7, uint16_t, i);
DEFINE_BINDINGS_CLUSTERFINDER(double, 7, 7, uint16_t, d);
DEFINE_BINDINGS_CLUSTERFINDER(float, 7, 7, uint16_t, f);
DEFINE_BINDINGS_CLUSTERFINDER(int, 9, 9, uint16_t, i);
DEFINE_BINDINGS_CLUSTERFINDER(double, 9, 9, uint16_t, d);
DEFINE_BINDINGS_CLUSTERFINDER(float, 9, 9, uint16_t, f);
define_3x3_reduction<int, 3, 3, uint16_t>(m); define_3x3_reduction<int, 3, 3, uint16_t>(m);
define_3x3_reduction<double, 3, 3, uint16_t>(m); define_3x3_reduction<double, 3, 3, uint16_t>(m);
@@ -116,10 +135,30 @@ PYBIND11_MODULE(_aare, m) {
reduce_to_3x3<double, 9, 9, uint16_t>(m); reduce_to_3x3<double, 9, 9, uint16_t>(m);
reduce_to_3x3<float, 9, 9, uint16_t>(m); reduce_to_3x3<float, 9, 9, uint16_t>(m);
register_calculate_3x3eta<int, 3, 3, uint16_t>(m);
register_calculate_3x3eta<double, 3, 3, uint16_t>(m);
register_calculate_3x3eta<float, 3, 3, uint16_t>(m);
register_calculate_3x3eta<int16_t, 3, 3, uint16_t>(m);
using Sum_index_pair_d = Sum_index_pair<double, corner>; using Sum_index_pair_d = Sum_index_pair<double, corner>;
PYBIND11_NUMPY_DTYPE(Sum_index_pair_d, sum, index); PYBIND11_NUMPY_DTYPE(Sum_index_pair_d, sum, index);
using Sum_index_pair_f = Sum_index_pair<float, corner>; using Sum_index_pair_f = Sum_index_pair<float, corner>;
PYBIND11_NUMPY_DTYPE(Sum_index_pair_f, sum, index); PYBIND11_NUMPY_DTYPE(Sum_index_pair_f, sum, index);
using Sum_index_pair_i = Sum_index_pair<int, corner>; using Sum_index_pair_i = Sum_index_pair<int, corner>;
PYBIND11_NUMPY_DTYPE(Sum_index_pair_i, sum, index); PYBIND11_NUMPY_DTYPE(Sum_index_pair_i, sum, index);
using eta_d = Eta2<double>;
PYBIND11_NUMPY_DTYPE(eta_d, x, y, c, sum);
using eta_i = Eta2<int>;
PYBIND11_NUMPY_DTYPE(eta_i, x, y, c, sum);
using eta_f = Eta2<float>;
PYBIND11_NUMPY_DTYPE(eta_f, x, y, c, sum);
using eta_i16 = Eta2<int16_t>;
PYBIND11_NUMPY_DTYPE(eta_i16, x, y, c, sum);
define_corner_enum(m);
define_eta<float>(m, "f");
define_eta<double>(m, "d");
define_eta<int>(m, "i");
define_eta<int16_t>(m, "i16");
} }
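With the `Eta2` numpy dtypes and the `corner` enum registered above, the cluster-vector overloads come back as structured numpy arrays whose fields can be indexed directly. A small sketch based on the Python test later in this diff:

```python
import numpy as np
from aare import _aare, calculate_eta2

cv = _aare.ClusterVector_Cluster3x3i()
cv.push_back(_aare.Cluster3x3i(19, 22, np.ones(9, dtype=np.int32)))

etas = calculate_eta2(cv)                 # structured array with fields x, y, c, sum
assert etas[0]["x"] == 0.5 and etas[0]["y"] == 0.5
assert etas[0]["c"] == 0                  # integer value of corner.cTopLeft
assert etas[0]["sum"] == 4
```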

File diff suppressed because one or more lines are too long


@@ -3,6 +3,7 @@ import pytest
import numpy as np import numpy as np
from aare import _aare #import the C++ module from aare import _aare #import the C++ module
from aare import corner
from conftest import test_data_path from conftest import test_data_path
@@ -40,52 +41,49 @@ def test_Interpolator():
xbins = np.linspace(0, 5, 30, dtype=np.float64) xbins = np.linspace(0, 5, 30, dtype=np.float64)
ybins = np.linspace(0, 5, 30, dtype=np.float64) ybins = np.linspace(0, 5, 30, dtype=np.float64)
etacube = np.zeros(shape=[30, 30, 20], dtype=np.float64) etacube = np.zeros(shape=[29, 29, 19], dtype=np.float64)
interpolator = _aare.Interpolator(etacube, xbins, ybins, ebins) interpolator = _aare.Interpolator(etacube, xbins, ybins, ebins)
assert interpolator.get_ietax().shape == (30,30,20) assert interpolator.get_ietax().shape == (30,30,20)
assert interpolator.get_ietay().shape == (30,30,20) assert interpolator.get_ietay().shape == (30,30,20)
clustervector = _aare.ClusterVector_Cluster3x3i() clustervector = _aare.ClusterVector_Cluster3x3i()
cluster = _aare.Cluster3x3i(0,0, np.ones(9, dtype=np.int32)) cluster = _aare.Cluster3x3i(1,1, np.ones(9, dtype=np.int32))
clustervector.push_back(cluster) clustervector.push_back(cluster)
interpolated_photons = interpolator.interpolate(clustervector) interpolated_photons = interpolator.interpolate(clustervector)
assert interpolated_photons.size == 1 assert interpolated_photons.size == 1
assert interpolated_photons[0]["x"] == 0 assert interpolated_photons[0]["x"] == 0.5
assert interpolated_photons[0]["y"] == 0 assert interpolated_photons[0]["y"] == 0.5
assert interpolated_photons[0]["energy"] == 4 #eta_sum = 4, dx, dy = -1,-1 m_ietax = 0, m_ietay = 0 assert interpolated_photons[0]["energy"] == 4 #eta_sum = 4, dx, dy = -1,-1 m_ietax = 0, m_ietay = 0
clustervector = _aare.ClusterVector_Cluster2x2i() clustervector = _aare.ClusterVector_Cluster2x2i()
cluster = _aare.Cluster2x2i(0,0, np.ones(4, dtype=np.int32)) cluster = _aare.Cluster2x2i(1,1, np.ones(4, dtype=np.int32))
clustervector.push_back(cluster) clustervector.push_back(cluster)
interpolated_photons = interpolator.interpolate(clustervector) interpolated_photons = interpolator.interpolate(clustervector)
assert interpolated_photons.size == 1 assert interpolated_photons.size == 1
assert interpolated_photons[0]["x"] == 0 assert interpolated_photons[0]["x"] == 0.5
assert interpolated_photons[0]["y"] == 0 assert interpolated_photons[0]["y"] == 0.5
assert interpolated_photons[0]["energy"] == 4 assert interpolated_photons[0]["energy"] == 4
def test_calculate_eta(): def test_calculate_eta():
"""Calculate Eta""" """Calculate Eta"""
clusters = _aare.ClusterVector_Cluster3x3i() cluster = _aare.Cluster3x3i(0,0, np.ones(9, dtype=np.int32))
clusters.push_back(_aare.Cluster3x3i(0,0, np.ones(9, dtype=np.int32)))
clusters.push_back(_aare.Cluster3x3i(0,0, np.array([1,1,1,2,2,2,3,3,3])))
eta2 = _aare.calculate_eta2(clusters) eta2 = _aare.calculate_eta2(cluster)
assert eta2.shape == (2,2) assert eta2.x == 0.5
assert eta2[0,0] == 0.5 assert eta2.y == 0.5
assert eta2[0,1] == 0.5 assert eta2.c == corner.cTopLeft
assert eta2[1,0] == 0.5 assert eta2.sum == 4
assert eta2[1,1] == 0.4 #2/5
def test_max_sum(): def test_max_sum():
@@ -119,7 +117,7 @@ def test_2x2_reduction():
reduced_cluster = _aare.reduce_to_2x2(cluster) reduced_cluster = _aare.reduce_to_2x2(cluster)
assert reduced_cluster.x == 4 assert reduced_cluster.x == 5
assert reduced_cluster.y == 5 assert reduced_cluster.y == 5
assert (reduced_cluster.data == np.array([[2, 3], [2, 2]], dtype=np.int32)).all() assert (reduced_cluster.data == np.array([[2, 3], [2, 2]], dtype=np.int32)).all()
@@ -131,9 +129,9 @@ def test_3x3_reduction():
reduced_cluster = _aare.reduce_to_3x3(cluster) reduced_cluster = _aare.reduce_to_3x3(cluster)
assert reduced_cluster.x == 4 assert reduced_cluster.x == 5
assert reduced_cluster.y == 5 assert reduced_cluster.y == 5
assert (reduced_cluster.data == np.array([[1.0, 2.0, 1.0], [2.0, 2.0, 3.0], [1.0, 2.0, 1.0]], dtype=np.double)).all() assert (reduced_cluster.data == np.array([[2.0, 1.0, 1.0], [2.0, 3.0, 1.0], [2.0, 1.0, 1.0]], dtype=np.double)).all()


@@ -6,7 +6,7 @@ import time
from pathlib import Path from pathlib import Path
import pickle import pickle
from aare import ClusterFile, ClusterVector from aare import ClusterFile, ClusterVector, calculate_eta2
from aare import _aare from aare import _aare
from conftest import test_data_path from conftest import test_data_path
@@ -45,6 +45,19 @@ def test_max_2x2_sum():
assert max_2x2[0]["index"] == 2 assert max_2x2[0]["index"] == 2
def test_eta2():
"""calculate eta2"""
cv = _aare.ClusterVector_Cluster3x3i()
cv.push_back(_aare.Cluster3x3i(19, 22, np.ones(9, dtype=np.int32)))
assert cv.size == 1
eta2 = calculate_eta2(cv)
assert eta2.size == 1
assert eta2[0]["x"] == 0.5
assert eta2[0]["y"] == 0.5
assert eta2[0]["c"] == 0
assert eta2[0]["sum"] == 4
def test_make_a_hitmap_from_cluster_vector(): def test_make_a_hitmap_from_cluster_vector():
cv = _aare.ClusterVector_Cluster3x3i() cv = _aare.ClusterVector_Cluster3x3i()
@@ -75,11 +88,11 @@ def test_2x2_reduction():
reduced_cv = np.array(_aare.reduce_to_2x2(cv), copy=False) reduced_cv = np.array(_aare.reduce_to_2x2(cv), copy=False)
assert reduced_cv.size == 2 assert reduced_cv.size == 2
assert reduced_cv[0]["x"] == 4 assert reduced_cv[0]["x"] == 5
assert reduced_cv[0]["y"] == 5 assert reduced_cv[0]["y"] == 5
assert (reduced_cv[0]["data"] == np.array([[2, 3], [2, 2]], dtype=np.int32)).all() assert (reduced_cv[0]["data"] == np.array([[2, 3], [2, 2]], dtype=np.int32)).all()
assert reduced_cv[1]["x"] == 4 assert reduced_cv[1]["x"] == 5
assert reduced_cv[1]["y"] == 6 assert reduced_cv[1]["y"] == 5
assert (reduced_cv[1]["data"] == np.array([[2, 2], [2, 3]], dtype=np.int32)).all() assert (reduced_cv[1]["data"] == np.array([[2, 2], [2, 3]], dtype=np.int32)).all()
@@ -94,6 +107,6 @@ def test_3x3_reduction():
reduced_cv = np.array(_aare.reduce_to_3x3(cv), copy=False) reduced_cv = np.array(_aare.reduce_to_3x3(cv), copy=False)
assert reduced_cv.size == 2 assert reduced_cv.size == 2
assert reduced_cv[0]["x"] == 4 assert reduced_cv[0]["x"] == 5
assert reduced_cv[0]["y"] == 5 assert reduced_cv[0]["y"] == 5
assert (reduced_cv[0]["data"] == np.array([[1.0, 2.0, 1.0], [2.0, 2.0, 3.0], [1.0, 2.0, 1.0]], dtype=np.double)).all() assert (reduced_cv[0]["data"] == np.array([[2.0, 1.0, 1.0], [2.0, 3.0, 1.0], [2.0, 1.0, 1.0]], dtype=np.double)).all()


@@ -25,10 +25,6 @@ def create_photon_hit_with_gaussian_distribution(mean, covariance_matrix, data_p
probability_values = gaussian.pdf(data_points) probability_values = gaussian.pdf(data_points)
return (probability_values.reshape(X.shape)).round() #python bindings only support frame types of uint16_t return (probability_values.reshape(X.shape)).round() #python bindings only support frame types of uint16_t
def photon_hit_in_euclidean_space(cluster_center, pixels_per_superpixel, photon_hit):
scaled_photon_hit_x = cluster_center - (1 - photon_hit[0][0])*pixels_per_superpixel*pixel_width
scaled_photon_hit_y = cluster_center - (1 - photon_hit[0][1])*pixels_per_superpixel*pixel_width
return (scaled_photon_hit_x, scaled_photon_hit_y)
def create_2x2cluster_from_frame(frame, pixels_per_superpixel): def create_2x2cluster_from_frame(frame, pixels_per_superpixel):
return Cluster2x2d(1, 1, np.array([frame[0:pixels_per_superpixel, 0:pixels_per_superpixel].sum(), return Cluster2x2d(1, 1, np.array([frame[0:pixels_per_superpixel, 0:pixels_per_superpixel].sum(),
@@ -49,10 +45,10 @@ def create_3x3cluster_from_frame(frame, pixels_per_superpixel):
frame[2*pixels_per_superpixel:3*pixels_per_superpixel, 2*pixels_per_superpixel:3*pixels_per_superpixel].sum()], dtype=np.float64)) frame[2*pixels_per_superpixel:3*pixels_per_superpixel, 2*pixels_per_superpixel:3*pixels_per_superpixel].sum()], dtype=np.float64))
def calculate_eta_distribution(num_frames, pixels_per_superpixel, random_number_generator, cluster_2x2 = True): def calculate_eta_distribution(num_frames, pixels_per_superpixel, random_number_generator, bin_edges_x = bh.axis.Regular(100, -0.2, 1.2), bin_edges_y = bh.axis.Regular(100, -0.2, 1.2), cluster_2x2 = True):
hist = bh.Histogram( hist = bh.Histogram(
bh.axis.Regular(100, -0.2, 1.2), bin_edges_x,
bh.axis.Regular(100, -0.2, 1.2), bh.axis.Regular(1, 0, num_pixels*num_pixels*1/(variance*2*np.pi))) bin_edges_y, bh.axis.Regular(1, 0, num_pixels*num_pixels*1/(variance*2*np.pi)))
for _ in range(0, num_frames): for _ in range(0, num_frames):
mean_x = random_number_generator.uniform(pixels_per_superpixel*pixel_width, 2*pixels_per_superpixel*pixel_width) mean_x = random_number_generator.uniform(pixels_per_superpixel*pixel_width, 2*pixels_per_superpixel*pixel_width)
@@ -67,7 +63,7 @@ def calculate_eta_distribution(num_frames, pixels_per_superpixel, random_number_
cluster = create_3x3cluster_from_frame(frame, pixels_per_superpixel) cluster = create_3x3cluster_from_frame(frame, pixels_per_superpixel)
eta2 = calculate_eta2(cluster) eta2 = calculate_eta2(cluster)
hist.fill(eta2[0], eta2[1], eta2[2]) hist.fill(eta2.x, eta2.y, eta2.sum)
return hist return hist
@@ -86,9 +82,9 @@ def test_interpolation_of_2x2_cluster(test_data_path):
pixels_per_superpixel = int(num_pixels*0.5) pixels_per_superpixel = int(num_pixels*0.5)
random_number_generator = np.random.default_rng(42) random_number_generator = np.random.default_rng(42)
eta_distribution = calculate_eta_distribution(num_frames, pixels_per_superpixel, random_number_generator) eta_distribution = calculate_eta_distribution(num_frames, pixels_per_superpixel, random_number_generator, bin_edges_x = bh.axis.Regular(100, -0.1, 0.6), bin_edges_y = bh.axis.Regular(100, -0.1, 0.6))
interpolation = Interpolator(eta_distribution, eta_distribution.axes[0].edges[:-1], eta_distribution.axes[1].edges[:-1], eta_distribution.axes[2].edges[:-1]) interpolation = Interpolator(eta_distribution, eta_distribution.axes[0].edges, eta_distribution.axes[1].edges, eta_distribution.axes[2].edges)
#actual photon hit #actual photon hit
mean = 1.2*pixels_per_superpixel*pixel_width mean = 1.2*pixels_per_superpixel*pixel_width
@@ -105,7 +101,7 @@ def test_interpolation_of_2x2_cluster(test_data_path):
cluster_center = 1.5*pixels_per_superpixel*pixel_width cluster_center = 1.5*pixels_per_superpixel*pixel_width
scaled_photon_hit = photon_hit_in_euclidean_space(cluster_center, pixels_per_superpixel, interpolated_photon) scaled_photon_hit = (interpolated_photon[0][0]*pixels_per_superpixel*pixel_width, interpolated_photon[0][1]*pixels_per_superpixel*pixel_width)
assert (np.linalg.norm(scaled_photon_hit - mean) < np.linalg.norm(np.array([cluster_center, cluster_center] - mean))) assert (np.linalg.norm(scaled_photon_hit - mean) < np.linalg.norm(np.array([cluster_center, cluster_center] - mean)))
@@ -124,13 +120,14 @@ def test_interpolation_of_3x3_cluster(test_data_path):
num_frames = 1000 num_frames = 1000
pixels_per_superpixel = int(num_pixels/3) pixels_per_superpixel = int(num_pixels/3)
random_number_generator = np.random.default_rng(42) random_number_generator = np.random.default_rng(42)
eta_distribution = calculate_eta_distribution(num_frames, pixels_per_superpixel, random_number_generator, False) eta_distribution = calculate_eta_distribution(num_frames, pixels_per_superpixel, random_number_generator, bin_edges_x = bh.axis.Regular(100, -0.1, 1.1), bin_edges_y = bh.axis.Regular(100, -0.1, 1.1), cluster_2x2 = False)
interpolation = Interpolator(eta_distribution, eta_distribution.axes[0].edges[:-1], eta_distribution.axes[1].edges[:-1], eta_distribution.axes[2].edges[:-1]) interpolation = Interpolator(eta_distribution, eta_distribution.axes[0].edges, eta_distribution.axes[1].edges, eta_distribution.axes[2].edges)
#actual photon hit #actual photon hit
mean = 1.2*pixels_per_superpixel*pixel_width mean_x = (1 + 0.8)*pixels_per_superpixel*pixel_width
mean = np.array([mean, mean]) mean_y = (1 + 0.2)*pixels_per_superpixel*pixel_width
mean = np.array([mean_x, mean_y])
frame = create_photon_hit_with_gaussian_distribution(mean, covariance_matrix, data_points) frame = create_photon_hit_with_gaussian_distribution(mean, covariance_matrix, data_points)
cluster = create_3x3cluster_from_frame(frame, pixels_per_superpixel) cluster = create_3x3cluster_from_frame(frame, pixels_per_superpixel)
@@ -143,7 +140,7 @@ def test_interpolation_of_3x3_cluster(test_data_path):
cluster_center = 1.5*pixels_per_superpixel*pixel_width cluster_center = 1.5*pixels_per_superpixel*pixel_width
scaled_photon_hit = photon_hit_in_euclidean_space(cluster_center, pixels_per_superpixel, interpolated_photon) scaled_photon_hit = (interpolated_photon[0][0]*pixels_per_superpixel*pixel_width, interpolated_photon[0][1]*pixels_per_superpixel*pixel_width)
assert (np.linalg.norm(scaled_photon_hit - mean) < np.linalg.norm(np.array([cluster_center, cluster_center] - mean))) assert (np.linalg.norm(scaled_photon_hit - mean) < np.linalg.norm(np.array([cluster_center, cluster_center] - mean)))

File diff suppressed because one or more lines are too long


@@ -0,0 +1,291 @@
# Test script for interpolation on simulated data
import pytest
import pytest_check as check
import numpy as np
import boost_histogram as bh
import pickle
from scipy.stats import multivariate_normal
from aare import Interpolator, calculate_eta2, calculate_cross_eta3, calculate_full_eta2, calculate_eta3
from aare import ClusterFile
from conftest import test_data_path
## TODO: is there something like a test fixture setup/teardown in pytest?
def calculate_eta_distribution(cv, calculate_eta, edges_x=[-0.5,0.5], edges_y=[-0.5,0.5], nbins = 101):
energy_bins = bh.axis.Regular(1, 0, 16) # max and min energy of simulated photons
eta_distribution = bh.Histogram(
bh.axis.Regular(nbins, edges_x[0], edges_x[1]),
bh.axis.Regular(nbins, edges_y[0], edges_y[1]), energy_bins)
eta = calculate_eta(cv)
eta_distribution.fill(eta['x'], eta['y'], eta['sum'])
return eta_distribution
@pytest.fixture
def load_data(test_data_path):
"""Load simulated cluster data and ground truth positions"""
f = ClusterFile(test_data_path / "clust" / "simulated_clusters.clust", dtype=np.float64, mode="r")
cv = f.read_frame()
ground_truths = np.load(test_data_path / "interpolation/ground_truth_simulated.npy")
return cv, ground_truths
@pytest.mark.withdata
def test_eta2_interpolation(load_data, check):
"""Test eta2 interpolation on simulated data"""
cv, ground_truths = load_data
num_bins = 201
eta_distribution = calculate_eta_distribution(cv, calculate_eta2, edges_x=[-0.1,1.1], edges_y=[-0.1,1.1], nbins=num_bins)
interpolator = Interpolator(eta_distribution, eta_distribution.axes[0].edges, eta_distribution.axes[1].edges, eta_distribution.axes[2].edges)
assert interpolator.get_ietax().shape == (num_bins,num_bins,1)
assert interpolator.get_ietay().shape == (num_bins,num_bins,1)
interpolated_photons = interpolator.interpolate(cv)
assert interpolated_photons.size == cv.size
interpolated_photons["x"] += 1.0 #groud truth label uses 5x5 clusters
interpolated_photons["y"] += 1.0
residuals_interpolated_x = abs(ground_truths[:, 0] - interpolated_photons["x"])
residuals_interpolated_y = abs(ground_truths[:, 1] - interpolated_photons["y"])
"""
residuals_center_pixel_x = abs(ground_truths[:, 0] - 2.5)
residuals_center_pixel_y = abs(ground_truths[:, 1] - 2.5)
# interpolation needs to perform better than center pixel assignment - not true for photon close to the center
assert (residuals_interpolated_x < residuals_center_pixel_x).all()
assert (residuals_interpolated_y < residuals_center_pixel_y).all()
"""
# check within photon hit pixel for all
with check:
assert np.allclose(interpolated_photons["x"], ground_truths[:, 0], atol=5e-1)
with check:
assert np.allclose(interpolated_photons["y"], ground_truths[:, 1], atol=5e-1)
# check mean and std of residuals
with check:
assert residuals_interpolated_y.mean() <= 0.1
with check:
assert residuals_interpolated_x.mean() <= 0.1
with check:
assert residuals_interpolated_x.std() <= 0.05
with check:
assert residuals_interpolated_y.std() <= 0.05
@pytest.mark.withdata
def test_eta2_interpolation_rosenblatt(load_data, check):
"""Test eta2 interpolation on simulated data using Rosenblatt transform"""
cv, ground_truths = load_data
num_bins = 201
eta_distribution = calculate_eta_distribution(cv, calculate_eta2, edges_x=[-0.1,1.1], edges_y=[-0.1,1.1], nbins=num_bins)
interpolator = Interpolator(eta_distribution.axes[0].edges, eta_distribution.axes[1].edges, eta_distribution.axes[2].edges)
interpolator.rosenblatttransform(eta_distribution)
assert interpolator.get_ietax().shape == (num_bins,num_bins,1)
assert interpolator.get_ietay().shape == (num_bins,num_bins,1)
interpolated_photons = interpolator.interpolate(cv)
assert interpolated_photons.size == cv.size
interpolated_photons["x"] += 1.0 #groud truth label uses 5x5 clusters
interpolated_photons["y"] += 1.0
residuals_interpolated_x = abs(ground_truths[:, 0] - interpolated_photons["x"])
residuals_interpolated_y = abs(ground_truths[:, 1] - interpolated_photons["y"])
"""
residuals_center_pixel_x = abs(ground_truths[:, 0] - 2.5)
residuals_center_pixel_y = abs(ground_truths[:, 1] - 2.5)
# interpolation needs to perform better than center pixel assignment - not true for photon close to the center
assert (residuals_interpolated_x < residuals_center_pixel_x).all()
assert (residuals_interpolated_y < residuals_center_pixel_y).all()
"""
# check within photon hit pixel for all
with check:
assert np.allclose(interpolated_photons["x"], ground_truths[:, 0], atol=5e-1)
with check:
assert np.allclose(interpolated_photons["y"], ground_truths[:, 1], atol=5e-1)
# check mean and std of residuals
with check:
assert residuals_interpolated_y.mean() <= 0.1
with check:
assert residuals_interpolated_x.mean() <= 0.1
with check:
assert residuals_interpolated_x.std() <= 0.055 #performs slightly worse
with check:
assert residuals_interpolated_y.std() <= 0.055 #performs slightly worse
@pytest.mark.withdata
def test_cross_eta_interpolation(load_data, check):
"""Test cross eta interpolation on simulated data"""
cv, ground_truths = load_data
num_bins = 201
eta_distribution = calculate_eta_distribution(cv, calculate_cross_eta3, edges_x=[-0.5,0.5], edges_y=[-0.5,0.5], nbins=num_bins)
interpolator = Interpolator(eta_distribution, eta_distribution.axes[0].edges, eta_distribution.axes[1].edges, eta_distribution.axes[2].edges)
assert interpolator.get_ietax().shape == (num_bins,num_bins,1)
assert interpolator.get_ietay().shape == (num_bins,num_bins,1)
interpolated_photons = interpolator.interpolate_cross_eta3(cv)
assert interpolated_photons.size == cv.size
interpolated_photons["x"] += 1.0 #groud truth label uses 5x5 clusters
interpolated_photons["y"] += 1.0
residuals_interpolated_x = abs(ground_truths[:, 0] - interpolated_photons["x"])
residuals_interpolated_y = abs(ground_truths[:, 1] - interpolated_photons["y"])
"""
residuals_center_pixel_x = abs(ground_truths[:, 0] - 2.5)
residuals_center_pixel_y = abs(ground_truths[:, 1] - 2.5)
# interpolation needs to perform better than center pixel assignment - not true for photon close to the center
assert (residuals_interpolated_x < residuals_center_pixel_x).all()
assert (residuals_interpolated_y < residuals_center_pixel_y).all()
"""
# check within photon hit pixel for all
# TODO: fails as eta_x = 0, eta_y = 0 is not leading to offset (0.5,0.5)
with check:
assert np.allclose(interpolated_photons["x"], ground_truths[:, 0], atol=5e-1)
with check:
assert np.allclose(interpolated_photons["y"], ground_truths[:, 1], atol=5e-1)
# check mean and std of residuals
with check:
assert residuals_interpolated_y.mean() <= 0.1
with check:
assert residuals_interpolated_x.mean() <= 0.1
with check:
assert residuals_interpolated_x.std() <= 0.05
with check:
assert residuals_interpolated_y.std() <= 0.05
@pytest.mark.withdata
def test_eta3_interpolation(load_data, check):
"""Test eta3 interpolation on simulated data"""
cv, ground_truths = load_data
num_bins = 201
eta_distribution = calculate_eta_distribution(cv, calculate_eta3, edges_x=[-0.5,0.5], edges_y=[-0.5,0.5], nbins=num_bins)
interpolator = Interpolator(eta_distribution, eta_distribution.axes[0].edges, eta_distribution.axes[1].edges, eta_distribution.axes[2].edges)
assert interpolator.get_ietax().shape == (num_bins,num_bins,1)
assert interpolator.get_ietay().shape == (num_bins,num_bins,1)
interpolated_photons = interpolator.interpolate_eta3(cv)
assert interpolated_photons.size == cv.size
interpolated_photons["x"] += 1.0 #groud truth label uses 5x5 clusters
interpolated_photons["y"] += 1.0
residuals_interpolated_x = abs(ground_truths[:, 0] - interpolated_photons["x"])
residuals_interpolated_y = abs(ground_truths[:, 1] - interpolated_photons["y"])
"""
residuals_center_pixel_x = abs(ground_truths[:, 0] - 2.5)
residuals_center_pixel_y = abs(ground_truths[:, 1] - 2.5)
# interpolation needs to perform better than center pixel assignment - not true for photon close to the center
assert (residuals_interpolated_x < residuals_center_pixel_x).all()
assert (residuals_interpolated_y < residuals_center_pixel_y).all()
"""
# check within photon hit pixel for all
# TODO: fails as eta_x = 0, eta_y = 0 is not leading to offset (0.5,0.5)
with check:
assert np.allclose(interpolated_photons["x"], ground_truths[:, 0], atol=5e-1)
with check:
assert np.allclose(interpolated_photons["y"], ground_truths[:, 1], atol=5e-1)
# check mean and std of residuals
with check:
assert residuals_interpolated_y.mean() <= 0.1
with check:
assert residuals_interpolated_x.mean() <= 0.1
with check:
assert residuals_interpolated_x.std() <= 0.05
with check:
assert residuals_interpolated_y.std() <= 0.05
@pytest.mark.withdata
def test_full_eta2_interpolation(load_data, check):
"""Test full eta2 interpolation on simulated data"""
cv, ground_truths = load_data
num_bins = 201
eta_distribution = calculate_eta_distribution(cv, calculate_full_eta2, edges_x=[-0.1,1.1], edges_y=[-0.1,1.1], nbins=num_bins)
interpolator = Interpolator(eta_distribution, eta_distribution.axes[0].edges, eta_distribution.axes[1].edges, eta_distribution.axes[2].edges)
assert interpolator.get_ietax().shape == (num_bins,num_bins,1)
assert interpolator.get_ietay().shape == (num_bins,num_bins,1)
interpolated_photons = interpolator.interpolate_full_eta2(cv)
assert interpolated_photons.size == cv.size
interpolated_photons["x"] += 1.0 #groud truth label uses 5x5 clusters
interpolated_photons["y"] += 1.0
residuals_interpolated_x = abs(ground_truths[:, 0] - interpolated_photons["x"])
residuals_interpolated_y = abs(ground_truths[:, 1] - interpolated_photons["y"])
"""
residuals_center_pixel_x = abs(ground_truths[:, 0] - 2.5)
residuals_center_pixel_y = abs(ground_truths[:, 1] - 2.5)
# interpolation needs to perform better than center pixel assignment - not true for photon close to the center
assert (residuals_interpolated_x < residuals_center_pixel_x).all()
assert (residuals_interpolated_y < residuals_center_pixel_y).all()
"""
# check within photon hit pixel for all
with check:
assert np.allclose(interpolated_photons["x"], ground_truths[:, 0], atol=5e-1)
with check:
assert np.allclose(interpolated_photons["y"], ground_truths[:, 1], atol=5e-1)
# check mean and std of residuals
with check:
assert residuals_interpolated_y.mean() <= 0.1
with check:
assert residuals_interpolated_x.mean() <= 0.1
with check:
assert residuals_interpolated_x.std() <= 0.05
with check:
assert residuals_interpolated_y.std() <= 0.05


@@ -21,21 +21,21 @@ using ClusterTypes =
auto get_test_parameters() { auto get_test_parameters() {
return GENERATE( return GENERATE(
std::make_tuple(ClusterTypes{Cluster<int, 2, 2>{0, 0, {1, 2, 3, 1}}}, std::make_tuple(ClusterTypes{Cluster<int, 2, 2>{0, 0, {1, 2, 1, 3}}},
Eta2<int>{2. / 3, 3. / 4, corner::cTopLeft, 7}), Eta2<int>{3. / 4, 3. / 5, corner::cTopLeft, 7}),
std::make_tuple( std::make_tuple(
ClusterTypes{Cluster<int, 3, 3>{0, 0, {1, 2, 3, 4, 5, 6, 1, 2, 7}}}, ClusterTypes{Cluster<int, 3, 3>{0, 0, {1, 2, 3, 4, 7, 6, 1, 2, 5}}},
Eta2<int>{6. / 11, 2. / 7, corner::cBottomRight, 20}), Eta2<int>{6. / 13, 2. / 9, corner::cBottomRight, 20}),
std::make_tuple(ClusterTypes{Cluster<int, 5, 5>{ std::make_tuple(ClusterTypes{Cluster<int, 5, 5>{
0, 0, {1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 9, 8, 0, 0, {1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 8, 9,
1, 4, 1, 6, 7, 8, 1, 1, 1, 1, 1, 1}}}, 1, 4, 1, 6, 7, 8, 1, 1, 1, 1, 1, 1}}},
Eta2<int>{8. / 17, 7. / 15, corner::cBottomLeft, 30}), Eta2<int>{9. / 17, 7. / 16, corner::cBottomLeft, 30}),
std::make_tuple( std::make_tuple(
ClusterTypes{Cluster<int, 4, 2>{0, 0, {1, 4, 7, 2, 5, 6, 4, 3}}}, ClusterTypes{Cluster<int, 4, 2>{0, 0, {1, 4, 4, 2, 5, 6, 7, 3}}},
Eta2<int>{4. / 10, 4. / 11, corner::cTopLeft, 21}), Eta2<int>{7. / 13, 7. / 11, corner::cTopLeft, 21}),
std::make_tuple( std::make_tuple(
ClusterTypes{Cluster<int, 2, 3>{0, 0, {1, 3, 2, 3, 4, 2}}}, ClusterTypes{Cluster<int, 2, 3>{0, 0, {1, 3, 2, 4, 3, 2}}},
Eta2<int>{3. / 5, 2. / 5, corner::cBottomLeft, 11})); Eta2<int>{4. / 6, 2. / 6, corner::cBottomLeft, 11}));
} }
TEST_CASE("compute_largest_2x2_subcluster", "[eta_calculation]") { TEST_CASE("compute_largest_2x2_subcluster", "[eta_calculation]") {
@@ -62,10 +62,22 @@ TEST_CASE("calculate_eta2", "[eta_calculation]") {
CHECK(eta.sum == expected_eta.sum); CHECK(eta.sum == expected_eta.sum);
} }
// 3x3 cluster layout (rotated to match the cBottomLeft enum): TEST_CASE("calculate_eta2 after reduction", "[eta_calculation]") {
// 6, 7, 8
// 3, 4, 5 auto [cluster, expected_eta] = get_test_parameters();
// 0, 1, 2
auto eta = std::visit(
[](const auto &clustertype) {
auto reduced_cluster = reduce_to_2x2(clustertype);
return calculate_eta2(reduced_cluster);
},
cluster);
CHECK(eta.x == expected_eta.x);
CHECK(eta.y == expected_eta.y);
CHECK(eta.c == expected_eta.c);
CHECK(eta.sum == expected_eta.sum);
}
TEST_CASE("Calculate eta2 for a 3x3 int32 cluster with the largest 2x2 sum in " TEST_CASE("Calculate eta2 for a 3x3 int32 cluster with the largest 2x2 sum in "
"the bottom left", "the bottom left",
@@ -75,29 +87,25 @@ TEST_CASE("Calculate eta2 for a 3x3 int32 cluster with the largest 2x2 sum in "
Cluster<int32_t, 3, 3> cl; Cluster<int32_t, 3, 3> cl;
cl.x = 0; cl.x = 0;
cl.y = 0; cl.y = 0;
cl.data[0] = 30; cl.data[0] = 8;
cl.data[1] = 23; cl.data[1] = 2;
cl.data[2] = 5; cl.data[2] = 5;
cl.data[3] = 20; cl.data[3] = 20;
cl.data[4] = 50; cl.data[4] = 50;
cl.data[5] = 3; cl.data[5] = 3;
cl.data[6] = 8; cl.data[6] = 30;
cl.data[7] = 2; cl.data[7] = 23;
cl.data[8] = 3; cl.data[8] = 3;
// 8, 2, 3
// 20, 50, 3
// 30, 23, 5
auto eta = calculate_eta2(cl); auto eta = calculate_eta2(cl);
CHECK(eta.c == corner::cBottomLeft); CHECK(eta.c == corner::cBottomLeft);
CHECK(eta.x == 50.0 / (20 + 50)); // 4/(3+4) CHECK(eta.x == 50.0 / (20 + 50));
CHECK(eta.y == 50.0 / (23 + 50)); // 4/(1+4) CHECK(eta.y == 23.0 / (23 + 50));
CHECK(eta.sum == 30 + 23 + 20 + 50); CHECK(eta.sum == 30 + 23 + 20 + 50);
} }
TEST_CASE("Calculate eta2 for a 3x3 int32 cluster with the largest 2x2 sum in " TEST_CASE("Calculate eta2 for a 3x3 int32 cluster with the largest 2x2 sum in "
"the top left", "the top right",
"[eta_calculation]") { "[eta_calculation]") {
// Create a 3x3 cluster // Create a 3x3 cluster
@@ -106,21 +114,67 @@ TEST_CASE("Calculate eta2 for a 3x3 int32 cluster with the largest 2x2 sum in "
cl.y = 0; cl.y = 0;
cl.data[0] = 8; cl.data[0] = 8;
cl.data[1] = 12; cl.data[1] = 12;
cl.data[2] = 5; cl.data[2] = 82;
cl.data[3] = 77; cl.data[3] = 77;
cl.data[4] = 80; cl.data[4] = 80;
cl.data[5] = 3; cl.data[5] = 91;
cl.data[6] = 82; cl.data[6] = 5;
cl.data[7] = 91; cl.data[7] = 3;
cl.data[8] = 3; cl.data[8] = 3;
// 82, 91, 3
// 77, 80, 3
// 8, 12, 5
auto eta = calculate_eta2(cl); auto eta = calculate_eta2(cl);
CHECK(eta.c == corner::cTopLeft); CHECK(eta.c == corner::cTopRight);
CHECK(eta.x == 80. / (77 + 80)); // 4/(3+4) CHECK(eta.x == 91. / (80 + 91));
CHECK(eta.y == 91.0 / (91 + 80)); // 7/(7+4) CHECK(eta.y == 80.0 / (80 + 12));
CHECK(eta.sum == 77 + 80 + 82 + 91); CHECK(eta.sum == 12 + 80 + 82 + 91);
} }
auto get_test_parameters_fulleta2x2() {
return GENERATE(
std::make_tuple(ClusterTypes{Cluster<int, 2, 2>{0, 0, {1, 2, 1, 3}}},
Eta2<int>{5. / 7, 4. / 7, corner::cTopLeft, 7}),
std::make_tuple(
ClusterTypes{Cluster<int, 3, 3>{0, 0, {1, 2, 3, 4, 7, 6, 1, 2, 5}}},
Eta2<int>{11. / 20, 7. / 20, corner::cBottomRight, 20}),
std::make_tuple(ClusterTypes{Cluster<int, 5, 5>{
0, 0, {1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 8, 9,
1, 4, 1, 6, 7, 8, 1, 1, 1, 1, 1, 1}}},
Eta2<int>{16. / 30, 13. / 30, corner::cBottomLeft, 30}),
std::make_tuple(
ClusterTypes{Cluster<int, 4, 2>{0, 0, {1, 4, 4, 2, 5, 6, 7, 3}}},
Eta2<int>{11. / 21, 13. / 21, corner::cTopLeft, 21}),
std::make_tuple(
ClusterTypes{Cluster<int, 2, 3>{0, 0, {1, 3, 2, 4, 3, 2}}},
Eta2<int>{6. / 11, 5. / 11, corner::cBottomLeft, 11}));
}
TEST_CASE("Calculate full eta2", "[eta_calculation]") {
auto [test_cluster, expected_eta] = get_test_parameters_fulleta2x2();
auto eta = std::visit(
[](const auto &clustertype) {
return calculate_full_eta2(clustertype);
},
test_cluster);
CHECK(expected_eta.c == eta.c);
CHECK(expected_eta.sum == eta.sum);
CHECK(expected_eta.x == eta.x);
CHECK(expected_eta.y == eta.y);
}
TEST_CASE("Calculate full eta2 after reduction", "[eta_calculation]") {
auto [test_cluster, expected_eta] = get_test_parameters_fulleta2x2();
auto eta = std::visit(
[](const auto &clustertype) {
auto reduced_cluster = reduce_to_2x2(clustertype);
return calculate_full_eta2(reduced_cluster);
},
test_cluster);
CHECK(expected_eta.c == eta.c);
CHECK(expected_eta.sum == eta.sum);
CHECK(expected_eta.x == eta.x);
CHECK(expected_eta.y == eta.y);
}


@@ -15,7 +15,7 @@
using namespace aare; using namespace aare;
TEST_CASE("Test sum of Cluster", "[.cluster]") { TEST_CASE("Test sum of Cluster", "[cluster]") {
Cluster<int, 2, 2> cluster{0, 0, {1, 2, 3, 4}}; Cluster<int, 2, 2> cluster{0, 0, {1, 2, 3, 4}};
CHECK(cluster.sum() == 10); CHECK(cluster.sum() == 10);
@@ -27,33 +27,33 @@ using ClusterTypes = std::variant<Cluster<int, 2, 2>, Cluster<int, 3, 3>,
using ClusterTypesLargerThan2x2 = using ClusterTypesLargerThan2x2 =
std::variant<Cluster<int, 3, 3>, Cluster<int, 4, 4>, Cluster<int, 5, 5>>; std::variant<Cluster<int, 3, 3>, Cluster<int, 4, 4>, Cluster<int, 5, 5>>;
TEST_CASE("Test reduce to 2x2 Cluster", "[.cluster]") { TEST_CASE("Test reduce to 2x2 Cluster", "[cluster]") {
auto [cluster, expected_reduced_cluster] = GENERATE( auto [cluster, expected_reduced_cluster] = GENERATE(
std::make_tuple(ClusterTypes{Cluster<int, 2, 2>{5, 5, {1, 2, 3, 4}}}, std::make_tuple(ClusterTypes{Cluster<int, 2, 2>{5, 5, {1, 2, 3, 4}}},
Cluster<int, 2, 2>{4, 6, {1, 2, 3, 4}}), Cluster<int, 2, 2>{5, 5, {1, 2, 3, 4}}),
std::make_tuple( std::make_tuple(
ClusterTypes{Cluster<int, 3, 3>{5, 5, {1, 1, 1, 1, 3, 2, 1, 2, 2}}}, ClusterTypes{Cluster<int, 3, 3>{5, 5, {1, 1, 1, 1, 3, 2, 1, 2, 2}}},
Cluster<int, 2, 2>{5, 5, {3, 2, 2, 2}}), Cluster<int, 2, 2>{5, 5, {3, 2, 2, 2}}),
std::make_tuple( std::make_tuple(
ClusterTypes{Cluster<int, 3, 3>{5, 5, {1, 1, 1, 2, 3, 1, 2, 2, 1}}}, ClusterTypes{Cluster<int, 3, 3>{5, 5, {1, 1, 1, 2, 3, 1, 2, 2, 1}}},
Cluster<int, 2, 2>{4, 5, {2, 3, 2, 2}}), Cluster<int, 2, 2>{5, 5, {2, 3, 2, 2}}),
std::make_tuple( std::make_tuple(
ClusterTypes{Cluster<int, 3, 3>{5, 5, {2, 2, 1, 2, 3, 1, 1, 1, 1}}}, ClusterTypes{Cluster<int, 3, 3>{5, 5, {2, 2, 1, 2, 3, 1, 1, 1, 1}}},
Cluster<int, 2, 2>{4, 6, {2, 2, 2, 3}}), Cluster<int, 2, 2>{5, 5, {2, 2, 2, 3}}),
std::make_tuple( std::make_tuple(
ClusterTypes{Cluster<int, 3, 3>{5, 5, {1, 2, 2, 1, 3, 2, 1, 1, 1}}}, ClusterTypes{Cluster<int, 3, 3>{5, 5, {1, 2, 2, 1, 3, 2, 1, 1, 1}}},
Cluster<int, 2, 2>{5, 6, {2, 2, 3, 2}}), Cluster<int, 2, 2>{5, 5, {2, 2, 3, 2}}),
std::make_tuple(ClusterTypes{Cluster<int, 5, 5>{ std::make_tuple(ClusterTypes{Cluster<int, 5, 5>{
5, 5, {1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 3, 5, 5, {1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 3,
2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}}}, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}}},
Cluster<int, 2, 2>{5, 6, {2, 2, 3, 2}}), Cluster<int, 2, 2>{5, 5, {2, 2, 3, 2}}),
std::make_tuple(ClusterTypes{Cluster<int, 5, 5>{ std::make_tuple(ClusterTypes{Cluster<int, 5, 5>{
5, 5, {1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 2, 3, 5, 5, {1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 2, 3,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}}}, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}}},
Cluster<int, 2, 2>{4, 6, {2, 2, 2, 3}}), Cluster<int, 2, 2>{5, 5, {2, 2, 2, 3}}),
std::make_tuple( std::make_tuple(
ClusterTypes{Cluster<int, 2, 3>{5, 5, {2, 2, 3, 2, 1, 1}}}, ClusterTypes{Cluster<int, 2, 3>{5, 5, {2, 2, 3, 2, 1, 1}}},
Cluster<int, 2, 2>{4, 6, {2, 2, 3, 2}})); Cluster<int, 2, 2>{5, 5, {2, 2, 3, 2}}));
auto reduced_cluster = std::visit( auto reduced_cluster = std::visit(
[](const auto &clustertype) { return reduce_to_2x2(clustertype); }, [](const auto &clustertype) { return reduce_to_2x2(clustertype); },
@@ -66,7 +66,7 @@ TEST_CASE("Test reduce to 2x2 Cluster", "[.cluster]") {
expected_reduced_cluster.data.begin())); expected_reduced_cluster.data.begin()));
} }
TEST_CASE("Test reduce to 3x3 Cluster", "[.cluster]") { TEST_CASE("Test reduce to 3x3 Cluster", "[cluster]") {
auto [cluster, expected_reduced_cluster] = GENERATE( auto [cluster, expected_reduced_cluster] = GENERATE(
std::make_tuple(ClusterTypesLargerThan2x2{Cluster<int, 3, 3>{ std::make_tuple(ClusterTypesLargerThan2x2{Cluster<int, 3, 3>{
5, 5, {1, 1, 1, 1, 3, 1, 1, 1, 1}}}, 5, 5, {1, 1, 1, 1, 3, 1, 1, 1, 1}}},
@@ -74,23 +74,11 @@ TEST_CASE("Test reduce to 3x3 Cluster", "[.cluster]") {
std::make_tuple( std::make_tuple(
ClusterTypesLargerThan2x2{Cluster<int, 4, 4>{ ClusterTypesLargerThan2x2{Cluster<int, 4, 4>{
5, 5, {2, 2, 1, 1, 2, 2, 1, 1, 1, 1, 3, 1, 1, 1, 1, 1}}}, 5, 5, {2, 2, 1, 1, 2, 2, 1, 1, 1, 1, 3, 1, 1, 1, 1, 1}}},
Cluster<int, 3, 3>{4, 6, {2, 2, 1, 2, 2, 1, 1, 1, 3}}), Cluster<int, 3, 3>{5, 5, {2, 1, 1, 1, 3, 1, 1, 1, 1}}),
std::make_tuple(
ClusterTypesLargerThan2x2{Cluster<int, 4, 4>{
5, 5, {1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 3, 1, 1, 1, 1, 1}}},
Cluster<int, 3, 3>{5, 6, {1, 2, 2, 1, 2, 2, 1, 3, 1}}),
std::make_tuple(
ClusterTypesLargerThan2x2{Cluster<int, 4, 4>{
5, 5, {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 2, 1, 1, 2, 2}}},
Cluster<int, 3, 3>{5, 5, {1, 1, 1, 1, 3, 2, 1, 2, 2}}),
std::make_tuple(
ClusterTypesLargerThan2x2{Cluster<int, 4, 4>{
5, 5, {1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 1, 2, 2, 1, 1}}},
Cluster<int, 3, 3>{4, 5, {1, 1, 1, 2, 2, 3, 2, 2, 1}}),
std::make_tuple(ClusterTypesLargerThan2x2{Cluster<int, 5, 5>{ std::make_tuple(ClusterTypesLargerThan2x2{Cluster<int, 5, 5>{
5, 5, {1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 2, 2, 3, 5, 5, {1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 2, 2, 3,
1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1}}}, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1}}},
Cluster<int, 3, 3>{4, 5, {1, 2, 1, 2, 2, 3, 1, 2, 1}})); Cluster<int, 3, 3>{5, 5, {2, 1, 1, 2, 3, 1, 2, 1, 1}}));
auto reduced_cluster = std::visit( auto reduced_cluster = std::visit(
[](const auto &clustertype) { return reduce_to_3x3(clustertype); }, [](const auto &clustertype) { return reduce_to_3x3(clustertype); },

166
src/Interpolation.test.cpp Normal file

@@ -0,0 +1,166 @@
#include "aare/ClusterVector.hpp"
#include "aare/Interpolator.hpp"
#include "aare/NDArray.hpp"
#include <array>
#include <catch2/catch_all.hpp>
#include <catch2/catch_test_macros.hpp>
#include <iostream>
using namespace aare;
TEST_CASE("Test new Interpolation API", "[Interpolation]") {
NDArray<double, 1> energy_bins(std::array<ssize_t, 1>{2});
NDArray<double, 1> etax_bins(std::array<ssize_t, 1>{4}, 0.0);
NDArray<double, 1> etay_bins(std::array<ssize_t, 1>{4}, 0.0);
NDArray<double, 3> eta_distribution(std::array<ssize_t, 3>{3, 3, 1}, 0.0);
Interpolator interpolator(eta_distribution.view(), etax_bins.view(),
etay_bins.view(), energy_bins.view());
ClusterVector<Cluster<double, 3, 3>> cluster_vec{};
cluster_vec.push_back(Cluster<double, 3, 3>{
2, 2, std::array<double, 9>{1, 2, 2, 1, 4, 1, 1, 2, 1}});
auto photons =
interpolator.interpolate<calculate_eta2<double, 3, 3>>(cluster_vec);
CHECK(photons.size() == 1);
}
TEST_CASE("Test constructor", "[Interpolation]") {
NDArray<double, 1> energy_bins(std::array<ssize_t, 1>{2});
NDArray<double, 1> etax_bins(std::array<ssize_t, 1>{4}, 0.0);
NDArray<double, 1> etay_bins(std::array<ssize_t, 1>{4}, 0.0);
NDArray<double, 3> eta_distribution(std::array<ssize_t, 3>{3, 3, 1});
std::iota(eta_distribution.begin(), eta_distribution.end(), 1.0);
Interpolator interpolator(eta_distribution.view(), etax_bins.view(),
etay_bins.view(), energy_bins.view());
auto ietax = interpolator.get_ietax();
auto ietay = interpolator.get_ietay();
CHECK(ietax.shape(0) == 3);
CHECK(ietax.shape(1) == 3);
CHECK(ietax.shape(2) == 1);
CHECK(ietay.shape(0) == 3);
CHECK(ietay.shape(1) == 3);
CHECK(ietay.shape(2) == 1);
std::array<double, 9> expected_ietax{
0.0, 0.0, 0.0, 4.0 / 11.0, 5.0 / 13.0, 6.0 / 15.0, 1.0, 1.0, 1.0};
std::array<double, 9> expected_ietay{
0.0, 2.0 / 5.0, 1.0, 0.0, 5.0 / 11.0, 1.0, 0.0, 8.0 / 17.0, 1.0};
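// Worked derivation (assuming the iota fill is row-major with the last index
// fastest, i.e. the eta_distribution rows are {1,2,3}, {4,5,6}, {7,8,9}):
// ietax: column-wise cumulative sums are {1,5,12}, {2,7,15}, {3,9,18};
// shifting each column by its first-row value {1,2,3} and dividing by the
// shifted last row {11,13,15} gives rows {0,0,0}, {4/11,5/13,6/15}, {1,1,1}.
// ietay: row-wise cumulative sums are {1,3,6}, {4,9,15}, {7,15,24}; shifting
// by the first column {1,4,7} and dividing by the shifted last column
// {5,11,17} gives rows {0,2/5,1}, {0,5/11,1}, {0,8/17,1}.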
for (ssize_t i = 0; i < ietax.shape(0); i++) {
for (ssize_t j = 0; j < ietax.shape(1); j++) {
CHECK(ietax(i, j, 0) ==
Catch::Approx(expected_ietax[i * ietax.shape(1) + j]));
}
}
for (ssize_t i = 0; i < ietay.shape(0); i++) {
for (ssize_t j = 0; j < ietay.shape(1); j++) {
CHECK(ietay(i, j, 0) ==
Catch::Approx(expected_ietay[i * ietay.shape(1) + j]));
}
}
}
TEST_CASE("Test constructor with zero bins at borders", "[Interpolation]") {
NDArray<double, 1> energy_bins(std::array<ssize_t, 1>{2});
NDArray<double, 1> etax_bins(std::array<ssize_t, 1>{5}, 0.0);
NDArray<double, 1> etay_bins(std::array<ssize_t, 1>{5}, 0.0);
NDArray<double, 3> eta_distribution(std::array<ssize_t, 3>{4, 4, 1}, 0.0);
eta_distribution(1, 1, 0) = 1.0;
eta_distribution(1, 2, 0) = 2.0;
eta_distribution(2, 1, 0) = 3.0;
eta_distribution(2, 2, 0) = 4.0;
Interpolator interpolator(eta_distribution.view(), etax_bins.view(),
etay_bins.view(), energy_bins.view());
auto ietax = interpolator.get_ietax();
auto ietay = interpolator.get_ietay();
CHECK(ietax.shape(0) == 4);
CHECK(ietax.shape(1) == 4);
CHECK(ietax.shape(2) == 1);
CHECK(ietay.shape(0) == 4);
CHECK(ietay.shape(1) == 4);
CHECK(ietay.shape(2) == 1);
std::array<double, 16> expected_ietax{
0.0, 0.0, 0.0, 0.0, 0.0, 1.0 / 4.0, 2.0 / 6.0, 0.0,
0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0};
std::array<double, 16> expected_ietay{
0.0, 0.0, 0.0, 0.0, 0.0, 1.0 / 3.0, 1.0, 1.0,
0.0, 3.0 / 7.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0};
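// Columns of ietax (j = 0, 3) and rows of ietay (i = 0, 3) with zero total
// counts get a norm of 1 instead of 0, so those entries simply stay at 0.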
for (ssize_t i = 0; i < ietax.shape(0); i++) {
for (ssize_t j = 0; j < ietax.shape(1); j++) {
CHECK(ietax(i, j, 0) ==
Catch::Approx(expected_ietax[i * ietax.shape(1) + j]));
}
}
for (ssize_t i = 0; i < ietay.shape(0); i++) {
for (ssize_t j = 0; j < ietay.shape(1); j++) {
CHECK(ietay(i, j, 0) ==
Catch::Approx(expected_ietay[i * ietay.shape(1) + j]));
}
}
}
TEST_CASE("Test Rosenblatt", "[Interpolation]") {
NDArray<double, 1> energy_bins(std::array<ssize_t, 1>{2});
NDArray<double, 1> etax_bins(std::array<ssize_t, 1>{4}, 0.0);
NDArray<double, 1> etay_bins(std::array<ssize_t, 1>{4}, 0.0);
NDArray<double, 3> eta_distribution(std::array<ssize_t, 3>{3, 3, 1});
std::iota(eta_distribution.begin(), eta_distribution.end(), 1.0);
Interpolator interpolator(etax_bins.view(), etay_bins.view(),
energy_bins.view());
interpolator.rosenblatttransform(eta_distribution.view());
auto ietax = interpolator.get_ietax();
auto ietay = interpolator.get_ietay();
CHECK(ietax.shape(0) == 3);
CHECK(ietax.shape(1) == 3);
CHECK(ietax.shape(2) == 1);
CHECK(ietay.shape(0) == 3);
CHECK(ietay.shape(1) == 3);
CHECK(ietay.shape(2) == 1);
// marginal CDF of eta_x
std::array<double, 9> expected_ietax{
0.0, 0.0, 0.0, 15.0 / 39.0, 15.0 / 39.0, 15.0 / 39.0, 1.0, 1.0, 1.0};
// conditional CDF of eta_y
std::array<double, 9> expected_ietay{
0.0, 2.0 / 5.0, 1.0, 0.0, 5.0 / 11.0, 1.0, 0.0, 8.0 / 17.0, 1.0};
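// Worked derivation (assuming the same row-major iota fill as above):
// the row sums of eta_distribution are {6, 15, 24}; their cumulative sum with
// the first bin zeroed is {0, 15, 39}, normalized by 39 -> {0, 15/39, 1},
// which is broadcast over all eta_y bins as the marginal CDF of eta_x.
// The per-row cumulative sums {1,3,6}, {4,9,15}, {7,15,24}, shifted by their
// first entry and divided by the shifted last entry, give the conditional CDF
// rows {0, 2/5, 1}, {0, 5/11, 1}, {0, 8/17, 1}, i.e. exactly the
// expected_ietay values of the constructor test above.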
for (ssize_t i = 0; i < ietax.shape(0); i++) {
for (ssize_t j = 0; j < ietax.shape(1); j++) {
CHECK(ietax(i, j, 0) ==
Catch::Approx(expected_ietax[i * ietax.shape(1) + j]));
}
}
for (ssize_t i = 0; i < ietay.shape(0); i++) {
for (ssize_t j = 0; j < ietay.shape(1); j++) {
CHECK(ietay(i, j, 0) ==
Catch::Approx(expected_ietay[i * ietay.shape(1) + j]));
}
}
}
@@ -3,55 +3,145 @@
namespace aare {
+Interpolator::Interpolator(NDView<double, 1> xbins, NDView<double, 1> ybins,
+NDView<double, 1> ebins)
+: m_etabinsx(xbins), m_etabinsy(ybins), m_energy_bins(ebins){};
Interpolator::Interpolator(NDView<double, 3> etacube, NDView<double, 1> xbins,
NDView<double, 1> ybins, NDView<double, 1> ebins)
-: m_ietax(etacube), m_ietay(etacube), m_etabinsx(xbins), m_etabinsy(ybins),
-m_energy_bins(ebins) {
-if (etacube.shape(0) != xbins.size() || etacube.shape(1) != ybins.size() ||
-etacube.shape(2) != ebins.size()) {
+: m_etabinsx(xbins), m_etabinsy(ybins), m_energy_bins(ebins) {
+if (etacube.shape(0) + 1 != xbins.size() ||
+etacube.shape(1) + 1 != ybins.size() ||
+etacube.shape(2) + 1 != ebins.size()) {
throw std::invalid_argument(
"The shape of the etacube does not match the shape of the bins");
}
-// Cumulative sum in the x direction
-for (ssize_t i = 1; i < m_ietax.shape(0); i++) {
-for (ssize_t j = 0; j < m_ietax.shape(1); j++) {
-for (ssize_t k = 0; k < m_ietax.shape(2); k++) {
-m_ietax(i, j, k) += m_ietax(i - 1, j, k);
-}
-}
-}
-// Normalize by the highest row, if norm less than 1 don't do anything
+m_ietax = NDArray<double, 3>(etacube);
+m_ietay = NDArray<double, 3>(etacube);
+// prefix sum - conditional CDF
for (ssize_t i = 0; i < m_ietax.shape(0); i++) {
for (ssize_t j = 0; j < m_ietax.shape(1); j++) {
for (ssize_t k = 0; k < m_ietax.shape(2); k++) {
-auto val = m_ietax(m_ietax.shape(0) - 1, j, k);
-double norm = val < 1 ? 1 : val;
-m_ietax(i, j, k) /= norm;
+m_ietax(i, j, k) += (i == 0) ? 0 : m_ietax(i - 1, j, k);
+m_ietay(i, j, k) += (j == 0) ? 0 : m_ietay(i, j - 1, k);
}
}
}
-// Cumulative sum in the y direction
-for (ssize_t i = 0; i < m_ietay.shape(0); i++) {
-for (ssize_t j = 1; j < m_ietay.shape(1); j++) {
-for (ssize_t k = 0; k < m_ietay.shape(2); k++) {
-m_ietay(i, j, k) += m_ietay(i, j - 1, k);
-}
-}
-}
-// Normalize by the highest column, if norm less than 1 don't do anything
-for (ssize_t i = 0; i < m_ietay.shape(0); i++) {
-for (ssize_t j = 0; j < m_ietay.shape(1); j++) {
-for (ssize_t k = 0; k < m_ietay.shape(2); k++) {
-auto val = m_ietay(i, m_ietay.shape(1) - 1, k);
-double norm = val < 1 ? 1 : val;
-m_ietay(i, j, k) /= norm;
+// Standardize, if norm less than 1 don't do anything
+for (ssize_t i = 0; i < m_ietax.shape(0); i++) {
+for (ssize_t j = 0; j < m_ietax.shape(1); j++) {
+for (ssize_t k = 0; k < m_ietax.shape(2); k++) {
+auto shift_x = etacube(0, j, k);
+auto val_etax = m_ietax(m_ietax.shape(0) - 1, j, k) - shift_x;
+double norm_etax = val_etax == 0 ? 1 : val_etax;
+m_ietax(i, j, k) -= shift_x;
+m_ietax(i, j, k) /= norm_etax;
+auto shift_y = etacube(i, 0, k);
+auto val_etay = m_ietay(i, m_ietay.shape(1) - 1, k) - shift_y;
+double norm_etay = val_etay == 0 ? 1 : val_etay;
+m_ietay(i, j, k) -= shift_y;
+m_ietay(i, j, k) /= norm_etay;
}
}
}
}
void Interpolator::rosenblatttransform(NDView<double, 3> etacube) {
if (etacube.shape(0) + 1 != m_etabinsx.size() ||
etacube.shape(1) + 1 != m_etabinsy.size() ||
etacube.shape(2) + 1 != m_energy_bins.size()) {
throw std::invalid_argument(
"The shape of the etacube does not match the shape of the bins");
}
// TODO: fewer loops and better performance if ebins were the first dimension
// (would break backwards compatibility: ieta_x and ieta_y public getters,
// previously generated etacubes)
// TODO: maybe more loops are better than storing total_sum_y and
// total_sum_x
// marginal CDF for eta_x
NDArray<double, 2> marg_CDF_EtaX(
std::array<ssize_t, 2>{m_etabinsx.size() - 1, m_energy_bins.size() - 1},
0.0); // simulate proper probability distribution with zero at start
// conditional CDF for eta_y
NDArray<double, 3> cond_CDF_EtaY(etacube);
for (ssize_t i = 0; i < cond_CDF_EtaY.shape(0); ++i) {
for (ssize_t j = 0; j < cond_CDF_EtaY.shape(1); ++j) {
for (ssize_t k = 0; k < cond_CDF_EtaY.shape(2); ++k) {
// cumsum along y-axis
marg_CDF_EtaX(i, k) +=
etacube(i, j,
k); // marginal probability for etaX
// cumsum along y-axis
cond_CDF_EtaY(i, j, k) +=
(j == 0) ? 0 : cond_CDF_EtaY(i, j - 1, k);
}
}
}
// cumsum along x-axis
for (ssize_t i = 1; i < marg_CDF_EtaX.shape(0); ++i) {
for (ssize_t k = 0; k < marg_CDF_EtaX.shape(1); ++k) {
marg_CDF_EtaX(0, k) =
0.0; // shift by first value to ensure values between 0 and 1
marg_CDF_EtaX(i, k) += marg_CDF_EtaX(i - 1, k);
}
}
// normalize marg_CDF_EtaX
for (ssize_t i = 1; i < marg_CDF_EtaX.shape(0); ++i) {
for (ssize_t k = 0; k < marg_CDF_EtaX.shape(1); ++k) {
double norm = marg_CDF_EtaX(marg_CDF_EtaX.shape(0) - 1, k) == 0
? 1
: marg_CDF_EtaX(marg_CDF_EtaX.shape(0) - 1, k);
marg_CDF_EtaX(i, k) /= norm;
}
}
// standardize and normalize the conditional CDF for etaY
// Note: P(EtaY|EtaX) = P(EtaY,EtaX)/P(EtaX); we don't divide by P(EtaX) as it
// cancels out during normalization
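// Concretely, CDF(EtaY <= y | EtaX = x) = sum_{y' <= y} P(x, y') / sum_{y'} P(x, y'),
// so the common 1/P(EtaX = x) factor in numerator and denominator drops out.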
for (ssize_t i = 0; i < cond_CDF_EtaY.shape(0); ++i) {
for (ssize_t j = 0; j < cond_CDF_EtaY.shape(1); ++j) {
for (ssize_t k = 0; k < cond_CDF_EtaY.shape(2); ++k) {
double shift = etacube(i, 0, k);
double norm =
(cond_CDF_EtaY(i, cond_CDF_EtaY.shape(1) - 1, k) - shift) ==
0
? 1
: cond_CDF_EtaY(i, cond_CDF_EtaY.shape(1) - 1, k) -
shift;
cond_CDF_EtaY(i, j, k) -= shift;
cond_CDF_EtaY(i, j, k) /= norm;
}
}
}
m_ietay = std::move(
cond_CDF_EtaY); // TODO: maybe rename m_ietay to lookup or CDF_EtaY_cond
// TODO: should actually be only 2-dimensional; kept 3-dimensional for
// consistency with Anna's code, though this should change
m_ietax = NDArray<double, 3>(
std::array<ssize_t, 3>{m_etabinsx.size() - 1, m_etabinsy.size() - 1,
m_energy_bins.size() - 1});
for (ssize_t i = 0; i < m_etabinsx.size() - 1; ++i)
for (ssize_t j = 0; j < m_etabinsy.size() - 1; ++j)
for (ssize_t k = 0; k < m_energy_bins.size() - 1; ++k)
m_ietax(i, j, k) = marg_CDF_EtaX(i, k);
}
} // namespace aare
@@ -193,3 +193,43 @@ TEST_CASE("Last element is different", "[algorithm]") {
std::vector<int> vec = {1, 1, 1, 1, 2};
REQUIRE(aare::all_equal(vec) == false);
}
TEST_CASE("Linear interpolation", "[algorithm]") {
SECTION("interpolated mean value") {
const double interpolated_value =
aare::linear_interpolation({0.0, 1.0}, {4.0, 6.0}, 0.5);
REQUIRE(interpolated_value == 5.0);
}
SECTION("interpolate left value") {
const double interpolated_value =
aare::linear_interpolation({0.0, 1.0}, {4.0, 6.0}, 0.0);
REQUIRE(interpolated_value == 4.0);
}
SECTION("interpolate right value") {
const double interpolated_value =
aare::linear_interpolation({0.0, 1.0}, {4.0, 6.0}, 1.0);
REQUIRE(interpolated_value == 6.0);
}
SECTION("interpolate the same value") {
const double interpolated_value =
aare::linear_interpolation({0.0, 1.0}, {4.0, 4.0}, 0.5);
REQUIRE(interpolated_value == 4.0);
}
}
TEST_CASE("Bilinear interpolation", "[algorithm]") {
SECTION("interpolated mean value") {
const double interpolated_value_left =
aare::linear_interpolation({0.0, 1.0}, {4.0, 6.0}, 0.5);
const double interpolated_value_right =
aare::linear_interpolation({0.0, 1.0}, {5.0, 6.0}, 0.5);
const double interpolated_value = aare::linear_interpolation(
{0.5, 1.0}, {interpolated_value_left, interpolated_value_right},
0.75);
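// 4->6 lerped at 0.5 gives 5.0 and 5->6 lerped at 0.5 gives 5.5; lerping
// between those on [0.5, 1.0] at 0.75 gives 5.0 + 0.5 * (5.5 - 5.0) = 5.25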
REQUIRE(interpolated_value == 5.25);
}
}