Merge branch 'main' into dev/license

This commit is contained in:
2025-11-21 14:52:54 +01:00
committed by GitHub
49 changed files with 3253 additions and 1171 deletions

View File

@@ -32,7 +32,7 @@ set( PYTHON_FILES
aare/CtbRawFile.py
aare/ClusterFinder.py
aare/ClusterVector.py
aare/Cluster.py
aare/calibration.py
aare/func.py
aare/RawFile.py

24
python/aare/Cluster.py Normal file
View File

@@ -0,0 +1,24 @@
from . import _aare
import numpy as np
from .ClusterFinder import _type_to_char
def Cluster(x : int, y : int, data, cluster_size=(3,3), dtype = np.int32):
    """
    Factory function to create a Cluster object. Provides a cleaner syntax for
    the templated Cluster in C++.

    Args:
        x: x coordinate of the cluster.
        y: y coordinate of the cluster.
        data: pixel values of the cluster (assumed flattened with
            cluster_size[0]*cluster_size[1] entries — TODO confirm against the
            C++ binding).
        cluster_size: (rows, cols) of the cluster; selects which templated C++
            class is instantiated.
        dtype: numpy dtype of the pixel data; must be one supported by
            _type_to_char.

    Raises:
        ValueError: if no C++ binding exists for the requested
            dtype/cluster_size combination.

    .. code-block:: python

        from aare import Cluster
        Cluster(cluster_size=(3,3), dtype=np.float64)

    """
    try:
        # Bound C++ classes follow the naming scheme Cluster<rows>x<cols><type char>,
        # e.g. "Cluster3x3i" for a 3x3 int32 cluster; getattr raises
        # AttributeError when that instantiation was not compiled in.
        class_name = f"Cluster{cluster_size[0]}x{cluster_size[1]}{_type_to_char(dtype)}"
        cls = getattr(_aare, class_name)
    except AttributeError:
        raise ValueError(f"Unsupported combination of type and cluster size: {dtype}/{cluster_size} when requesting {class_name}")
    return cls(x, y, data)

View File

@@ -11,6 +11,8 @@ def _type_to_char(dtype):
return 'f'
elif dtype == np.float64:
return 'd'
elif dtype == np.int16:
return 'i16'
else:
raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32, np.float32, and np.float64 are supported.")
@@ -27,7 +29,7 @@ def _get_class(name, cluster_size, dtype):
def ClusterFinder(image_size, cluster_size, n_sigma=5, dtype = np.int32, capacity = 1024):
def ClusterFinder(image_size, cluster_size=(3,3), n_sigma=5, dtype = np.int32, capacity = 1024):
"""
Factory function to create a ClusterFinder object. Provides a cleaner syntax for
the templated ClusterFinder in C++.
@@ -66,7 +68,7 @@ def ClusterFileSink(clusterfindermt, cluster_file, dtype=np.int32):
return cls(clusterfindermt, cluster_file)
def ClusterFile(fname, cluster_size=(3,3), dtype=np.int32, chunk_size = 1000):
def ClusterFile(fname, cluster_size=(3,3), dtype=np.int32, chunk_size = 1000, mode = "r"):
"""
Factory function to create a ClusterFile object. Provides a cleaner syntax for
the templated ClusterFile in C++.
@@ -84,4 +86,4 @@ def ClusterFile(fname, cluster_size=(3,3), dtype=np.int32, chunk_size = 1000):
"""
cls = _get_class("ClusterFile", cluster_size, dtype)
return cls(fname, chunk_size=chunk_size)
return cls(fname, chunk_size=chunk_size, mode=mode)

View File

@@ -1,12 +1,22 @@
# SPDX-License-Identifier: MPL-2.0
from ._aare import ClusterVector_Cluster3x3i
from . import _aare
import numpy as np
from .ClusterFinder import _get_class
def ClusterVector(cluster_size, dtype = np.int32):
def ClusterVector(cluster_size=(3,3), dtype = np.int32):
"""
Factory function to create a ClusterVector object. Provides a cleaner syntax for
the templated ClusterVector in C++.
.. code-block:: python
from aare import ClusterVector
ClusterVector(cluster_size=(3,3), dtype=np.float64)
"""
cls = _get_class("ClusterVector", cluster_size, dtype)
return cls()
if dtype == np.int32 and cluster_size == (3,3):
return ClusterVector_Cluster3x3i()
else:
raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32 is supported.")

View File

@@ -8,16 +8,18 @@ from ._aare import Pedestal_d, Pedestal_f, ClusterFinder_Cluster3x3i, VarCluster
from ._aare import DetectorType
from ._aare import hitmap
from ._aare import ROI
from ._aare import corner
# from ._aare import ClusterFinderMT, ClusterCollector, ClusterFileSink, ClusterVector_i
from .ClusterFinder import ClusterFinder, ClusterCollector, ClusterFinderMT, ClusterFileSink, ClusterFile
from .ClusterVector import ClusterVector
from .Cluster import Cluster
from ._aare import fit_gaus, fit_pol1, fit_scurve, fit_scurve2
from ._aare import Interpolator
from ._aare import calculate_eta2
from ._aare import calculate_eta2, calculate_eta3, calculate_cross_eta3, calculate_full_eta2
from ._aare import reduce_to_2x2, reduce_to_3x3
from ._aare import apply_custom_weights

View File

@@ -81,9 +81,7 @@ void reduce_to_3x3(py::module &m) {
[](const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cl) {
return reduce_to_3x3(cl);
},
py::return_value_policy::move,
"Reduce cluster to 3x3 subcluster by taking the 3x3 subcluster with "
"the highest photon energy.");
py::return_value_policy::move, R"(Reduce cluster to 3x3 subcluster)");
}
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
@@ -96,8 +94,15 @@ void reduce_to_2x2(py::module &m) {
return reduce_to_2x2(cl);
},
py::return_value_policy::move,
"Reduce cluster to 2x2 subcluster by taking the 2x2 subcluster with "
"the highest photon energy.");
R"(
Reduce cluster to 2x2 subcluster by taking the 2x2 subcluster with
the highest photon energy.
RETURN:
reduced cluster (cluster is filled in row major ordering starting at the top left. Thus for a max subcluster in the top left corner the photon hit is at the fourth position.)
)");
}
#pragma GCC diagnostic pop

View File

@@ -82,23 +82,4 @@ void define_ClusterFile(py::module &m, const std::string &typestr) {
});
}
template <typename Type, uint8_t CoordSizeX, uint8_t CoordSizeY,
typename CoordType = uint16_t>
void register_calculate_eta(py::module &m) {
using ClusterType = Cluster<Type, CoordSizeX, CoordSizeY, CoordType>;
m.def("calculate_eta2",
[](const aare::ClusterVector<ClusterType> &clusters) {
auto eta2 = new NDArray<double, 2>(calculate_eta2(clusters));
return return_image_data(eta2);
});
m.def("calculate_eta2", [](const aare::Cluster<Type, CoordSizeX, CoordSizeY,
CoordType> &cluster) {
auto eta2 = calculate_eta2(cluster);
// TODO return proper eta class
return py::make_tuple(eta2.x, eta2.y, eta2.sum);
});
}
#pragma GCC diagnostic pop

View File

@@ -121,12 +121,13 @@ void define_2x2_reduction(py::module &m) {
reduce_to_2x2(cv));
},
R"(
Reduce cluster to 2x2 subcluster by taking the 2x2 subcluster with
the highest photon energy."
the highest photon energy.
Parameters
----------
cv : ClusterVector
cv : ClusterVector (clusters are filled in row-major ordering starting at the top left. Thus for a max subcluster in the top left corner the photon hit is at the fourth position.)
)",
py::arg("clustervector"));
}
@@ -143,11 +144,10 @@ void define_3x3_reduction(py::module &m) {
reduce_to_3x3(cv));
},
R"(
Reduce cluster to 3x3 subcluster by taking the 3x3 subcluster with
the highest photon energy."
Reduce cluster to 3x3 subcluster
Parameters
----------
cv : ClusterVector
)",
py::arg("clustervector"));

104
python/src/bind_Eta.hpp Normal file
View File

@@ -0,0 +1,104 @@
#include "aare/CalculateEta.hpp"
#include <cstdint>
// #include <pybind11/native_enum.h> only for version 3
#include <pybind11/pybind11.h>
namespace py = pybind11;
using namespace ::aare;
/// Bind the Eta2<T> struct to Python as "Eta{typestr}" (e.g. "Etad" for
/// double, "Etaf" for float). All fields are exposed read-only: the eta x/y
/// values, the corner the eta was computed from, and the summed photon energy.
template <typename T>
void define_eta(py::module &m, const std::string &typestr) {
    auto class_name = fmt::format("Eta{}", typestr);
    py::class_<Eta2<T>>(m, class_name.c_str())
        .def(py::init<>())
        .def_readonly("x", &Eta2<T>::x, "eta x value")
        .def_readonly("y", &Eta2<T>::y, "eta y value")
        .def_readonly("c", &Eta2<T>::c,
                      "eta corner value cTopLeft, cTopRight, "
                      "cBottomLeft, cBottomRight")
        .def_readonly("sum", &Eta2<T>::sum, "photon energy of cluster");
}
/// Bind the C++ `corner` enum to Python as "corner". export_values() also
/// injects the individual enumerators (cTopLeft, ...) into the module
/// namespace, matching pre-enum-class usage on the Python side.
void define_corner_enum(py::module &m) {
    py::enum_<corner>(m, "corner", "enum.Enum")
        .value("cTopLeft", corner::cTopLeft)
        .value("cTopRight", corner::cTopRight)
        .value("cBottomLeft", corner::cBottomLeft)
        .value("cBottomRight", corner::cBottomRight)
        .export_values();
}
/// Register "calculate_eta2" and "calculate_full_eta2" overloads for the
/// Cluster<Type, CoordSizeX, CoordSizeY, CoordType> instantiation.
/// Each function gets two overloads: one taking a ClusterVector (returns a
/// vector of Eta2 results) and one taking a single Cluster (returns one Eta2).
template <typename Type, uint8_t CoordSizeX, uint8_t CoordSizeY,
          typename CoordType = uint16_t>
void register_calculate_2x2eta(py::module &m) {
    using ClusterType = Cluster<Type, CoordSizeX, CoordSizeY, CoordType>;
    m.def(
        "calculate_eta2",
        [](const aare::ClusterVector<ClusterType> &clusters) {
            // Heap-allocate the result vector; return_vector is expected to
            // take ownership (presumably via a capsule) — TODO confirm.
            auto eta2 = new std::vector<Eta2<typename ClusterType::value_type>>(
                calculate_eta2(clusters));
            return return_vector(eta2);
        },
        R"(calculates eta2x2)", py::arg("clusters"));
    m.def(
        "calculate_eta2",
        [](const aare::Cluster<Type, CoordSizeX, CoordSizeY, CoordType>
               &cluster) { return calculate_eta2(cluster); },
        R"(calculates eta2x2)", py::arg("cluster"));
    m.def(
        "calculate_full_eta2",
        [](const aare::Cluster<Type, CoordSizeX, CoordSizeY, CoordType>
               &cluster) { return calculate_full_eta2(cluster); },
        R"(calculates full eta2x2)", py::arg("cluster"));
    m.def(
        "calculate_full_eta2",
        [](const aare::ClusterVector<ClusterType> &clusters) {
            auto eta2 = new std::vector<Eta2<typename ClusterType::value_type>>(
                calculate_full_eta2(clusters));
            return return_vector(eta2);
        },
        R"(calculates full eta2x2)", py::arg("clusters"));
}
/// Register "calculate_eta3" (uses the entire cluster) and
/// "calculate_cross_eta3" (uses only the cross pixels) for the
/// Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType> instantiation.
/// Like the 2x2 registration, each function gets a ClusterVector overload
/// (vector of Eta2 results) and a single-Cluster overload.
template <typename Type, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
          typename CoordType = uint16_t>
void register_calculate_3x3eta(py::module &m) {
    using ClusterType = Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>;
    m.def(
        "calculate_eta3",
        [](const aare::ClusterVector<ClusterType> &clusters) {
            // Heap-allocated result handed off to return_vector — ownership
            // transfer assumed, TODO confirm against return_vector's contract.
            auto eta = new std::vector<Eta2<Type>>(calculate_eta3(clusters));
            return return_vector(eta);
        },
        R"(calculates eta3x3 using entire cluster)", py::arg("clusters"));
    m.def(
        "calculate_cross_eta3",
        [](const aare::ClusterVector<ClusterType> &clusters) {
            auto eta =
                new std::vector<Eta2<Type>>(calculate_cross_eta3(clusters));
            return return_vector(eta);
        },
        R"(calculates eta3x3 taking into account cross pixels in cluster)",
        py::arg("clusters"));
    m.def(
        "calculate_eta3",
        [](const ClusterType &cluster) { return calculate_eta3(cluster); },
        R"(calculates eta3x3 using entire cluster)", py::arg("cluster"));
    m.def(
        "calculate_cross_eta3",
        [](const ClusterType &cluster) {
            return calculate_cross_eta3(cluster);
        },
        R"(calculates eta3x3 taking into account cross pixels in cluster)",
        py::arg("cluster"));
}

View File

@@ -1,4 +1,5 @@
// SPDX-License-Identifier: MPL-2.0
#include "aare/CalculateEta.hpp"
#include "aare/Interpolator.hpp"
#include "aare/NDArray.hpp"
#include "aare/NDView.hpp"
@@ -10,19 +11,41 @@
namespace py = pybind11;
#define REGISTER_INTERPOLATOR_ETA2(T, N, M, U) \
register_interpolate<T, N, M, U, aare::calculate_full_eta2<T, N, M, U>>( \
interpolator, "_full_eta2", "full eta2"); \
register_interpolate<T, N, M, U, aare::calculate_eta2<T, N, M, U>>( \
interpolator, "", "eta2");
#define REGISTER_INTERPOLATOR_ETA3(T, N, M, U) \
register_interpolate<T, N, M, U, aare::calculate_eta3<T, N, M, U>>( \
interpolator, "_eta3", "full eta3"); \
register_interpolate<T, N, M, U, aare::calculate_cross_eta3<T, N, M, U>>( \
interpolator, "_cross_eta3", "cross eta3");
template <typename Type, uint8_t CoordSizeX, uint8_t CoordSizeY,
typename CoordType = uint16_t>
void register_interpolate(py::class_<aare::Interpolator> &interpolator) {
typename CoordType = uint16_t, auto EtaFunction>
void register_interpolate(py::class_<aare::Interpolator> &interpolator,
const std::string &typestr = "",
const std::string &doc_string_etatype = "eta2x2") {
using ClusterType = Cluster<Type, CoordSizeX, CoordSizeY, CoordType>;
interpolator.def("interpolate",
[](aare::Interpolator &self,
const ClusterVector<ClusterType> &clusters) {
auto photons = self.interpolate<ClusterType>(clusters);
auto *ptr = new std::vector<Photon>{photons};
return return_vector(ptr);
});
const std::string docstring = "interpolation based on " +
doc_string_etatype +
"\n\nReturns:\n interpolated photons";
auto function_name = fmt::format("interpolate{}", typestr);
interpolator.def(
function_name.c_str(),
[](aare::Interpolator &self,
const ClusterVector<ClusterType> &clusters) {
auto photons = self.interpolate<EtaFunction, ClusterType>(clusters);
auto *ptr = new std::vector<Photon>{photons};
return return_vector(ptr);
},
docstring.c_str(), py::arg("cluster_vector"));
}
void define_interpolation_bindings(py::module &m) {
@@ -31,33 +54,91 @@ void define_interpolation_bindings(py::module &m) {
auto interpolator =
py::class_<aare::Interpolator>(m, "Interpolator")
.def(py::init([](py::array_t<double, py::array::c_style |
py::array::forcecast>
etacube,
py::array_t<double> xbins,
py::array_t<double> ybins,
py::array_t<double> ebins) {
return Interpolator(make_view_3d(etacube), make_view_1d(xbins),
make_view_1d(ybins), make_view_1d(ebins));
}))
.def(py::init(
[](py::array_t<double,
py::array::c_style | py::array::forcecast>
etacube,
py::array_t<double> xbins, py::array_t<double> ybins,
py::array_t<double> ebins) {
return Interpolator(
make_view_3d(etacube), make_view_1d(xbins),
make_view_1d(ybins), make_view_1d(ebins));
}),
R"doc(
Constructor
Args:
etacube:
joint distribution of eta_x, eta_y and photon energy (**Note:** for the joint distribution first dimension is eta_x, second: eta_y, third: energy bins.)
xbins:
bin edges of etax
ybins:
bin edges of etay
ebins:
bin edges of photon energy
)doc",
py::arg("etacube"),
py::arg("xbins"), py::arg("ybins"),
py::arg("ebins"))
.def(py::init(
[](py::array_t<double> xbins, py::array_t<double> ybins,
py::array_t<double> ebins) {
return Interpolator(make_view_1d(xbins),
make_view_1d(ybins),
make_view_1d(ebins));
}),
R"(
Constructor
Args:
xbins:
bin edges of etax
ybins:
bin edges of etay
ebins:
bin edges of photon energy
)", py::arg("xbins"),
py::arg("ybins"), py::arg("ebins"))
.def(
"rosenblatttransform",
[](Interpolator &self,
py::array_t<double,
py::array::c_style | py::array::forcecast>
etacube) {
return self.rosenblatttransform(make_view_3d(etacube));
},
R"(
calculated the rosenblatttransform for the given distribution
etacube:
joint distribution of eta_x, eta_y and photon energy (**Note:** for the joint distribution first dimension is eta_x, second: eta_y, third: energy bins.)
)",
py::arg("etacube"))
.def("get_ietax",
[](Interpolator &self) {
auto *ptr = new NDArray<double, 3>{};
*ptr = self.get_ietax();
return return_image_data(ptr);
})
}, R"(conditional CDF of etax conditioned on etay, marginal CDF of etax (if rosenblatt transform applied))")
.def("get_ietay", [](Interpolator &self) {
auto *ptr = new NDArray<double, 3>{};
*ptr = self.get_ietay();
return return_image_data(ptr);
});
}, R"(conditional CDF of etay conditioned on etax)");
register_interpolate<int, 3, 3, uint16_t>(interpolator);
register_interpolate<float, 3, 3, uint16_t>(interpolator);
register_interpolate<double, 3, 3, uint16_t>(interpolator);
register_interpolate<int, 2, 2, uint16_t>(interpolator);
register_interpolate<float, 2, 2, uint16_t>(interpolator);
register_interpolate<double, 2, 2, uint16_t>(interpolator);
REGISTER_INTERPOLATOR_ETA3(int, 3, 3, uint16_t);
REGISTER_INTERPOLATOR_ETA3(float, 3, 3, uint16_t);
REGISTER_INTERPOLATOR_ETA3(double, 3, 3, uint16_t);
REGISTER_INTERPOLATOR_ETA2(int, 3, 3, uint16_t);
REGISTER_INTERPOLATOR_ETA2(float, 3, 3, uint16_t);
REGISTER_INTERPOLATOR_ETA2(double, 3, 3, uint16_t);
REGISTER_INTERPOLATOR_ETA2(int, 2, 2, uint16_t);
REGISTER_INTERPOLATOR_ETA2(float, 2, 2, uint16_t);
REGISTER_INTERPOLATOR_ETA2(double, 2, 2, uint16_t);
// TODO! Evaluate without converting to double
m.def(

View File

@@ -9,6 +9,7 @@
#include "bind_ClusterFinder.hpp"
#include "bind_ClusterFinderMT.hpp"
#include "bind_ClusterVector.hpp"
#include "bind_Eta.hpp"
#include "bind_calibration.hpp"
// TODO! migrate the other names
@@ -43,14 +44,16 @@ double, 'f' for float)
#define DEFINE_CLUSTER_BINDINGS(T, N, M, U, TYPE_CODE) \
define_ClusterFile<T, N, M, U>(m, "Cluster" #N "x" #M #TYPE_CODE); \
define_ClusterVector<T, N, M, U>(m, "Cluster" #N "x" #M #TYPE_CODE); \
define_Cluster<T, N, M, U>(m, #N "x" #M #TYPE_CODE); \
register_calculate_2x2eta<T, N, M, U>(m); \
define_2x2_reduction<T, N, M, U>(m); \
reduce_to_2x2<T, N, M, U>(m);
#define DEFINE_BINDINGS_CLUSTERFINDER(T, N, M, U, TYPE_CODE) \
define_ClusterFinder<T, N, M, U>(m, "Cluster" #N "x" #M #TYPE_CODE); \
define_ClusterFinderMT<T, N, M, U>(m, "Cluster" #N "x" #M #TYPE_CODE); \
define_ClusterFileSink<T, N, M, U>(m, "Cluster" #N "x" #M #TYPE_CODE); \
define_ClusterCollector<T, N, M, U>(m, "Cluster" #N "x" #M #TYPE_CODE); \
define_Cluster<T, N, M, U>(m, #N "x" #M #TYPE_CODE); \
register_calculate_eta<T, N, M, U>(m); \
define_2x2_reduction<T, N, M, U>(m); \
reduce_to_2x2<T, N, M, U>(m);
define_ClusterCollector<T, N, M, U>(m, "Cluster" #N "x" #M #TYPE_CODE);
PYBIND11_MODULE(_aare, m) {
define_file_io_bindings(m);
@@ -88,7 +91,23 @@ PYBIND11_MODULE(_aare, m) {
DEFINE_CLUSTER_BINDINGS(double, 9, 9, uint16_t, d);
DEFINE_CLUSTER_BINDINGS(float, 9, 9, uint16_t, f);
// DEFINE_CLUSTER_BINDINGS(double, 2, 1, uint16_t, d);
DEFINE_CLUSTER_BINDINGS(int16_t, 3, 3, uint16_t, i16);
DEFINE_BINDINGS_CLUSTERFINDER(int, 3, 3, uint16_t, i);
DEFINE_BINDINGS_CLUSTERFINDER(double, 3, 3, uint16_t, d);
DEFINE_BINDINGS_CLUSTERFINDER(float, 3, 3, uint16_t, f);
DEFINE_BINDINGS_CLUSTERFINDER(int, 5, 5, uint16_t, i);
DEFINE_BINDINGS_CLUSTERFINDER(double, 5, 5, uint16_t, d);
DEFINE_BINDINGS_CLUSTERFINDER(float, 5, 5, uint16_t, f);
DEFINE_BINDINGS_CLUSTERFINDER(int, 7, 7, uint16_t, i);
DEFINE_BINDINGS_CLUSTERFINDER(double, 7, 7, uint16_t, d);
DEFINE_BINDINGS_CLUSTERFINDER(float, 7, 7, uint16_t, f);
DEFINE_BINDINGS_CLUSTERFINDER(int, 9, 9, uint16_t, i);
DEFINE_BINDINGS_CLUSTERFINDER(double, 9, 9, uint16_t, d);
DEFINE_BINDINGS_CLUSTERFINDER(float, 9, 9, uint16_t, f);
define_3x3_reduction<int, 3, 3, uint16_t>(m);
define_3x3_reduction<double, 3, 3, uint16_t>(m);
@@ -116,10 +135,30 @@ PYBIND11_MODULE(_aare, m) {
reduce_to_3x3<double, 9, 9, uint16_t>(m);
reduce_to_3x3<float, 9, 9, uint16_t>(m);
register_calculate_3x3eta<int, 3, 3, uint16_t>(m);
register_calculate_3x3eta<double, 3, 3, uint16_t>(m);
register_calculate_3x3eta<float, 3, 3, uint16_t>(m);
register_calculate_3x3eta<int16_t, 3, 3, uint16_t>(m);
using Sum_index_pair_d = Sum_index_pair<double, corner>;
PYBIND11_NUMPY_DTYPE(Sum_index_pair_d, sum, index);
using Sum_index_pair_f = Sum_index_pair<float, corner>;
PYBIND11_NUMPY_DTYPE(Sum_index_pair_f, sum, index);
using Sum_index_pair_i = Sum_index_pair<int, corner>;
PYBIND11_NUMPY_DTYPE(Sum_index_pair_i, sum, index);
using eta_d = Eta2<double>;
PYBIND11_NUMPY_DTYPE(eta_d, x, y, c, sum);
using eta_i = Eta2<int>;
PYBIND11_NUMPY_DTYPE(eta_i, x, y, c, sum);
using eta_f = Eta2<float>;
PYBIND11_NUMPY_DTYPE(eta_f, x, y, c, sum);
using eta_i16 = Eta2<int16_t>;
PYBIND11_NUMPY_DTYPE(eta_i16, x, y, c, sum);
define_corner_enum(m);
define_eta<float>(m, "f");
define_eta<double>(m, "d");
define_eta<int>(m, "i");
define_eta<int16_t>(m, "i16");
}

File diff suppressed because one or more lines are too long

View File

@@ -3,6 +3,7 @@ import pytest
import numpy as np
from aare import _aare #import the C++ module
from aare import corner
from conftest import test_data_path
@@ -40,52 +41,49 @@ def test_Interpolator():
xbins = np.linspace(0, 5, 30, dtype=np.float64)
ybins = np.linspace(0, 5, 30, dtype=np.float64)
etacube = np.zeros(shape=[30, 30, 20], dtype=np.float64)
etacube = np.zeros(shape=[29, 29, 19], dtype=np.float64)
interpolator = _aare.Interpolator(etacube, xbins, ybins, ebins)
assert interpolator.get_ietax().shape == (30,30,20)
assert interpolator.get_ietay().shape == (30,30,20)
clustervector = _aare.ClusterVector_Cluster3x3i()
cluster = _aare.Cluster3x3i(0,0, np.ones(9, dtype=np.int32))
cluster = _aare.Cluster3x3i(1,1, np.ones(9, dtype=np.int32))
clustervector.push_back(cluster)
interpolated_photons = interpolator.interpolate(clustervector)
assert interpolated_photons.size == 1
assert interpolated_photons[0]["x"] == 0
assert interpolated_photons[0]["y"] == 0
assert interpolated_photons[0]["x"] == 0.5
assert interpolated_photons[0]["y"] == 0.5
assert interpolated_photons[0]["energy"] == 4 #eta_sum = 4, dx, dy = -1,-1 m_ietax = 0, m_ietay = 0
clustervector = _aare.ClusterVector_Cluster2x2i()
cluster = _aare.Cluster2x2i(0,0, np.ones(4, dtype=np.int32))
cluster = _aare.Cluster2x2i(1,1, np.ones(4, dtype=np.int32))
clustervector.push_back(cluster)
interpolated_photons = interpolator.interpolate(clustervector)
assert interpolated_photons.size == 1
assert interpolated_photons[0]["x"] == 0
assert interpolated_photons[0]["y"] == 0
assert interpolated_photons[0]["x"] == 0.5
assert interpolated_photons[0]["y"] == 0.5
assert interpolated_photons[0]["energy"] == 4
def test_calculate_eta():
"""Calculate Eta"""
clusters = _aare.ClusterVector_Cluster3x3i()
clusters.push_back(_aare.Cluster3x3i(0,0, np.ones(9, dtype=np.int32)))
clusters.push_back(_aare.Cluster3x3i(0,0, np.array([1,1,1,2,2,2,3,3,3])))
cluster = _aare.Cluster3x3i(0,0, np.ones(9, dtype=np.int32))
eta2 = _aare.calculate_eta2(clusters)
eta2 = _aare.calculate_eta2(cluster)
assert eta2.shape == (2,2)
assert eta2[0,0] == 0.5
assert eta2[0,1] == 0.5
assert eta2[1,0] == 0.5
assert eta2[1,1] == 0.4 #2/5
assert eta2.x == 0.5
assert eta2.y == 0.5
assert eta2.c == corner.cTopLeft
assert eta2.sum == 4
def test_max_sum():
@@ -119,7 +117,7 @@ def test_2x2_reduction():
reduced_cluster = _aare.reduce_to_2x2(cluster)
assert reduced_cluster.x == 4
assert reduced_cluster.x == 5
assert reduced_cluster.y == 5
assert (reduced_cluster.data == np.array([[2, 3], [2, 2]], dtype=np.int32)).all()
@@ -131,9 +129,9 @@ def test_3x3_reduction():
reduced_cluster = _aare.reduce_to_3x3(cluster)
assert reduced_cluster.x == 4
assert reduced_cluster.x == 5
assert reduced_cluster.y == 5
assert (reduced_cluster.data == np.array([[1.0, 2.0, 1.0], [2.0, 2.0, 3.0], [1.0, 2.0, 1.0]], dtype=np.double)).all()
assert (reduced_cluster.data == np.array([[2.0, 1.0, 1.0], [2.0, 3.0, 1.0], [2.0, 1.0, 1.0]], dtype=np.double)).all()

View File

@@ -6,7 +6,7 @@ import time
from pathlib import Path
import pickle
from aare import ClusterFile, ClusterVector
from aare import ClusterFile, ClusterVector, calculate_eta2
from aare import _aare
from conftest import test_data_path
@@ -45,6 +45,19 @@ def test_max_2x2_sum():
assert max_2x2[0]["index"] == 2
def test_eta2():
"""calculate eta2"""
cv = _aare.ClusterVector_Cluster3x3i()
cv.push_back(_aare.Cluster3x3i(19, 22, np.ones(9, dtype=np.int32)))
assert cv.size == 1
eta2 = calculate_eta2(cv)
assert eta2.size == 1
assert eta2[0]["x"] == 0.5
assert eta2[0]["y"] == 0.5
assert eta2[0]["c"] == 0
assert eta2[0]["sum"] == 4
def test_make_a_hitmap_from_cluster_vector():
cv = _aare.ClusterVector_Cluster3x3i()
@@ -75,11 +88,11 @@ def test_2x2_reduction():
reduced_cv = np.array(_aare.reduce_to_2x2(cv), copy=False)
assert reduced_cv.size == 2
assert reduced_cv[0]["x"] == 4
assert reduced_cv[0]["x"] == 5
assert reduced_cv[0]["y"] == 5
assert (reduced_cv[0]["data"] == np.array([[2, 3], [2, 2]], dtype=np.int32)).all()
assert reduced_cv[1]["x"] == 4
assert reduced_cv[1]["y"] == 6
assert reduced_cv[1]["x"] == 5
assert reduced_cv[1]["y"] == 5
assert (reduced_cv[1]["data"] == np.array([[2, 2], [2, 3]], dtype=np.int32)).all()
@@ -94,6 +107,6 @@ def test_3x3_reduction():
reduced_cv = np.array(_aare.reduce_to_3x3(cv), copy=False)
assert reduced_cv.size == 2
assert reduced_cv[0]["x"] == 4
assert reduced_cv[0]["x"] == 5
assert reduced_cv[0]["y"] == 5
assert (reduced_cv[0]["data"] == np.array([[1.0, 2.0, 1.0], [2.0, 2.0, 3.0], [1.0, 2.0, 1.0]], dtype=np.double)).all()
assert (reduced_cv[0]["data"] == np.array([[2.0, 1.0, 1.0], [2.0, 3.0, 1.0], [2.0, 1.0, 1.0]], dtype=np.double)).all()

View File

@@ -25,10 +25,6 @@ def create_photon_hit_with_gaussian_distribution(mean, covariance_matrix, data_p
probability_values = gaussian.pdf(data_points)
return (probability_values.reshape(X.shape)).round() #python bindings only support frame types of uint16_t
def photon_hit_in_euclidean_space(cluster_center, pixels_per_superpixel, photon_hit):
scaled_photon_hit_x = cluster_center - (1 - photon_hit[0][0])*pixels_per_superpixel*pixel_width
scaled_photon_hit_y = cluster_center - (1 - photon_hit[0][1])*pixels_per_superpixel*pixel_width
return (scaled_photon_hit_x, scaled_photon_hit_y)
def create_2x2cluster_from_frame(frame, pixels_per_superpixel):
return Cluster2x2d(1, 1, np.array([frame[0:pixels_per_superpixel, 0:pixels_per_superpixel].sum(),
@@ -49,10 +45,10 @@ def create_3x3cluster_from_frame(frame, pixels_per_superpixel):
frame[2*pixels_per_superpixel:3*pixels_per_superpixel, 2*pixels_per_superpixel:3*pixels_per_superpixel].sum()], dtype=np.float64))
def calculate_eta_distribution(num_frames, pixels_per_superpixel, random_number_generator, cluster_2x2 = True):
def calculate_eta_distribution(num_frames, pixels_per_superpixel, random_number_generator, bin_edges_x = bh.axis.Regular(100, -0.2, 1.2), bin_edges_y = bh.axis.Regular(100, -0.2, 1.2), cluster_2x2 = True):
hist = bh.Histogram(
bh.axis.Regular(100, -0.2, 1.2),
bh.axis.Regular(100, -0.2, 1.2), bh.axis.Regular(1, 0, num_pixels*num_pixels*1/(variance*2*np.pi)))
bin_edges_x,
bin_edges_y, bh.axis.Regular(1, 0, num_pixels*num_pixels*1/(variance*2*np.pi)))
for _ in range(0, num_frames):
mean_x = random_number_generator.uniform(pixels_per_superpixel*pixel_width, 2*pixels_per_superpixel*pixel_width)
@@ -67,7 +63,7 @@ def calculate_eta_distribution(num_frames, pixels_per_superpixel, random_number_
cluster = create_3x3cluster_from_frame(frame, pixels_per_superpixel)
eta2 = calculate_eta2(cluster)
hist.fill(eta2[0], eta2[1], eta2[2])
hist.fill(eta2.x, eta2.y, eta2.sum)
return hist
@@ -86,9 +82,9 @@ def test_interpolation_of_2x2_cluster(test_data_path):
pixels_per_superpixel = int(num_pixels*0.5)
random_number_generator = np.random.default_rng(42)
eta_distribution = calculate_eta_distribution(num_frames, pixels_per_superpixel, random_number_generator)
eta_distribution = calculate_eta_distribution(num_frames, pixels_per_superpixel, random_number_generator, bin_edges_x = bh.axis.Regular(100, -0.1, 0.6), bin_edges_y = bh.axis.Regular(100, -0.1, 0.6))
interpolation = Interpolator(eta_distribution, eta_distribution.axes[0].edges[:-1], eta_distribution.axes[1].edges[:-1], eta_distribution.axes[2].edges[:-1])
interpolation = Interpolator(eta_distribution, eta_distribution.axes[0].edges, eta_distribution.axes[1].edges, eta_distribution.axes[2].edges)
#actual photon hit
mean = 1.2*pixels_per_superpixel*pixel_width
@@ -105,7 +101,7 @@ def test_interpolation_of_2x2_cluster(test_data_path):
cluster_center = 1.5*pixels_per_superpixel*pixel_width
scaled_photon_hit = photon_hit_in_euclidean_space(cluster_center, pixels_per_superpixel, interpolated_photon)
scaled_photon_hit = (interpolated_photon[0][0]*pixels_per_superpixel*pixel_width, interpolated_photon[0][1]*pixels_per_superpixel*pixel_width)
assert (np.linalg.norm(scaled_photon_hit - mean) < np.linalg.norm(np.array([cluster_center, cluster_center] - mean)))
@@ -124,13 +120,14 @@ def test_interpolation_of_3x3_cluster(test_data_path):
num_frames = 1000
pixels_per_superpixel = int(num_pixels/3)
random_number_generator = np.random.default_rng(42)
eta_distribution = calculate_eta_distribution(num_frames, pixels_per_superpixel, random_number_generator, False)
eta_distribution = calculate_eta_distribution(num_frames, pixels_per_superpixel, random_number_generator, bin_edges_x = bh.axis.Regular(100, -0.1, 1.1), bin_edges_y = bh.axis.Regular(100, -0.1, 1.1), cluster_2x2 = False)
interpolation = Interpolator(eta_distribution, eta_distribution.axes[0].edges[:-1], eta_distribution.axes[1].edges[:-1], eta_distribution.axes[2].edges[:-1])
interpolation = Interpolator(eta_distribution, eta_distribution.axes[0].edges, eta_distribution.axes[1].edges, eta_distribution.axes[2].edges)
#actual photon hit
mean = 1.2*pixels_per_superpixel*pixel_width
mean = np.array([mean, mean])
mean_x = (1 + 0.8)*pixels_per_superpixel*pixel_width
mean_y = (1 + 0.2)*pixels_per_superpixel*pixel_width
mean = np.array([mean_x, mean_y])
frame = create_photon_hit_with_gaussian_distribution(mean, covariance_matrix, data_points)
cluster = create_3x3cluster_from_frame(frame, pixels_per_superpixel)
@@ -143,7 +140,7 @@ def test_interpolation_of_3x3_cluster(test_data_path):
cluster_center = 1.5*pixels_per_superpixel*pixel_width
scaled_photon_hit = photon_hit_in_euclidean_space(cluster_center, pixels_per_superpixel, interpolated_photon)
scaled_photon_hit = (interpolated_photon[0][0]*pixels_per_superpixel*pixel_width, interpolated_photon[0][1]*pixels_per_superpixel*pixel_width)
assert (np.linalg.norm(scaled_photon_hit - mean) < np.linalg.norm(np.array([cluster_center, cluster_center] - mean)))

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,291 @@
#test script to test interpolation on simulated data
import pytest
import pytest_check as check
import numpy as np
import boost_histogram as bh
import pickle
from scipy.stats import multivariate_normal
from aare import Interpolator, calculate_eta2, calculate_cross_eta3, calculate_full_eta2, calculate_eta3
from aare import ClusterFile
from conftest import test_data_path
## TODO: is there something like a test fixture setup/teardown in pytest?
def calculate_eta_distribution(cv, calculate_eta, edges_x=[-0.5,0.5], edges_y=[-0.5,0.5], nbins = 101):
energy_bins = bh.axis.Regular(1, 0, 16) # max and min energy of simulated photons
eta_distribution = bh.Histogram(
bh.axis.Regular(nbins, edges_x[0], edges_x[1]),
bh.axis.Regular(nbins, edges_y[0], edges_y[1]), energy_bins)
eta = calculate_eta(cv)
eta_distribution.fill(eta['x'], eta['y'], eta['sum'])
return eta_distribution
@pytest.fixture
def load_data(test_data_path):
"""Load simulated cluster data and ground truth positions"""
f = ClusterFile(test_data_path / "clust" / "simulated_clusters.clust", dtype=np.float64, mode="r")
cv = f.read_frame()
ground_truths = np.load(test_data_path / "interpolation/ground_truth_simulated.npy")
return cv, ground_truths
@pytest.mark.withdata
def test_eta2_interpolation(load_data, check):
"""Test eta2 interpolation on simulated data"""
cv, ground_truths = load_data
num_bins = 201
eta_distribution = calculate_eta_distribution(cv, calculate_eta2, edges_x=[-0.1,1.1], edges_y=[-0.1,1.1], nbins=num_bins)
interpolator = Interpolator(eta_distribution, eta_distribution.axes[0].edges, eta_distribution.axes[1].edges, eta_distribution.axes[2].edges)
assert interpolator.get_ietax().shape == (num_bins,num_bins,1)
assert interpolator.get_ietay().shape == (num_bins,num_bins,1)
interpolated_photons = interpolator.interpolate(cv)
assert interpolated_photons.size == cv.size
interpolated_photons["x"] += 1.0 #ground truth label uses 5x5 clusters
interpolated_photons["y"] += 1.0
residuals_interpolated_x = abs(ground_truths[:, 0] - interpolated_photons["x"])
residuals_interpolated_y = abs(ground_truths[:, 1] - interpolated_photons["y"])
"""
residuals_center_pixel_x = abs(ground_truths[:, 0] - 2.5)
residuals_center_pixel_y = abs(ground_truths[:, 1] - 2.5)
# interpolation needs to perform better than center pixel assignment - not true for photon close to the center
assert (residuals_interpolated_x < residuals_center_pixel_x).all()
assert (residuals_interpolated_y < residuals_center_pixel_y).all()
"""
# check within photon hit pixel for all
with check:
assert np.allclose(interpolated_photons["x"], ground_truths[:, 0], atol=5e-1)
with check:
assert np.allclose(interpolated_photons["y"], ground_truths[:, 1], atol=5e-1)
# check mean and std of residuals
with check:
assert residuals_interpolated_y.mean() <= 0.1
with check:
assert residuals_interpolated_x.mean() <= 0.1
with check:
assert residuals_interpolated_x.std() <= 0.05
with check:
assert residuals_interpolated_y.std() <= 0.05
@pytest.mark.withdata
def test_eta2_interpolation_rosenblatt(load_data, check):
    """Eta2 interpolation via the Rosenblatt transform: same accuracy
    checks as plain eta2, with slightly relaxed std thresholds."""
    cv, ground_truths = load_data

    n_bins = 201
    eta_dist = calculate_eta_distribution(
        cv, calculate_eta2,
        edges_x=[-0.1, 1.1], edges_y=[-0.1, 1.1], nbins=n_bins)
    # The Rosenblatt variant builds the lookup tables from the
    # distribution after construction instead of in the constructor.
    interpolator = Interpolator(
        eta_dist.axes[0].edges, eta_dist.axes[1].edges, eta_dist.axes[2].edges)
    interpolator.rosenblatttransform(eta_dist)

    assert interpolator.get_ietax().shape == (n_bins, n_bins, 1)
    assert interpolator.get_ietay().shape == (n_bins, n_bins, 1)

    photons = interpolator.interpolate(cv)
    assert photons.size == cv.size

    # Ground-truth labels were produced with 5x5 clusters, hence the +1 offset.
    photons["x"] += 1.0
    photons["y"] += 1.0

    res_x = np.abs(ground_truths[:, 0] - photons["x"])
    res_y = np.abs(ground_truths[:, 1] - photons["y"])

    # Disabled: interpolation does not always beat center-pixel assignment
    # for photons hitting close to the pixel center.
    # res_center_x = np.abs(ground_truths[:, 0] - 2.5)
    # res_center_y = np.abs(ground_truths[:, 1] - 2.5)
    # assert (res_x < res_center_x).all()
    # assert (res_y < res_center_y).all()

    # Every interpolated position stays within the photon hit pixel.
    with check:
        assert np.allclose(photons["x"], ground_truths[:, 0], atol=5e-1)
    with check:
        assert np.allclose(photons["y"], ground_truths[:, 1], atol=5e-1)

    # Residual statistics; the Rosenblatt variant performs slightly worse,
    # so the std bound is 0.055 instead of 0.05.
    with check:
        assert res_y.mean() <= 0.1
    with check:
        assert res_x.mean() <= 0.1
    with check:
        assert res_x.std() <= 0.055
    with check:
        assert res_y.std() <= 0.055
@pytest.mark.withdata
def test_cross_eta_interpolation(load_data, check):
    """Cross-eta3 interpolation on simulated clusters must land inside
    the hit pixel and keep residual mean/std within tolerance."""
    cv, ground_truths = load_data

    n_bins = 201
    eta_dist = calculate_eta_distribution(
        cv, calculate_cross_eta3,
        edges_x=[-0.5, 0.5], edges_y=[-0.5, 0.5], nbins=n_bins)
    interpolator = Interpolator(
        eta_dist,
        eta_dist.axes[0].edges, eta_dist.axes[1].edges, eta_dist.axes[2].edges)

    assert interpolator.get_ietax().shape == (n_bins, n_bins, 1)
    assert interpolator.get_ietay().shape == (n_bins, n_bins, 1)

    photons = interpolator.interpolate_cross_eta3(cv)
    assert photons.size == cv.size

    # Ground-truth labels were produced with 5x5 clusters, hence the +1 offset.
    photons["x"] += 1.0
    photons["y"] += 1.0

    res_x = np.abs(ground_truths[:, 0] - photons["x"])
    res_y = np.abs(ground_truths[:, 1] - photons["y"])

    # Disabled: interpolation does not always beat center-pixel assignment
    # for photons hitting close to the pixel center.
    # res_center_x = np.abs(ground_truths[:, 0] - 2.5)
    # res_center_y = np.abs(ground_truths[:, 1] - 2.5)
    # assert (res_x < res_center_x).all()
    # assert (res_y < res_center_y).all()

    # Every interpolated position stays within the photon hit pixel.
    # TODO: fails as eta_x = 0, eta_y = 0 is not leading to offset (0.5,0.5)
    with check:
        assert np.allclose(photons["x"], ground_truths[:, 0], atol=5e-1)
    with check:
        assert np.allclose(photons["y"], ground_truths[:, 1], atol=5e-1)

    # Residual statistics: mean and spread must stay small.
    with check:
        assert res_y.mean() <= 0.1
    with check:
        assert res_x.mean() <= 0.1
    with check:
        assert res_x.std() <= 0.05
    with check:
        assert res_y.std() <= 0.05
@pytest.mark.withdata
def test_eta3_interpolation(load_data, check):
    """Eta3 interpolation on simulated clusters must land inside the hit
    pixel and keep residual mean/std within tolerance."""
    cv, ground_truths = load_data

    n_bins = 201
    eta_dist = calculate_eta_distribution(
        cv, calculate_eta3,
        edges_x=[-0.5, 0.5], edges_y=[-0.5, 0.5], nbins=n_bins)
    interpolator = Interpolator(
        eta_dist,
        eta_dist.axes[0].edges, eta_dist.axes[1].edges, eta_dist.axes[2].edges)

    assert interpolator.get_ietax().shape == (n_bins, n_bins, 1)
    assert interpolator.get_ietay().shape == (n_bins, n_bins, 1)

    photons = interpolator.interpolate_eta3(cv)
    assert photons.size == cv.size

    # Ground-truth labels were produced with 5x5 clusters, hence the +1 offset.
    photons["x"] += 1.0
    photons["y"] += 1.0

    res_x = np.abs(ground_truths[:, 0] - photons["x"])
    res_y = np.abs(ground_truths[:, 1] - photons["y"])

    # Disabled: interpolation does not always beat center-pixel assignment
    # for photons hitting close to the pixel center.
    # res_center_x = np.abs(ground_truths[:, 0] - 2.5)
    # res_center_y = np.abs(ground_truths[:, 1] - 2.5)
    # assert (res_x < res_center_x).all()
    # assert (res_y < res_center_y).all()

    # Every interpolated position stays within the photon hit pixel.
    # TODO: fails as eta_x = 0, eta_y = 0 is not leading to offset (0.5,0.5)
    with check:
        assert np.allclose(photons["x"], ground_truths[:, 0], atol=5e-1)
    with check:
        assert np.allclose(photons["y"], ground_truths[:, 1], atol=5e-1)

    # Residual statistics: mean and spread must stay small.
    with check:
        assert res_y.mean() <= 0.1
    with check:
        assert res_x.mean() <= 0.1
    with check:
        assert res_x.std() <= 0.05
    with check:
        assert res_y.std() <= 0.05
@pytest.mark.withdata
def test_full_eta2_interpolation(load_data, check):
    """Full-eta2 interpolation on simulated clusters must land inside
    the hit pixel and keep residual mean/std within tolerance."""
    cv, ground_truths = load_data

    n_bins = 201
    eta_dist = calculate_eta_distribution(
        cv, calculate_full_eta2,
        edges_x=[-0.1, 1.1], edges_y=[-0.1, 1.1], nbins=n_bins)
    interpolator = Interpolator(
        eta_dist,
        eta_dist.axes[0].edges, eta_dist.axes[1].edges, eta_dist.axes[2].edges)

    assert interpolator.get_ietax().shape == (n_bins, n_bins, 1)
    assert interpolator.get_ietay().shape == (n_bins, n_bins, 1)

    photons = interpolator.interpolate_full_eta2(cv)
    assert photons.size == cv.size

    # Ground-truth labels were produced with 5x5 clusters, hence the +1 offset.
    photons["x"] += 1.0
    photons["y"] += 1.0

    res_x = np.abs(ground_truths[:, 0] - photons["x"])
    res_y = np.abs(ground_truths[:, 1] - photons["y"])

    # Disabled: interpolation does not always beat center-pixel assignment
    # for photons hitting close to the pixel center.
    # res_center_x = np.abs(ground_truths[:, 0] - 2.5)
    # res_center_y = np.abs(ground_truths[:, 1] - 2.5)
    # assert (res_x < res_center_x).all()
    # assert (res_y < res_center_y).all()

    # Every interpolated position stays within the photon hit pixel.
    with check:
        assert np.allclose(photons["x"], ground_truths[:, 0], atol=5e-1)
    with check:
        assert np.allclose(photons["y"], ground_truths[:, 1], atol=5e-1)

    # Residual statistics: mean and spread must stay small.
    with check:
        assert res_y.mean() <= 0.1
    with check:
        assert res_x.mean() <= 0.1
    with check:
        assert res_x.std() <= 0.05
    with check:
        assert res_y.std() <= 0.05