added rounding

This commit is contained in:
froejdh_e
2026-01-20 15:40:26 +01:00
183 changed files with 7154 additions and 1035 deletions


@@ -1,3 +1,4 @@
# SPDX-License-Identifier: MPL-2.0
find_package (Python 3.10 COMPONENTS Interpreter Development.Module REQUIRED)
set(PYBIND11_FINDPYTHON ON) # Needed for RH8
@@ -31,7 +32,7 @@ set( PYTHON_FILES
aare/CtbRawFile.py
aare/ClusterFinder.py
aare/ClusterVector.py
aare/Cluster.py
aare/calibration.py
aare/func.py
aare/RawFile.py

python/aare/Cluster.py (new file, 24 lines)

@@ -0,0 +1,24 @@
from . import _aare
import numpy as np
from .ClusterFinder import _type_to_char
def Cluster(x : int, y : int, data, cluster_size=(3,3), dtype = np.int32):
"""
Factory function to create a Cluster object. Provides a cleaner syntax for
the templated Cluster in C++.
.. code-block:: python
from aare import Cluster
Cluster(0, 0, np.ones(9, dtype=np.float64), cluster_size=(3,3), dtype=np.float64)
"""
try:
class_name = f"Cluster{cluster_size[0]}x{cluster_size[1]}{_type_to_char(dtype)}"
cls = getattr(_aare, class_name)
except AttributeError:
raise ValueError(f"Unsupported combination of type and cluster size: {dtype}/{cluster_size} when requesting {class_name}")
return cls(x, y, data)
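For reference, a minimal usage sketch of the factory (illustrative values; it relies on the int32 3x3 binding _aare.Cluster3x3i that is exercised in the tests below):

from aare import Cluster
import numpy as np

# builds an _aare.Cluster3x3i behind the scenes
c = Cluster(5, 7, np.ones(9, dtype=np.int32), cluster_size=(3, 3), dtype=np.int32)
print(c.x, c.y)      # 5 7
print(c.data.shape)  # (3, 3), via the read-only data property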


@@ -1,3 +1,4 @@
# SPDX-License-Identifier: MPL-2.0
from . import _aare
import numpy as np
@@ -10,6 +11,8 @@ def _type_to_char(dtype):
return 'f'
elif dtype == np.float64:
return 'd'
elif dtype == np.int16:
return 'i16'
else:
raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32, np.int16, np.float32, and np.float64 are supported.")
@@ -46,14 +49,13 @@ def ClusterFinderMT(image_size, saved_cluster_size = (3,3), checked_cluster_size
return cls(image_size, n_sigma=n_sigma, capacity=capacity, n_threads=n_threads, cluster_size_x=checked_cluster_size[0], cluster_size_y=checked_cluster_size[1])
def ClusterCollector(clusterfindermt, cluster_size = (3,3), dtype=np.int32):
def ClusterCollector(clusterfindermt, dtype=np.int32):
"""
Factory function to create a ClusterCollector object. Provides a cleaner syntax for
the templated ClusterCollector in C++.
"""
cls = _get_class("ClusterCollector", cluster_size, dtype)
cls = _get_class("ClusterCollector", clusterfindermt.cluster_size, dtype)
return cls(clusterfindermt)
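A sketch of the updated call pattern: the collector now takes the cluster size from the finder, so only the finder and optionally the dtype are passed (the image size below is a placeholder):

from aare import ClusterFinderMT, ClusterCollector

mt = ClusterFinderMT((512, 1024))      # placeholder image size, default 3x3 clusters
collector = ClusterCollector(mt)       # cluster size taken from mt.cluster_size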
def ClusterFileSink(clusterfindermt, cluster_file, dtype=np.int32):
@@ -66,7 +68,7 @@ def ClusterFileSink(clusterfindermt, cluster_file, dtype=np.int32):
return cls(clusterfindermt, cluster_file)
def ClusterFile(fname, cluster_size=(3,3), dtype=np.int32, chunk_size = 1000):
def ClusterFile(fname, cluster_size=(3,3), dtype=np.int32, chunk_size = 1000, mode = "r"):
"""
Factory function to create a ClusterFile object. Provides a cleaner syntax for
the templated ClusterFile in C++.
@@ -84,4 +86,4 @@ def ClusterFile(fname, cluster_size=(3,3), dtype=np.int32, chunk_size = 1000):
"""
cls = _get_class("ClusterFile", cluster_size, dtype)
return cls(fname, chunk_size=chunk_size)
return cls(fname, chunk_size=chunk_size, mode=mode)
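Usage sketch for the new mode argument (placeholder file name), mirroring how the simulated-data test further down opens its clusters:

from aare import ClusterFile
import numpy as np

f = ClusterFile("simulated_clusters.clust", dtype=np.float64, mode="r")
cv = f.read_frame()    # ClusterVector holding the clusters of one frame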


@@ -1,11 +1,22 @@
# SPDX-License-Identifier: MPL-2.0
from ._aare import ClusterVector_Cluster3x3i
from . import _aare
import numpy as np
from .ClusterFinder import _get_class
def ClusterVector(cluster_size, dtype = np.int32):
def ClusterVector(cluster_size=(3,3), dtype = np.int32):
"""
Factory function to create a ClusterVector object. Provides a cleaner syntax for
the templated ClusterVector in C++.
.. code-block:: python
from aare import ClusterVector
ClusterVector(cluster_size=(3,3), dtype=np.float64)
"""
cls = _get_class("ClusterVector", cluster_size, dtype)
return cls()
if dtype == np.int32 and cluster_size == (3,3):
return ClusterVector_Cluster3x3i()
else:
raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32 is supported.")
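Usage sketch (the push_back pattern matches the vector tests further down):

from aare import ClusterVector, _aare
import numpy as np

cv = ClusterVector(cluster_size=(3, 3), dtype=np.int32)
cv.push_back(_aare.Cluster3x3i(5, 5, np.array([1, 1, 1, 2, 3, 1, 2, 2, 1], dtype=np.int32)))
assert cv.size == 1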


@@ -1,3 +1,4 @@
# SPDX-License-Identifier: MPL-2.0
from . import _aare
import numpy as np


@@ -1,3 +1,4 @@
# SPDX-License-Identifier: MPL-2.0
from . import _aare
import numpy as np
from .ScanParameters import ScanParameters


@@ -1,3 +1,4 @@
# SPDX-License-Identifier: MPL-2.0
from . import _aare
class ScanParameters(_aare.ScanParameters):


@@ -1,3 +1,4 @@
# SPDX-License-Identifier: MPL-2.0
# Make the compiled classes that live in _aare available from aare.
from . import _aare
@@ -7,17 +8,19 @@ from ._aare import Pedestal_d, Pedestal_f, ClusterFinder_Cluster3x3i, VarCluster
from ._aare import DetectorType
from ._aare import hitmap
from ._aare import ROI
from ._aare import corner
# from ._aare import ClusterFinderMT, ClusterCollector, ClusterFileSink, ClusterVector_i
from .ClusterFinder import ClusterFinder, ClusterCollector, ClusterFinderMT, ClusterFileSink, ClusterFile
from .ClusterVector import ClusterVector
from .Cluster import Cluster
from ._aare import fit_gaus, fit_pol1, fit_scurve, fit_scurve2
from ._aare import Interpolator
from ._aare import calculate_eta2
from ._aare import calculate_eta2, calculate_eta3, calculate_cross_eta3, calculate_full_eta2
from ._aare import reduce_to_2x2, reduce_to_3x3
from ._aare import apply_custom_weights


@@ -1,3 +1,4 @@
# SPDX-License-Identifier: MPL-2.0
#Calibration related functions
import numpy as np
def load_calibration(fname, hg0=False):


@@ -1 +1,2 @@
# SPDX-License-Identifier: MPL-2.0
from ._aare import gaus, pol1, scurve, scurve2


@@ -1,3 +1,4 @@
# SPDX-License-Identifier: MPL-2.0
import numpy as np
from . import _aare
@@ -48,6 +49,43 @@ class Matterhorn02Transform:
else:
return np.take(data.view(np.uint16), self.pixel_map[0:counters])
class Mythen302Transform:
"""
Transform Mythen 302 test chip data from a buffer of bytes (uint8_t)
to a uint32 numpy array of [64,3] representing channels and counters.
Assumes data taken with rx_dbitlist 17 6, rx_dbitreorder 1 and Digital
Samples = 2310 [(64x3x24)/2 + some extra]
.. note::
The offset is given in number of bits (0-7).
"""
_n_channels = 64
_n_counters = 3
def __init__(self, offset=4):
self.offset = offset
def __call__(self, data : np.ndarray):
"""
Transform buffer of data to a [64,3] np.ndarray of uint32.
Parameters
----------
data : np.ndarray
Expected dtype: uint8
Returns
----------
image : np.ndarray
uint32 array of size 64, 3
"""
res = _aare.decode_my302(data, self.offset)
res = res.reshape(
Mythen302Transform._n_channels, Mythen302Transform._n_counters
)
return res
#on import generate the pixel maps to avoid doing it every time
moench05 = Moench05Transform()
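A rough usage sketch of the new transform; the buffer is an all-zero placeholder of the size the decoder expects (64 channels x 3 counters x 3 bytes, plus one extra byte per output when a non-zero bit offset is used), and the import path assumes the class lives next to the other transforms:

import numpy as np
from aare.transform import Mythen302Transform  # assumed module path

raw = np.zeros(64 * 3 * 3 + 2, dtype=np.uint8)  # 578 bytes for a non-zero offset
tr = Mythen302Transform(offset=4)
image = tr(raw)                                  # uint32 array of shape (64, 3)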


@@ -1,3 +1,4 @@
# SPDX-License-Identifier: MPL-2.0
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable


@@ -1,3 +1,4 @@
# SPDX-License-Identifier: MPL-2.0
import matplotlib.pyplot as plt
import numpy as np
from aare import fit_gaus, fit_pol1


@@ -1,3 +1,4 @@
# SPDX-License-Identifier: MPL-2.0
from aare import apply_calibration
import numpy as np


@@ -1,3 +1,4 @@
// SPDX-License-Identifier: MPL-2.0
#include "aare/Cluster.hpp"
#include <cstdint>
@@ -24,7 +25,8 @@ void define_Cluster(py::module &m, const std::string &typestr) {
py::class_<Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>>(
m, class_name.c_str(), py::buffer_protocol())
.def(py::init([](uint8_t x, uint8_t y, py::array_t<Type> data) {
.def(py::init([](CoordType x, CoordType y,
py::array_t<Type, py::array::forcecast> data) {
py::buffer_info buf_info = data.request();
Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType> cluster;
cluster.x = x;
@@ -34,31 +36,73 @@ void define_Cluster(py::module &m, const std::string &typestr) {
cluster.data[i] = r(i);
}
return cluster;
}));
}))
/*
//TODO! Review if to keep or not
.def_property(
"data",
[](ClusterType &c) -> py::array {
return py::array(py::buffer_info(
c.data, sizeof(Type),
py::format_descriptor<Type>::format(), // Type
// format
1, // Number of dimensions
{static_cast<ssize_t>(ClusterSizeX *
ClusterSizeY)}, // Shape (flattened)
{sizeof(Type)} // Stride (step size between elements)
));
// TODO! Review if to keep or not
.def_property_readonly(
"data",
[](Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType> &c)
-> py::array {
return py::array(py::buffer_info(
c.data.data(), sizeof(Type),
py::format_descriptor<Type>::format(), // Type
// format
2, // Number of dimensions
{static_cast<ssize_t>(ClusterSizeX),
static_cast<ssize_t>(ClusterSizeY)}, // Shape (flattened)
{sizeof(Type) * ClusterSizeY, sizeof(Type)}
// Stride (step size between elements)
));
})
.def_readonly("x",
&Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>::x)
.def_readonly("y",
&Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>::y)
.def(
"max_sum_2x2",
[](Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType> &self) {
auto max_sum = self.max_sum_2x2();
return py::make_tuple(max_sum.sum,
static_cast<int>(max_sum.index));
},
R"(Calculates the sum of the 2x2 subcluster with the highest energy and its index relative to the cluster center (0: top_left, 1: top_right, 2: bottom_left, 3: bottom_right).
)");
}
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
typename CoordType = int16_t>
void reduce_to_3x3(py::module &m) {
m.def(
"reduce_to_3x3",
[](const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cl) {
return reduce_to_3x3(cl);
},
[](ClusterType &c, py::array_t<Type> arr) {
py::buffer_info buf_info = arr.request();
Type *ptr = static_cast<Type *>(buf_info.ptr);
std::copy(ptr, ptr + ClusterSizeX * ClusterSizeY,
c.data); // TODO dont iterate over centers!!!
py::return_value_policy::move, R"(Reduce cluster to 3x3 subcluster)");
}
});
*/
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
typename CoordType = int16_t>
void reduce_to_2x2(py::module &m) {
m.def(
"reduce_to_2x2",
[](const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cl) {
return reduce_to_2x2(cl);
},
py::return_value_policy::move,
R"(
Reduce cluster to 2x2 subcluster by taking the 2x2 subcluster with
the highest photon energy.
RETURN:
reduced cluster (data is filled in row-major order starting at the top left, so for a max subcluster in the top-left corner the photon hit ends up at the fourth position)
)");
}
#pragma GCC diagnostic pop
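Seen from Python, the new single-cluster bindings behave as in the tests below; a short sketch:

import numpy as np
from aare import _aare

cluster = _aare.Cluster3x3i(5, 5, np.array([1, 1, 1, 2, 3, 1, 2, 2, 1], dtype=np.int32))
s, idx = cluster.max_sum_2x2()          # (9, 2): the bottom-left 2x2 block has the highest sum
reduced = _aare.reduce_to_2x2(cluster)
print(reduced.data)                     # [[2, 3], [2, 2]]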


@@ -1,3 +1,4 @@
// SPDX-License-Identifier: MPL-2.0
#include "aare/ClusterCollector.hpp"
#include "aare/ClusterFileSink.hpp"
#include "aare/ClusterFinder.hpp"


@@ -1,3 +1,4 @@
// SPDX-License-Identifier: MPL-2.0
#include "aare/CalculateEta.hpp"
#include "aare/ClusterFile.hpp"
#include "aare/defs.hpp"
@@ -44,14 +45,15 @@ void define_ClusterFile(py::module &m, const std::string &typestr) {
auto v = new ClusterVector<ClusterType>(self.read_frame());
return v;
})
.def("set_roi", &ClusterFile<ClusterType>::set_roi,
py::arg("roi"))
.def("set_roi", &ClusterFile<ClusterType>::set_roi, py::arg("roi"))
.def("tell", &ClusterFile<ClusterType>::tell)
.def(
"set_noise_map",
[](ClusterFile<ClusterType> &self, py::array_t<int32_t> noise_map) {
auto view = make_view_2d(noise_map);
self.set_noise_map(view);
}, py::arg("noise_map"))
},
py::arg("noise_map"))
.def("set_gain_map",
[](ClusterFile<ClusterType> &self, py::array_t<double> gain_map) {
@@ -80,15 +82,4 @@ void define_ClusterFile(py::module &m, const std::string &typestr) {
});
}
template <typename Type, uint8_t CoordSizeX, uint8_t CoordSizeY,
typename CoordType = uint16_t>
void register_calculate_eta(py::module &m) {
using ClusterType = Cluster<Type, CoordSizeX, CoordSizeY, CoordType>;
m.def("calculate_eta2",
[](const aare::ClusterVector<ClusterType> &clusters) {
auto eta2 = new NDArray<double, 2>(calculate_eta2(clusters));
return return_image_data(eta2);
});
}
#pragma GCC diagnostic pop

View File

@@ -1,3 +1,4 @@
// SPDX-License-Identifier: MPL-2.0
#include "aare/ClusterCollector.hpp"
#include "aare/ClusterFileSink.hpp"
#include "aare/ClusterFinder.hpp"


@@ -1,3 +1,4 @@
// SPDX-License-Identifier: MPL-2.0
#include "aare/ClusterCollector.hpp"
#include "aare/ClusterFileSink.hpp"
#include "aare/ClusterFinder.hpp"


@@ -1,3 +1,4 @@
// SPDX-License-Identifier: MPL-2.0
#include "aare/ClusterCollector.hpp"
#include "aare/ClusterFileSink.hpp"
#include "aare/ClusterFinder.hpp"


@@ -1,3 +1,4 @@
// SPDX-License-Identifier: MPL-2.0
#include "aare/ClusterCollector.hpp"
#include "aare/ClusterFileSink.hpp"
#include "aare/ClusterFinder.hpp"
@@ -44,11 +45,16 @@ void define_ClusterVector(py::module &m, const std::string &typestr) {
auto *vec = new std::vector<Type>(self.sum());
return return_vector(vec);
})
.def("sum_2x2",
[](ClusterVector<ClusterType> &self) {
auto *vec = new std::vector<Type>(self.sum_2x2());
return return_vector(vec);
})
.def(
"sum_2x2",
[](ClusterVector<ClusterType> &self) {
auto *vec = new std::vector<Sum_index_pair<Type, corner>>(
self.sum_2x2());
return return_vector(vec);
},
R"(Calculates the sum of the 2x2 subcluster with the highest energy and its index relative to the cluster center (0: top_left, 1: top_right, 2: bottom_left, 3: bottom_right).
)")
.def_property_readonly("size", &ClusterVector<ClusterType>::size)
.def("item_size", &ClusterVector<ClusterType>::item_size)
.def_property_readonly("fmt",
@@ -104,4 +110,47 @@ void define_ClusterVector(py::module &m, const std::string &typestr) {
});
}
template <typename Type, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
typename CoordType = uint16_t>
void define_2x2_reduction(py::module &m) {
m.def(
"reduce_to_2x2",
[](const ClusterVector<
Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>> &cv) {
return new ClusterVector<Cluster<Type, 2, 2, CoordType>>(
reduce_to_2x2(cv));
},
R"(
Reduce cluster to 2x2 subcluster by taking the 2x2 subcluster with
the highest photon energy.
Parameters
cv : ClusterVector (cluster data is filled in row-major order starting at the top left, so for a max subcluster in the top-left corner the photon hit ends up at the fourth position)
)",
py::arg("clustervector"));
}
template <typename Type, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
typename CoordType = uint16_t>
void define_3x3_reduction(py::module &m) {
m.def(
"reduce_to_3x3",
[](const ClusterVector<
Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>> &cv) {
return new ClusterVector<Cluster<Type, 3, 3, CoordType>>(
reduce_to_3x3(cv));
},
R"(
Reduce cluster to 3x3 subcluster
Parameters
cv : ClusterVector
)",
py::arg("clustervector"));
}
#pragma GCC diagnostic pop
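Seen from Python, the vector-level reductions return new ClusterVectors that expose the buffer protocol (cf. test_ClusterVector below); a sketch with uniform dummy data:

import numpy as np
from aare import _aare

cv = _aare.ClusterVector_Cluster5x5d()
cv.push_back(_aare.Cluster5x5d(5, 5, np.ones(25, dtype=np.double)))
reduced = np.array(_aare.reduce_to_3x3(cv), copy=False)   # structured array view
print(reduced[0]["data"].shape)                           # (3, 3)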

python/src/bind_Eta.hpp (new file, 104 lines)

@@ -0,0 +1,104 @@
#include "aare/CalculateEta.hpp"
#include <cstdint>
// #include <pybind11/native_enum.h> only for version 3
#include <pybind11/pybind11.h>
namespace py = pybind11;
using namespace ::aare;
template <typename T>
void define_eta(py::module &m, const std::string &typestr) {
auto class_name = fmt::format("Eta{}", typestr);
py::class_<Eta2<T>>(m, class_name.c_str())
.def(py::init<>())
.def_readonly("x", &Eta2<T>::x, "eta x value")
.def_readonly("y", &Eta2<T>::y, "eta y value")
.def_readonly("c", &Eta2<T>::c,
"eta corner value cTopLeft, cTopRight, "
"cBottomLeft, cBottomRight")
.def_readonly("sum", &Eta2<T>::sum, "photon energy of cluster");
}
void define_corner_enum(py::module &m) {
py::enum_<corner>(m, "corner", "enum.Enum")
.value("cTopLeft", corner::cTopLeft)
.value("cTopRight", corner::cTopRight)
.value("cBottomLeft", corner::cBottomLeft)
.value("cBottomRight", corner::cBottomRight)
.export_values();
}
template <typename Type, uint8_t CoordSizeX, uint8_t CoordSizeY,
typename CoordType = uint16_t>
void register_calculate_2x2eta(py::module &m) {
using ClusterType = Cluster<Type, CoordSizeX, CoordSizeY, CoordType>;
m.def(
"calculate_eta2",
[](const aare::ClusterVector<ClusterType> &clusters) {
auto eta2 = new std::vector<Eta2<typename ClusterType::value_type>>(
calculate_eta2(clusters));
return return_vector(eta2);
},
R"(calculates eta2x2)", py::arg("clusters"));
m.def(
"calculate_eta2",
[](const aare::Cluster<Type, CoordSizeX, CoordSizeY, CoordType>
&cluster) { return calculate_eta2(cluster); },
R"(calculates eta2x2)", py::arg("cluster"));
m.def(
"calculate_full_eta2",
[](const aare::Cluster<Type, CoordSizeX, CoordSizeY, CoordType>
&cluster) { return calculate_full_eta2(cluster); },
R"(calculates full eta2x2)", py::arg("cluster"));
m.def(
"calculate_full_eta2",
[](const aare::ClusterVector<ClusterType> &clusters) {
auto eta2 = new std::vector<Eta2<typename ClusterType::value_type>>(
calculate_full_eta2(clusters));
return return_vector(eta2);
},
R"(calculates full eta2x2)", py::arg("clusters"));
}
template <typename Type, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
typename CoordType = uint16_t>
void register_calculate_3x3eta(py::module &m) {
using ClusterType = Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>;
m.def(
"calculate_eta3",
[](const aare::ClusterVector<ClusterType> &clusters) {
auto eta = new std::vector<Eta2<Type>>(calculate_eta3(clusters));
return return_vector(eta);
},
R"(calculates eta3x3 using entire cluster)", py::arg("clusters"));
m.def(
"calculate_cross_eta3",
[](const aare::ClusterVector<ClusterType> &clusters) {
auto eta =
new std::vector<Eta2<Type>>(calculate_cross_eta3(clusters));
return return_vector(eta);
},
R"(calculates eta3x3 taking into account cross pixels in cluster)",
py::arg("clusters"));
m.def(
"calculate_eta3",
[](const ClusterType &cluster) { return calculate_eta3(cluster); },
R"(calculates eta3x3 using entire cluster)", py::arg("cluster"));
m.def(
"calculate_cross_eta3",
[](const ClusterType &cluster) {
return calculate_cross_eta3(cluster);
},
R"(calculates eta3x3 taking into account cross pixels in cluster)",
py::arg("cluster"));
}
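The Python-side behaviour matches the updated tests: calculate_eta2 on a single cluster returns an Eta object, on a ClusterVector it returns a structured array:

import numpy as np
from aare import _aare, corner

cluster = _aare.Cluster3x3i(0, 0, np.ones(9, dtype=np.int32))
eta = _aare.calculate_eta2(cluster)
assert (eta.x, eta.y, eta.c, eta.sum) == (0.5, 0.5, corner.cTopLeft, 4)

cv = _aare.ClusterVector_Cluster3x3i()
cv.push_back(cluster)
etas = _aare.calculate_eta2(cv)   # fields: x, y, c, sum
assert etas[0]["sum"] == 4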


@@ -0,0 +1,181 @@
// SPDX-License-Identifier: MPL-2.0
#include "aare/CalculateEta.hpp"
#include "aare/Interpolator.hpp"
#include "aare/NDArray.hpp"
#include "aare/NDView.hpp"
#include "np_helper.hpp"
#include <cstdint>
#include <filesystem>
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
namespace py = pybind11;
#define REGISTER_INTERPOLATOR_ETA2(T, N, M, U) \
register_interpolate<T, N, M, U, aare::calculate_full_eta2<T, N, M, U>>( \
interpolator, "_full_eta2", "full eta2"); \
register_interpolate<T, N, M, U, aare::calculate_eta2<T, N, M, U>>( \
interpolator, "", "eta2");
#define REGISTER_INTERPOLATOR_ETA3(T, N, M, U) \
register_interpolate<T, N, M, U, aare::calculate_eta3<T, N, M, U>>( \
interpolator, "_eta3", "full eta3"); \
register_interpolate<T, N, M, U, aare::calculate_cross_eta3<T, N, M, U>>( \
interpolator, "_cross_eta3", "cross eta3");
template <typename Type, uint8_t CoordSizeX, uint8_t CoordSizeY,
typename CoordType = uint16_t, auto EtaFunction>
void register_interpolate(py::class_<aare::Interpolator> &interpolator,
const std::string &typestr = "",
const std::string &doc_string_etatype = "eta2x2") {
using ClusterType = Cluster<Type, CoordSizeX, CoordSizeY, CoordType>;
const std::string docstring = "interpolation based on " +
doc_string_etatype +
"\n\nReturns:\n interpolated photons";
auto function_name = fmt::format("interpolate{}", typestr);
interpolator.def(
function_name.c_str(),
[](aare::Interpolator &self,
const ClusterVector<ClusterType> &clusters) {
auto photons = self.interpolate<EtaFunction, ClusterType>(clusters);
auto *ptr = new std::vector<Photon>{photons};
return return_vector(ptr);
},
docstring.c_str(), py::arg("cluster_vector"));
}
template <typename Type>
void register_transform_eta_values(
py::class_<aare::Interpolator> &interpolator) {
interpolator.def(
"transform_eta_values",
[](Interpolator &self, const Eta2<Type> &eta) {
auto uniform_coord = self.transform_eta_values(eta);
return py::make_tuple(uniform_coord.x, uniform_coord.y);
},
R"(eta.x and eta.y transformed to uniform coordinates based on the CDFs ietax and ietay)",
py::arg("Eta"));
}
void define_interpolation_bindings(py::module &m) {
PYBIND11_NUMPY_DTYPE(aare::Photon, x, y, energy);
auto interpolator =
py::class_<aare::Interpolator>(m, "Interpolator")
.def(py::init(
[](py::array_t<double,
py::array::c_style | py::array::forcecast>
etacube,
py::array_t<double> xbins, py::array_t<double> ybins,
py::array_t<double> ebins) {
return Interpolator(
make_view_3d(etacube), make_view_1d(xbins),
make_view_1d(ybins), make_view_1d(ebins));
}),
R"doc(
Constructor
Args:
etacube:
joint distribution of eta_x, eta_y and photon energy (**Note:** for the joint distribution first dimension is eta_x, second: eta_y, third: energy bins.)
xbins:
bin edges of etax
ybins:
bin edges of etay
ebins:
bin edges of photon energy
)doc",
py::arg("etacube"),
py::arg("xbins"), py::arg("ybins"),
py::arg("ebins"))
.def(py::init(
[](py::array_t<double> xbins, py::array_t<double> ybins,
py::array_t<double> ebins) {
return Interpolator(make_view_1d(xbins),
make_view_1d(ybins),
make_view_1d(ebins));
}),
R"(
Constructor
Args:
xbins:
bin edges of etax
ybins:
bin edges of etay
ebins:
bin edges of photon energy
)", py::arg("xbins"),
py::arg("ybins"), py::arg("ebins"))
.def(
"rosenblatttransform",
[](Interpolator &self,
py::array_t<double,
py::array::c_style | py::array::forcecast>
etacube) {
return self.rosenblatttransform(make_view_3d(etacube));
},
R"(
calculates the Rosenblatt transform for the given distribution
etacube:
joint distribution of eta_x, eta_y and photon energy (**Note:** for the joint distribution first dimension is eta_x, second: eta_y, third: energy bins.)
)",
py::arg("etacube"))
.def("get_ietax",
[](Interpolator &self) {
auto *ptr = new NDArray<double, 3>{};
*ptr = self.get_ietax();
return return_image_data(ptr);
}, R"(conditional CDF of etax conditioned on etay; marginal CDF of etax if the Rosenblatt transform was applied)")
.def("get_ietay", [](Interpolator &self) {
auto *ptr = new NDArray<double, 3>{};
*ptr = self.get_ietay();
return return_image_data(ptr);
}, R"(conditional CDF of etay conditioned on etax)");
REGISTER_INTERPOLATOR_ETA3(int, 3, 3, uint16_t);
REGISTER_INTERPOLATOR_ETA3(float, 3, 3, uint16_t);
REGISTER_INTERPOLATOR_ETA3(double, 3, 3, uint16_t);
REGISTER_INTERPOLATOR_ETA2(int, 3, 3, uint16_t);
REGISTER_INTERPOLATOR_ETA2(float, 3, 3, uint16_t);
REGISTER_INTERPOLATOR_ETA2(double, 3, 3, uint16_t);
REGISTER_INTERPOLATOR_ETA2(int, 2, 2, uint16_t);
REGISTER_INTERPOLATOR_ETA2(float, 2, 2, uint16_t);
REGISTER_INTERPOLATOR_ETA2(double, 2, 2, uint16_t);
register_transform_eta_values<int>(interpolator);
register_transform_eta_values<float>(interpolator);
register_transform_eta_values<double>(interpolator);
// TODO! Evaluate without converting to double
m.def(
"hej",
[]() {
// auto boost_histogram = py::module_::import("boost_histogram");
// py::object axis =
// boost_histogram.attr("axis").attr("Regular")(10, 0.0, 10.0);
// py::object histogram = boost_histogram.attr("Histogram")(axis);
// return histogram;
// return h;
},
R"(
Placeholder binding; the boost_histogram based implementation above is commented
out and this function currently does nothing.
)");
}
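For orientation, the updated test exercises these bindings roughly as follows (bin edges and the all-zero eta cube are placeholders; the cube has one bin fewer than edges per axis):

import numpy as np
from aare import _aare

ebins = np.linspace(0, 10, 20, dtype=np.float64)
xbins = np.linspace(0, 5, 30, dtype=np.float64)
ybins = np.linspace(0, 5, 30, dtype=np.float64)
etacube = np.zeros((29, 29, 19), dtype=np.float64)   # (eta_x, eta_y, energy) bins

interp = _aare.Interpolator(etacube, xbins, ybins, ebins)
cv = _aare.ClusterVector_Cluster3x3i()
cv.push_back(_aare.Cluster3x3i(1, 1, np.ones(9, dtype=np.int32)))
photons = interp.interpolate(cv)   # structured array with x, y, energy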


@@ -1,3 +1,4 @@
// SPDX-License-Identifier: MPL-2.0
#include "aare/calibration.hpp"
#include <cstdint>


@@ -1,3 +1,4 @@
// SPDX-License-Identifier: MPL-2.0
#include "aare/CtbRawFile.hpp"
#include "aare/File.hpp"
@@ -95,6 +96,69 @@ void define_ctb_raw_file_io_bindings(py::module &m) {
return output;
});
m.def("expand24to32bit",
[](py::array_t<uint8_t, py::array::c_style | py::array::forcecast>
&input, uint32_t offset){
aare::BitOffset bitoff(offset);
py::buffer_info buf = input.request();
constexpr uint32_t bytes_per_channel = 3; //24 bit
py::array_t<uint32_t> output(buf.size/bytes_per_channel);
NDView<uint8_t, 1> input_view(input.mutable_data(),
{input.size()});
NDView<uint32_t, 1> output_view(output.mutable_data(),
{output.size()});
aare::expand24to32bit(input_view, output_view, bitoff);
return output;
});
m.def("decode_my302",
[](py::array_t<uint8_t, py::array::c_style | py::array::forcecast>
&input, uint32_t offset){
// Physical layout of the chip
constexpr size_t channels = 64;
constexpr size_t counters = 3;
constexpr size_t bytes_per_channel = 3; //24 bit
constexpr int n_outputs = 2;
ssize_t expected_size = channels*counters*bytes_per_channel;
// If we have an offset we need one extra byte per output
aare::BitOffset bitoff(offset);
if(bitoff.value())
expected_size += n_outputs;
if (input.size() != expected_size) {
throw std::runtime_error(
fmt::format("{} Expected an input size of {} bytes. Called "
"with input size of {}",
LOCATION, expected_size, input.size()));
}
py::buffer_info buf = input.request();
py::array_t<uint32_t> output(channels * counters);
for (int i = 0; i!=n_outputs; ++i){
auto step = input.size()/n_outputs;
auto out_step = output.size()/n_outputs;
NDView<uint8_t, 1> input_view(input.mutable_data()+step*i,
{input.size()/n_outputs});
NDView<uint32_t, 1> output_view(output.mutable_data()+out_step*i,
{output.size()/n_outputs});
aare::expand24to32bit(input_view, output_view, bitoff);
}
return output;
});
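As a sanity check, a minimal call of the new binding with all-zero placeholder data of exactly the size the binding requires:

import numpy as np
from aare import _aare

# 64 channels x 3 counters x 3 bytes, plus one extra byte per output for a non-zero bit offset
raw = np.zeros(64 * 3 * 3 + 2, dtype=np.uint8)
counts = _aare.decode_my302(raw, 4)   # uint32 array with 64 * 3 = 192 entries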
py::class_<CtbRawFile>(m, "CtbRawFile")
.def(py::init<const std::filesystem::path &>())
.def("read_frame",


@@ -1,3 +1,4 @@
// SPDX-License-Identifier: MPL-2.0
#include "aare/CtbRawFile.hpp"
#include "aare/File.hpp"
#include "aare/Frame.hpp"
@@ -65,7 +66,7 @@ void define_file_io_bindings(py::module &m) {
.def_property_readonly("bytes_per_pixel", &File::bytes_per_pixel)
.def_property_readonly(
"detector_type",
[](File &self) { return ToString(self.detector_type()); })
[](File &self) { return self.detector_type(); })
.def("read_frame",
[](File &self) {
const uint8_t item_size = self.bytes_per_pixel();
@@ -160,21 +161,6 @@ void define_file_io_bindings(py::module &m) {
}
});
py::class_<FileConfig>(m, "FileConfig")
.def(py::init<>())
.def_readwrite("rows", &FileConfig::rows)
.def_readwrite("cols", &FileConfig::cols)
.def_readwrite("version", &FileConfig::version)
.def_readwrite("geometry", &FileConfig::geometry)
.def_readwrite("detector_type", &FileConfig::detector_type)
.def_readwrite("max_frames_per_file", &FileConfig::max_frames_per_file)
.def_readwrite("total_frames", &FileConfig::total_frames)
.def_readwrite("dtype", &FileConfig::dtype)
.def("__eq__", &FileConfig::operator==)
.def("__ne__", &FileConfig::operator!=)
.def("__repr__", [](const FileConfig &a) {
return "<FileConfig: " + a.to_string() + ">";
});
py::class_<ScanParameters>(m, "ScanParameters")
.def(py::init<const std::string &>())


@@ -1,3 +1,4 @@
// SPDX-License-Identifier: MPL-2.0
#include <cstdint>
#include <filesystem>
#include <pybind11/pybind11.h>


@@ -1,82 +0,0 @@
#include "aare/Interpolator.hpp"
#include "aare/NDArray.hpp"
#include "aare/NDView.hpp"
#include "np_helper.hpp"
#include <cstdint>
#include <filesystem>
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
namespace py = pybind11;
template <typename Type, uint8_t CoordSizeX, uint8_t CoordSizeY,
typename CoordType = uint16_t>
void register_interpolate(py::class_<aare::Interpolator> &interpolator) {
using ClusterType = Cluster<Type, CoordSizeX, CoordSizeY, CoordType>;
interpolator.def("interpolate",
[](aare::Interpolator &self,
const ClusterVector<ClusterType> &clusters) {
auto photons = self.interpolate<ClusterType>(clusters);
auto *ptr = new std::vector<Photon>{photons};
return return_vector(ptr);
});
}
void define_interpolation_bindings(py::module &m) {
PYBIND11_NUMPY_DTYPE(aare::Photon, x, y, energy);
auto interpolator =
py::class_<aare::Interpolator>(m, "Interpolator")
.def(py::init([](py::array_t<double, py::array::c_style |
py::array::forcecast>
etacube,
py::array_t<double> xbins,
py::array_t<double> ybins,
py::array_t<double> ebins) {
return Interpolator(make_view_3d(etacube), make_view_1d(xbins),
make_view_1d(ybins), make_view_1d(ebins));
}))
.def("get_ietax",
[](Interpolator &self) {
auto *ptr = new NDArray<double, 3>{};
*ptr = self.get_ietax();
return return_image_data(ptr);
})
.def("get_ietay", [](Interpolator &self) {
auto *ptr = new NDArray<double, 3>{};
*ptr = self.get_ietay();
return return_image_data(ptr);
});
register_interpolate<int, 3, 3, uint16_t>(interpolator);
register_interpolate<float, 3, 3, uint16_t>(interpolator);
register_interpolate<double, 3, 3, uint16_t>(interpolator);
register_interpolate<int, 2, 2, uint16_t>(interpolator);
register_interpolate<float, 2, 2, uint16_t>(interpolator);
register_interpolate<double, 2, 2, uint16_t>(interpolator);
// TODO! Evaluate without converting to double
m.def(
"hej",
[]() {
// auto boost_histogram = py::module_::import("boost_histogram");
// py::object axis =
// boost_histogram.attr("axis").attr("Regular")(10, 0.0, 10.0);
// py::object histogram = boost_histogram.attr("Histogram")(axis);
// return histogram;
// return h;
},
R"(
Evaluate a 1D Gaussian function for all points in x using parameters par.
Parameters
----------
x : array_like
The points at which to evaluate the Gaussian function.
par : array_like
The parameters of the Gaussian function. The first element is the amplitude, the second element is the mean, and the third element is the standard deviation.
)");
}


@@ -1,3 +1,4 @@
// SPDX-License-Identifier: MPL-2.0
#include "aare/JungfrauDataFile.hpp"
#include "aare/defs.hpp"


@@ -1,3 +1,4 @@
// SPDX-License-Identifier: MPL-2.0
// Files with bindings to the different classes
// New style file naming
@@ -8,13 +9,14 @@
#include "bind_ClusterFinder.hpp"
#include "bind_ClusterFinderMT.hpp"
#include "bind_ClusterVector.hpp"
#include "bind_Eta.hpp"
#include "bind_Interpolator.hpp"
#include "bind_calibration.hpp"
// TODO! migrate the other names
#include "ctb_raw_file.hpp"
#include "file.hpp"
#include "fit.hpp"
#include "interpolation.hpp"
#include "jungfrau_data_file.hpp"
#include "pedestal.hpp"
#include "pixel_map.hpp"
@@ -42,12 +44,16 @@ double, 'f' for float)
#define DEFINE_CLUSTER_BINDINGS(T, N, M, U, TYPE_CODE) \
define_ClusterFile<T, N, M, U>(m, "Cluster" #N "x" #M #TYPE_CODE); \
define_ClusterVector<T, N, M, U>(m, "Cluster" #N "x" #M #TYPE_CODE); \
define_Cluster<T, N, M, U>(m, #N "x" #M #TYPE_CODE); \
register_calculate_2x2eta<T, N, M, U>(m); \
define_2x2_reduction<T, N, M, U>(m); \
reduce_to_2x2<T, N, M, U>(m);
#define DEFINE_BINDINGS_CLUSTERFINDER(T, N, M, U, TYPE_CODE) \
define_ClusterFinder<T, N, M, U>(m, "Cluster" #N "x" #M #TYPE_CODE); \
define_ClusterFinderMT<T, N, M, U>(m, "Cluster" #N "x" #M #TYPE_CODE); \
define_ClusterFileSink<T, N, M, U>(m, "Cluster" #N "x" #M #TYPE_CODE); \
define_ClusterCollector<T, N, M, U>(m, "Cluster" #N "x" #M #TYPE_CODE); \
define_Cluster<T, N, M, U>(m, #N "x" #M #TYPE_CODE); \
register_calculate_eta<T, N, M, U>(m);
define_ClusterCollector<T, N, M, U>(m, "Cluster" #N "x" #M #TYPE_CODE);
PYBIND11_MODULE(_aare, m) {
define_file_io_bindings(m);
@@ -89,4 +95,74 @@ PYBIND11_MODULE(_aare, m) {
DEFINE_CLUSTER_BINDINGS(double, 21, 21, uint16_t, d);
DEFINE_CLUSTER_BINDINGS(float, 21, 21, uint16_t, f);
DEFINE_CLUSTER_BINDINGS(int16_t, 3, 3, uint16_t, i16);
DEFINE_BINDINGS_CLUSTERFINDER(int, 3, 3, uint16_t, i);
DEFINE_BINDINGS_CLUSTERFINDER(double, 3, 3, uint16_t, d);
DEFINE_BINDINGS_CLUSTERFINDER(float, 3, 3, uint16_t, f);
DEFINE_BINDINGS_CLUSTERFINDER(int, 5, 5, uint16_t, i);
DEFINE_BINDINGS_CLUSTERFINDER(double, 5, 5, uint16_t, d);
DEFINE_BINDINGS_CLUSTERFINDER(float, 5, 5, uint16_t, f);
DEFINE_BINDINGS_CLUSTERFINDER(int, 7, 7, uint16_t, i);
DEFINE_BINDINGS_CLUSTERFINDER(double, 7, 7, uint16_t, d);
DEFINE_BINDINGS_CLUSTERFINDER(float, 7, 7, uint16_t, f);
DEFINE_BINDINGS_CLUSTERFINDER(int, 9, 9, uint16_t, i);
DEFINE_BINDINGS_CLUSTERFINDER(double, 9, 9, uint16_t, d);
DEFINE_BINDINGS_CLUSTERFINDER(float, 9, 9, uint16_t, f);
define_3x3_reduction<int, 3, 3, uint16_t>(m);
define_3x3_reduction<double, 3, 3, uint16_t>(m);
define_3x3_reduction<float, 3, 3, uint16_t>(m);
define_3x3_reduction<int, 5, 5, uint16_t>(m);
define_3x3_reduction<double, 5, 5, uint16_t>(m);
define_3x3_reduction<float, 5, 5, uint16_t>(m);
define_3x3_reduction<int, 7, 7, uint16_t>(m);
define_3x3_reduction<double, 7, 7, uint16_t>(m);
define_3x3_reduction<float, 7, 7, uint16_t>(m);
define_3x3_reduction<int, 9, 9, uint16_t>(m);
define_3x3_reduction<double, 9, 9, uint16_t>(m);
define_3x3_reduction<float, 9, 9, uint16_t>(m);
reduce_to_3x3<int, 3, 3, uint16_t>(m);
reduce_to_3x3<double, 3, 3, uint16_t>(m);
reduce_to_3x3<float, 3, 3, uint16_t>(m);
reduce_to_3x3<int, 5, 5, uint16_t>(m);
reduce_to_3x3<double, 5, 5, uint16_t>(m);
reduce_to_3x3<float, 5, 5, uint16_t>(m);
reduce_to_3x3<int, 7, 7, uint16_t>(m);
reduce_to_3x3<double, 7, 7, uint16_t>(m);
reduce_to_3x3<float, 7, 7, uint16_t>(m);
reduce_to_3x3<int, 9, 9, uint16_t>(m);
reduce_to_3x3<double, 9, 9, uint16_t>(m);
reduce_to_3x3<float, 9, 9, uint16_t>(m);
register_calculate_3x3eta<int, 3, 3, uint16_t>(m);
register_calculate_3x3eta<double, 3, 3, uint16_t>(m);
register_calculate_3x3eta<float, 3, 3, uint16_t>(m);
register_calculate_3x3eta<int16_t, 3, 3, uint16_t>(m);
using Sum_index_pair_d = Sum_index_pair<double, corner>;
PYBIND11_NUMPY_DTYPE(Sum_index_pair_d, sum, index);
using Sum_index_pair_f = Sum_index_pair<float, corner>;
PYBIND11_NUMPY_DTYPE(Sum_index_pair_f, sum, index);
using Sum_index_pair_i = Sum_index_pair<int, corner>;
PYBIND11_NUMPY_DTYPE(Sum_index_pair_i, sum, index);
using eta_d = Eta2<double>;
PYBIND11_NUMPY_DTYPE(eta_d, x, y, c, sum);
using eta_i = Eta2<int>;
PYBIND11_NUMPY_DTYPE(eta_i, x, y, c, sum);
using eta_f = Eta2<float>;
PYBIND11_NUMPY_DTYPE(eta_f, x, y, c, sum);
using eta_i16 = Eta2<int16_t>;
PYBIND11_NUMPY_DTYPE(eta_i16, x, y, c, sum);
define_corner_enum(m);
define_eta<float>(m, "f");
define_eta<double>(m, "d");
define_eta<int>(m, "i");
define_eta<int16_t>(m, "i16");
}


@@ -1,3 +1,4 @@
// SPDX-License-Identifier: MPL-2.0
#pragma once
#include <iostream>


@@ -1,3 +1,4 @@
// SPDX-License-Identifier: MPL-2.0
#include "aare/Pedestal.hpp"
#include "np_helper.hpp"


@@ -1,3 +1,4 @@
// SPDX-License-Identifier: MPL-2.0
#include "aare/PixelMap.hpp"
#include "np_helper.hpp"


@@ -1,3 +1,4 @@
// SPDX-License-Identifier: MPL-2.0
#include "aare/CtbRawFile.hpp"
#include "aare/File.hpp"
#include "aare/Frame.hpp"


@@ -1,3 +1,4 @@
// SPDX-License-Identifier: MPL-2.0
#include "aare/CtbRawFile.hpp"
#include "aare/File.hpp"
@@ -84,5 +85,21 @@ void define_raw_master_file_bindings(py::module &m) {
.def_property_readonly("quad", &RawMasterFile::quad)
.def_property_readonly("scan_parameters",
&RawMasterFile::scan_parameters)
.def_property_readonly("roi", &RawMasterFile::roi);
.def_property_readonly("roi", &RawMasterFile::roi)
.def_property_readonly(
"exptime",
[](RawMasterFile &self) -> std::optional<double> {
if (self.exptime()) {
double seconds =
std::chrono::duration<double>(*self.exptime()).count();
return seconds;
} else {
return std::nullopt;
}
})
.def_property_readonly("period", [](RawMasterFile &self) {
double seconds =
std::chrono::duration<double>(self.period()).count();
return seconds;
});
}
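A sketch of how the new properties read from Python, assuming the binding is exposed as _aare.RawMasterFile with its usual path constructor (file name is a placeholder):

from aare import _aare

m = _aare.RawMasterFile("run_master_0.json")
print(m.exptime)   # exposure time in seconds, or None if absent from the master file
print(m.period)    # frame period in seconds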


@@ -1,3 +1,4 @@
// SPDX-License-Identifier: MPL-2.0
#include "aare/CtbRawFile.hpp"
#include "aare/File.hpp"
#include "aare/Frame.hpp"


@@ -1,3 +1,4 @@
// SPDX-License-Identifier: MPL-2.0
#include "aare/VarClusterFinder.hpp"
#include "np_helper.hpp"
// #include "aare/defs.hpp"

File diff suppressed because one or more lines are too long


@@ -1,3 +1,4 @@
# SPDX-License-Identifier: MPL-2.0
import os
from pathlib import Path
import pytest


@@ -1,7 +1,9 @@
# SPDX-License-Identifier: MPL-2.0
import pytest
import numpy as np
from aare import _aare #import the C++ module
from aare import corner
from conftest import test_data_path
@@ -39,52 +41,65 @@ def test_Interpolator():
xbins = np.linspace(0, 5, 30, dtype=np.float64)
ybins = np.linspace(0, 5, 30, dtype=np.float64)
etacube = np.zeros(shape=[30, 30, 20], dtype=np.float64)
etacube = np.zeros(shape=[29, 29, 19], dtype=np.float64)
interpolator = _aare.Interpolator(etacube, xbins, ybins, ebins)
assert interpolator.get_ietax().shape == (30,30,20)
assert interpolator.get_ietay().shape == (30,30,20)
assert interpolator.get_ietax().shape == (29,29,19)
assert interpolator.get_ietay().shape == (29,29,19)
clustervector = _aare.ClusterVector_Cluster3x3i()
cluster = _aare.Cluster3x3i(0,0, np.ones(9, dtype=np.int32))
cluster = _aare.Cluster3x3i(1,1, np.ones(9, dtype=np.int32))
clustervector.push_back(cluster)
[u,v] = interpolator.transform_eta_values(_aare.Etai())
assert u == 0
assert v == 0
interpolated_photons = interpolator.interpolate(clustervector)
assert interpolated_photons.size == 1
assert interpolated_photons[0]["x"] == -1
assert interpolated_photons[0]["y"] == -1
assert interpolated_photons[0]["x"] == 0.5
assert interpolated_photons[0]["y"] == 0.5
assert interpolated_photons[0]["energy"] == 4 #eta_sum = 4, dx, dy = -1,-1 m_ietax = 0, m_ietay = 0
clustervector = _aare.ClusterVector_Cluster2x2i()
cluster = _aare.Cluster2x2i(0,0, np.ones(4, dtype=np.int32))
cluster = _aare.Cluster2x2i(1,1, np.ones(4, dtype=np.int32))
clustervector.push_back(cluster)
interpolated_photons = interpolator.interpolate(clustervector)
assert interpolated_photons.size == 1
assert interpolated_photons[0]["x"] == 0
assert interpolated_photons[0]["y"] == 0
assert interpolated_photons[0]["x"] == 0.5
assert interpolated_photons[0]["y"] == 0.5
assert interpolated_photons[0]["energy"] == 4
def test_calculate_eta():
"""Calculate Eta"""
clusters = _aare.ClusterVector_Cluster3x3i()
clusters.push_back(_aare.Cluster3x3i(0,0, np.ones(9, dtype=np.int32)))
clusters.push_back(_aare.Cluster3x3i(0,0, np.array([1,1,1,2,2,2,3,3,3])))
cluster = _aare.Cluster3x3i(0,0, np.ones(9, dtype=np.int32))
eta2 = _aare.calculate_eta2(clusters)
eta2 = _aare.calculate_eta2(cluster)
assert eta2.x == 0.5
assert eta2.y == 0.5
assert eta2.c == corner.cTopLeft
assert eta2.sum == 4
def test_max_sum():
"""Max 2x2 Sum"""
cluster = _aare.Cluster3x3i(5,5,np.array([1, 1, 1, 2, 3, 1, 2, 2, 1], dtype=np.int32))
max_sum = cluster.max_sum_2x2()
assert max_sum[0] == 9
assert max_sum[1] == 2
assert eta2.shape == (2,2)
assert eta2[0,0] == 0.5
assert eta2[0,1] == 0.5
assert eta2[1,0] == 0.5
assert eta2[1,1] == 0.6 #1/5
def test_cluster_finder():
"""Test ClusterFinder"""
@@ -101,6 +116,27 @@ def test_cluster_finder():
assert clusters.size == 0
def test_2x2_reduction():
"""Test 2x2 Reduction"""
cluster = _aare.Cluster3x3i(5,5,np.array([1, 1, 1, 2, 3, 1, 2, 2, 1], dtype=np.int32))
reduced_cluster = _aare.reduce_to_2x2(cluster)
assert reduced_cluster.x == 5
assert reduced_cluster.y == 5
assert (reduced_cluster.data == np.array([[2, 3], [2, 2]], dtype=np.int32)).all()
def test_3x3_reduction():
"""Test 3x3 Reduction"""
cluster = _aare.Cluster5x5d(5,5,np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 2.0, 2.0, 3.0,
1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], dtype=np.double))
reduced_cluster = _aare.reduce_to_3x3(cluster)
assert reduced_cluster.x == 5
assert reduced_cluster.y == 5
assert (reduced_cluster.data == np.array([[2.0, 1.0, 1.0], [2.0, 3.0, 1.0], [2.0, 1.0, 1.0]], dtype=np.double)).all()


@@ -1,3 +1,4 @@
# SPDX-License-Identifier: MPL-2.0
import pytest
import numpy as np


@@ -1,3 +1,4 @@
# SPDX-License-Identifier: MPL-2.0
import pytest
import numpy as np
import boost_histogram as bh
@@ -5,7 +6,7 @@ import time
from pathlib import Path
import pickle
from aare import ClusterFile
from aare import ClusterFile, ClusterVector, calculate_eta2
from aare import _aare
from conftest import test_data_path
@@ -32,6 +33,31 @@ def test_push_back_on_cluster_vector():
assert arr[0]['y'] == 22
def test_max_2x2_sum():
"""max_2x2_sum"""
cv = _aare.ClusterVector_Cluster3x3i()
cv.push_back(_aare.Cluster3x3i(19, 22, np.array([0,1,0,2,3,0,2,1,0], dtype=np.int32)))
cv.push_back(_aare.Cluster3x3i(19, 22, np.ones(9, dtype=np.int32)))
assert cv.size == 2
max_2x2 = cv.sum_2x2()
assert max_2x2.size == 2
assert max_2x2[0]["sum"] == 8
assert max_2x2[0]["index"] == 2
def test_eta2():
"""calculate eta2"""
cv = _aare.ClusterVector_Cluster3x3i()
cv.push_back(_aare.Cluster3x3i(19, 22, np.ones(9, dtype=np.int32)))
assert cv.size == 1
eta2 = calculate_eta2(cv)
assert eta2.size == 1
assert eta2[0]["x"] == 0.5
assert eta2[0]["y"] == 0.5
assert eta2[0]["c"] == 0
assert eta2[0]["sum"] == 4
def test_make_a_hitmap_from_cluster_vector():
cv = _aare.ClusterVector_Cluster3x3i()
@@ -51,4 +77,36 @@ def test_make_a_hitmap_from_cluster_vector():
# print(img)
# print(ref)
assert (img == ref).all()
def test_2x2_reduction():
cv = ClusterVector((3,3))
cv.push_back(_aare.Cluster3x3i(5, 5, np.array([1, 1, 1, 2, 3, 1, 2, 2, 1], dtype=np.int32)))
cv.push_back(_aare.Cluster3x3i(5, 5, np.array([2, 2, 1, 2, 3, 1, 1, 1, 1], dtype=np.int32)))
reduced_cv = np.array(_aare.reduce_to_2x2(cv), copy=False)
assert reduced_cv.size == 2
assert reduced_cv[0]["x"] == 5
assert reduced_cv[0]["y"] == 5
assert (reduced_cv[0]["data"] == np.array([[2, 3], [2, 2]], dtype=np.int32)).all()
assert reduced_cv[1]["x"] == 5
assert reduced_cv[1]["y"] == 5
assert (reduced_cv[1]["data"] == np.array([[2, 2], [2, 3]], dtype=np.int32)).all()
def test_3x3_reduction():
cv = _aare.ClusterVector_Cluster5x5d()
cv.push_back(_aare.Cluster5x5d(5,5,np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 2.0, 2.0, 3.0,
1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], dtype=np.double)))
cv.push_back(_aare.Cluster5x5d(5,5,np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 2.0, 2.0, 3.0,
1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], dtype=np.double)))
reduced_cv = np.array(_aare.reduce_to_3x3(cv), copy=False)
assert reduced_cv.size == 2
assert reduced_cv[0]["x"] == 5
assert reduced_cv[0]["y"] == 5
assert (reduced_cv[0]["data"] == np.array([[2.0, 1.0, 1.0], [2.0, 3.0, 1.0], [2.0, 1.0, 1.0]], dtype=np.double)).all()


@@ -1,3 +1,4 @@
# SPDX-License-Identifier: MPL-2.0
import pytest
from aare import RawFile
import numpy as np


@@ -1,3 +1,4 @@
# SPDX-License-Identifier: MPL-2.0
import pytest
import numpy as np
from aare import RawSubFile, DetectorType


@@ -1,3 +1,4 @@
# SPDX-License-Identifier: MPL-2.0
import pytest
import numpy as np

File diff suppressed because one or more lines are too long


@@ -0,0 +1,289 @@
# Test script for interpolation on simulated data
import pytest
import pytest_check as check
import numpy as np
import boost_histogram as bh
from aare import Interpolator, calculate_eta2, calculate_cross_eta3, calculate_full_eta2, calculate_eta3
from aare import ClusterFile
from conftest import test_data_path
## TODO: is there something like a test fixture setup/teardown in pytest?
def calculate_eta_distribution(cv, calculate_eta, edges_x=[-0.5,0.5], edges_y=[-0.5,0.5], nbins = 101):
energy_bins = bh.axis.Regular(1, 0, 16) # one bin spanning the min and max energy of the simulated photons
eta_distribution = bh.Histogram(
bh.axis.Regular(nbins, edges_x[0], edges_x[1]),
bh.axis.Regular(nbins, edges_y[0], edges_y[1]), energy_bins)
eta = calculate_eta(cv)
eta_distribution.fill(eta['x'], eta['y'], eta['sum'])
return eta_distribution
@pytest.fixture
def load_data(test_data_path):
"""Load simulated cluster data and ground truth positions"""
f = ClusterFile(test_data_path / "clust" / "simulated_clusters.clust", dtype=np.float64, mode="r")
cv = f.read_frame()
ground_truths = np.load(test_data_path / "interpolation/ground_truth_simulated.npy")
return cv, ground_truths
@pytest.mark.withdata
def test_eta2_interpolation(load_data, check):
"""Test eta2 interpolation on simulated data"""
cv, ground_truths = load_data
num_bins = 201
eta_distribution = calculate_eta_distribution(cv, calculate_eta2, edges_x=[-0.1,1.1], edges_y=[-0.1,1.1], nbins=num_bins)
interpolator = Interpolator(eta_distribution, eta_distribution.axes[0].edges, eta_distribution.axes[1].edges, eta_distribution.axes[2].edges)
assert interpolator.get_ietax().shape == (num_bins,num_bins,1)
assert interpolator.get_ietay().shape == (num_bins,num_bins,1)
interpolated_photons = interpolator.interpolate(cv)
assert interpolated_photons.size == cv.size
interpolated_photons["x"] += 1.0 # ground truth labels use 5x5 clusters
interpolated_photons["y"] += 1.0
residuals_interpolated_x = abs(ground_truths[:, 0] - interpolated_photons["x"])
residuals_interpolated_y = abs(ground_truths[:, 1] - interpolated_photons["y"])
"""
residuals_center_pixel_x = abs(ground_truths[:, 0] - 2.5)
residuals_center_pixel_y = abs(ground_truths[:, 1] - 2.5)
# interpolation needs to perform better than center pixel assignment - not true for photon close to the center
assert (residuals_interpolated_x < residuals_center_pixel_x).all()
assert (residuals_interpolated_y < residuals_center_pixel_y).all()
"""
# check within photon hit pixel for all
with check:
assert np.allclose(interpolated_photons["x"], ground_truths[:, 0], atol=5e-1)
with check:
assert np.allclose(interpolated_photons["y"], ground_truths[:, 1], atol=5e-1)
# check mean and std of residuals
with check:
assert residuals_interpolated_y.mean() <= 0.1
with check:
assert residuals_interpolated_x.mean() <= 0.1
with check:
assert residuals_interpolated_x.std() <= 0.05
with check:
assert residuals_interpolated_y.std() <= 0.05
@pytest.mark.withdata
def test_eta2_interpolation_rosenblatt(load_data, check):
"""Test eta2 interpolation on simulated data using Rosenblatt transform"""
cv, ground_truths = load_data
num_bins = 201
eta_distribution = calculate_eta_distribution(cv, calculate_eta2, edges_x=[-0.1,1.1], edges_y=[-0.1,1.1], nbins=num_bins)
interpolator = Interpolator(eta_distribution.axes[0].edges, eta_distribution.axes[1].edges, eta_distribution.axes[2].edges)
interpolator.rosenblatttransform(eta_distribution)
assert interpolator.get_ietax().shape == (num_bins,num_bins,1)
assert interpolator.get_ietay().shape == (num_bins,num_bins,1)
interpolated_photons = interpolator.interpolate(cv)
assert interpolated_photons.size == cv.size
interpolated_photons["x"] += 1.0 # ground truth labels use 5x5 clusters
interpolated_photons["y"] += 1.0
residuals_interpolated_x = abs(ground_truths[:, 0] - interpolated_photons["x"])
residuals_interpolated_y = abs(ground_truths[:, 1] - interpolated_photons["y"])
"""
residuals_center_pixel_x = abs(ground_truths[:, 0] - 2.5)
residuals_center_pixel_y = abs(ground_truths[:, 1] - 2.5)
# interpolation needs to perform better than center pixel assignment - not true for photon close to the center
assert (residuals_interpolated_x < residuals_center_pixel_x).all()
assert (residuals_interpolated_y < residuals_center_pixel_y).all()
"""
# check within photon hit pixel for all
with check:
assert np.allclose(interpolated_photons["x"], ground_truths[:, 0], atol=5e-1)
with check:
assert np.allclose(interpolated_photons["y"], ground_truths[:, 1], atol=5e-1)
# check mean and std of residuals
with check:
assert residuals_interpolated_y.mean() <= 0.1
with check:
assert residuals_interpolated_x.mean() <= 0.1
with check:
assert residuals_interpolated_x.std() <= 0.055 #performs slightly worse
with check:
assert residuals_interpolated_y.std() <= 0.055 #performs slightly worse
@pytest.mark.withdata
def test_cross_eta_interpolation(load_data, check):
"""Test cross eta interpolation on simulated data"""
cv, ground_truths = load_data
num_bins = 201
eta_distribution = calculate_eta_distribution(cv, calculate_cross_eta3, edges_x=[-0.5,0.5], edges_y=[-0.5,0.5], nbins=num_bins)
interpolator = Interpolator(eta_distribution, eta_distribution.axes[0].edges, eta_distribution.axes[1].edges, eta_distribution.axes[2].edges)
assert interpolator.get_ietax().shape == (num_bins,num_bins,1)
assert interpolator.get_ietay().shape == (num_bins,num_bins,1)
interpolated_photons = interpolator.interpolate_cross_eta3(cv)
assert interpolated_photons.size == cv.size
interpolated_photons["x"] += 1.0 # ground truth labels use 5x5 clusters
interpolated_photons["y"] += 1.0
residuals_interpolated_x = abs(ground_truths[:, 0] - interpolated_photons["x"])
residuals_interpolated_y = abs(ground_truths[:, 1] - interpolated_photons["y"])
"""
residuals_center_pixel_x = abs(ground_truths[:, 0] - 2.5)
residuals_center_pixel_y = abs(ground_truths[:, 1] - 2.5)
# interpolation needs to perform better than center pixel assignment - not true for photon close to the center
assert (residuals_interpolated_x < residuals_center_pixel_x).all()
assert (residuals_interpolated_y < residuals_center_pixel_y).all()
"""
# check within photon hit pixel for all
# TODO: fails as eta_x = 0, eta_y = 0 is not leading to offset (0.5,0.5)
with check:
assert np.allclose(interpolated_photons["x"], ground_truths[:, 0], atol=5e-1)
with check:
assert np.allclose(interpolated_photons["y"], ground_truths[:, 1], atol=5e-1)
# check mean and std of residuals
with check:
assert residuals_interpolated_y.mean() <= 0.1
with check:
assert residuals_interpolated_x.mean() <= 0.1
with check:
assert residuals_interpolated_x.std() <= 0.05
with check:
assert residuals_interpolated_y.std() <= 0.05
@pytest.mark.withdata
def test_eta3_interpolation(load_data, check):
"""Test eta3 interpolation on simulated data"""
cv, ground_truths = load_data
num_bins = 201
eta_distribution = calculate_eta_distribution(cv, calculate_eta3, edges_x=[-0.5,0.5], edges_y=[-0.5,0.5], nbins=num_bins)
interpolator = Interpolator(eta_distribution, eta_distribution.axes[0].edges, eta_distribution.axes[1].edges, eta_distribution.axes[2].edges)
assert interpolator.get_ietax().shape == (num_bins,num_bins,1)
assert interpolator.get_ietay().shape == (num_bins,num_bins,1)
interpolated_photons = interpolator.interpolate_eta3(cv)
assert interpolated_photons.size == cv.size
interpolated_photons["x"] += 1.0 # ground truth labels use 5x5 clusters
interpolated_photons["y"] += 1.0
residuals_interpolated_x = abs(ground_truths[:, 0] - interpolated_photons["x"])
residuals_interpolated_y = abs(ground_truths[:, 1] - interpolated_photons["y"])
"""
residuals_center_pixel_x = abs(ground_truths[:, 0] - 2.5)
residuals_center_pixel_y = abs(ground_truths[:, 1] - 2.5)
# interpolation needs to perform better than center pixel assignment - not true for photon close to the center
assert (residuals_interpolated_x < residuals_center_pixel_x).all()
assert (residuals_interpolated_y < residuals_center_pixel_y).all()
"""
# check within photon hit pixel for all
# TODO: fails as eta_x = 0, eta_y = 0 is not leading to offset (0.5,0.5)
with check:
assert np.allclose(interpolated_photons["x"], ground_truths[:, 0], atol=5e-1)
with check:
assert np.allclose(interpolated_photons["y"], ground_truths[:, 1], atol=5e-1)
# check mean and std of residuals
with check:
assert residuals_interpolated_y.mean() <= 0.1
with check:
assert residuals_interpolated_x.mean() <= 0.1
with check:
assert residuals_interpolated_x.std() <= 0.05
with check:
assert residuals_interpolated_y.std() <= 0.05
@pytest.mark.withdata
def test_full_eta2_interpolation(load_data, check):
"""Test full eta2 interpolation on simulated data"""
cv, ground_truths = load_data
num_bins = 201
eta_distribution = calculate_eta_distribution(cv, calculate_full_eta2, edges_x=[-0.1,1.1], edges_y=[-0.1,1.1], nbins=num_bins)
interpolator = Interpolator(eta_distribution, eta_distribution.axes[0].edges, eta_distribution.axes[1].edges, eta_distribution.axes[2].edges)
assert interpolator.get_ietax().shape == (num_bins,num_bins,1)
assert interpolator.get_ietay().shape == (num_bins,num_bins,1)
interpolated_photons = interpolator.interpolate_full_eta2(cv)
assert interpolated_photons.size == cv.size
interpolated_photons["x"] += 1.0 # ground truth labels use 5x5 clusters
interpolated_photons["y"] += 1.0
residuals_interpolated_x = abs(ground_truths[:, 0] - interpolated_photons["x"])
residuals_interpolated_y = abs(ground_truths[:, 1] - interpolated_photons["y"])
"""
residuals_center_pixel_x = abs(ground_truths[:, 0] - 2.5)
residuals_center_pixel_y = abs(ground_truths[:, 1] - 2.5)
# interpolation needs to perform better than center pixel assignment - not true for photon close to the center
assert (residuals_interpolated_x < residuals_center_pixel_x).all()
assert (residuals_interpolated_y < residuals_center_pixel_y).all()
"""
# check within photon hit pixel for all
with check:
assert np.allclose(interpolated_photons["x"], ground_truths[:, 0], atol=5e-1)
with check:
assert np.allclose(interpolated_photons["y"], ground_truths[:, 1], atol=5e-1)
# check mean and std of residuals
with check:
assert residuals_interpolated_y.mean() <= 0.1
with check:
assert residuals_interpolated_x.mean() <= 0.1
with check:
assert residuals_interpolated_x.std() <= 0.05
with check:
assert residuals_interpolated_y.std() <= 0.05


@@ -1,3 +1,4 @@
# SPDX-License-Identifier: MPL-2.0
import pytest
import numpy as np
from aare import JungfrauDataFile