Mirror of https://github.com/slsdetectorgroup/aare.git
test for interpolation with simulated normal energy distribution
@@ -44,14 +44,14 @@ void define_ClusterFile(py::module &m, const std::string &typestr) {
                 auto v = new ClusterVector<ClusterType>(self.read_frame());
                 return v;
             })
-        .def("set_roi", &ClusterFile<ClusterType>::set_roi,
-             py::arg("roi"))
+        .def("set_roi", &ClusterFile<ClusterType>::set_roi, py::arg("roi"))
         .def(
             "set_noise_map",
             [](ClusterFile<ClusterType> &self, py::array_t<int32_t> noise_map) {
                 auto view = make_view_2d(noise_map);
                 self.set_noise_map(view);
-            }, py::arg("noise_map"))
+            },
+            py::arg("noise_map"))

         .def("set_gain_map",
              [](ClusterFile<ClusterType> &self, py::array_t<double> gain_map) {
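For orientation (not part of the commit), a minimal Python sketch of how the bindings touched above could be exercised. Only read_frame, set_roi, set_noise_map and set_gain_map are taken from the code shown here; the generated class name ClusterFile_Cluster3x3i, the ROI construction and the file name are assumptions:

    import numpy as np
    from aare._aare import ClusterFile_Cluster3x3i   # assumed generated class name
    from aare._aare import ROI                       # assumed import location

    f = ClusterFile_Cluster3x3i("clusters.clust")    # hypothetical cluster file
    roi = ROI()                                      # assumed: default-constructed ROI
    roi.xmin, roi.xmax, roi.ymin, roi.ymax = 0, 255, 0, 255
    f.set_roi(roi)
    f.set_noise_map(np.zeros((256, 256), dtype=np.int32))   # int32 2D array, as in the binding
    f.set_gain_map(np.ones((256, 256), dtype=np.float64))   # double 2D array, as in the binding
    clusters = f.read_frame()                        # ClusterVector with the clusters of one frame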
@@ -84,11 +84,19 @@ template <typename Type, uint8_t CoordSizeX, uint8_t CoordSizeY,
           typename CoordType = uint16_t>
 void register_calculate_eta(py::module &m) {
     using ClusterType = Cluster<Type, CoordSizeX, CoordSizeY, CoordType>;

     m.def("calculate_eta2",
           [](const aare::ClusterVector<ClusterType> &clusters) {
               auto eta2 = new NDArray<double, 2>(calculate_eta2(clusters));
               return return_image_data(eta2);
           });
+
+    m.def("calculate_eta2", [](const aare::Cluster<Type, CoordSizeX, CoordSizeY,
+                               CoordType> &cluster) {
+        auto eta2 = calculate_eta2(cluster);
+        // TODO return proper eta class
+        return py::make_tuple(eta2.x, eta2.y, eta2.sum);
+    });
 }

 #pragma GCC diagnostic pop
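The new single-cluster overload added here is what the test file introduced below relies on. A minimal sketch of both call forms; the cluster values are arbitrary illustration data and the result shape of the ClusterVector overload is an assumption:

    import numpy as np
    from aare import calculate_eta2
    from aare._aare import Cluster3x3d, ClusterVector_Cluster3x3d

    cluster = Cluster3x3d(1, 1, np.array([0, 1, 0, 2, 5, 3, 0, 1, 0], dtype=np.float64))

    # New overload: one cluster in, a plain tuple out
    # (until a proper eta class exists, see the TODO above).
    eta_x, eta_y, eta_sum = calculate_eta2(cluster)

    # Existing overload: a whole ClusterVector in, a 2D double array out
    # (assumed one row per cluster with columns eta_x, eta_y).
    vec = ClusterVector_Cluster3x3d()
    vec.push_back(cluster)
    eta2 = calculate_eta2(vec)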
@@ -87,6 +87,8 @@ PYBIND11_MODULE(_aare, m) {
     DEFINE_CLUSTER_BINDINGS(double, 9, 9, uint16_t, d);
     DEFINE_CLUSTER_BINDINGS(float, 9, 9, uint16_t, f);

+    // DEFINE_CLUSTER_BINDINGS(double, 2, 1, uint16_t, d);
+
     define_3x3_reduction<int, 3, 3, uint16_t>(m);
     define_3x3_reduction<double, 3, 3, uint16_t>(m);
     define_3x3_reduction<float, 3, 3, uint16_t>(m);
@@ -53,8 +53,8 @@ def test_Interpolator():

     assert interpolated_photons.size == 1

-    assert interpolated_photons[0]["x"] == -1
-    assert interpolated_photons[0]["y"] == -1
+    assert interpolated_photons[0]["x"] == 0
+    assert interpolated_photons[0]["y"] == 0
     assert interpolated_photons[0]["energy"] == 4 #eta_sum = 4, dx, dy = -1,-1 m_ietax = 0, m_ietay = 0

     clustervector = _aare.ClusterVector_Cluster2x2i()
@@ -84,7 +84,7 @@ def test_calculate_eta():
     assert eta2[0,0] == 0.5
     assert eta2[0,1] == 0.5
     assert eta2[1,0] == 0.5
-    assert eta2[1,1] == 0.6 #1/5
+    assert eta2[1,1] == 0.4 #2/5

 def test_cluster_finder():
     """Test ClusterFinder"""
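The corrected expectation matches the fraction in the updated comment. A minimal arithmetic sketch, assuming eta is formed as one pixel sum over the total of the two competing pixels; the concrete cluster values sit earlier in test_calculate_eta and are not shown in this hunk:

    # Hypothetical values consistent with the "#2/5" comment, not taken from the test.
    numerator = 2.0
    competing = 3.0
    eta = numerator / (numerator + competing)
    assert eta == 0.4   # the old expected value 0.6 would correspond to 3/5, not 1/5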
python/tests/test_Interpolation.py (new file, 143 lines)
@@ -0,0 +1,143 @@
import pytest
import numpy as np
import boost_histogram as bh
import pickle
from scipy.stats import multivariate_normal

from aare import Interpolator, calculate_eta2
from aare._aare import ClusterVector_Cluster2x2d, Cluster2x2d, Cluster3x3d, ClusterVector_Cluster3x3d

from conftest import test_data_path

pixel_width = 1e-4
values = np.arange(0.5*pixel_width, 0.1, pixel_width)
num_pixels = values.size
X, Y = np.meshgrid(values, values)
data_points = np.stack([X.ravel(), Y.ravel()], axis=1)
variance = 10*pixel_width
covariance_matrix = np.array([[variance, 0],[0, variance]])

def create_photon_hit_with_gaussian_distribution(mean, covariance_matrix, data_points):
    gaussian = multivariate_normal(mean=mean, cov=covariance_matrix)
    probability_values = gaussian.pdf(data_points)
    return (probability_values.reshape(X.shape)).round() #python bindings only support frame types of uint16_t

def photon_hit_in_euclidean_space(cluster_center, pixels_per_superpixel, photon_hit):
    scaled_photon_hit_x = cluster_center - (1 - photon_hit[0][0])*0.5*pixels_per_superpixel*pixel_width
    scaled_photon_hit_y = cluster_center - (1 - photon_hit[0][1])*0.5*pixels_per_superpixel*pixel_width
    return (scaled_photon_hit_x, scaled_photon_hit_y)

def create_2x2cluster_from_frame(frame, pixels_per_superpixel):
    return Cluster2x2d(1, 1, np.array([frame[0:pixels_per_superpixel, 0:pixels_per_superpixel].sum(),
                                       frame[0:pixels_per_superpixel, pixels_per_superpixel:2*pixels_per_superpixel].sum(),
                                       frame[pixels_per_superpixel:2*pixels_per_superpixel, 0:pixels_per_superpixel].sum(),
                                       frame[pixels_per_superpixel:2*pixels_per_superpixel, pixels_per_superpixel:2*pixels_per_superpixel].sum()], dtype=np.float64))


def create_3x3cluster_from_frame(frame, pixels_per_superpixel):
    return Cluster3x3d(1, 1, np.array([frame[0:pixels_per_superpixel, 0:pixels_per_superpixel].sum(),
                                       frame[0:pixels_per_superpixel, pixels_per_superpixel:2*pixels_per_superpixel].sum(),
                                       frame[0:pixels_per_superpixel, 2*pixels_per_superpixel:3*pixels_per_superpixel].sum(),
                                       frame[pixels_per_superpixel:2*pixels_per_superpixel, 0:pixels_per_superpixel].sum(),
                                       frame[pixels_per_superpixel:2*pixels_per_superpixel, pixels_per_superpixel:2*pixels_per_superpixel].sum(),
                                       frame[pixels_per_superpixel:2*pixels_per_superpixel, 2*pixels_per_superpixel:3*pixels_per_superpixel].sum(),
                                       frame[2*pixels_per_superpixel:3*pixels_per_superpixel, 0:pixels_per_superpixel].sum(),
                                       frame[2*pixels_per_superpixel:3*pixels_per_superpixel, pixels_per_superpixel:2*pixels_per_superpixel].sum(),
                                       frame[2*pixels_per_superpixel:3*pixels_per_superpixel, 2*pixels_per_superpixel:3*pixels_per_superpixel].sum()], dtype=np.float64))


def calculate_eta_distribution(num_frames, pixels_per_superpixel, cluster_2x2 = True):
    hist = bh.Histogram(
        bh.axis.Regular(100, -0.2, 1.2),
        bh.axis.Regular(100, -0.2, 1.2), bh.axis.Regular(1, 0, num_pixels*num_pixels*1/(variance*2*np.pi)))

    for frame_index in range(0, num_frames):
        mean_x = np.random.uniform(pixels_per_superpixel*pixel_width, 2*pixels_per_superpixel*pixel_width)
        mean_y = np.random.uniform(pixels_per_superpixel*pixel_width, 2*pixels_per_superpixel*pixel_width)
        frame = create_photon_hit_with_gaussian_distribution(np.array([mean_x, mean_y]), variance, data_points)

        cluster = None

        if cluster_2x2:
            cluster = create_2x2cluster_from_frame(frame, pixels_per_superpixel)
        else:
            cluster = create_3x3cluster_from_frame(frame, pixels_per_superpixel)

        eta2 = calculate_eta2(cluster)
        hist.fill(eta2[0], eta2[1], eta2[2])

    return hist

@pytest.mark.withdata
def test_interpolation_of_2x2_cluster(test_data_path):
    """Test Interpolation of 2x2 cluster from Photon hit with Gaussian Distribution"""

    #TODO maybe better to compute in test instead of loading - depends on eta
    """
    filename = test_data_path/"eta_distributions"/"eta_distribution_2x2cluster_gaussian.pkl"
    with open(filename, "rb") as f:
        eta_distribution = pickle.load(f)
    """

    num_frames = 1000
    pixels_per_superpixel = int(num_pixels*0.5)
    eta_distribution = calculate_eta_distribution(num_frames, pixels_per_superpixel)

    interpolation = Interpolator(eta_distribution, eta_distribution.axes[0].edges[:-1], eta_distribution.axes[1].edges[:-1], eta_distribution.axes[2].edges[:-1])

    #actual photon hit
    mean = 1.2*pixels_per_superpixel*pixel_width
    mean = np.array([mean, mean])
    frame = create_photon_hit_with_gaussian_distribution(mean, covariance_matrix, data_points)
    cluster = create_2x2cluster_from_frame(frame, pixels_per_superpixel)

    clustervec = ClusterVector_Cluster2x2d()
    clustervec.push_back(cluster)

    interpolated_photon = interpolation.interpolate(clustervec)

    assert interpolated_photon.size == 1

    cluster_center = 1.5*pixels_per_superpixel*pixel_width

    scaled_photon_hit = photon_hit_in_euclidean_space(cluster_center, pixels_per_superpixel, interpolated_photon)

    assert (np.linalg.norm(scaled_photon_hit - mean) < np.linalg.norm(np.array([cluster_center, cluster_center] - mean)))


@pytest.mark.withdata
def test_interpolation_of_3x3_cluster(test_data_path):
    """Test Interpolation of 3x3 Cluster from Photon hit with Gaussian Distribution"""

    #TODO maybe better to compute in test instead of loading - depends on eta
    """
    filename = test_data_path/"eta_distributions"/"eta_distribution_3x3cluster_gaussian.pkl"
    with open(filename, "rb") as f:
        eta_distribution = pickle.load(f)
    """

    num_frames = 1000
    pixels_per_superpixel = int(num_pixels/3)
    eta_distribution = calculate_eta_distribution(num_frames, pixels_per_superpixel, False)

    interpolation = Interpolator(eta_distribution, eta_distribution.axes[0].edges[:-1], eta_distribution.axes[1].edges[:-1], eta_distribution.axes[2].edges[:-1])

    #actual photon hit
    mean = 1.2*pixels_per_superpixel*pixel_width
    mean = np.array([mean, mean])
    frame = create_photon_hit_with_gaussian_distribution(mean, covariance_matrix, data_points)
    cluster = create_3x3cluster_from_frame(frame, pixels_per_superpixel)

    clustervec = ClusterVector_Cluster3x3d()
    clustervec.push_back(cluster)

    interpolated_photon = interpolation.interpolate(clustervec)

    assert interpolated_photon.size == 1

    cluster_center = 1.5*pixels_per_superpixel*pixel_width

    scaled_photon_hit = photon_hit_in_euclidean_space(cluster_center, pixels_per_superpixel, interpolated_photon)

    assert (np.linalg.norm(scaled_photon_hit - mean) < np.linalg.norm(np.array([cluster_center, cluster_center] - mean)))
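Both tests keep a commented-out branch that loads a pre-computed eta distribution from a pickle file. A minimal sketch of how such a file could be produced with the helpers above, assuming the boost_histogram object is pickled directly; dump_eta_distribution is a hypothetical helper, not part of this commit:

    def dump_eta_distribution(path, num_frames=1000, cluster_2x2=True):
        # Build the eta distribution with the same helper the tests use and
        # serialize it so the commented-out loading branch could read it back.
        pixels_per_superpixel = int(num_pixels * 0.5) if cluster_2x2 else int(num_pixels / 3)
        hist = calculate_eta_distribution(num_frames, pixels_per_superpixel, cluster_2x2)
        with open(path, "wb") as f:
            pickle.dump(hist, f)

    # dump_eta_distribution("eta_distribution_2x2cluster_gaussian.pkl")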
python/tests/test_interpolation.ipynb (new file, 663 lines)
File diff suppressed because one or more lines are too long