WIP
@@ -108,6 +108,79 @@ ClusterVector<int32_t> ClusterFile::read_clusters(size_t n_clusters) {
    return clusters;
}

ClusterVector<int32_t> ClusterFile::read_clusters(size_t n_clusters, ROI roi) {
    if (m_mode != "r") {
        throw std::runtime_error("File not opened for reading");
    }

    ClusterVector<int32_t> clusters(3, 3);
    clusters.reserve(n_clusters);

    int32_t iframe = 0; // frame number needs to be 4 bytes!
    size_t nph_read = 0;
    uint32_t nn = m_num_left;
    uint32_t nph = m_num_left; // number of clusters in frame needs to be 4 bytes

    // auto buf = reinterpret_cast<Cluster3x3 *>(clusters.data());
    // auto buf = clusters.data();

    Cluster3x3 tmp; // this would break if the cluster size changes

    // if there are photons left from the previous frame, read them first
    if (nph) {
        if (nph > n_clusters) {
            // if there are more photons left in the frame than photons to
            // read, we read only the requested number
            nn = n_clusters;
        } else {
            nn = nph;
        }
        // Read one cluster at a time; push back only those inside the ROI
        // nph_read += fread((buf + nph_read*clusters.item_size()),
        //                   clusters.item_size(), nn, fp);
        for (size_t i = 0; i < nn; i++) {
            fread(&tmp, sizeof(tmp), 1, fp);
            if (tmp.x >= roi.xmin && tmp.x <= roi.xmax && tmp.y >= roi.ymin && tmp.y <= roi.ymax) {
                clusters.push_back(tmp.x, tmp.y, reinterpret_cast<std::byte *>(tmp.data));
                nph_read++;
            }
        }

        m_num_left = nph - nn; // write back the number of photons left
    }

    if (nph_read < n_clusters) {
        // keep on reading frames and photons until reaching n_clusters
        while (fread(&iframe, sizeof(iframe), 1, fp)) {
            // read number of clusters in frame
            if (fread(&nph, sizeof(nph), 1, fp)) {
                if (nph > (n_clusters - nph_read))
                    nn = n_clusters - nph_read;
                else
                    nn = nph;

                // nph_read += fread((buf + nph_read*clusters.item_size()),
                //                   clusters.item_size(), nn, fp);
                for (size_t i = 0; i < nn; i++) {
                    fread(&tmp, sizeof(tmp), 1, fp);
                    if (tmp.x >= roi.xmin && tmp.x <= roi.xmax && tmp.y >= roi.ymin && tmp.y <= roi.ymax) {
                        clusters.push_back(tmp.x, tmp.y, reinterpret_cast<std::byte *>(tmp.data));
                        nph_read++;
                    }
                }
                m_num_left = nph - nn;
            }
            if (nph_read >= n_clusters)
                break;
        }
    }

    // Resize the vector to the number of clusters.
    // No new allocation, only change bounds.
    clusters.resize(nph_read);
    return clusters;
}
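For reference, a minimal standalone sketch of the inclusive containment test used in both read loops above. RoiBounds and in_roi are hypothetical names introduced for illustration; the real ROI type is defined elsewhere in the library, and only the field names xmin/xmax/ymin/ymax are taken from the checks in this commit.

#include <cstdint>

// Hypothetical stand-in for the library's ROI type; field names follow the
// checks used in read_clusters above.
struct RoiBounds {
    int xmin, xmax, ymin, ymax;
};

// Inclusive containment test on both axes, matching the condition used when
// pushing clusters into the ClusterVector.
inline bool in_roi(std::int16_t x, std::int16_t y, const RoiBounds &roi) {
    return x >= roi.xmin && x <= roi.xmax && y >= roi.ymin && y <= roi.ymax;
}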

ClusterVector<int32_t> ClusterFile::read_frame() {
    if (m_mode != "r") {
        throw std::runtime_error("File not opened for reading");
@@ -268,11 +341,23 @@ ClusterVector<int32_t> ClusterFile::read_frame() {

NDArray<double, 2> calculate_eta2(ClusterVector<int> &clusters) {
    //TODO! make work with 2x2 clusters
    NDArray<double, 2> eta2({static_cast<int64_t>(clusters.size()), 2});
    for (size_t i = 0; i < clusters.size(); i++) {
        auto e = calculate_eta2(clusters.at<Cluster3x3>(i));
        eta2(i, 0) = e.x;
        eta2(i, 1) = e.y;

    if (clusters.cluster_size_x() == 3 || clusters.cluster_size_y() == 3) {
        for (size_t i = 0; i < clusters.size(); i++) {
            auto e = calculate_eta2(clusters.at<Cluster3x3>(i));
            eta2(i, 0) = e.x;
            eta2(i, 1) = e.y;
        }
    } else if (clusters.cluster_size_x() == 2 || clusters.cluster_size_y() == 2) {
        for (size_t i = 0; i < clusters.size(); i++) {
            auto e = calculate_eta2(clusters.at<Cluster2x2>(i));
            eta2(i, 0) = e.x;
            eta2(i, 1) = e.y;
        }
    } else {
        throw std::runtime_error("Only 3x3 and 2x2 clusters are supported");
    }

    return eta2;
}

@@ -290,7 +375,7 @@ Eta2 calculate_eta2(Cluster3x3 &cl) {
    tot2[3] = cl.data[4] + cl.data[5] + cl.data[7] + cl.data[8];

    auto c = std::max_element(tot2.begin(), tot2.end()) - tot2.begin();

    eta.sum = tot2[c];
    switch (c) {
    case cBottomLeft:
        if ((cl.data[3] + cl.data[4]) != 0)
@@ -333,6 +418,19 @@ Eta2 calculate_eta2(Cluster3x3 &cl) {
    return eta;
}


Eta2 calculate_eta2(Cluster2x2 &cl) {
    Eta2 eta{};

    eta.x = static_cast<double>(cl.data[0]) / (cl.data[0] + cl.data[1]);
    eta.y = static_cast<double>(cl.data[0]) / (cl.data[0] + cl.data[2]);
    eta.sum = cl.data[0] + cl.data[1] + cl.data[2] + cl.data[3];
    eta.c = cBottomLeft; // TODO! This is not correct
    return eta;
}
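As a sanity check on the 2x2 formula above, here is a small standalone worked example using a plain array in place of Cluster2x2. The pixel ordering (index 0 = bottom-left, 1 = bottom-right, 2 = top-left, 3 = top-right) is an assumption made only for this illustration.

#include <cstdio>

int main() {
    // Illustrative charge values; index order assumed as noted above.
    int data[4] = {40, 10, 30, 20};
    double eta_x = static_cast<double>(data[0]) / (data[0] + data[1]); // 40 / 50 = 0.8
    double eta_y = static_cast<double>(data[0]) / (data[0] + data[2]); // 40 / 70 ~ 0.571
    int sum = data[0] + data[1] + data[2] + data[3];                   // 100
    std::printf("eta_x=%.3f eta_y=%.3f sum=%d\n", eta_x, eta_y, sum);
    return 0;
}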


int analyze_cluster(Cluster3x3 &cl, int32_t *t2, int32_t *t3, char *quad,
                    double *eta2x, double *eta2y, double *eta3x,
                    double *eta3y) {
src/Interpolator.cpp (new file, 159 lines)
@@ -0,0 +1,159 @@
#include "aare/Interpolator.hpp"

namespace aare {

Interpolator::Interpolator(NDView<double, 3> etacube, NDView<double, 1> xbins,
                           NDView<double, 1> ybins, NDView<double, 1> ebins)
    : m_ietax(etacube), m_ietay(etacube), m_etabinsx(xbins), m_etabinsy(ybins), m_energy_bins(ebins) {
    if (etacube.shape(0) != xbins.size() || etacube.shape(1) != ybins.size() ||
        etacube.shape(2) != ebins.size()) {
        throw std::invalid_argument(
            "The shape of the etacube does not match the shape of the bins");
    }

    // Cumulative sum in the x direction, can maybe be combined with a copy?
    for (ssize_t k = 0; k < m_ietax.shape(2); k++) {
        for (ssize_t j = 0; j < m_ietax.shape(1); j++) {
            for (ssize_t i = 1; i < m_ietax.shape(0); i++) {
                m_ietax(i, j, k) += m_ietax(i - 1, j, k);
            }
        }
    }

    // Normalize by the highest row; if the norm is less than 1, don't do anything
    for (ssize_t k = 0; k < m_ietax.shape(2); k++) {
        for (ssize_t j = 0; j < m_ietax.shape(1); j++) {
            auto val = m_ietax(m_ietax.shape(0) - 1, j, k);
            double norm = val < 1 ? 1 : val;
            for (ssize_t i = 0; i < m_ietax.shape(0); i++) {
                m_ietax(i, j, k) /= norm;
            }
        }
    }

    // Cumulative sum in the y direction
    for (ssize_t k = 0; k < m_ietay.shape(2); k++) {
        for (ssize_t i = 0; i < m_ietay.shape(0); i++) {
            for (ssize_t j = 1; j < m_ietay.shape(1); j++) {
                m_ietay(i, j, k) += m_ietay(i, j - 1, k);
            }
        }
    }

    // Normalize by the highest column; if the norm is less than 1, don't do anything
    for (ssize_t k = 0; k < m_ietay.shape(2); k++) {
        for (ssize_t i = 0; i < m_ietay.shape(0); i++) {
            auto val = m_ietay(i, m_ietay.shape(1) - 1, k);
            double norm = val < 1 ? 1 : val;
            for (ssize_t j = 0; j < m_ietay.shape(1); j++) {
                m_ietay(i, j, k) /= norm;
            }
        }
    }

}
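The two cumulative-sum passes above turn each 1D eta histogram (for a fixed index on the other axis and a fixed energy bin) into a CDF normalized to its last element, with the norm clamped to 1 so nearly empty rows or columns are left unscaled. A minimal standalone sketch of that per-row transform, using std::vector instead of NDView (names are illustrative, not library API):

#include <algorithm>
#include <cstddef>
#include <vector>

// Cumulative sum followed by normalization to the last element (clamped to 1),
// mirroring what the constructor does along one axis of m_ietax / m_ietay.
std::vector<double> to_cdf(std::vector<double> h) {
    for (std::size_t i = 1; i < h.size(); ++i)
        h[i] += h[i - 1];
    const double norm = h.empty() ? 1.0 : std::max(h.back(), 1.0);
    for (double &v : h)
        v /= norm;
    return h;
}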

std::vector<Photon> Interpolator::interpolate(const ClusterVector<int32_t> &clusters) {
    std::vector<Photon> photons;
    photons.reserve(clusters.size());

    if (clusters.cluster_size_x() == 3 || clusters.cluster_size_y() == 3) {
        for (size_t i = 0; i < clusters.size(); i++) {

            auto cluster = clusters.at<Cluster3x3>(i);
            Eta2 eta = calculate_eta2(cluster);

            Photon photon;
            photon.x = cluster.x;
            photon.y = cluster.y;
            photon.energy = eta.sum;

            // Now do some actual interpolation.
            // Find which energy bin the cluster is in
            // TODO! Could we use boost-histogram Axis.index here?
            ssize_t idx = std::lower_bound(m_energy_bins.begin(), m_energy_bins.end(), photon.energy) - m_energy_bins.begin();
            auto ix = std::lower_bound(m_etabinsx.begin(), m_etabinsx.end(), eta.x) - m_etabinsx.begin();
            auto iy = std::lower_bound(m_etabinsy.begin(), m_etabinsy.end(), eta.y) - m_etabinsy.begin();

            // ibx=(np.abs(etabinsx - ex)).argmin() #Find out which bin the eta should land in
            // iby=(np.abs(etabinsy - ey)).argmin()
            double dX, dY;
            int ex, ey;
            // cBottomLeft = 0,
            // cBottomRight = 1,
            // cTopLeft = 2,
            // cTopRight = 3
            switch (eta.c) {
            case cTopLeft:
                dX = -1.;
                dY = 0;
                break;
            case cTopRight:
                dX = 0;
                dY = 0;
                break;
            case cBottomLeft:
                dX = -1.;
                dY = -1.;
                break;
            case cBottomRight:
                dX = 0;
                dY = -1.;
                break;
            }
            photon.x += m_ietax(ix, iy, idx) + dX + 0.5;
            photon.y += m_ietay(ix, iy, idx) + dY + 0.5;

            // fmt::print("x: {}, y: {}, energy: {}\n", photon.x, photon.y, photon.energy);
            photons.push_back(photon);
        }
    } else if (clusters.cluster_size_x() == 2 || clusters.cluster_size_y() == 2) {
        // TODO! Implement 2x2 interpolation
        for (size_t i = 0; i < clusters.size(); i++) {

            auto cluster = clusters.at<Cluster2x2>(i);
            Eta2 eta = calculate_eta2(cluster);

            Photon photon;
            photon.x = cluster.x;
            photon.y = cluster.y;
            photon.energy = eta.sum;

            // Now do some actual interpolation.
            // Find which energy bin the cluster is in
            // TODO! Could we use boost-histogram Axis.index here?
            ssize_t idx = std::lower_bound(m_energy_bins.begin(), m_energy_bins.end(), photon.energy) - m_energy_bins.begin();
            // auto ix = std::lower_bound(m_etabinsx.begin(), m_etabinsx.end(), eta.x) - m_etabinsx.begin();
            // auto iy = std::lower_bound(m_etabinsy.begin(), m_etabinsy.end(), eta.y) - m_etabinsy.begin();
            // if(ix<0) ix=0;
            // if(iy<0) iy=0;

            auto find_index = [](NDArray<double, 1> &etabins, double val) {
                auto iter = std::min_element(etabins.begin(), etabins.end(),
                                             [val, etabins](double a, double b) {
                                                 return std::abs(a - val) < std::abs(b - val);
                                             });
                return std::distance(etabins.begin(), iter);
            };
            auto ix = find_index(m_etabinsx, eta.x) - 1;
            auto iy = find_index(m_etabinsy, eta.y) - 1;

            photon.x += (1 - m_ietax(ix, iy, idx)) * 2; // eta goes between 0 and 1 but we could move the hit anywhere in the 2x2
            photon.y += (1 - m_ietay(ix, iy, idx)) * 2;

            // photon.x = ix;
            // photon.y = iy;
            photons.push_back(photon);
        }

    } else {
        throw std::runtime_error("Only 3x3 and 2x2 clusters are supported for interpolation");
    }

    return photons;
}
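Note the two different bin lookups above: the 3x3 branch uses std::lower_bound on the bin edges, while the 2x2 branch picks the bin whose value is nearest to the eta (std::min_element over the absolute difference) and then subtracts one. A standalone sketch of that nearest-bin lookup, written against std::vector so it compiles on its own (hypothetical helper, not part of the commit):

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

// Index of the bin whose value is closest to val; the lambda in the 2x2 branch
// does the same thing over an NDArray<double, 1>.
std::ptrdiff_t nearest_bin(const std::vector<double> &bins, double val) {
    auto it = std::min_element(bins.begin(), bins.end(),
                               [val](double a, double b) {
                                   return std::abs(a - val) < std::abs(b - val);
                               });
    return std::distance(bins.begin(), it);
}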

} // namespace aare
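A hedged usage sketch of the new Interpolator, based only on the signatures visible in this commit (construction from NDView eta distributions and bin views, interpolate() over a ClusterVector<int32_t> returning std::vector<Photon>). The header name aare/ClusterVector.hpp, the bin values, the Shape/NDView construction details and the cluster contents are assumptions for illustration, not something this commit guarantees.

// Hypothetical usage sketch, not part of this commit.
#include "aare/ClusterVector.hpp" // assumed header name
#include "aare/Interpolator.hpp"
#include <cstddef>
#include <cstdint>
#include <vector>

int main() {
    using namespace aare;

    // Made-up, flat eta distribution: 10 x-bins, 10 y-bins, 1 energy bin.
    std::vector<double> cube(10 * 10 * 1, 1.0);
    std::vector<double> xb(10), yb(10), eb(1, 0.0);
    for (int i = 0; i < 10; ++i)
        xb[i] = yb[i] = i / 10.0;

    Interpolator interp(NDView<double, 3>(cube.data(), Shape<3>{10, 10, 1}),
                        NDView<double, 1>(xb.data(), Shape<1>{10}),
                        NDView<double, 1>(yb.data(), Shape<1>{10}),
                        NDView<double, 1>(eb.data(), Shape<1>{1}));

    // One 3x3 cluster at pixel (5, 7); push_back takes raw bytes, as in ClusterFile above.
    ClusterVector<int32_t> clusters(3, 3);
    int32_t data[9] = {1, 2, 1, 2, 10, 3, 1, 3, 2};
    clusters.push_back(5, 7, reinterpret_cast<std::byte *>(data));

    auto photons = interp.interpolate(clusters);
    // photons[0].x, photons[0].y and photons[0].energy hold the interpolated hit.
    return 0;
}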
@@ -2,6 +2,7 @@
#include <array>
#include <catch2/benchmark/catch_benchmark.hpp>
#include <catch2/catch_test_macros.hpp>
#include <numeric>

using aare::NDArray;
using aare::NDView;
@@ -34,6 +35,24 @@ TEST_CASE("Construct from an NDView") {
    }
}

TEST_CASE("3D NDArray from NDView") {
    std::vector<int> data(27);
    std::iota(data.begin(), data.end(), 0);
    NDView<int, 3> view(data.data(), Shape<3>{3, 3, 3});
    NDArray<int, 3> image(view);
    REQUIRE(image.shape() == view.shape());
    REQUIRE(image.size() == view.size());
    REQUIRE(image.data() != view.data());

    for (int64_t i = 0; i < image.shape(0); i++) {
        for (int64_t j = 0; j < image.shape(1); j++) {
            for (int64_t k = 0; k < image.shape(2); k++) {
                REQUIRE(image(i, j, k) == view(i, j, k));
            }
        }
    }
}

TEST_CASE("1D image") {
    std::array<int64_t, 1> shape{{20}};
    NDArray<short, 1> img(shape, 3);