Mirror of https://github.com/slsdetectorgroup/aare.git (synced 2025-04-30 18:30:03 +02:00)

Merge branch 'developer' into fit_scurve
Commit: d1292a4c1e
@@ -39,7 +39,7 @@ set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake" ${CMAKE_MODULE_PATH})
 
 
 # General options
-option(AARE_PYTHON_BINDINGS "Build python bindings" ON)
+option(AARE_PYTHON_BINDINGS "Build python bindings" OFF)
 option(AARE_TESTS "Build tests" OFF)
 option(AARE_BENCHMARKS "Build benchmarks" OFF)
 option(AARE_EXAMPLES "Build examples" OFF)
@@ -340,6 +340,8 @@ endif()
 
 set(PUBLICHEADERS
     include/aare/ArrayExpr.hpp
+    include/aare/CalculateEta.hpp
+    include/aare/Cluster.hpp
     include/aare/ClusterFinder.hpp
     include/aare/ClusterFile.hpp
     include/aare/CtbRawFile.hpp
@@ -352,6 +354,7 @@ set(PUBLICHEADERS
     include/aare/FileInterface.hpp
     include/aare/FilePtr.hpp
     include/aare/Frame.hpp
+    include/aare/GainMap.hpp
     include/aare/geo_helpers.hpp
     include/aare/JungfrauDataFile.hpp
     include/aare/NDArray.hpp
@@ -365,13 +368,11 @@ set(PUBLICHEADERS
     include/aare/RawSubFile.hpp
     include/aare/VarClusterFinder.hpp
     include/aare/utils/task.hpp
-
 )
 
 
 set(SourceFiles
     ${CMAKE_CURRENT_SOURCE_DIR}/src/CtbRawFile.cpp
-    ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFile.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/defs.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/Dtype.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/decode.cpp
@@ -393,15 +394,12 @@ set(SourceFiles
     ${CMAKE_CURRENT_SOURCE_DIR}/src/utils/ifstream_helpers.cpp
 )
 
-
 add_library(aare_core STATIC ${SourceFiles})
 target_include_directories(aare_core PUBLIC
     "$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>"
     "$<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>"
 )
 
-
-
 target_link_libraries(
     aare_core
     PUBLIC
@@ -410,7 +408,7 @@ target_link_libraries(
     ${STD_FS_LIB} # from helpers.cmake
     PRIVATE
     aare_compiler_flags
-    "$<BUILD_INTERFACE:lmfit>"
+    $<BUILD_INTERFACE:lmfit>
 
 )
 
@@ -436,7 +434,10 @@ if(AARE_TESTS)
     ${CMAKE_CURRENT_SOURCE_DIR}/src/NDView.test.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFinder.test.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterVector.test.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/src/Cluster.test.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/src/CalculateEta.test.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFile.test.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFinderMT.test.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/Pedestal.test.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/JungfrauDataFile.test.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.test.cpp
@@ -1,11 +1,27 @@
-find_package(benchmark REQUIRED)
 
-add_executable(ndarray_benchmark ndarray_benchmark.cpp)
+include(FetchContent)
 
-target_link_libraries(ndarray_benchmark benchmark::benchmark aare_core aare_compiler_flags)
-# target_link_libraries(tests PRIVATE aare_core aare_compiler_flags)
 
-set_target_properties(ndarray_benchmark PROPERTIES
-    RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}
-    # OUTPUT_NAME run_tests
+FetchContent_Declare(
+    benchmark
+    GIT_REPOSITORY https://github.com/google/benchmark.git
+    GIT_TAG v1.8.3 # Change to the latest version if needed
+)
+
+# Ensure Google Benchmark is built correctly
+set(BENCHMARK_ENABLE_TESTING OFF CACHE BOOL "" FORCE)
+
+FetchContent_MakeAvailable(benchmark)
+
+add_executable(benchmarks)
+
+target_sources(benchmarks PRIVATE ndarray_benchmark.cpp calculateeta_benchmark.cpp)
+
+# Link Google Benchmark and other necessary libraries
+target_link_libraries(benchmarks PRIVATE benchmark::benchmark aare_core aare_compiler_flags)
+
+# Set output properties
+set_target_properties(benchmarks PROPERTIES
+    RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}
+    OUTPUT_NAME run_benchmarks
 )
benchmarks/calculateeta_benchmark.cpp (new file, +70 lines)
@@ -0,0 +1,70 @@
+#include "aare/CalculateEta.hpp"
+#include "aare/ClusterFile.hpp"
+#include <benchmark/benchmark.h>
+
+using namespace aare;
+
+class ClusterFixture : public benchmark::Fixture {
+  public:
+    Cluster<int, 2, 2> cluster_2x2{};
+    Cluster<int, 3, 3> cluster_3x3{};
+
+  private:
+    using benchmark::Fixture::SetUp;
+
+    void SetUp([[maybe_unused]] const benchmark::State &state) override {
+        int temp_data[4] = {1, 2, 3, 1};
+        std::copy(std::begin(temp_data), std::end(temp_data),
+                  std::begin(cluster_2x2.data));
+
+        cluster_2x2.x = 0;
+        cluster_2x2.y = 0;
+
+        int temp_data2[9] = {1, 2, 3, 1, 3, 4, 5, 1, 20};
+        std::copy(std::begin(temp_data2), std::end(temp_data2),
+                  std::begin(cluster_3x3.data));
+
+        cluster_3x3.x = 0;
+        cluster_3x3.y = 0;
+    }
+
+    // void TearDown(::benchmark::State& state) {
+    // }
+};
+
+BENCHMARK_F(ClusterFixture, Calculate2x2Eta)(benchmark::State &st) {
+    for (auto _ : st) {
+        // This code gets timed
+        Eta2 eta = calculate_eta2(cluster_2x2);
+        benchmark::DoNotOptimize(eta);
+    }
+}
+
+// almost takes double the time
+BENCHMARK_F(ClusterFixture,
+            CalculateGeneralEtaFor2x2Cluster)(benchmark::State &st) {
+    for (auto _ : st) {
+        // This code gets timed
+        Eta2 eta = calculate_eta2<int, 2, 2>(cluster_2x2);
+        benchmark::DoNotOptimize(eta);
+    }
+}
+
+BENCHMARK_F(ClusterFixture, Calculate3x3Eta)(benchmark::State &st) {
+    for (auto _ : st) {
+        // This code gets timed
+        Eta2 eta = calculate_eta2(cluster_3x3);
+        benchmark::DoNotOptimize(eta);
+    }
+}
+
+// almost takes double the time
+BENCHMARK_F(ClusterFixture,
+            CalculateGeneralEtaFor3x3Cluster)(benchmark::State &st) {
+    for (auto _ : st) {
+        // This code gets timed
+        Eta2 eta = calculate_eta2<int, 3, 3>(cluster_3x3);
+        benchmark::DoNotOptimize(eta);
+    }
+}
+// BENCHMARK_MAIN();
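Note that the file above only registers fixtures; BENCHMARK_MAIN() is left commented out, so the run_benchmarks executable presumably gets its entry point elsewhere. Purely as an illustration (not part of this commit), a main() of the kind Google Benchmark expects would look like this:

    // Hypothetical entry point for the benchmarks target; the commit itself
    // keeps BENCHMARK_MAIN() commented out, so this is only a sketch.
    #include <benchmark/benchmark.h>

    int main(int argc, char **argv) {
        benchmark::Initialize(&argc, argv);  // parse --benchmark_* flags
        benchmark::RunSpecifiedBenchmarks(); // run everything registered via BENCHMARK_F
        benchmark::Shutdown();
        return 0;
    }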
include/aare/CalculateEta.hpp (new file, +170 lines)
@@ -0,0 +1,170 @@
+#pragma once
+
+#include "aare/Cluster.hpp"
+#include "aare/ClusterVector.hpp"
+#include "aare/NDArray.hpp"
+
+namespace aare {
+
+enum class corner : int {
+    cBottomLeft = 0,
+    cBottomRight = 1,
+    cTopLeft = 2,
+    cTopRight = 3
+};
+
+enum class pixel : int {
+    pBottomLeft = 0,
+    pBottom = 1,
+    pBottomRight = 2,
+    pLeft = 3,
+    pCenter = 4,
+    pRight = 5,
+    pTopLeft = 6,
+    pTop = 7,
+    pTopRight = 8
+};
+
+template <typename T> struct Eta2 {
+    double x;
+    double y;
+    int c;
+    T sum;
+};
+
+/**
+ * @brief Calculate the eta2 values for all clusters in a Clustervector
+ */
+template <typename ClusterType,
+          typename = std::enable_if_t<is_cluster_v<ClusterType>>>
+NDArray<double, 2> calculate_eta2(const ClusterVector<ClusterType> &clusters) {
+    NDArray<double, 2> eta2({static_cast<int64_t>(clusters.size()), 2});
+
+    for (size_t i = 0; i < clusters.size(); i++) {
+        auto e = calculate_eta2(clusters[i]);
+        eta2(i, 0) = e.x;
+        eta2(i, 1) = e.y;
+    }
+
+    return eta2;
+}
+
+/**
+ * @brief Calculate the eta2 values for a generic sized cluster and return them
+ * in a Eta2 struct containing etay, etax and the index of the respective 2x2
+ * subcluster.
+ */
+template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
+          typename CoordType>
+Eta2<T>
+calculate_eta2(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cl) {
+    Eta2<T> eta{};
+
+    auto max_sum = cl.max_sum_2x2();
+    eta.sum = max_sum.first;
+    auto c = max_sum.second;
+
+    size_t cluster_center_index =
+        (ClusterSizeX / 2) + (ClusterSizeY / 2) * ClusterSizeX;
+
+    size_t index_bottom_left_max_2x2_subcluster =
+        (int(c / (ClusterSizeX - 1))) * ClusterSizeX + c % (ClusterSizeX - 1);
+
+    // check that cluster center is in max subcluster
+    if (cluster_center_index != index_bottom_left_max_2x2_subcluster &&
+        cluster_center_index != index_bottom_left_max_2x2_subcluster + 1 &&
+        cluster_center_index !=
+            index_bottom_left_max_2x2_subcluster + ClusterSizeX &&
+        cluster_center_index !=
+            index_bottom_left_max_2x2_subcluster + ClusterSizeX + 1)
+        throw std::runtime_error("Photon center is not in max 2x2_subcluster");
+
+    if ((cluster_center_index - index_bottom_left_max_2x2_subcluster) %
+            ClusterSizeX ==
+        0) {
+        if ((cl.data[cluster_center_index + 1] +
+             cl.data[cluster_center_index]) != 0)
+
+            eta.x = static_cast<double>(cl.data[cluster_center_index + 1]) /
+                    static_cast<double>((cl.data[cluster_center_index + 1] +
+                                         cl.data[cluster_center_index]));
+    } else {
+        if ((cl.data[cluster_center_index] +
+             cl.data[cluster_center_index - 1]) != 0)
+
+            eta.x = static_cast<double>(cl.data[cluster_center_index]) /
+                    static_cast<double>((cl.data[cluster_center_index - 1] +
+                                         cl.data[cluster_center_index]));
+    }
+    if ((cluster_center_index - index_bottom_left_max_2x2_subcluster) /
+            ClusterSizeX <
+        1) {
+        assert(cluster_center_index + ClusterSizeX <
+               ClusterSizeX * ClusterSizeY); // suppress warning
+        if ((cl.data[cluster_center_index] +
+             cl.data[cluster_center_index + ClusterSizeX]) != 0)
+            eta.y = static_cast<double>(
+                        cl.data[cluster_center_index + ClusterSizeX]) /
+                    static_cast<double>(
+                        (cl.data[cluster_center_index] +
+                         cl.data[cluster_center_index + ClusterSizeX]));
+    } else {
+        if ((cl.data[cluster_center_index] +
+             cl.data[cluster_center_index - ClusterSizeX]) != 0)
+            eta.y = static_cast<double>(cl.data[cluster_center_index]) /
+                    static_cast<double>(
+                        (cl.data[cluster_center_index] +
+                         cl.data[cluster_center_index - ClusterSizeX]));
+    }
+
+    eta.c = c; // TODO only supported for 2x2 and 3x3 clusters -> at least no
+               // underyling enum class
+    return eta;
+}
+
+// TODO! Look up eta2 calculation - photon center should be top right corner
+template <typename T>
+Eta2<T> calculate_eta2(const Cluster<T, 2, 2, int16_t> &cl) {
+    Eta2<T> eta{};
+
+    if ((cl.data[0] + cl.data[1]) != 0)
+        eta.x = static_cast<double>(cl.data[1]) / (cl.data[0] + cl.data[1]);
+    if ((cl.data[0] + cl.data[2]) != 0)
+        eta.y = static_cast<double>(cl.data[2]) / (cl.data[0] + cl.data[2]);
+    eta.sum = cl.sum();
+    eta.c = static_cast<int>(corner::cBottomLeft); // TODO! This is not correct,
+                                                   // but need to put something
+    return eta;
+}
+
+// calculates Eta3 for 3x3 cluster based on code from analyze_cluster
+// TODO only supported for 3x3 Clusters
+template <typename T> Eta2<T> calculate_eta3(const Cluster<T, 3, 3> &cl) {
+
+    Eta2<T> eta{};
+
+    T sum = 0;
+
+    std::for_each(std::begin(cl.data), std::end(cl.data),
+                  [&sum](T x) { sum += x; });
+
+    eta.sum = sum;
+
+    eta.c = corner::cBottomLeft;
+
+    if ((cl.data[3] + cl.data[4] + cl.data[5]) != 0)
+
+        eta.x = static_cast<double>(-cl.data[3] + cl.data[3 + 2]) /
+
+                (cl.data[3] + cl.data[4] + cl.data[5]);
+
+    if ((cl.data[1] + cl.data[4] + cl.data[7]) != 0)
+
+        eta.y = static_cast<double>(-cl.data[1] + cl.data[2 * 3 + 1]) /
+
+                (cl.data[1] + cl.data[4] + cl.data[7]);
+
+    return eta;
+}
+
+} // namespace aare
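As a quick sanity check on the 2x2 specialization above (an illustration, not part of the commit): for the same data the benchmark fixture uses, {1, 2, 3, 1}, it yields eta.x = data[1] / (data[0] + data[1]) = 2/3 and eta.y = data[2] / (data[0] + data[2]) = 3/4, with sum = 7.

    // Illustration only: exercises calculate_eta2 on a 2x2 cluster.
    #include "aare/CalculateEta.hpp"
    #include <cassert>
    #include <cmath>

    using namespace aare;

    int main() {
        Cluster<int, 2, 2> cl{};
        cl.data = {1, 2, 3, 1};        // same values as the benchmark fixture
        auto eta = calculate_eta2(cl); // resolves to the 2x2 specialization
        assert(std::abs(eta.x - 2.0 / 3.0) < 1e-12); // 2 / (1 + 2)
        assert(std::abs(eta.y - 3.0 / 4.0) < 1e-12); // 3 / (1 + 3)
        assert(eta.sum == 7);
        return 0;
    }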
@@ -1,36 +1,86 @@
+/************************************************
+ * @file Cluster.hpp
+ * @short definition of cluster, where CoordType (x,y) give
+ * the cluster center coordinates and data the actual cluster data
+ * cluster size is given as template parameters
+ ***********************************************/
+
 #pragma once
 
 #include <algorithm>
 #include <array>
-#include <cstddef>
 #include <cstdint>
 #include <numeric>
+#include <type_traits>
 
 namespace aare {
 
-//TODO! Template this?
-struct Cluster3x3 {
-    int16_t x;
-    int16_t y;
-    int32_t data[9];
-
-    int32_t sum_2x2() const{
-        std::array<int32_t, 4> total;
-        total[0] = data[0] + data[1] + data[3] + data[4];
-        total[1] = data[1] + data[2] + data[4] + data[5];
-        total[2] = data[3] + data[4] + data[6] + data[7];
-        total[3] = data[4] + data[5] + data[7] + data[8];
-        return *std::max_element(total.begin(), total.end());
+// requires clause c++20 maybe update
+template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
+          typename CoordType = int16_t>
+struct Cluster {
+
+    static_assert(std::is_arithmetic_v<T>, "T needs to be an arithmetic type");
+    static_assert(std::is_integral_v<CoordType>,
+                  "CoordType needs to be an integral type");
+    static_assert(ClusterSizeX > 0 && ClusterSizeY > 0,
+                  "Cluster sizes must be bigger than zero");
+
+    CoordType x;
+    CoordType y;
+    std::array<T, ClusterSizeX * ClusterSizeY> data;
+
+    static constexpr uint8_t cluster_size_x = ClusterSizeX;
+    static constexpr uint8_t cluster_size_y = ClusterSizeY;
+    using value_type = T;
+    using coord_type = CoordType;
+
+    T sum() const { return std::accumulate(data.begin(), data.end(), T{}); }
+
+    std::pair<T, int> max_sum_2x2() const {
+
+        if constexpr (cluster_size_x == 3 && cluster_size_y == 3) {
+            std::array<T, 4> sum_2x2_subclusters;
+            sum_2x2_subclusters[0] = data[0] + data[1] + data[3] + data[4];
+            sum_2x2_subclusters[1] = data[1] + data[2] + data[4] + data[5];
+            sum_2x2_subclusters[2] = data[3] + data[4] + data[6] + data[7];
+            sum_2x2_subclusters[3] = data[4] + data[5] + data[7] + data[8];
+            int index = std::max_element(sum_2x2_subclusters.begin(),
+                                         sum_2x2_subclusters.end()) -
+                        sum_2x2_subclusters.begin();
+            return std::make_pair(sum_2x2_subclusters[index], index);
+        } else if constexpr (cluster_size_x == 2 && cluster_size_y == 2) {
+            return std::make_pair(data[0] + data[1] + data[2] + data[3], 0);
+        } else {
+            constexpr size_t num_2x2_subclusters =
+                (ClusterSizeX - 1) * (ClusterSizeY - 1);
+
+            std::array<T, num_2x2_subclusters> sum_2x2_subcluster;
+            for (size_t i = 0; i < ClusterSizeY - 1; ++i) {
+                for (size_t j = 0; j < ClusterSizeX - 1; ++j)
+                    sum_2x2_subcluster[i * (ClusterSizeX - 1) + j] =
+                        data[i * ClusterSizeX + j] +
+                        data[i * ClusterSizeX + j + 1] +
+                        data[(i + 1) * ClusterSizeX + j] +
+                        data[(i + 1) * ClusterSizeX + j + 1];
+            }
+
+            int index = std::max_element(sum_2x2_subcluster.begin(),
+                                         sum_2x2_subcluster.end()) -
+                        sum_2x2_subcluster.begin();
+            return std::make_pair(sum_2x2_subcluster[index], index);
+        }
     }
-
-    int32_t sum() const{
-        return std::accumulate(data, data + 9, 0);
-    }
 };
-struct Cluster2x2 {
-    int16_t x;
-    int16_t y;
-    int32_t data[4];
-};
+
+// Type Traits for is_cluster_type
+template <typename T>
+struct is_cluster : std::false_type {}; // Default case: Not a Cluster
+
+template <typename T, uint8_t X, uint8_t Y, typename CoordType>
+struct is_cluster<Cluster<T, X, Y, CoordType>> : std::true_type {}; // Cluster
+
+template <typename T> constexpr bool is_cluster_v = is_cluster<T>::value;
+
 } // namespace aare
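A minimal illustration of the templated Cluster introduced above (not part of the commit; it just exercises the new members and the is_cluster trait):

    // Illustration only: Cluster<T, X, Y> members and the is_cluster_v trait.
    #include "aare/Cluster.hpp"
    #include <iostream>

    using namespace aare;

    static_assert(is_cluster_v<Cluster<int32_t, 3, 3>>, "trait accepts clusters");
    static_assert(!is_cluster_v<int>, "trait rejects everything else");

    int main() {
        Cluster<int32_t, 3, 3> cl{};
        cl.x = 10;
        cl.y = 20;
        cl.data = {1, 2, 3, 1, 3, 4, 5, 1, 20};

        auto [max2x2, index] = cl.max_sum_2x2(); // highest of the four 2x2 sums
        std::cout << "sum=" << cl.sum()          // 40
                  << " max2x2=" << max2x2        // 3 + 4 + 1 + 20 = 28
                  << " index=" << index << '\n'; // 3
        return 0;
    }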
@@ -2,29 +2,31 @@
 #include <atomic>
 #include <thread>
 
-#include "aare/ProducerConsumerQueue.hpp"
-#include "aare/ClusterVector.hpp"
 #include "aare/ClusterFinderMT.hpp"
+#include "aare/ClusterVector.hpp"
+#include "aare/ProducerConsumerQueue.hpp"
 
 namespace aare {
 
-class ClusterCollector{
-    ProducerConsumerQueue<ClusterVector<int>>* m_source;
+template <typename ClusterType,
+          typename = std::enable_if_t<is_cluster_v<ClusterType>>>
+class ClusterCollector {
+    ProducerConsumerQueue<ClusterVector<ClusterType>> *m_source;
     std::atomic<bool> m_stop_requested{false};
     std::atomic<bool> m_stopped{true};
     std::chrono::milliseconds m_default_wait{1};
     std::thread m_thread;
-    std::vector<ClusterVector<int>> m_clusters;
+    std::vector<ClusterVector<ClusterType>> m_clusters;
 
-    void process(){
+    void process() {
         m_stopped = false;
         fmt::print("ClusterCollector started\n");
         while (!m_stop_requested || !m_source->isEmpty()) {
-            if (ClusterVector<int> *clusters = m_source->frontPtr();
+            if (ClusterVector<ClusterType> *clusters = m_source->frontPtr();
                 clusters != nullptr) {
                 m_clusters.push_back(std::move(*clusters));
                 m_source->popFront();
-            }else{
+            } else {
                 std::this_thread::sleep_for(m_default_wait);
             }
         }
@@ -33,16 +35,20 @@ class ClusterCollector{
     }
 
   public:
-    ClusterCollector(ClusterFinderMT<uint16_t, double, int32_t>* source){
+    ClusterCollector(ClusterFinderMT<ClusterType, uint16_t, double> *source) {
         m_source = source->sink();
-        m_thread = std::thread(&ClusterCollector::process, this);
+        m_thread =
+            std::thread(&ClusterCollector::process,
+                        this); // only one process does that so why isnt it
+                               // automatically written to m_cluster in collect
+                               // - instead of writing first to m_sink?
     }
-    void stop(){
+    void stop() {
         m_stop_requested = true;
         m_thread.join();
     }
-    std::vector<ClusterVector<int>> steal_clusters(){
-        if(!m_stopped){
+    std::vector<ClusterVector<ClusterType>> steal_clusters() {
+        if (!m_stopped) {
             throw std::runtime_error("ClusterCollector is still running");
         }
         return std::move(m_clusters);
@@ -2,6 +2,7 @@
 
 #include "aare/Cluster.hpp"
 #include "aare/ClusterVector.hpp"
+#include "aare/GainMap.hpp"
 #include "aare/NDArray.hpp"
 #include "aare/defs.hpp"
 #include <filesystem>
@@ -10,43 +11,18 @@
 
 namespace aare {
 
-//TODO! Legacy enums, migrate to enum class
-typedef enum {
-    cBottomLeft = 0,
-    cBottomRight = 1,
-    cTopLeft = 2,
-    cTopRight = 3
-} corner;
-
-typedef enum {
-    pBottomLeft = 0,
-    pBottom = 1,
-    pBottomRight = 2,
-    pLeft = 3,
-    pCenter = 4,
-    pRight = 5,
-    pTopLeft = 6,
-    pTop = 7,
-    pTopRight = 8
-} pixel;
-
-struct Eta2 {
-    double x;
-    double y;
-    corner c;
-    int32_t sum;
-};
-
-struct ClusterAnalysis {
-    uint32_t c;
-    int32_t tot;
-    double etax;
-    double etay;
-};
-
-
-
+/*
+Binary cluster file. Expects data to be layed out as:
+int32_t frame_number
+uint32_t number_of_clusters
+int16_t x, int16_t y, int32_t data[9] x number_of_clusters
+int32_t frame_number
+uint32_t number_of_clusters
+....
+*/
+
+// TODO: change to support any type of clusters, e.g. header line with
+// clsuter_size_x, cluster_size_y,
+
 /**
  * @brief Class to read and write cluster files
  * Expects data to be laid out as:
@@ -59,14 +35,19 @@ struct ClusterAnalysis {
  * uint32_t number_of_clusters
  * etc.
  */
+template <typename ClusterType,
+          typename Enable = std::enable_if_t<is_cluster_v<ClusterType>>>
 class ClusterFile {
     FILE *fp{};
+    const std::string m_filename{};
     uint32_t m_num_left{};    /*Number of photons left in frame*/
     size_t m_chunk_size{};    /*Number of clusters to read at a time*/
-    const std::string m_mode; /*Mode to open the file in*/
+    std::string m_mode;       /*Mode to open the file in*/
     std::optional<ROI> m_roi; /*Region of interest, will be applied if set*/
-    std::optional<NDArray<int32_t, 2>> m_noise_map; /*Noise map to cut photons, will be applied if set*/
-    std::optional<NDArray<double, 2>> m_gain_map; /*Gain map to apply to the clusters, will be applied if set*/
+    std::optional<NDArray<int32_t, 2>>
+        m_noise_map; /*Noise map to cut photons, will be applied if set*/
+    std::optional<InvertedGainMap> m_gain_map; /*Gain map to apply to the
+                                                  clusters, will be applied if set*/
 
   public:
     /**
@@ -79,30 +60,81 @@ class ClusterFile {
      * @throws std::runtime_error if the file could not be opened
      */
     ClusterFile(const std::filesystem::path &fname, size_t chunk_size = 1000,
-                const std::string &mode = "r");
+                const std::string &mode = "r")
+
+        : m_filename(fname.string()), m_chunk_size(chunk_size), m_mode(mode) {
 
-    ~ClusterFile();
+        if (mode == "r") {
+            fp = fopen(m_filename.c_str(), "rb");
+            if (!fp) {
+                throw std::runtime_error("Could not open file for reading: " +
+                                         m_filename);
+            }
+        } else if (mode == "w") {
+            fp = fopen(m_filename.c_str(), "wb");
+            if (!fp) {
+                throw std::runtime_error("Could not open file for writing: " +
+                                         m_filename);
+            }
+        } else if (mode == "a") {
+            fp = fopen(m_filename.c_str(), "ab");
+            if (!fp) {
+                throw std::runtime_error("Could not open file for appending: " +
+                                         m_filename);
+            }
+        } else {
+            throw std::runtime_error("Unsupported mode: " + mode);
+        }
+    }
+
+    ~ClusterFile() { close(); }
 
     /**
-     * @brief Read n_clusters clusters from the file discarding frame numbers.
-     * If EOF is reached the returned vector will have less than n_clusters
-     * clusters
+     * @brief Read n_clusters clusters from the file discarding
+     * frame numbers. If EOF is reached the returned vector will
+     * have less than n_clusters clusters
      */
-    ClusterVector<int32_t> read_clusters(size_t n_clusters);
-
-    ClusterVector<int32_t> read_clusters(size_t n_clusters, ROI roi);
+    ClusterVector<ClusterType> read_clusters(size_t n_clusters) {
+        if (m_mode != "r") {
+            throw std::runtime_error("File not opened for reading");
+        }
+        if (m_noise_map || m_roi) {
+            return read_clusters_with_cut(n_clusters);
+        } else {
+            return read_clusters_without_cut(n_clusters);
+        }
+    }
 
     /**
-     * @brief Read a single frame from the file and return the clusters. The
-     * cluster vector will have the frame number set.
-     * @throws std::runtime_error if the file is not opened for reading or the file pointer not
-     * at the beginning of a frame
+     * @brief Read a single frame from the file and return the
+     * clusters. The cluster vector will have the frame number
+     * set.
+     * @throws std::runtime_error if the file is not opened for
+     * reading or the file pointer not at the beginning of a
+     * frame
      */
-    ClusterVector<int32_t> read_frame();
+    ClusterVector<ClusterType> read_frame() {
+        if (m_mode != "r") {
+            throw std::runtime_error(LOCATION + "File not opened for reading");
+        }
+        if (m_noise_map || m_roi) {
+            return read_frame_with_cut();
+        } else {
+            return read_frame_without_cut();
+        }
+    }
 
-    void write_frame(const ClusterVector<int32_t> &clusters);
+    void write_frame(const ClusterVector<ClusterType> &clusters) {
+        if (m_mode != "w" && m_mode != "a") {
+            throw std::runtime_error("File not opened for writing");
+        }
+
+        int32_t frame_number = clusters.frame_number();
+        fwrite(&frame_number, sizeof(frame_number), 1, fp);
+        uint32_t n_clusters = clusters.size();
+        fwrite(&n_clusters, sizeof(n_clusters), 1, fp);
+        fwrite(clusters.data(), clusters.item_size(), clusters.size(), fp);
+    }
 
     /**
      * @brief Return the chunk size
@@ -110,43 +142,308 @@ class ClusterFile {
     size_t chunk_size() const { return m_chunk_size; }
 
     /**
-     * @brief Set the region of interest to use when reading clusters. If set only clusters within
-     * the ROI will be read.
+     * @brief Set the region of interest to use when reading
+     * clusters. If set only clusters within the ROI will be
+     * read.
      */
-    void set_roi(ROI roi);
+    void set_roi(ROI roi) { m_roi = roi; }
 
     /**
-     * @brief Set the noise map to use when reading clusters. If set clusters below the noise
-     * level will be discarded. Selection criteria one of: Central pixel above noise, highest
-     * 2x2 sum above 2 * noise, total sum above 3 * noise.
+     * @brief Set the noise map to use when reading clusters. If
+     * set clusters below the noise level will be discarded.
+     * Selection criteria one of: Central pixel above noise,
+     * highest 2x2 sum above 2 * noise, total sum above 3 *
+     * noise.
      */
-    void set_noise_map(const NDView<int32_t, 2> noise_map);
+    void set_noise_map(const NDView<int32_t, 2> noise_map) {
+        m_noise_map = NDArray<int32_t, 2>(noise_map);
+    }
 
     /**
-     * @brief Set the gain map to use when reading clusters. If set the gain map will be applied
-     * to the clusters that pass ROI and noise_map selection. The gain map is expected to be in ADU/energy.
+     * @brief Set the gain map to use when reading clusters. If set the gain map
+     * will be applied to the clusters that pass ROI and noise_map selection.
+     * The gain map is expected to be in ADU/energy.
      */
-    void set_gain_map(const NDView<double, 2> gain_map);
+    void set_gain_map(const NDView<double, 2> gain_map) {
+        m_gain_map = InvertedGainMap(gain_map);
+    }
+
+    void set_gain_map(const InvertedGainMap &gain_map) {
+        m_gain_map = gain_map;
+    }
+
+    void set_gain_map(const InvertedGainMap &&gain_map) {
+        m_gain_map = gain_map;
+    }
 
     /**
-     * @brief Close the file. If not closed the file will be closed in the destructor
+     * @brief Close the file. If not closed the file will be
+     * closed in the destructor
      */
-    void close();
+    void close() {
+        if (fp) {
+            fclose(fp);
+            fp = nullptr;
+        }
+    }
+
+    /** @brief Open the file in specific mode
+     *
+     */
+    void open(const std::string &mode) {
+        if (fp) {
+            close();
+        }
+
+        if (mode == "r") {
+            fp = fopen(m_filename.c_str(), "rb");
+            if (!fp) {
+                throw std::runtime_error("Could not open file for reading: " +
+                                         m_filename);
+            }
+            m_mode = "r";
+        } else if (mode == "w") {
+            fp = fopen(m_filename.c_str(), "wb");
+            if (!fp) {
+                throw std::runtime_error("Could not open file for writing: " +
+                                         m_filename);
+            }
+            m_mode = "w";
+        } else if (mode == "a") {
+            fp = fopen(m_filename.c_str(), "ab");
+            if (!fp) {
+                throw std::runtime_error("Could not open file for appending: " +
+                                         m_filename);
+            }
+            m_mode = "a";
+        } else {
+            throw std::runtime_error("Unsupported mode: " + mode);
+        }
+    }
 
   private:
-    ClusterVector<int32_t> read_clusters_with_cut(size_t n_clusters);
-    ClusterVector<int32_t> read_clusters_without_cut(size_t n_clusters);
-    ClusterVector<int32_t> read_frame_with_cut();
-    ClusterVector<int32_t> read_frame_without_cut();
-    bool is_selected(Cluster3x3 &cl);
-    Cluster3x3 read_one_cluster();
+    ClusterVector<ClusterType> read_clusters_with_cut(size_t n_clusters);
+    ClusterVector<ClusterType> read_clusters_without_cut(size_t n_clusters);
+    ClusterVector<ClusterType> read_frame_with_cut();
+    ClusterVector<ClusterType> read_frame_without_cut();
+    bool is_selected(ClusterType &cl);
+    ClusterType read_one_cluster();
 };
 
-//TODO! helper functions that doesn't really belong here
-NDArray<double, 2> calculate_eta2(ClusterVector<int> &clusters);
-Eta2 calculate_eta2(Cluster3x3 &cl);
-Eta2 calculate_eta2(Cluster2x2 &cl);
+template <typename ClusterType, typename Enable>
+ClusterVector<ClusterType>
+ClusterFile<ClusterType, Enable>::read_clusters_without_cut(size_t n_clusters) {
+    if (m_mode != "r") {
+        throw std::runtime_error("File not opened for reading");
+    }
+
+    ClusterVector<ClusterType> clusters(n_clusters);
+    clusters.resize(n_clusters);
+
+    int32_t iframe = 0; // frame number needs to be 4 bytes!
+    size_t nph_read = 0;
+    uint32_t nn = m_num_left;
+    uint32_t nph = m_num_left; // number of clusters in frame needs to be 4
+
+    auto buf = clusters.data();
+    // if there are photons left from previous frame read them first
+    if (nph) {
+        if (nph > n_clusters) {
+            // if we have more photons left in the frame then photons to
+            // read we read directly the requested number
+            nn = n_clusters;
+        } else {
+            nn = nph;
+        }
+        nph_read += fread((buf + nph_read), clusters.item_size(), nn, fp);
+        m_num_left = nph - nn; // write back the number of photons left
+    }
+
+    if (nph_read < n_clusters) {
+        // keep on reading frames and photons until reaching n_clusters
+        while (fread(&iframe, sizeof(iframe), 1, fp)) {
+            clusters.set_frame_number(iframe);
+            // read number of clusters in frame
+            if (fread(&nph, sizeof(nph), 1, fp)) {
+                if (nph > (n_clusters - nph_read))
+                    nn = n_clusters - nph_read;
+                else
+                    nn = nph;
+
+                nph_read +=
+                    fread((buf + nph_read), clusters.item_size(), nn, fp);
+                m_num_left = nph - nn;
+            }
+            if (nph_read >= n_clusters)
+                break;
+        }
+    }
+
+    // Resize the vector to the number o f clusters.
+    // No new allocation, only change bounds.
+    clusters.resize(nph_read);
+    if (m_gain_map)
+        m_gain_map->apply_gain_map(clusters);
+    return clusters;
+}
+
+template <typename ClusterType, typename Enable>
+ClusterVector<ClusterType>
+ClusterFile<ClusterType, Enable>::read_clusters_with_cut(size_t n_clusters) {
+    ClusterVector<ClusterType> clusters;
+    clusters.reserve(n_clusters);
+
+    // if there are photons left from previous frame read them first
+    if (m_num_left) {
+        while (m_num_left && clusters.size() < n_clusters) {
+            ClusterType c = read_one_cluster();
+            if (is_selected(c)) {
+                clusters.push_back(c);
+            }
+        }
+    }
+
+    // we did not have enough clusters left in the previous frame
+    // keep on reading frames until reaching n_clusters
+    if (clusters.size() < n_clusters) {
+        // sanity check
+        if (m_num_left) {
+            throw std::runtime_error(
+                LOCATION + "Entered second loop with clusters left\n");
+        }
+
+        int32_t frame_number = 0; // frame number needs to be 4 bytes!
+        while (fread(&frame_number, sizeof(frame_number), 1, fp)) {
+            if (fread(&m_num_left, sizeof(m_num_left), 1, fp)) {
+                clusters.set_frame_number(
+                    frame_number); // cluster vector will hold the last
+                                   // frame number
+                while (m_num_left && clusters.size() < n_clusters) {
+                    ClusterType c = read_one_cluster();
+                    if (is_selected(c)) {
+                        clusters.push_back(c);
+                    }
+                }
+            }
+
+            // we have enough clusters, break out of the outer while loop
+            if (clusters.size() >= n_clusters)
+                break;
+        }
+    }
+    if (m_gain_map)
+        m_gain_map->apply_gain_map(clusters);
+
+    return clusters;
+}
+
+template <typename ClusterType, typename Enable>
+ClusterType ClusterFile<ClusterType, Enable>::read_one_cluster() {
+    ClusterType c;
+    auto rc = fread(&c, sizeof(c), 1, fp);
+    if (rc != 1) {
+        throw std::runtime_error(LOCATION + "Could not read cluster");
+    }
+    --m_num_left;
+    return c;
+}
+
+template <typename ClusterType, typename Enable>
+ClusterVector<ClusterType>
+ClusterFile<ClusterType, Enable>::read_frame_without_cut() {
+    if (m_mode != "r") {
+        throw std::runtime_error("File not opened for reading");
+    }
+    if (m_num_left) {
+        throw std::runtime_error(
+            "There are still photons left in the last frame");
+    }
+    int32_t frame_number;
+    if (fread(&frame_number, sizeof(frame_number), 1, fp) != 1) {
+        throw std::runtime_error(LOCATION + "Could not read frame number");
+    }
+
+    int32_t n_clusters; // Saved as 32bit integer in the cluster file
+    if (fread(&n_clusters, sizeof(n_clusters), 1, fp) != 1) {
+        throw std::runtime_error(LOCATION +
+                                 "Could not read number of clusters");
+    }
+
+    ClusterVector<ClusterType> clusters(n_clusters);
+    clusters.set_frame_number(frame_number);
+
+    clusters.resize(n_clusters);
+
+    if (fread(clusters.data(), clusters.item_size(), n_clusters, fp) !=
+        static_cast<size_t>(n_clusters)) {
+        throw std::runtime_error(LOCATION + "Could not read clusters");
+    }
+
+    if (m_gain_map)
+        m_gain_map->apply_gain_map(clusters);
+    return clusters;
+}
+
+template <typename ClusterType, typename Enable>
+ClusterVector<ClusterType>
+ClusterFile<ClusterType, Enable>::read_frame_with_cut() {
+    if (m_mode != "r") {
+        throw std::runtime_error("File not opened for reading");
+    }
+    if (m_num_left) {
+        throw std::runtime_error(
+            "There are still photons left in the last frame");
+    }
+    int32_t frame_number;
+    if (fread(&frame_number, sizeof(frame_number), 1, fp) != 1) {
+        throw std::runtime_error("Could not read frame number");
+    }
+
+    if (fread(&m_num_left, sizeof(m_num_left), 1, fp) != 1) {
+        throw std::runtime_error("Could not read number of clusters");
+    }
+
+    ClusterVector<ClusterType> clusters;
+    clusters.reserve(m_num_left);
+    clusters.set_frame_number(frame_number);
+    while (m_num_left) {
+        ClusterType c = read_one_cluster();
+        if (is_selected(c)) {
+            clusters.push_back(c);
+        }
+    }
+    if (m_gain_map)
+        m_gain_map->apply_gain_map(clusters);
+    return clusters;
+}
+
+template <typename ClusterType, typename Enable>
+bool ClusterFile<ClusterType, Enable>::is_selected(ClusterType &cl) {
+    // Should fail fast
+    if (m_roi) {
+        if (!(m_roi->contains(cl.x, cl.y))) {
+            return false;
+        }
+    }
+
+    size_t cluster_center_index =
+        (ClusterType::cluster_size_x / 2) +
+        (ClusterType::cluster_size_y / 2) * ClusterType::cluster_size_x;
+
+    if (m_noise_map) {
+        auto sum_1x1 = cl.data[cluster_center_index]; // central pixel
+        auto sum_2x2 = cl.max_sum_2x2().first; // highest sum of 2x2 subclusters
+        auto total_sum = cl.sum();             // sum of all pixels
+
+        auto noise =
+            (*m_noise_map)(cl.y, cl.x); // TODO! check if this is correct
+        if (sum_1x1 <= noise || sum_2x2 <= 2 * noise ||
+            total_sum <= 3 * noise) {
+            return false;
+        }
+    }
+    // we passed all checks
+    return true;
+}
+
 } // namespace aare
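A short usage sketch of the now header-only, templated ClusterFile (illustration only; the file name is made up and the ROI member names are assumed to follow aare's defs):

    // Illustration only: read 3x3 int32_t clusters with an ROI cut applied.
    #include "aare/ClusterFile.hpp"
    #include <iostream>

    using namespace aare;

    void read_some_clusters() {
        using Cluster3x3i = Cluster<int32_t, 3, 3>;
        ClusterFile<Cluster3x3i> f("clusters.clust", /*chunk_size=*/1000, "r");

        ROI roi; // assumed layout: xmin/xmax/ymin/ymax
        roi.xmin = 0;
        roi.xmax = 511;
        roi.ymin = 0;
        roi.ymax = 511;
        f.set_roi(roi); // clusters outside the ROI are skipped

        auto clusters = f.read_clusters(1000); // fewer are returned at EOF
        std::cout << "read " << clusters.size() << " clusters from frame "
                  << clusters.frame_number() << '\n';
    }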
@@ -3,35 +3,41 @@
 #include <filesystem>
 #include <thread>
 
-#include "aare/ProducerConsumerQueue.hpp"
-#include "aare/ClusterVector.hpp"
 #include "aare/ClusterFinderMT.hpp"
+#include "aare/ClusterVector.hpp"
+#include "aare/ProducerConsumerQueue.hpp"
 
-namespace aare{
+namespace aare {
 
-class ClusterFileSink{
-    ProducerConsumerQueue<ClusterVector<int>>* m_source;
+template <typename ClusterType,
+          typename = std::enable_if_t<is_cluster_v<ClusterType>>>
+class ClusterFileSink {
+    ProducerConsumerQueue<ClusterVector<ClusterType>> *m_source;
     std::atomic<bool> m_stop_requested{false};
     std::atomic<bool> m_stopped{true};
     std::chrono::milliseconds m_default_wait{1};
     std::thread m_thread;
    std::ofstream m_file;
 
-    void process(){
+    void process() {
         m_stopped = false;
         fmt::print("ClusterFileSink started\n");
         while (!m_stop_requested || !m_source->isEmpty()) {
-            if (ClusterVector<int> *clusters = m_source->frontPtr();
+            if (ClusterVector<ClusterType> *clusters = m_source->frontPtr();
                 clusters != nullptr) {
                 // Write clusters to file
-                int32_t frame_number = clusters->frame_number(); //TODO! Should we store frame number already as int?
+                int32_t frame_number =
+                    clusters->frame_number(); // TODO! Should we store frame
+                                              // number already as int?
                 uint32_t num_clusters = clusters->size();
-                m_file.write(reinterpret_cast<const char*>(&frame_number), sizeof(frame_number));
-                m_file.write(reinterpret_cast<const char*>(&num_clusters), sizeof(num_clusters));
-                m_file.write(reinterpret_cast<const char*>(clusters->data()), clusters->size() * clusters->item_size());
+                m_file.write(reinterpret_cast<const char *>(&frame_number),
+                             sizeof(frame_number));
+                m_file.write(reinterpret_cast<const char *>(&num_clusters),
+                             sizeof(num_clusters));
+                m_file.write(reinterpret_cast<const char *>(clusters->data()),
+                             clusters->size() * clusters->item_size());
                 m_source->popFront();
-            }else{
+            } else {
                 std::this_thread::sleep_for(m_default_wait);
             }
         }
@@ -40,17 +46,17 @@ class ClusterFileSink{
     }
 
   public:
-    ClusterFileSink(ClusterFinderMT<uint16_t, double, int32_t>* source, const std::filesystem::path& fname){
+    ClusterFileSink(ClusterFinderMT<ClusterType, uint16_t, double> *source,
+                    const std::filesystem::path &fname) {
         m_source = source->sink();
         m_thread = std::thread(&ClusterFileSink::process, this);
         m_file.open(fname, std::ios::binary);
     }
-    void stop(){
+    void stop() {
         m_stop_requested = true;
         m_thread.join();
         m_file.close();
     }
 };
 
 
 } // namespace aare
@@ -1,148 +0,0 @@
-#pragma once
-#include "aare/core/defs.hpp"
-#include <filesystem>
-#include <string>
-#include <fmt/format.h>
-
-namespace aare {
-struct ClusterHeader {
-    int32_t frame_number;
-    int32_t n_clusters;
-    std::string to_string() const {
-        return "frame_number: " + std::to_string(frame_number) + ", n_clusters: " + std::to_string(n_clusters);
-    }
-};
-
-struct ClusterV2_ {
-    int16_t x;
-    int16_t y;
-    std::array<int32_t, 9> data;
-    std::string to_string(bool detailed = false) const {
-        if (detailed) {
-            std::string data_str = "[";
-            for (auto &d : data) {
-                data_str += std::to_string(d) + ", ";
-            }
-            data_str += "]";
-            return "x: " + std::to_string(x) + ", y: " + std::to_string(y) + ", data: " + data_str;
-        }
-        return "x: " + std::to_string(x) + ", y: " + std::to_string(y);
-    }
-};
-
-struct ClusterV2 {
-    ClusterV2_ cluster;
-    int32_t frame_number;
-    std::string to_string() const {
-        return "frame_number: " + std::to_string(frame_number) + ", " + cluster.to_string();
-    }
-};
-
-/**
- * @brief
- * important not: fp always points to the clusters header and does not point to individual clusters
- *
- */
-class ClusterFileV2 {
-    std::filesystem::path m_fpath;
-    std::string m_mode;
-    FILE *fp{nullptr};
-
-    void check_open(){
-        if (!fp)
-            throw std::runtime_error(fmt::format("File: {} not open", m_fpath.string()));
-    }
-
-  public:
-    ClusterFileV2(std::filesystem::path const &fpath, std::string const &mode): m_fpath(fpath), m_mode(mode) {
-        if (m_mode != "r" && m_mode != "w")
-            throw std::invalid_argument("mode must be 'r' or 'w'");
-        if (m_mode == "r" && !std::filesystem::exists(m_fpath))
-            throw std::invalid_argument("File does not exist");
-        if (mode == "r") {
-            fp = fopen(fpath.string().c_str(), "rb");
-        } else if (mode == "w") {
-            if (std::filesystem::exists(fpath)) {
-                fp = fopen(fpath.string().c_str(), "r+b");
-            } else {
-                fp = fopen(fpath.string().c_str(), "wb");
-            }
-        }
-        if (fp == nullptr) {
-            throw std::runtime_error("Failed to open file");
-        }
-    }
-    ~ClusterFileV2() { close(); }
-    std::vector<ClusterV2> read() {
-        check_open();
-
-        ClusterHeader header;
-        fread(&header, sizeof(ClusterHeader), 1, fp);
-        std::vector<ClusterV2_> clusters_(header.n_clusters);
-        fread(clusters_.data(), sizeof(ClusterV2_), header.n_clusters, fp);
-        std::vector<ClusterV2> clusters;
-        for (auto &c : clusters_) {
-            ClusterV2 cluster;
-            cluster.cluster = std::move(c);
-            cluster.frame_number = header.frame_number;
-            clusters.push_back(cluster);
-        }
-
-        return clusters;
-    }
-    std::vector<std::vector<ClusterV2>> read(int n_frames) {
-        std::vector<std::vector<ClusterV2>> clusters;
-        for (int i = 0; i < n_frames; i++) {
-            clusters.push_back(read());
-        }
-        return clusters;
-    }
-
-    size_t write(std::vector<ClusterV2> const &clusters) {
-        check_open();
-        if (m_mode != "w")
-            throw std::runtime_error("File not opened in write mode");
-        if (clusters.empty())
-            return 0;
-
-        ClusterHeader header;
-        header.frame_number = clusters[0].frame_number;
-        header.n_clusters = clusters.size();
-        fwrite(&header, sizeof(ClusterHeader), 1, fp);
-        for (auto &c : clusters) {
-            fwrite(&c.cluster, sizeof(ClusterV2_), 1, fp);
-        }
-        return clusters.size();
-    }
-
-    size_t write(std::vector<std::vector<ClusterV2>> const &clusters) {
-        check_open();
-        if (m_mode != "w")
-            throw std::runtime_error("File not opened in write mode");
-
-        size_t n_clusters = 0;
-        for (auto &c : clusters) {
-            n_clusters += write(c);
-        }
-        return n_clusters;
-    }
-
-    int seek_to_begin() { return fseek(fp, 0, SEEK_SET); }
-    int seek_to_end() { return fseek(fp, 0, SEEK_END); }
-
-    int32_t frame_number() {
-        auto pos = ftell(fp);
-        ClusterHeader header;
-        fread(&header, sizeof(ClusterHeader), 1, fp);
-        fseek(fp, pos, SEEK_SET);
-        return header.frame_number;
-    }
-
-    void close() {
-        if (fp) {
-            fclose(fp);
-            fp = nullptr;
-        }
-    }
-};
-} // namespace aare
@ -10,17 +10,19 @@
|
|||||||
|
|
||||||
namespace aare {
|
namespace aare {
|
||||||
|
|
||||||
template <typename FRAME_TYPE = uint16_t, typename PEDESTAL_TYPE = double,
|
template <typename ClusterType = Cluster<int32_t, 3, 3>,
|
||||||
typename CT = int32_t>
|
typename FRAME_TYPE = uint16_t, typename PEDESTAL_TYPE = double>
|
||||||
class ClusterFinder {
|
class ClusterFinder {
|
||||||
Shape<2> m_image_size;
|
Shape<2> m_image_size;
|
||||||
const int m_cluster_sizeX;
|
|
||||||
const int m_cluster_sizeY;
|
|
||||||
const PEDESTAL_TYPE m_nSigma;
|
const PEDESTAL_TYPE m_nSigma;
|
||||||
const PEDESTAL_TYPE c2;
|
const PEDESTAL_TYPE c2;
|
||||||
const PEDESTAL_TYPE c3;
|
const PEDESTAL_TYPE c3;
|
||||||
Pedestal<PEDESTAL_TYPE> m_pedestal;
|
Pedestal<PEDESTAL_TYPE> m_pedestal;
|
||||||
ClusterVector<CT> m_clusters;
|
ClusterVector<ClusterType> m_clusters;
|
||||||
|
|
||||||
|
static const uint8_t ClusterSizeX = ClusterType::cluster_size_x;
|
||||||
|
static const uint8_t ClusterSizeY = ClusterType::cluster_size_y;
|
||||||
|
using CT = typename ClusterType::value_type;
|
||||||
|
|
||||||
public:
|
public:
|
||||||
/**
|
/**
|
||||||
@ -31,15 +33,12 @@ class ClusterFinder {
|
|||||||
* @param capacity initial capacity of the cluster vector
|
     * @param capacity initial capacity of the cluster vector
     *
     */
-    ClusterFinder(Shape<2> image_size, Shape<2> cluster_size,
-                  PEDESTAL_TYPE nSigma = 5.0, size_t capacity = 1000000)
-        : m_image_size(image_size), m_cluster_sizeX(cluster_size[0]),
-          m_cluster_sizeY(cluster_size[1]),
-          m_nSigma(nSigma),
-          c2(sqrt((m_cluster_sizeY + 1) / 2 * (m_cluster_sizeX + 1) / 2)),
-          c3(sqrt(m_cluster_sizeX * m_cluster_sizeY)),
-          m_pedestal(image_size[0], image_size[1]),
-          m_clusters(m_cluster_sizeX, m_cluster_sizeY, capacity) {};
+    ClusterFinder(Shape<2> image_size, PEDESTAL_TYPE nSigma = 5.0,
+                  size_t capacity = 1000000)
+        : m_image_size(image_size), m_nSigma(nSigma),
+          c2(sqrt((ClusterSizeY + 1) / 2 * (ClusterSizeX + 1) / 2)),
+          c3(sqrt(ClusterSizeX * ClusterSizeY)),
+          m_pedestal(image_size[0], image_size[1]), m_clusters(capacity) {};

     void push_pedestal_frame(NDView<FRAME_TYPE, 2> frame) {
         m_pedestal.push(frame);
@@ -56,23 +55,28 @@ class ClusterFinder {
     * same capacity as the old one
     *
     */
-    ClusterVector<CT> steal_clusters(bool realloc_same_capacity = false) {
-        ClusterVector<CT> tmp = std::move(m_clusters);
+    ClusterVector<ClusterType>
+    steal_clusters(bool realloc_same_capacity = false) {
+        ClusterVector<ClusterType> tmp = std::move(m_clusters);
         if (realloc_same_capacity)
-            m_clusters = ClusterVector<CT>(m_cluster_sizeX, m_cluster_sizeY,
-                                           tmp.capacity());
+            m_clusters = ClusterVector<ClusterType>(tmp.capacity());
         else
-            m_clusters = ClusterVector<CT>(m_cluster_sizeX, m_cluster_sizeY);
+            m_clusters = ClusterVector<ClusterType>{};
         return tmp;
     }
     void find_clusters(NDView<FRAME_TYPE, 2> frame, uint64_t frame_number = 0) {
         // // TODO! deal with even size clusters
         // // currently 3,3 -> +/- 1
         // // 4,4 -> +/- 2
-        int dy = m_cluster_sizeY / 2;
-        int dx = m_cluster_sizeX / 2;
+        int dy = ClusterSizeY / 2;
+        int dx = ClusterSizeX / 2;
+        int has_center_pixel_x =
+            ClusterSizeX %
+            2; // for even sized clusters there is no proper cluster center and
+               // even amount of pixels around the center
+        int has_center_pixel_y = ClusterSizeY % 2;

         m_clusters.set_frame_number(frame_number);
-        std::vector<CT> cluster_data(m_cluster_sizeX * m_cluster_sizeY);
         for (int iy = 0; iy < frame.shape(0); iy++) {
             for (int ix = 0; ix < frame.shape(1); ix++) {

@@ -87,8 +91,8 @@ class ClusterFinder {
                     continue; // NEGATIVE_PEDESTAL go to next pixel
                               // TODO! No pedestal update???

-                for (int ir = -dy; ir < dy + 1; ir++) {
-                    for (int ic = -dx; ic < dx + 1; ic++) {
+                for (int ir = -dy; ir < dy + has_center_pixel_y; ir++) {
+                    for (int ic = -dx; ic < dx + has_center_pixel_x; ic++) {
                         if (ix + ic >= 0 && ix + ic < frame.shape(1) &&
                             iy + ir >= 0 && iy + ir < frame.shape(0)) {
                             PEDESTAL_TYPE val =
@@ -109,27 +113,33 @@ class ClusterFinder {
                     // pass
                 } else {
                     // m_pedestal.push(iy, ix, frame(iy, ix)); // Safe option
-                    m_pedestal.push_fast(iy, ix, frame(iy, ix)); // Assume we have reached n_samples in the pedestal, slight performance improvement
+                    m_pedestal.push_fast(
+                        iy, ix,
+                        frame(iy,
+                              ix)); // Assume we have reached n_samples in the
+                                    // pedestal, slight performance improvement
                     continue; // It was a pedestal value nothing to store
                 }

                 // Store cluster
                 if (value == max) {
-                    // Zero out the cluster data
-                    std::fill(cluster_data.begin(), cluster_data.end(), 0);
+                    ClusterType cluster{};
+                    cluster.x = ix;
+                    cluster.y = iy;

                     // Fill the cluster data since we have a photon to store
                     // It's worth redoing the look since most of the time we
                     // don't have a photon
                     int i = 0;
-                    for (int ir = -dy; ir < dy + 1; ir++) {
-                        for (int ic = -dx; ic < dx + 1; ic++) {
+                    for (int ir = -dy; ir < dy + has_center_pixel_y; ir++) {
+                        for (int ic = -dx; ic < dx + has_center_pixel_y; ic++) {
                             if (ix + ic >= 0 && ix + ic < frame.shape(1) &&
                                 iy + ir >= 0 && iy + ir < frame.shape(0)) {
                                 CT tmp =
                                     static_cast<CT>(frame(iy + ir, ix + ic)) -
-                                    m_pedestal.mean(iy + ir, ix + ic);
-                                cluster_data[i] =
+                                    static_cast<CT>(
+                                        m_pedestal.mean(iy + ir, ix + ic));
+                                cluster.data[i] =
                                     tmp; // Watch for out of bounds access
                                 i++;
                             }
@@ -137,9 +147,7 @@ class ClusterFinder {
                     }

                     // Add the cluster to the output ClusterVector
-                    m_clusters.push_back(
-                        ix, iy,
-                        reinterpret_cast<std::byte *>(cluster_data.data()));
+                    m_clusters.push_back(cluster);
                 }
             }
         }
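Editor's note: a minimal usage sketch of the refactored finder above (not part of the commit; the template arguments, frame shape and header path are assumptions for illustration). The cluster geometry is now carried by the Cluster type itself, which is why the constructor no longer takes a cluster_size argument.

    #include "aare/ClusterFinder.hpp"
    using namespace aare;

    void example(NDView<uint16_t, 2> dark, NDView<uint16_t, 2> data) {
        // 3x3 int32 clusters on uint16 frames, double precision pedestal
        ClusterFinder<Cluster<int32_t, 3, 3>, uint16_t, double> finder(
            {512, 1024}, /*nSigma=*/5.0);
        finder.push_pedestal_frame(dark);        // accumulate the pedestal first
        finder.find_clusters(data, /*frame_number=*/1);
        auto clusters = finder.steal_clusters(); // ClusterVector<Cluster<int32_t, 3, 3>>
    }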
@@ -30,14 +30,17 @@ struct FrameWrapper {
 * @tparam PEDESTAL_TYPE type of the pedestal data
 * @tparam CT type of the cluster data
 */
-template <typename FRAME_TYPE = uint16_t, typename PEDESTAL_TYPE = double,
-          typename CT = int32_t>
+template <typename ClusterType = Cluster<int32_t, 3, 3>,
+          typename FRAME_TYPE = uint16_t, typename PEDESTAL_TYPE = double>
 class ClusterFinderMT {

+  protected:
+    using CT = typename ClusterType::value_type;
     size_t m_current_thread{0};
     size_t m_n_threads{0};
-    using Finder = ClusterFinder<FRAME_TYPE, PEDESTAL_TYPE, CT>;
+    using Finder = ClusterFinder<ClusterType, FRAME_TYPE, PEDESTAL_TYPE>;
     using InputQueue = ProducerConsumerQueue<FrameWrapper>;
-    using OutputQueue = ProducerConsumerQueue<ClusterVector<int>>;
+    using OutputQueue = ProducerConsumerQueue<ClusterVector<ClusterType>>;
     std::vector<std::unique_ptr<InputQueue>> m_input_queues;
     std::vector<std::unique_ptr<OutputQueue>> m_output_queues;

@@ -48,6 +51,7 @@ class ClusterFinderMT {
     std::thread m_collect_thread;
     std::chrono::milliseconds m_default_wait{1};

+  private:
     std::atomic<bool> m_stop_requested{false};
     std::atomic<bool> m_processing_threads_stopped{true};

@@ -66,7 +70,8 @@ class ClusterFinderMT {
         switch (frame->type) {
         case FrameType::DATA:
             cf->find_clusters(frame->data.view(), frame->frame_number);
-            m_output_queues[thread_id]->write(cf->steal_clusters(realloc_same_capacity));
+            m_output_queues[thread_id]->write(
+                cf->steal_clusters(realloc_same_capacity));
             break;

         case FrameType::PEDESTAL:
@@ -114,28 +119,32 @@ class ClusterFinderMT {
     * expected number of clusters in a frame per frame.
     * @param n_threads number of threads to use
     */
-    ClusterFinderMT(Shape<2> image_size, Shape<2> cluster_size,
-                    PEDESTAL_TYPE nSigma = 5.0, size_t capacity = 2000,
-                    size_t n_threads = 3)
+    ClusterFinderMT(Shape<2> image_size, PEDESTAL_TYPE nSigma = 5.0,
+                    size_t capacity = 2000, size_t n_threads = 3)
         : m_n_threads(n_threads) {

         for (size_t i = 0; i < n_threads; i++) {
             m_cluster_finders.push_back(
-                std::make_unique<ClusterFinder<FRAME_TYPE, PEDESTAL_TYPE, CT>>(
-                    image_size, cluster_size, nSigma, capacity));
+                std::make_unique<
+                    ClusterFinder<ClusterType, FRAME_TYPE, PEDESTAL_TYPE>>(
+                    image_size, nSigma, capacity));
         }
         for (size_t i = 0; i < n_threads; i++) {
             m_input_queues.emplace_back(std::make_unique<InputQueue>(200));
             m_output_queues.emplace_back(std::make_unique<OutputQueue>(200));
         }
-        //TODO! Should we start automatically?
+        // TODO! Should we start automatically?
         start();
     }

     /**
      * @brief Return the sink queue where all the clusters are collected
-     * @warning You need to empty this queue otherwise the cluster finder will wait forever
+     * @warning You need to empty this queue otherwise the cluster finder will
+     * wait forever
      */
-    ProducerConsumerQueue<ClusterVector<int>> *sink() { return &m_sink; }
+    ProducerConsumerQueue<ClusterVector<ClusterType>> *sink() {
+        return &m_sink;
+    }

     /**
      * @brief Start all processing threads
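Editor's note: a sketch of how the multi-threaded finder is meant to be driven (not part of the commit; the read() interface of the sink queue and the drain loop are assumptions based on the @warning above):

    using MyCluster = Cluster<int32_t, 3, 3>;

    void drain(ClusterFinderMT<MyCluster, uint16_t, double> &mt) {
        ClusterVector<MyCluster> batch;
        while (mt.sink()->read(batch)) { // the sink must be emptied continuously
            // histogram, file output, ...
        }
    }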
@@ -1,4 +1,5 @@
 #pragma once
+#include "aare/Cluster.hpp" //TODO maybe store in seperate file !!!
 #include <algorithm>
 #include <array>
 #include <cstddef>
@@ -13,292 +14,157 @@

 namespace aare {

+template <typename ClusterType,
+          typename = std::enable_if_t<is_cluster_v<ClusterType>>>
+class ClusterVector; // Forward declaration
+
 /**
- * @brief ClusterVector is a container for clusters of various sizes. It uses a
- * contiguous memory buffer to store the clusters. It is templated on the data
- * type and the coordinate type of the clusters.
+ * @brief ClusterVector is a container for clusters of various sizes. It
+ * uses a contiguous memory buffer to store the clusters. It is templated on
+ * the data type and the coordinate type of the clusters.
  * @note push_back can invalidate pointers to elements in the container
- * @warning ClusterVector is currently move only to catch unintended copies, but
- * this might change since there are probably use cases where copying is needed.
+ * @warning ClusterVector is currently move only to catch unintended copies,
+ * but this might change since there are probably use cases where copying is
+ * needed.
  * @tparam T data type of the pixels in the cluster
  * @tparam CoordType data type of the x and y coordinates of the cluster
  * (normally int16_t)
  */
-template <typename T, typename CoordType = int16_t> class ClusterVector {
-    using value_type = T;
-    size_t m_cluster_size_x;
-    size_t m_cluster_size_y;
-    std::byte *m_data{};
-    size_t m_size{0};
-    size_t m_capacity;
-    uint64_t m_frame_number{0}; // TODO! Check frame number size and type
-    /*
-    Format string used in the python bindings to create a numpy
-    array from the buffer
-    = - native byte order
-    h - short
-    d - double
-    i - int
-    */
-    constexpr static char m_fmt_base[] = "=h:x:\nh:y:\n({},{}){}:data:";
+template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
+          typename CoordType>
+class ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> {
+
+    std::vector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> m_data{};
+    int32_t m_frame_number{0}; // TODO! Check frame number size and type

   public:
+    using value_type = T;
+    using ClusterType = Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>;
+
     /**
      * @brief Construct a new ClusterVector object
-     * @param cluster_size_x size of the cluster in x direction
-     * @param cluster_size_y size of the cluster in y direction
      * @param capacity initial capacity of the buffer in number of clusters
      * @param frame_number frame number of the clusters. Default is 0, which is
      * also used to indicate that the clusters come from many frames
      */
-    ClusterVector(size_t cluster_size_x = 3, size_t cluster_size_y = 3,
-                  size_t capacity = 1024, uint64_t frame_number = 0)
-        : m_cluster_size_x(cluster_size_x), m_cluster_size_y(cluster_size_y),
-          m_capacity(capacity), m_frame_number(frame_number) {
-        allocate_buffer(capacity);
+    ClusterVector(size_t capacity = 1024, uint64_t frame_number = 0)
+        : m_frame_number(frame_number) {
+        m_data.reserve(capacity);
     }

-    ~ClusterVector() { delete[] m_data; }
-
     // Move constructor
     ClusterVector(ClusterVector &&other) noexcept
-        : m_cluster_size_x(other.m_cluster_size_x),
-          m_cluster_size_y(other.m_cluster_size_y), m_data(other.m_data),
-          m_size(other.m_size), m_capacity(other.m_capacity),
-          m_frame_number(other.m_frame_number) {
-        other.m_data = nullptr;
-        other.m_size = 0;
-        other.m_capacity = 0;
+        : m_data(other.m_data), m_frame_number(other.m_frame_number) {
+        other.m_data.clear();
     }

     // Move assignment operator
     ClusterVector &operator=(ClusterVector &&other) noexcept {
         if (this != &other) {
-            delete[] m_data;
-            m_cluster_size_x = other.m_cluster_size_x;
-            m_cluster_size_y = other.m_cluster_size_y;
             m_data = other.m_data;
-            m_size = other.m_size;
-            m_capacity = other.m_capacity;
             m_frame_number = other.m_frame_number;
-            other.m_data = nullptr;
-            other.m_size = 0;
-            other.m_capacity = 0;
+            other.m_data.clear();
             other.m_frame_number = 0;
         }
         return *this;
     }

-    /**
-     * @brief Reserve space for at least capacity clusters
-     * @param capacity number of clusters to reserve space for
-     * @note If capacity is less than the current capacity, the function does
-     * nothing.
-     */
-    void reserve(size_t capacity) {
-        if (capacity > m_capacity) {
-            allocate_buffer(capacity);
-        }
-    }
-
-    /**
-     * @brief Add a cluster to the vector
-     * @param x x-coordinate of the cluster
-     * @param y y-coordinate of the cluster
-     * @param data pointer to the data of the cluster
-     * @warning The data pointer must point to a buffer of size cluster_size_x *
-     * cluster_size_y * sizeof(T)
-     */
-    void push_back(CoordType x, CoordType y, const std::byte *data) {
-        if (m_size == m_capacity) {
-            allocate_buffer(m_capacity * 2);
-        }
-        std::byte *ptr = element_ptr(m_size);
-        *reinterpret_cast<CoordType *>(ptr) = x;
-        ptr += sizeof(CoordType);
-        *reinterpret_cast<CoordType *>(ptr) = y;
-        ptr += sizeof(CoordType);
-
-        std::copy(data, data + m_cluster_size_x * m_cluster_size_y * sizeof(T),
-                  ptr);
-        m_size++;
-    }
-    ClusterVector &operator+=(const ClusterVector &other) {
-        if (m_size + other.m_size > m_capacity) {
-            allocate_buffer(m_capacity + other.m_size);
-        }
-        std::copy(other.m_data, other.m_data + other.m_size * item_size(),
-                  m_data + m_size * item_size());
-        m_size += other.m_size;
-        return *this;
-    }
-
     /**
      * @brief Sum the pixels in each cluster
      * @return std::vector<T> vector of sums for each cluster
      */
     std::vector<T> sum() {
-        std::vector<T> sums(m_size);
-        const size_t stride = item_size();
-        const size_t n_pixels = m_cluster_size_x * m_cluster_size_y;
-        std::byte *ptr = m_data + 2 * sizeof(CoordType); // skip x and y
-
-        for (size_t i = 0; i < m_size; i++) {
-            sums[i] =
-                std::accumulate(reinterpret_cast<T *>(ptr),
-                                reinterpret_cast<T *>(ptr) + n_pixels, T{});
-            ptr += stride;
-        }
+        std::vector<T> sums(m_data.size());
+
+        std::transform(
+            m_data.begin(), m_data.end(), sums.begin(),
+            [](const ClusterType &cluster) { return cluster.sum(); });
+
         return sums;
     }

     /**
-     * @brief Return the maximum sum of the 2x2 subclusters in each cluster
+     * @brief Sum the pixels in the 2x2 subcluster with the biggest pixel sum in
+     * each cluster
      * @return std::vector<T> vector of sums for each cluster
-     * @throws std::runtime_error if the cluster size is not 3x3
-     * @warning Only 3x3 clusters are supported for the 2x2 sum.
      */
     std::vector<T> sum_2x2() {
-        std::vector<T> sums(m_size);
-        const size_t stride = item_size();
-
-        if (m_cluster_size_x != 3 || m_cluster_size_y != 3) {
-            throw std::runtime_error(
-                "Only 3x3 clusters are supported for the 2x2 sum.");
-        }
-        std::byte *ptr = m_data + 2 * sizeof(CoordType); // skip x and y
-
-        for (size_t i = 0; i < m_size; i++) {
-            std::array<T, 4> total;
-            auto T_ptr = reinterpret_cast<T *>(ptr);
-            total[0] = T_ptr[0] + T_ptr[1] + T_ptr[3] + T_ptr[4];
-            total[1] = T_ptr[1] + T_ptr[2] + T_ptr[4] + T_ptr[5];
-            total[2] = T_ptr[3] + T_ptr[4] + T_ptr[6] + T_ptr[7];
-            total[3] = T_ptr[4] + T_ptr[5] + T_ptr[7] + T_ptr[8];
-
-            sums[i] = *std::max_element(total.begin(), total.end());
-            ptr += stride;
-        }
-
-        return sums;
+        std::vector<T> sums_2x2(m_data.size());
+
+        std::transform(m_data.begin(), m_data.end(), sums_2x2.begin(),
+                       [](const ClusterType &cluster) {
+                           return cluster.max_sum_2x2().first;
+                       });
+
+        return sums_2x2;
+    }
+
+    /**
+     * @brief Reserve space for at least capacity clusters
+     * @param capacity number of clusters to reserve space for
+     * @note If capacity is less than the current capacity, the function does
+     * nothing.
+     */
+    void reserve(size_t capacity) { m_data.reserve(capacity); }
+
+    void resize(size_t size) { m_data.resize(size); }
+
+    void push_back(const ClusterType &cluster) { m_data.push_back(cluster); }
+
+    ClusterVector &operator+=(const ClusterVector &other) {
+        m_data.insert(m_data.end(), other.begin(), other.end());
+
+        return *this;
     }

     /**
      * @brief Return the number of clusters in the vector
      */
-    size_t size() const { return m_size; }
+    size_t size() const { return m_data.size(); }
+
+    uint8_t cluster_size_x() const { return ClusterSizeX; }
+
+    uint8_t cluster_size_y() const { return ClusterSizeY; }

     /**
      * @brief Return the capacity of the buffer in number of clusters. This is
      * the number of clusters that can be stored in the current buffer without
      * reallocation.
      */
-    size_t capacity() const { return m_capacity; }
+    size_t capacity() const { return m_data.capacity(); }
+
+    const auto begin() const { return m_data.begin(); }
+
+    const auto end() const { return m_data.end(); }

     /**
      * @brief Return the size in bytes of a single cluster
      */
     size_t item_size() const {
-        return 2 * sizeof(CoordType) +
-               m_cluster_size_x * m_cluster_size_y * sizeof(T);
+        return sizeof(ClusterType); // 2 * sizeof(CoordType) + ClusterSizeX *
+                                    // ClusterSizeY * sizeof(T);
     }

-    /**
-     * @brief Return the offset in bytes for the i-th cluster
-     */
-    size_t element_offset(size_t i) const { return item_size() * i; }
-
-    /**
-     * @brief Return a pointer to the i-th cluster
-     */
-    std::byte *element_ptr(size_t i) { return m_data + element_offset(i); }
-
-    /**
-     * @brief Return a pointer to the i-th cluster
-     */
-    const std::byte *element_ptr(size_t i) const {
-        return m_data + element_offset(i);
-    }
-
-    size_t cluster_size_x() const { return m_cluster_size_x; }
-    size_t cluster_size_y() const { return m_cluster_size_y; }
-
-    std::byte *data() { return m_data; }
-    std::byte const *data() const { return m_data; }
+    ClusterType *data() { return m_data.data(); }
+    ClusterType const *data() const { return m_data.data(); }

     /**
      * @brief Return a reference to the i-th cluster casted to type V
      * @tparam V type of the cluster
      */
-    template <typename V> V &at(size_t i) {
-        return *reinterpret_cast<V *>(element_ptr(i));
-    }
-
-    template <typename V> const V &at(size_t i) const {
-        return *reinterpret_cast<const V *>(element_ptr(i));
-    }
-
-    const std::string_view fmt_base() const {
-        // TODO! how do we match on coord_t?
-        return m_fmt_base;
-    }
+    ClusterType &operator[](size_t i) { return m_data[i]; }
+
+    const ClusterType &operator[](size_t i) const { return m_data[i]; }

     /**
      * @brief Return the frame number of the clusters. 0 is used to indicate
      * that the clusters come from many frames
      */
-    uint64_t frame_number() const { return m_frame_number; }
+    int32_t frame_number() const { return m_frame_number; }

-    void set_frame_number(uint64_t frame_number) {
+    void set_frame_number(int32_t frame_number) {
         m_frame_number = frame_number;
     }
-
-    /**
-     * @brief Resize the vector to contain new_size clusters. If new_size is
-     * greater than the current capacity, a new buffer is allocated. If the size
-     * is smaller no memory is freed, size is just updated.
-     * @param new_size new size of the vector
-     * @warning The additional clusters are not initialized
-     */
-    void resize(size_t new_size) {
-        // TODO! Should we initialize the new clusters?
-        if (new_size > m_capacity) {
-            allocate_buffer(new_size);
-        }
-        m_size = new_size;
-    }
-
-    void apply_gain_map(const NDView<double> gain_map){
-        //in principle we need to know the size of the image for this lookup
-        //TODO! check orientations
-        std::array<int64_t, 9> xcorr = {-1, 0, 1, -1, 0, 1, -1, 0, 1};
-        std::array<int64_t, 9> ycorr = {-1, -1, -1, 0, 0, 0, 1, 1, 1};
-        for (size_t i=0; i<m_size; i++){
-            auto& cl = at<Cluster3x3>(i);
-
-            if (cl.x > 0 && cl.y > 0 && cl.x < gain_map.shape(1)-1 && cl.y < gain_map.shape(0)-1){
-                for (size_t j=0; j<9; j++){
-                    size_t x = cl.x + xcorr[j];
-                    size_t y = cl.y + ycorr[j];
-                    cl.data[j] = static_cast<T>(cl.data[j] * gain_map(y, x));
-                }
-            }else{
-                memset(cl.data, 0, 9*sizeof(T)); //clear edge clusters
-            }
-
-        }
-    }
-
-  private:
-    void allocate_buffer(size_t new_capacity) {
-        size_t num_bytes = item_size() * new_capacity;
-        std::byte *new_data = new std::byte[num_bytes]{};
-        std::copy(m_data, m_data + item_size() * m_size, new_data);
-        delete[] m_data;
-        m_data = new_data;
-        m_capacity = new_capacity;
-    }
 };

 } // namespace aare

new file: include/aare/GainMap.hpp (68 lines)
@@ -0,0 +1,68 @@
+/************************************************
+ * @file GainMap.hpp
+ * @short function to apply gain map of image size to a vector of clusters -
+ *note stored gainmap is inverted for efficient aaplication to images
+ ***********************************************/
+
+#pragma once
+#include "aare/Cluster.hpp"
+#include "aare/ClusterVector.hpp"
+#include "aare/NDArray.hpp"
+#include "aare/NDView.hpp"
+#include <memory>
+
+namespace aare {
+
+class InvertedGainMap {
+
+  public:
+    explicit InvertedGainMap(const NDArray<double, 2> &gain_map)
+        : m_gain_map(gain_map) {
+        for (auto &item : m_gain_map) {
+            item = 1.0 / item;
+        }
+    };
+
+    explicit InvertedGainMap(const NDView<double, 2> gain_map) {
+        m_gain_map = NDArray<double, 2>(gain_map);
+        for (auto &item : m_gain_map) {
+            item = 1.0 / item;
+        }
+    }
+
+    template <typename ClusterType,
+              typename = std::enable_if_t<is_cluster_v<ClusterType>>>
+    void apply_gain_map(ClusterVector<ClusterType> &clustervec) {
+        // in principle we need to know the size of the image for this lookup
+        size_t ClusterSizeX = clustervec.cluster_size_x();
+        size_t ClusterSizeY = clustervec.cluster_size_y();
+
+        using T = typename ClusterVector<ClusterType>::value_type;
+
+        int64_t index_cluster_center_x = ClusterSizeX / 2;
+        int64_t index_cluster_center_y = ClusterSizeY / 2;
+        for (size_t i = 0; i < clustervec.size(); i++) {
+            auto &cl = clustervec[i];
+
+            if (cl.x > 0 && cl.y > 0 && cl.x < m_gain_map.shape(1) - 1 &&
+                cl.y < m_gain_map.shape(0) - 1) {
+                for (size_t j = 0; j < ClusterSizeX * ClusterSizeY; j++) {
+                    size_t x = cl.x + j % ClusterSizeX - index_cluster_center_x;
+                    size_t y = cl.y + j / ClusterSizeX - index_cluster_center_y;
+                    cl.data[j] = static_cast<T>(
+                        static_cast<double>(cl.data[j]) *
+                        m_gain_map(
+                            y, x)); // cast after conversion to keep precision
+                }
+            } else {
+                // clear edge clusters
+                cl.data.fill(0);
+            }
+        }
+    }
+
+  private:
+    NDArray<double, 2> m_gain_map{};
+};
+
+} // end of namespace aare
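Editor's note: with the specialization above the container is a thin wrapper around std::vector, so clusters are handled as typed values instead of raw byte offsets. A minimal sketch (not part of the commit; the field and method names are taken from the diff above):

    void example() {
        using Cl = Cluster<int32_t, 3, 3>;
        ClusterVector<Cl> cv(1024);      // reserve space for 1024 clusters

        Cl c{};
        c.x = 10;
        c.y = 20;
        c.data[4] = 100;                 // centre pixel of the 3x3 block
        cv.push_back(c);

        auto totals   = cv.sum();        // per-cluster pixel sums
        auto best_2x2 = cv.sum_2x2();    // best 2x2 sub-sum per cluster
        Cl &first = cv[0];               // typed access, no reinterpret_cast needed
    }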
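Editor's note: GainMap.hpp moves the old ClusterVector::apply_gain_map into a standalone class that works for any cluster geometry. A usage sketch (not part of the commit):

    void correct(ClusterVector<Cluster<int32_t, 3, 3>> &clusters,
                 const NDArray<double, 2> &gain_map) {
        InvertedGainMap gain(gain_map);  // stores 1/gain internally
        gain.apply_gain_map(clusters);   // clusters touching the image edge are zeroed
    }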
@@ -1,29 +1,130 @@
 #pragma once

+#include "aare/CalculateEta.hpp"
+#include "aare/Cluster.hpp"
+#include "aare/ClusterFile.hpp" //Cluster_3x3
+#include "aare/ClusterVector.hpp"
 #include "aare/NDArray.hpp"
 #include "aare/NDView.hpp"
-#include "aare/ClusterVector.hpp"
-#include "aare/ClusterFile.hpp" //Cluster_3x3
-namespace aare{
+#include "aare/algorithm.hpp"

-struct Photon{
+namespace aare {
+
+struct Photon {
     double x;
     double y;
     double energy;
 };

-class Interpolator{
+class Interpolator {
     NDArray<double, 3> m_ietax;
     NDArray<double, 3> m_ietay;

     NDArray<double, 1> m_etabinsx;
     NDArray<double, 1> m_etabinsy;
     NDArray<double, 1> m_energy_bins;
-  public:
-    Interpolator(NDView<double, 3> etacube, NDView<double, 1> xbins, NDView<double, 1> ybins, NDView<double, 1> ebins);
-    NDArray<double, 3> get_ietax(){return m_ietax;}
-    NDArray<double, 3> get_ietay(){return m_ietay;}

-    std::vector<Photon> interpolate(const ClusterVector<int32_t>& clusters);
+  public:
+    Interpolator(NDView<double, 3> etacube, NDView<double, 1> xbins,
+                 NDView<double, 1> ybins, NDView<double, 1> ebins);
+    NDArray<double, 3> get_ietax() { return m_ietax; }
+    NDArray<double, 3> get_ietay() { return m_ietay; }
+
+    template <typename ClusterType,
+              typename Eanble = std::enable_if_t<is_cluster_v<ClusterType>>>
+    std::vector<Photon> interpolate(const ClusterVector<ClusterType> &clusters);
 };
+
+// TODO: generalize to support any clustertype!!! otherwise add std::enable_if_t
+// to only take Cluster2x2 and Cluster3x3
+template <typename ClusterType, typename Enable>
+std::vector<Photon>
+Interpolator::interpolate(const ClusterVector<ClusterType> &clusters) {
+    std::vector<Photon> photons;
+    photons.reserve(clusters.size());
+
+    if (clusters.cluster_size_x() == 3 || clusters.cluster_size_y() == 3) {
+        for (const ClusterType &cluster : clusters) {
+
+            auto eta = calculate_eta2(cluster);
+
+            Photon photon;
+            photon.x = cluster.x;
+            photon.y = cluster.y;
+            photon.energy = eta.sum;
+
+            // auto ie = nearest_index(m_energy_bins, photon.energy)-1;
+            // auto ix = nearest_index(m_etabinsx, eta.x)-1;
+            // auto iy = nearest_index(m_etabinsy, eta.y)-1;
+            // Finding the index of the last element that is smaller
+            // should work fine as long as we have many bins
+            auto ie = last_smaller(m_energy_bins, photon.energy);
+            auto ix = last_smaller(m_etabinsx, eta.x);
+            auto iy = last_smaller(m_etabinsy, eta.y);
+
+            // fmt::print("ex: {}, ix: {}, iy: {}\n", ie, ix, iy);
+
+            double dX, dY;
+            // cBottomLeft = 0,
+            // cBottomRight = 1,
+            // cTopLeft = 2,
+            // cTopRight = 3
+            switch (static_cast<corner>(eta.c)) {
+            case corner::cTopLeft:
+                dX = -1.;
+                dY = 0;
+                break;
+            case corner::cTopRight:;
+                dX = 0;
+                dY = 0;
+                break;
+            case corner::cBottomLeft:
+                dX = -1.;
+                dY = -1.;
+                break;
+            case corner::cBottomRight:
+                dX = 0.;
+                dY = -1.;
+                break;
+            }
+            photon.x += m_ietax(ix, iy, ie) * 2 + dX;
+            photon.y += m_ietay(ix, iy, ie) * 2 + dY;
+            photons.push_back(photon);
+        }
+    } else if (clusters.cluster_size_x() == 2 ||
+               clusters.cluster_size_y() == 2) {
+        for (const ClusterType &cluster : clusters) {
+            auto eta = calculate_eta2(cluster);
+
+            Photon photon;
+            photon.x = cluster.x;
+            photon.y = cluster.y;
+            photon.energy = eta.sum;
+
+            // Now do some actual interpolation.
+            // Find which energy bin the cluster is in
+            // auto ie = nearest_index(m_energy_bins, photon.energy)-1;
+            // auto ix = nearest_index(m_etabinsx, eta.x)-1;
+            // auto iy = nearest_index(m_etabinsy, eta.y)-1;
+            // Finding the index of the last element that is smaller
+            // should work fine as long as we have many bins
+            auto ie = last_smaller(m_energy_bins, photon.energy);
+            auto ix = last_smaller(m_etabinsx, eta.x);
+            auto iy = last_smaller(m_etabinsy, eta.y);
+
+            photon.x += m_ietax(ix, iy, ie) *
+                        2; // eta goes between 0 and 1 but we could move the hit
+                           // anywhere in the 2x2
+            photon.y += m_ietay(ix, iy, ie) * 2;
+            photons.push_back(photon);
+        }
+
+    } else {
+        throw std::runtime_error(
+            "Only 3x3 and 2x2 clusters are supported for interpolation");
+    }
+
+    return photons;
+}

 } // namespace aare
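Editor's note: a usage sketch for the templated interpolate() above (not part of the commit; the eta cube and bin views are assumed to come from an existing calibration step):

    std::vector<Photon> refine(Interpolator &interp,
                               const ClusterVector<Cluster<int32_t, 3, 3>> &clusters) {
        // x/y are refined to sub-pixel positions, energy is the eta2 sum
        return interp.interpolate(clusters);
    }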
@@ -29,6 +29,9 @@ target_link_libraries(_aare PRIVATE aare_core aare_compiler_flags)
 set( PYTHON_FILES
     aare/__init__.py
     aare/CtbRawFile.py
+    aare/ClusterFinder.py
+    aare/ClusterVector.py
+
     aare/func.py
     aare/RawFile.py
     aare/transform.py
@@ -36,6 +39,7 @@ set( PYTHON_FILES
     aare/utils.py
 )

+
 # Copy the python files to the build directory
 foreach(FILE ${PYTHON_FILES})
     configure_file(${FILE} ${CMAKE_BINARY_DIR}/${FILE} )

new file: python/aare/ClusterFinder.py (67 lines)
@@ -0,0 +1,67 @@
+
+from ._aare import ClusterFinder_Cluster3x3i, ClusterFinder_Cluster2x2i, ClusterFinderMT_Cluster3x3i, ClusterFinderMT_Cluster2x2i, ClusterCollector_Cluster3x3i, ClusterCollector_Cluster2x2i
+
+
+from ._aare import ClusterFileSink_Cluster3x3i, ClusterFileSink_Cluster2x2i
+import numpy as np
+
+def ClusterFinder(image_size, cluster_size, n_sigma=5, dtype = np.int32, capacity = 1024):
+    """
+    Factory function to create a ClusterFinder object. Provides a cleaner syntax for
+    the templated ClusterFinder in C++.
+    """
+    if dtype == np.int32 and cluster_size == (3,3):
+        return ClusterFinder_Cluster3x3i(image_size, n_sigma = n_sigma, capacity=capacity)
+    elif dtype == np.int32 and cluster_size == (2,2):
+        return ClusterFinder_Cluster2x2i(image_size, n_sigma = n_sigma, capacity=capacity)
+    else:
+        #TODO! add the other formats
+        raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32 is supported.")
+
+
+def ClusterFinderMT(image_size, cluster_size = (3,3), dtype=np.int32, n_sigma=5, capacity = 1024, n_threads = 3):
+    """
+    Factory function to create a ClusterFinderMT object. Provides a cleaner syntax for
+    the templated ClusterFinderMT in C++.
+    """
+
+    if dtype == np.int32 and cluster_size == (3,3):
+        return ClusterFinderMT_Cluster3x3i(image_size, n_sigma = n_sigma,
+                                           capacity = capacity, n_threads = n_threads)
+    elif dtype == np.int32 and cluster_size == (2,2):
+        return ClusterFinderMT_Cluster2x2i(image_size, n_sigma = n_sigma,
+                                           capacity = capacity, n_threads = n_threads)
+    else:
+        #TODO! add the other formats
+        raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32 is supported.")
+
+
+def ClusterCollector(clusterfindermt, cluster_size = (3,3), dtype=np.int32):
+    """
+    Factory function to create a ClusterCollector object. Provides a cleaner syntax for
+    the templated ClusterCollector in C++.
+    """
+
+    if dtype == np.int32 and cluster_size == (3,3):
+        return ClusterCollector_Cluster3x3i(clusterfindermt)
+    elif dtype == np.int32 and cluster_size == (2,2):
+        return ClusterCollector_Cluster2x2i(clusterfindermt)
+
+    else:
+        #TODO! add the other formats
+        raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32 is supported.")
+
+def ClusterFileSink(clusterfindermt, cluster_file, dtype=np.int32):
+    """
+    Factory function to create a ClusterCollector object. Provides a cleaner syntax for
+    the templated ClusterCollector in C++.
+    """
+
+    if dtype == np.int32 and clusterfindermt.cluster_size == (3,3):
+        return ClusterFileSink_Cluster3x3i(clusterfindermt, cluster_file)
+    elif dtype == np.int32 and clusterfindermt.cluster_size == (2,2):
+        return ClusterFileSink_Cluster2x2i(clusterfindermt, cluster_file)
+
+    else:
+        #TODO! add the other formats
+        raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32 is supported.")

new file: python/aare/ClusterVector.py (11 lines)
@@ -0,0 +1,11 @@
+
+
+from ._aare import ClusterVector_Cluster3x3i
+import numpy as np
+
+def ClusterVector(cluster_size, dtype = np.int32):
+
+    if dtype == np.int32 and cluster_size == (3,3):
+        return ClusterVector_Cluster3x3i()
+    else:
+        raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32 is supported.")
@@ -3,16 +3,21 @@ from . import _aare


 from ._aare import File, RawMasterFile, RawSubFile, JungfrauDataFile
-from ._aare import Pedestal_d, Pedestal_f, ClusterFinder, VarClusterFinder
+from ._aare import Pedestal_d, Pedestal_f, ClusterFinder_Cluster3x3i, VarClusterFinder
 from ._aare import DetectorType
-from ._aare import ClusterFile
+from ._aare import ClusterFile_Cluster3x3i as ClusterFile
 from ._aare import hitmap
 from ._aare import ROI

-from ._aare import ClusterFinderMT, ClusterCollector, ClusterFileSink, ClusterVector_i
+# from ._aare import ClusterFinderMT, ClusterCollector, ClusterFileSink, ClusterVector_i
+
+from .ClusterFinder import ClusterFinder, ClusterCollector, ClusterFinderMT, ClusterFileSink
+from .ClusterVector import ClusterVector
+

 from ._aare import fit_gaus, fit_pol1, fit_scurve, fit_scurve2
 from ._aare import Interpolator
+from ._aare import calculate_eta2


 from ._aare import apply_custom_weights

new file: python/src/bind_ClusterVector.hpp (104 lines)
@@ -0,0 +1,104 @@
+#include "aare/ClusterCollector.hpp"
+#include "aare/ClusterFileSink.hpp"
+#include "aare/ClusterFinder.hpp"
+#include "aare/ClusterFinderMT.hpp"
+#include "aare/ClusterVector.hpp"
+#include "aare/NDView.hpp"
+#include "aare/Pedestal.hpp"
+#include "np_helper.hpp"
+
+#include <cstdint>
+#include <filesystem>
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#include <pybind11/stl_bind.h>
+
+namespace py = pybind11;
+using pd_type = double;
+
+using namespace aare;
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+
+template <typename Type, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
+          typename CoordType = uint16_t>
+void define_ClusterVector(py::module &m, const std::string &typestr) {
+    using ClusterType = Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>;
+    auto class_name = fmt::format("ClusterVector_{}", typestr);
+
+    py::class_<ClusterVector<
+        Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>, void>>(
+        m, class_name.c_str(),
+        py::buffer_protocol())
+
+        .def(py::init()) // TODO change!!!
+
+        .def("push_back",
+             [](ClusterVector<ClusterType> &self, const ClusterType &cluster) {
+                 self.push_back(cluster);
+             })
+
+        .def("sum",
+             [](ClusterVector<ClusterType> &self) {
+                 auto *vec = new std::vector<Type>(self.sum());
+                 return return_vector(vec);
+             })
+        .def("sum_2x2", [](ClusterVector<ClusterType> &self){
+                 auto *vec = new std::vector<Type>(self.sum_2x2());
+                 return return_vector(vec);
+             })
+        .def_property_readonly("size", &ClusterVector<ClusterType>::size)
+        .def("item_size", &ClusterVector<ClusterType>::item_size)
+        .def_property_readonly("fmt",
+                               [typestr](ClusterVector<ClusterType> &self) {
+                                   return fmt_format<ClusterType>;
+                               })
+
+        .def_property_readonly("cluster_size_x",
+                               &ClusterVector<ClusterType>::cluster_size_x)
+        .def_property_readonly("cluster_size_y",
+                               &ClusterVector<ClusterType>::cluster_size_y)
+        .def_property_readonly("capacity",
+                               &ClusterVector<ClusterType>::capacity)
+        .def_property("frame_number", &ClusterVector<ClusterType>::frame_number,
+                      &ClusterVector<ClusterType>::set_frame_number)
+        .def_buffer(
+            [typestr](ClusterVector<ClusterType> &self) -> py::buffer_info {
+                return py::buffer_info(
+                    self.data(),              /* Pointer to buffer */
+                    self.item_size(),         /* Size of one scalar */
+                    fmt_format<ClusterType>,  /* Format descriptor */
+                    1,                        /* Number of dimensions */
+                    {self.size()},            /* Buffer dimensions */
+                    {self.item_size()}        /* Strides (in bytes) for each index */
+                );
+            });
+
+    // Free functions using ClusterVector
+    m.def("hitmap",
+          [](std::array<size_t, 2> image_size, ClusterVector<ClusterType> &cv) {
+              // Create a numpy array to hold the hitmap
+              // The shape of the array is (image_size[0], image_size[1])
+              // note that the python array is passed as [row, col] which
+              // is the opposite of the clusters [x,y]
+              py::array_t<int32_t> hitmap(image_size);
+              auto r = hitmap.mutable_unchecked<2>();
+
+              // Initialize hitmap to 0
+              for (py::ssize_t i = 0; i < r.shape(0); i++)
+                  for (py::ssize_t j = 0; j < r.shape(1); j++)
+                      r(i, j) = 0;
+
+              // Loop over the clusters and increment the hitmap
+              // Skip out of bound clusters
+              for (const auto &cluster : cv) {
+                  auto x = cluster.x;
+                  auto y = cluster.y;
+                  if (x < image_size[1] && y < image_size[0])
+                      r(cluster.y, cluster.x) += 1;
+              }
+
+              return hitmap;
+          });
+}
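Editor's note: the module registration itself is not part of this hunk; as an assumption, the helper above would be instantiated once per exported geometry, so the class names match what the Python factories import (ClusterVector_Cluster3x3i etc.):

    // hypothetical module registration, shown for orientation only
    void register_cluster_vectors(py::module &m) {
        define_ClusterVector<int, 3, 3>(m, "Cluster3x3i");
        define_ClusterVector<int, 2, 2>(m, "Cluster2x2i");
    }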
@ -16,194 +16,196 @@
|
|||||||
namespace py = pybind11;
|
namespace py = pybind11;
|
||||||
using pd_type = double;
|
using pd_type = double;
|
||||||
|
|
||||||
template <typename T>
|
using namespace aare;
|
||||||
void define_cluster_vector(py::module &m, const std::string &typestr) {
|
|
||||||
auto class_name = fmt::format("ClusterVector_{}", typestr);
|
#pragma GCC diagnostic push
|
||||||
py::class_<ClusterVector<T>>(m, class_name.c_str(), py::buffer_protocol())
|
#pragma GCC diagnostic ignored "-Wunused-parameter"
|
||||||
.def(py::init<int, int>(),
|
|
||||||
py::arg("cluster_size_x") = 3, py::arg("cluster_size_y") = 3)
|
template <typename Type, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||||
.def("push_back",
|
typename CoordType>
|
||||||
[](ClusterVector<T> &self, int x, int y, py::array_t<T> data) {
|
void define_cluster(py::module &m, const std::string &typestr) {
|
||||||
// auto view = make_view_2d(data);
|
auto class_name = fmt::format("Cluster{}", typestr);
|
||||||
self.push_back(x, y, reinterpret_cast<const std::byte*>(data.data()));
|
|
||||||
})
|
py::class_<Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>>(
|
||||||
.def_property_readonly("size", &ClusterVector<T>::size)
|
m, class_name.c_str(), py::buffer_protocol())
|
||||||
.def("item_size", &ClusterVector<T>::item_size)
|
|
||||||
.def_property_readonly("fmt",
|
.def(py::init([](uint8_t x, uint8_t y, py::array_t<Type> data) {
|
||||||
[typestr](ClusterVector<T> &self) {
|
py::buffer_info buf_info = data.request();
|
||||||
return fmt::format(
|
Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType> cluster;
|
||||||
self.fmt_base(), self.cluster_size_x(),
|
cluster.x = x;
|
||||||
self.cluster_size_y(), typestr);
|
cluster.y = y;
|
||||||
})
|
auto r = data.template unchecked<1>(); // no bounds checks
|
||||||
.def("sum",
|
for (py::ssize_t i = 0; i < data.size(); ++i) {
|
||||||
[](ClusterVector<T> &self) {
|
cluster.data[i] = r(i);
|
||||||
auto *vec = new std::vector<T>(self.sum());
|
}
|
||||||
return return_vector(vec);
|
return cluster;
|
||||||
})
|
}));
|
||||||
.def("sum_2x2", [](ClusterVector<T> &self) {
|
|
||||||
auto *vec = new std::vector<T>(self.sum_2x2());
|
/*
|
||||||
return return_vector(vec);
|
.def_property(
|
||||||
})
|
"data",
|
||||||
.def_property_readonly("cluster_size_x", &ClusterVector<T>::cluster_size_x)
|
[](ClusterType &c) -> py::array {
|
||||||
.def_property_readonly("cluster_size_y", &ClusterVector<T>::cluster_size_y)
|
return py::array(py::buffer_info(
|
||||||
.def_property_readonly("capacity", &ClusterVector<T>::capacity)
|
c.data, sizeof(Type),
|
||||||
.def_property("frame_number", &ClusterVector<T>::frame_number,
|
py::format_descriptor<Type>::format(), // Type
|
||||||
&ClusterVector<T>::set_frame_number)
|
// format
|
||||||
.def_buffer([typestr](ClusterVector<T> &self) -> py::buffer_info {
|
1, // Number of dimensions
|
||||||
return py::buffer_info(
|
{static_cast<ssize_t>(ClusterSizeX *
|
||||||
self.data(), /* Pointer to buffer */
|
ClusterSizeY)}, // Shape (flattened)
|
||||||
self.item_size(), /* Size of one scalar */
|
{sizeof(Type)} // Stride (step size between elements)
|
||||||
fmt::format(self.fmt_base(), self.cluster_size_x(),
|
));
|
||||||
self.cluster_size_y(),
|
},
|
||||||
typestr), /* Format descriptor */
|
[](ClusterType &c, py::array_t<Type> arr) {
|
||||||
1, /* Number of dimensions */
|
py::buffer_info buf_info = arr.request();
|
||||||
{self.size()}, /* Buffer dimensions */
|
Type *ptr = static_cast<Type *>(buf_info.ptr);
|
||||||
{self.item_size()} /* Strides (in bytes) for each index */
|
std::copy(ptr, ptr + ClusterSizeX * ClusterSizeY,
|
||||||
);
|
c.data); // TODO dont iterate over centers!!!
|
||||||
|
|
||||||
});
|
});
|
||||||
|
*/
|
||||||
}
|
}
|
||||||
|
|
||||||
void define_cluster_finder_mt_bindings(py::module &m) {
|
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||||
py::class_<ClusterFinderMT<uint16_t, pd_type>>(m, "ClusterFinderMT")
|
typename CoordType = uint16_t>
|
||||||
.def(py::init<Shape<2>, Shape<2>, pd_type, size_t, size_t>(),
|
void define_cluster_finder_mt_bindings(py::module &m,
|
||||||
py::arg("image_size"), py::arg("cluster_size"),
|
const std::string &typestr) {
|
||||||
py::arg("n_sigma") = 5.0, py::arg("capacity") = 2048,
|
auto class_name = fmt::format("ClusterFinderMT_{}", typestr);
|
||||||
py::arg("n_threads") = 3)
|
|
||||||
|
using ClusterType = Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>;
|
||||||
|
|
||||||
|
py::class_<ClusterFinderMT<ClusterType, uint16_t, pd_type>>(
|
||||||
|
m, class_name.c_str())
|
||||||
|
.def(py::init<Shape<2>, pd_type, size_t, size_t>(),
|
||||||
|
py::arg("image_size"), py::arg("n_sigma") = 5.0,
|
||||||
|
py::arg("capacity") = 2048, py::arg("n_threads") = 3)
|
||||||
.def("push_pedestal_frame",
|
.def("push_pedestal_frame",
|
||||||
[](ClusterFinderMT<uint16_t, pd_type> &self,
|
[](ClusterFinderMT<ClusterType, uint16_t, pd_type> &self,
|
||||||
py::array_t<uint16_t> frame) {
|
py::array_t<uint16_t> frame) {
|
||||||
auto view = make_view_2d(frame);
|
auto view = make_view_2d(frame);
|
||||||
self.push_pedestal_frame(view);
|
self.push_pedestal_frame(view);
|
||||||
})
|
})
|
||||||
.def(
|
.def(
|
||||||
"find_clusters",
|
"find_clusters",
|
||||||
[](ClusterFinderMT<uint16_t, pd_type> &self,
|
[](ClusterFinderMT<ClusterType, uint16_t, pd_type> &self,
|
||||||
py::array_t<uint16_t> frame, uint64_t frame_number) {
|
py::array_t<uint16_t> frame, uint64_t frame_number) {
|
||||||
auto view = make_view_2d(frame);
|
auto view = make_view_2d(frame);
|
||||||
self.find_clusters(view, frame_number);
|
self.find_clusters(view, frame_number);
|
||||||
return;
|
return;
|
||||||
},
|
},
|
||||||
py::arg(), py::arg("frame_number") = 0)
|
py::arg(), py::arg("frame_number") = 0)
|
||||||
.def("clear_pedestal", &ClusterFinderMT<uint16_t, pd_type>::clear_pedestal)
|
.def_property_readonly("cluster_size", [](ClusterFinderMT<ClusterType, uint16_t, pd_type> &self){
|
||||||
.def("sync", &ClusterFinderMT<uint16_t, pd_type>::sync)
|
return py::make_tuple(ClusterSizeX, ClusterSizeY);
|
||||||
.def("stop", &ClusterFinderMT<uint16_t, pd_type>::stop)
|
})
|
||||||
.def("start", &ClusterFinderMT<uint16_t, pd_type>::start)
|
.def("clear_pedestal",
|
||||||
.def("pedestal",
|
&ClusterFinderMT<ClusterType, uint16_t, pd_type>::clear_pedestal)
|
||||||
[](ClusterFinderMT<uint16_t, pd_type> &self, size_t thread_index) {
|
.def("sync", &ClusterFinderMT<ClusterType, uint16_t, pd_type>::sync)
|
||||||
|
.def("stop", &ClusterFinderMT<ClusterType, uint16_t, pd_type>::stop)
|
||||||
|
.def("start", &ClusterFinderMT<ClusterType, uint16_t, pd_type>::start)
|
||||||
|
.def(
|
||||||
|
"pedestal",
|
||||||
|
[](ClusterFinderMT<ClusterType, uint16_t, pd_type> &self,
|
||||||
|
size_t thread_index) {
|
||||||
auto pd = new NDArray<pd_type, 2>{};
|
auto pd = new NDArray<pd_type, 2>{};
|
||||||
*pd = self.pedestal(thread_index);
|
*pd = self.pedestal(thread_index);
|
||||||
return return_image_data(pd);
|
return return_image_data(pd);
|
||||||
},py::arg("thread_index") = 0)
|
},
|
||||||
.def("noise",
|
py::arg("thread_index") = 0)
|
||||||
[](ClusterFinderMT<uint16_t, pd_type> &self, size_t thread_index) {
|
.def(
|
||||||
|
"noise",
|
||||||
|
[](ClusterFinderMT<ClusterType, uint16_t, pd_type> &self,
|
||||||
|
size_t thread_index) {
|
||||||
auto arr = new NDArray<pd_type, 2>{};
|
auto arr = new NDArray<pd_type, 2>{};
|
||||||
*arr = self.noise(thread_index);
|
*arr = self.noise(thread_index);
|
||||||
return return_image_data(arr);
|
return return_image_data(arr);
|
||||||
},py::arg("thread_index") = 0);
|
},
|
||||||
|
py::arg("thread_index") = 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
void define_cluster_collector_bindings(py::module &m) {
|
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||||
py::class_<ClusterCollector>(m, "ClusterCollector")
|
typename CoordType = uint16_t>
|
||||||
.def(py::init<ClusterFinderMT<uint16_t, double, int32_t> *>())
|
void define_cluster_collector_bindings(py::module &m,
|
||||||
.def("stop", &ClusterCollector::stop)
|
const std::string &typestr) {
|
||||||
|
auto class_name = fmt::format("ClusterCollector_{}", typestr);
|
||||||
|
|
||||||
|
using ClusterType = Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>;
|
||||||
|
|
||||||
|
py::class_<ClusterCollector<ClusterType>>(m, class_name.c_str())
|
||||||
|
.def(py::init<ClusterFinderMT<ClusterType, uint16_t, double> *>())
|
||||||
|
.def("stop", &ClusterCollector<ClusterType>::stop)
|
||||||
.def(
|
.def(
|
||||||
"steal_clusters",
|
"steal_clusters",
|
||||||
[](ClusterCollector &self) {
|
[](ClusterCollector<ClusterType> &self) {
|
||||||
auto v =
|
auto v = new std::vector<ClusterVector<ClusterType>>(
|
||||||
new std::vector<ClusterVector<int>>(self.steal_clusters());
|
self.steal_clusters());
|
||||||
return v;
|
return v; // TODO change!!!
|
||||||
},
|
},
|
||||||
py::return_value_policy::take_ownership);
|
py::return_value_policy::take_ownership);
|
||||||
}
|
}
|
||||||
|
|
||||||
void define_cluster_file_sink_bindings(py::module &m) {
|
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||||
py::class_<ClusterFileSink>(m, "ClusterFileSink")
|
typename CoordType = uint16_t>
|
||||||
.def(py::init<ClusterFinderMT<uint16_t, double, int32_t> *,
|
void define_cluster_file_sink_bindings(py::module &m,
|
||||||
|
const std::string &typestr) {
|
||||||
|
auto class_name = fmt::format("ClusterFileSink_{}", typestr);
|
||||||
|
|
||||||
|
using ClusterType = Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>;
|
||||||
|
|
||||||
|
py::class_<ClusterFileSink<ClusterType>>(m, class_name.c_str())
|
||||||
|
.def(py::init<ClusterFinderMT<ClusterType, uint16_t, double> *,
|
||||||
const std::filesystem::path &>())
|
const std::filesystem::path &>())
|
||||||
.def("stop", &ClusterFileSink::stop);
|
.def("stop", &ClusterFileSink<ClusterType>::stop);
|
||||||
}
|
}
|
||||||
|
|
||||||
-void define_cluster_finder_bindings(py::module &m) {
+template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
+          typename CoordType = uint16_t>
+void define_cluster_finder_bindings(py::module &m, const std::string &typestr) {
+    auto class_name = fmt::format("ClusterFinder_{}", typestr);
+
+    using ClusterType = Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>;

-    py::class_<ClusterFinder<uint16_t, pd_type>>(m, "ClusterFinder")
-        .def(py::init<Shape<2>, Shape<2>, pd_type, size_t>(),
-             py::arg("image_size"), py::arg("cluster_size"),
+    py::class_<ClusterFinder<ClusterType, uint16_t, pd_type>>(
+        m, class_name.c_str())
+        .def(py::init<Shape<2>, pd_type, size_t>(), py::arg("image_size"),
              py::arg("n_sigma") = 5.0, py::arg("capacity") = 1'000'000)
         .def("push_pedestal_frame",
-             [](ClusterFinder<uint16_t, pd_type> &self,
+             [](ClusterFinder<ClusterType, uint16_t, pd_type> &self,
                 py::array_t<uint16_t> frame) {
                 auto view = make_view_2d(frame);
                 self.push_pedestal_frame(view);
             })
-        .def("clear_pedestal", &ClusterFinder<uint16_t, pd_type>::clear_pedestal)
-        .def_property_readonly("pedestal",
-            [](ClusterFinder<uint16_t, pd_type> &self) {
+        .def("clear_pedestal",
+             &ClusterFinder<ClusterType, uint16_t, pd_type>::clear_pedestal)
+        .def_property_readonly(
+            "pedestal",
+            [](ClusterFinder<ClusterType, uint16_t, pd_type> &self) {
                 auto pd = new NDArray<pd_type, 2>{};
                 *pd = self.pedestal();
                 return return_image_data(pd);
             })
-        .def_property_readonly("noise",
-            [](ClusterFinder<uint16_t, pd_type> &self) {
+        .def_property_readonly(
+            "noise",
+            [](ClusterFinder<ClusterType, uint16_t, pd_type> &self) {
                 auto arr = new NDArray<pd_type, 2>{};
                 *arr = self.noise();
                 return return_image_data(arr);
             })
         .def(
             "steal_clusters",
-            [](ClusterFinder<uint16_t, pd_type> &self,
+            [](ClusterFinder<ClusterType, uint16_t, pd_type> &self,
                bool realloc_same_capacity) {
-                auto v = new ClusterVector<int>(
-                    self.steal_clusters(realloc_same_capacity));
-                return v;
+                ClusterVector<ClusterType> clusters =
+                    self.steal_clusters(realloc_same_capacity);
+                return clusters;
             },
             py::arg("realloc_same_capacity") = false)
         .def(
             "find_clusters",
-            [](ClusterFinder<uint16_t, pd_type> &self,
+            [](ClusterFinder<ClusterType, uint16_t, pd_type> &self,
                py::array_t<uint16_t> frame, uint64_t frame_number) {
                 auto view = make_view_2d(frame);
                 self.find_clusters(view, frame_number);
                 return;
             },
             py::arg(), py::arg("frame_number") = 0);
-
-    m.def("hitmap",
-          [](std::array<size_t, 2> image_size, ClusterVector<int32_t> &cv) {
-              py::array_t<int32_t> hitmap(image_size);
-              auto r = hitmap.mutable_unchecked<2>();
-
-              // Initialize hitmap to 0
-              for (py::ssize_t i = 0; i < r.shape(0); i++)
-                  for (py::ssize_t j = 0; j < r.shape(1); j++)
-                      r(i, j) = 0;
-
-              size_t stride = cv.item_size();
-              auto ptr = cv.data();
-              for (size_t i = 0; i < cv.size(); i++) {
-                  auto x = *reinterpret_cast<int16_t *>(ptr);
-                  auto y = *reinterpret_cast<int16_t *>(ptr + sizeof(int16_t));
-                  r(y, x) += 1;
-                  ptr += stride;
-              }
-              return hitmap;
-          });
-    define_cluster_vector<int>(m, "i");
-    define_cluster_vector<double>(m, "d");
-    define_cluster_vector<float>(m, "f");
-
-    py::class_<DynamicCluster>(m, "DynamicCluster", py::buffer_protocol())
-        .def(py::init<int, int, Dtype>())
-        .def("size", &DynamicCluster::size)
-        .def("begin", &DynamicCluster::begin)
-        .def("end", &DynamicCluster::end)
-        .def_readwrite("x", &DynamicCluster::x)
-        .def_readwrite("y", &DynamicCluster::y)
-        .def_buffer([](DynamicCluster &c) -> py::buffer_info {
-            return py::buffer_info(c.data(), c.dt.bytes(), c.dt.format_descr(),
-                                   1, {c.size()}, {c.dt.bytes()});
-        })
-
-        .def("__repr__", [](const DynamicCluster &a) {
-            return "<DynamicCluster: x: " + std::to_string(a.x) +
-                   ", y: " + std::to_string(a.y) + ">";
-        });
 }
+
+#pragma GCC diagnostic pop
@ -1,3 +1,4 @@
+#include "aare/CalculateEta.hpp"
 #include "aare/ClusterFile.hpp"
 #include "aare/defs.hpp"

@ -10,61 +11,81 @@
 #include <pybind11/stl/filesystem.h>
 #include <string>

-//Disable warnings for unused parameters, as we ignore some
-//in the __exit__ method
+// Disable warnings for unused parameters, as we ignore some
+// in the __exit__ method
 #pragma GCC diagnostic push
 #pragma GCC diagnostic ignored "-Wunused-parameter"

 namespace py = pybind11;
 using namespace ::aare;

-void define_cluster_file_io_bindings(py::module &m) {
-    PYBIND11_NUMPY_DTYPE(Cluster3x3, x, y, data);
+template <typename Type, uint8_t CoordSizeX, uint8_t CoordSizeY,
+          typename CoordType = uint16_t>
+void define_cluster_file_io_bindings(py::module &m,
+                                     const std::string &typestr) {

-    py::class_<ClusterFile>(m, "ClusterFile")
+    using ClusterType = Cluster<Type, CoordSizeX, CoordSizeY, CoordType>;
+
+    auto class_name = fmt::format("ClusterFile_{}", typestr);
+
+    py::class_<ClusterFile<ClusterType>>(m, class_name.c_str())
         .def(py::init<const std::filesystem::path &, size_t,
                       const std::string &>(),
              py::arg(), py::arg("chunk_size") = 1000, py::arg("mode") = "r")
-        .def("read_clusters",
-             [](ClusterFile &self, size_t n_clusters) {
-                 auto v = new ClusterVector<int32_t>(self.read_clusters(n_clusters));
+        .def(
+            "read_clusters",
+            [](ClusterFile<ClusterType> &self, size_t n_clusters) {
+                auto v = new ClusterVector<ClusterType>(
+                    self.read_clusters(n_clusters));
                 return v;
-             },py::return_value_policy::take_ownership)
+            },
+            py::return_value_policy::take_ownership)
         .def("read_frame",
-             [](ClusterFile &self) {
-                 auto v = new ClusterVector<int32_t>(self.read_frame());
+             [](ClusterFile<ClusterType> &self) {
+                 auto v = new ClusterVector<ClusterType>(self.read_frame());
                 return v;
             })
-        .def("set_roi", &ClusterFile::set_roi)
-        .def("set_noise_map", [](ClusterFile &self, py::array_t<int32_t> noise_map) {
+        .def("set_roi", &ClusterFile<ClusterType>::set_roi)
+        .def(
+            "set_noise_map",
+            [](ClusterFile<ClusterType> &self, py::array_t<int32_t> noise_map) {
                auto view = make_view_2d(noise_map);
                self.set_noise_map(view);
            })
-        .def("set_gain_map", [](ClusterFile &self, py::array_t<double> gain_map) {
+        .def("set_gain_map",
+             [](ClusterFile<ClusterType> &self, py::array_t<double> gain_map) {
                 auto view = make_view_2d(gain_map);
                 self.set_gain_map(view);
             })
-        .def("close", &ClusterFile::close)
-        .def("write_frame", &ClusterFile::write_frame)
-        .def("__enter__", [](ClusterFile &self) { return &self; })
+        .def("close", &ClusterFile<ClusterType>::close)
+        .def("write_frame", &ClusterFile<ClusterType>::write_frame)
+        .def("__enter__", [](ClusterFile<ClusterType> &self) { return &self; })
         .def("__exit__",
-             [](ClusterFile &self,
+             [](ClusterFile<ClusterType> &self,
                const std::optional<pybind11::type> &exc_type,
                const std::optional<pybind11::object> &exc_value,
                const std::optional<pybind11::object> &traceback) {
                 self.close();
             })
-        .def("__iter__", [](ClusterFile &self) { return &self; })
-        .def("__next__", [](ClusterFile &self) {
-            auto v = new ClusterVector<int32_t>(self.read_clusters(self.chunk_size()));
+        .def("__iter__", [](ClusterFile<ClusterType> &self) { return &self; })
+        .def("__next__", [](ClusterFile<ClusterType> &self) {
+            auto v = new ClusterVector<ClusterType>(
+                self.read_clusters(self.chunk_size()));
            if (v->size() == 0) {
                throw py::stop_iteration();
            }
            return v;
        });
+}

-    m.def("calculate_eta2", []( aare::ClusterVector<int32_t> &clusters) {
+template <typename Type, uint8_t CoordSizeX, uint8_t CoordSizeY,
+          typename CoordType = uint16_t>
+void register_calculate_eta(py::module &m) {
+    using ClusterType = Cluster<Type, CoordSizeX, CoordSizeY, CoordType>;
+    m.def("calculate_eta2",
+          [](const aare::ClusterVector<ClusterType> &clusters) {
              auto eta2 = new NDArray<double, 2>(calculate_eta2(clusters));
              return return_image_data(eta2);
          });
@ -8,32 +8,56 @@
 #include <pybind11/stl.h>

 namespace py = pybind11;

+template <typename Type, uint8_t CoordSizeX, uint8_t CoordSizeY,
+          typename CoordType = uint16_t>
+void register_interpolate(py::class_<aare::Interpolator> &interpolator) {
+
+    using ClusterType = Cluster<Type, CoordSizeX, CoordSizeY, CoordType>;
+
+    interpolator.def("interpolate",
+                     [](aare::Interpolator &self,
+                        const ClusterVector<ClusterType> &clusters) {
+                         auto photons = self.interpolate<ClusterType>(clusters);
+                         auto *ptr = new std::vector<Photon>{photons};
+                         return return_vector(ptr);
+                     });
+}
+
 void define_interpolation_bindings(py::module &m) {

-    PYBIND11_NUMPY_DTYPE(aare::Photon, x,y,energy);
+    PYBIND11_NUMPY_DTYPE(aare::Photon, x, y, energy);

+    auto interpolator =
         py::class_<aare::Interpolator>(m, "Interpolator")
-            .def(py::init([](py::array_t<double, py::array::c_style | py::array::forcecast> etacube, py::array_t<double> xbins,
-                             py::array_t<double> ybins, py::array_t<double> ebins) {
+            .def(py::init([](py::array_t<double, py::array::c_style |
+                                                     py::array::forcecast>
+                                 etacube,
+                             py::array_t<double> xbins,
+                             py::array_t<double> ybins,
+                             py::array_t<double> ebins) {
                 return Interpolator(make_view_3d(etacube), make_view_1d(xbins),
                                     make_view_1d(ybins), make_view_1d(ebins));
             }))
-            .def("get_ietax", [](Interpolator& self){
-                auto*ptr = new NDArray<double,3>{};
+            .def("get_ietax",
+                 [](Interpolator &self) {
+                     auto *ptr = new NDArray<double, 3>{};
                     *ptr = self.get_ietax();
                     return return_image_data(ptr);
                 })
-            .def("get_ietay", [](Interpolator& self){
-                auto*ptr = new NDArray<double,3>{};
+            .def("get_ietay", [](Interpolator &self) {
+                auto *ptr = new NDArray<double, 3>{};
                *ptr = self.get_ietay();
                return return_image_data(ptr);
-            })
-            .def("interpolate", [](Interpolator& self, const ClusterVector<int32_t>& clusters){
-                auto photons = self.interpolate(clusters);
-                auto* ptr = new std::vector<Photon>{photons};
-                return return_vector(ptr);
            });

+    register_interpolate<int, 3, 3, uint16_t>(interpolator);
+    register_interpolate<float, 3, 3, uint16_t>(interpolator);
+    register_interpolate<double, 3, 3, uint16_t>(interpolator);
+    register_interpolate<int, 2, 2, uint16_t>(interpolator);
+    register_interpolate<float, 2, 2, uint16_t>(interpolator);
+    register_interpolate<double, 2, 2, uint16_t>(interpolator);
+
    // TODO! Evaluate without converting to double
    m.def(
        "hej",
@ -1,20 +1,24 @@
-//Files with bindings to the different classes
-#include "file.hpp"
-#include "raw_file.hpp"
-#include "ctb_raw_file.hpp"
-#include "raw_master_file.hpp"
-#include "var_cluster.hpp"
-#include "pixel_map.hpp"
-#include "pedestal.hpp"
+// Files with bindings to the different classes
+
+//New style file naming
+#include "bind_ClusterVector.hpp"
+
+//TODO! migrate the other names
 #include "cluster.hpp"
 #include "cluster_file.hpp"
+#include "ctb_raw_file.hpp"
+#include "file.hpp"
 #include "fit.hpp"
 #include "interpolation.hpp"
 #include "raw_sub_file.hpp"
+#include "raw_master_file.hpp"
+#include "raw_file.hpp"
+#include "pixel_map.hpp"
+#include "var_cluster.hpp"
+#include "pedestal.hpp"
 #include "jungfrau_data_file.hpp"

-//Pybind stuff
+// Pybind stuff
 #include <pybind11/pybind11.h>
 #include <pybind11/stl.h>

@ -30,13 +34,63 @@ PYBIND11_MODULE(_aare, m) {
    define_pixel_map_bindings(m);
    define_pedestal_bindings<double>(m, "Pedestal_d");
    define_pedestal_bindings<float>(m, "Pedestal_f");
-    define_cluster_finder_bindings(m);
-    define_cluster_finder_mt_bindings(m);
-    define_cluster_file_io_bindings(m);
-    define_cluster_collector_bindings(m);
-    define_cluster_file_sink_bindings(m);
    define_fit_bindings(m);
    define_interpolation_bindings(m);
    define_jungfrau_data_file_io_bindings(m);

+    define_cluster_file_io_bindings<int, 3, 3, uint16_t>(m, "Cluster3x3i");
+    define_cluster_file_io_bindings<double, 3, 3, uint16_t>(m, "Cluster3x3d");
+    define_cluster_file_io_bindings<float, 3, 3, uint16_t>(m, "Cluster3x3f");
+    define_cluster_file_io_bindings<int, 2, 2, uint16_t>(m, "Cluster2x2i");
+    define_cluster_file_io_bindings<float, 2, 2, uint16_t>(m, "Cluster2x2f");
+    define_cluster_file_io_bindings<double, 2, 2, uint16_t>(m, "Cluster2x2d");
+
+    define_ClusterVector<int, 3, 3, uint16_t>(m, "Cluster3x3i");
+    define_ClusterVector<double, 3, 3, uint16_t>(m, "Cluster3x3d");
+    define_ClusterVector<float, 3, 3, uint16_t>(m, "Cluster3x3f");
+    define_ClusterVector<int, 2, 2, uint16_t>(m, "Cluster2x2i");
+    define_ClusterVector<double, 2, 2, uint16_t>(m, "Cluster2x2d");
+    define_ClusterVector<float, 2, 2, uint16_t>(m, "Cluster2x2f");
+
+    define_cluster_finder_bindings<int, 3, 3, uint16_t>(m, "Cluster3x3i");
+    define_cluster_finder_bindings<double, 3, 3, uint16_t>(m, "Cluster3x3d");
+    define_cluster_finder_bindings<float, 3, 3, uint16_t>(m, "Cluster3x3f");
+    define_cluster_finder_bindings<int, 2, 2, uint16_t>(m, "Cluster2x2i");
+    define_cluster_finder_bindings<double, 2, 2, uint16_t>(m, "Cluster2x2d");
+    define_cluster_finder_bindings<float, 2, 2, uint16_t>(m, "Cluster2x2f");
+
+    define_cluster_finder_mt_bindings<int, 3, 3, uint16_t>(m, "Cluster3x3i");
+    define_cluster_finder_mt_bindings<double, 3, 3, uint16_t>(m, "Cluster3x3d");
+    define_cluster_finder_mt_bindings<float, 3, 3, uint16_t>(m, "Cluster3x3f");
+    define_cluster_finder_mt_bindings<int, 2, 2, uint16_t>(m, "Cluster2x2i");
+    define_cluster_finder_mt_bindings<double, 2, 2, uint16_t>(m, "Cluster2x2d");
+    define_cluster_finder_mt_bindings<float, 2, 2, uint16_t>(m, "Cluster2x2f");
+
+    define_cluster_file_sink_bindings<int, 3, 3, uint16_t>(m, "Cluster3x3i");
+    define_cluster_file_sink_bindings<double, 3, 3, uint16_t>(m, "Cluster3x3d");
+    define_cluster_file_sink_bindings<float, 3, 3, uint16_t>(m, "Cluster3x3f");
+    define_cluster_file_sink_bindings<int, 2, 2, uint16_t>(m, "Cluster2x2i");
+    define_cluster_file_sink_bindings<double, 2, 2, uint16_t>(m, "Cluster2x2d");
+    define_cluster_file_sink_bindings<float, 2, 2, uint16_t>(m, "Cluster2x2f");
+
+    define_cluster_collector_bindings<int, 3, 3, uint16_t>(m, "Cluster3x3i");
+    define_cluster_collector_bindings<double, 3, 3, uint16_t>(m, "Cluster3x3f");
+    define_cluster_collector_bindings<float, 3, 3, uint16_t>(m, "Cluster3x3d");
+    define_cluster_collector_bindings<int, 2, 2, uint16_t>(m, "Cluster2x2i");
+    define_cluster_collector_bindings<double, 2, 2, uint16_t>(m, "Cluster2x2f");
+    define_cluster_collector_bindings<float, 2, 2, uint16_t>(m, "Cluster2x2d");
+
+    define_cluster<int, 3, 3, uint16_t>(m, "3x3i");
+    define_cluster<float, 3, 3, uint16_t>(m, "3x3f");
+    define_cluster<double, 3, 3, uint16_t>(m, "3x3d");
+    define_cluster<int, 2, 2, uint16_t>(m, "2x2i");
+    define_cluster<float, 2, 2, uint16_t>(m, "2x2f");
+    define_cluster<double, 2, 2, uint16_t>(m, "2x2d");
+
+    register_calculate_eta<int, 3, 3, uint16_t>(m);
+    register_calculate_eta<float, 3, 3, uint16_t>(m);
+    register_calculate_eta<double, 3, 3, uint16_t>(m);
+    register_calculate_eta<int, 2, 2, uint16_t>(m);
+    register_calculate_eta<float, 2, 2, uint16_t>(m);
+    register_calculate_eta<double, 2, 2, uint16_t>(m);
}
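For context, a minimal sketch of how the suffixed names registered above are used from Python, mirroring the tests added later in this commit; it assumes the extension module is importable as aare._aare:

    import numpy as np
    from aare import _aare

    # Typed container and finder, registered above as *_Cluster3x3i
    cv = _aare.ClusterVector_Cluster3x3i()
    cv.push_back(_aare.Cluster3x3i(0, 0, np.ones(9, dtype=np.int32)))

    finder = _aare.ClusterFinder_Cluster3x3i([100, 100])
    finder.find_clusters(np.zeros(shape=[100, 100]))
    clusters = finder.steal_clusters(False)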
@ -10,6 +10,7 @@
 #include "aare/NDView.hpp"

 namespace py = pybind11;
+using namespace aare;

 // Pass image data back to python as a numpy array
 template <typename T, int64_t Ndim>
@ -40,25 +41,46 @@ template <typename T> py::array return_vector(std::vector<T> *vec) {
 }

 // todo rewrite generic
-template <class T, int Flags> auto get_shape_3d(const py::array_t<T, Flags>& arr) {
+template <class T, int Flags>
+auto get_shape_3d(const py::array_t<T, Flags> &arr) {
    return aare::Shape<3>{arr.shape(0), arr.shape(1), arr.shape(2)};
}

-template <class T, int Flags> auto make_view_3d(py::array_t<T, Flags>& arr) {
+template <class T, int Flags> auto make_view_3d(py::array_t<T, Flags> &arr) {
    return aare::NDView<T, 3>(arr.mutable_data(), get_shape_3d<T, Flags>(arr));
}

-template <class T, int Flags> auto get_shape_2d(const py::array_t<T, Flags>& arr) {
+template <class T, int Flags>
+auto get_shape_2d(const py::array_t<T, Flags> &arr) {
    return aare::Shape<2>{arr.shape(0), arr.shape(1)};
}

-template <class T, int Flags> auto get_shape_1d(const py::array_t<T, Flags>& arr) {
+template <class T, int Flags>
+auto get_shape_1d(const py::array_t<T, Flags> &arr) {
    return aare::Shape<1>{arr.shape(0)};
}

-template <class T, int Flags> auto make_view_2d(py::array_t<T, Flags>& arr) {
+template <class T, int Flags> auto make_view_2d(py::array_t<T, Flags> &arr) {
    return aare::NDView<T, 2>(arr.mutable_data(), get_shape_2d<T, Flags>(arr));
}
-template <class T, int Flags> auto make_view_1d(py::array_t<T, Flags>& arr) {
+template <class T, int Flags> auto make_view_1d(py::array_t<T, Flags> &arr) {
    return aare::NDView<T, 1>(arr.mutable_data(), get_shape_1d<T, Flags>(arr));
}

+template <typename ClusterType> struct fmt_format_trait; // forward declaration
+
+template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
+          typename CoordType>
+struct fmt_format_trait<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> {
+
+    static std::string value() {
+        return fmt::format("T{{{}:x:{}:y:{}:data:}}",
+                           py::format_descriptor<CoordType>::format(),
+                           py::format_descriptor<CoordType>::format(),
+                           fmt::format("({},{}){}", ClusterSizeX, ClusterSizeY,
+                                       py::format_descriptor<T>::format()));
+    }
+};
+
+template <typename ClusterType>
+auto fmt_format = fmt_format_trait<ClusterType>::value();
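For context, a minimal sketch of what the buffer format built by fmt_format_trait corresponds to on the Python side; this is an assumption based on the trait above and on the field access in the new tests (arr['x'], arr['y'], arr['data']), not something spelled out in the commit:

    import numpy as np

    # Assumed layout for Cluster<int, 3, 3, uint16_t>:
    # x and y as uint16 coordinates, data as a 3x3 block of int32
    cluster3x3i_dtype = np.dtype([("x", np.uint16),
                                  ("y", np.uint16),
                                  ("data", np.int32, (3, 3))])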
@ -25,5 +25,10 @@ def pytest_collection_modifyitems(config, items):

 @pytest.fixture
 def test_data_path():
-    return Path(os.environ["AARE_TEST_DATA"])
+    env_value = os.environ.get("AARE_TEST_DATA")
+    if not env_value:
+        raise RuntimeError("Environment variable AARE_TEST_DATA is not set or is empty")
+
+    return Path(env_value)
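For context, a minimal sketch of how the stricter fixture is consumed by the file-based tests in this commit; the test name here is hypothetical, and it assumes AARE_TEST_DATA points at the test-data checkout:

    import pytest
    from aare import ClusterFile

    @pytest.mark.files
    def test_open_cluster_file(test_data_path):
        # Opens a known test file and reads a chunk of clusters
        with ClusterFile(test_data_path / "clust/single_frame_97_clustrers.clust") as f:
            clusters = f.read_clusters(10)
            assert clusters.size == 10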
110 python/tests/test_Cluster.py Normal file
@ -0,0 +1,110 @@
import pytest
import numpy as np

from aare import _aare #import the C++ module
from conftest import test_data_path


def test_cluster_vector_can_be_converted_to_numpy():
    cv = _aare.ClusterVector_Cluster3x3i()
    arr = np.array(cv, copy=False)
    assert arr.shape == (0,) # 4 for x, y, size, energy and 9 for the cluster data


def test_ClusterVector():
    """Test ClusterVector"""

    clustervector = _aare.ClusterVector_Cluster3x3i()
    assert clustervector.cluster_size_x == 3
    assert clustervector.cluster_size_y == 3
    assert clustervector.item_size() == 4+9*4
    assert clustervector.frame_number == 0
    assert clustervector.size == 0

    cluster = _aare.Cluster3x3i(0,0,np.ones(9, dtype=np.int32))

    clustervector.push_back(cluster)
    assert clustervector.size == 1

    with pytest.raises(TypeError): # Or use the appropriate exception type
        clustervector.push_back(_aare.Cluster2x2i(0,0,np.ones(4, dtype=np.int32)))

    with pytest.raises(TypeError):
        clustervector.push_back(_aare.Cluster3x3f(0,0,np.ones(9, dtype=np.float32)))

def test_Interpolator():
    """Test Interpolator"""

    ebins = np.linspace(0,10, 20, dtype=np.float64)
    xbins = np.linspace(0, 5, 30, dtype=np.float64)
    ybins = np.linspace(0, 5, 30, dtype=np.float64)

    etacube = np.zeros(shape=[30, 30, 20], dtype=np.float64)
    interpolator = _aare.Interpolator(etacube, xbins, ybins, ebins)

    assert interpolator.get_ietax().shape == (30,30,20)
    assert interpolator.get_ietay().shape == (30,30,20)
    clustervector = _aare.ClusterVector_Cluster3x3i()

    cluster = _aare.Cluster3x3i(0,0, np.ones(9, dtype=np.int32))
    clustervector.push_back(cluster)

    interpolated_photons = interpolator.interpolate(clustervector)

    assert interpolated_photons.size == 1

    assert interpolated_photons[0]["x"] == -1
    assert interpolated_photons[0]["y"] == -1
    assert interpolated_photons[0]["energy"] == 4 #eta_sum = 4, dx, dy = -1,-1 m_ietax = 0, m_ietay = 0

    clustervector = _aare.ClusterVector_Cluster2x2i()

    cluster = _aare.Cluster2x2i(0,0, np.ones(4, dtype=np.int32))
    clustervector.push_back(cluster)

    interpolated_photons = interpolator.interpolate(clustervector)

    assert interpolated_photons.size == 1

    assert interpolated_photons[0]["x"] == 0
    assert interpolated_photons[0]["y"] == 0
    assert interpolated_photons[0]["energy"] == 4


def test_calculate_eta():
    """Calculate Eta"""
    clusters = _aare.ClusterVector_Cluster3x3i()
    clusters.push_back(_aare.Cluster3x3i(0,0, np.ones(9, dtype=np.int32)))
    clusters.push_back(_aare.Cluster3x3i(0,0, np.array([1,1,1,2,2,2,3,3,3])))

    eta2 = _aare.calculate_eta2(clusters)

    assert eta2.shape == (2,2)
    assert eta2[0,0] == 0.5
    assert eta2[0,1] == 0.5
    assert eta2[1,0] == 0.5
    assert eta2[1,1] == 0.6 #1/5

def test_cluster_finder():
    """Test ClusterFinder"""

    clusterfinder = _aare.ClusterFinder_Cluster3x3i([100,100])

    #frame = np.random.rand(100,100)
    frame = np.zeros(shape=[100,100])

    clusterfinder.find_clusters(frame)

    clusters = clusterfinder.steal_clusters(False) #conversion does not work

    assert clusters.size == 0
64 python/tests/test_ClusterFile.py Normal file
@ -0,0 +1,64 @@

import pytest
import numpy as np
import boost_histogram as bh
import time
from pathlib import Path
import pickle

from aare import ClusterFile
from conftest import test_data_path

@pytest.mark.files
def test_cluster_file(test_data_path):
    """Test ClusterFile"""
    f = ClusterFile(test_data_path / "clust/single_frame_97_clustrers.clust")
    cv = f.read_clusters(10) #conversion does not work


    assert cv.frame_number == 135
    assert cv.size == 10

    #Known data
    #frame_number, num_clusters [135] 97
    #[ 1 200] [0 1 2 3 4 5 6 7 8]
    #[ 2 201] [ 9 10 11 12 13 14 15 16 17]
    #[ 3 202] [18 19 20 21 22 23 24 25 26]
    #[ 4 203] [27 28 29 30 31 32 33 34 35]
    #[ 5 204] [36 37 38 39 40 41 42 43 44]
    #[ 6 205] [45 46 47 48 49 50 51 52 53]
    #[ 7 206] [54 55 56 57 58 59 60 61 62]
    #[ 8 207] [63 64 65 66 67 68 69 70 71]
    #[ 9 208] [72 73 74 75 76 77 78 79 80]
    #[ 10 209] [81 82 83 84 85 86 87 88 89]

    #conversion to numpy array
    arr = np.array(cv, copy = False)

    assert arr.size == 10
    for i in range(10):
        assert arr[i]['x'] == i+1

@pytest.mark.files
def test_read_clusters_and_fill_histogram(test_data_path):
    # Create the histogram
    n_bins = 100
    xmin = -100
    xmax = 1e4
    hist_aare = bh.Histogram(bh.axis.Regular(n_bins, xmin, xmax))

    fname = test_data_path / "clust/beam_En700eV_-40deg_300V_10us_d0_f0_100.clust"

    #Read clusters and fill the histogram with pixel values
    with ClusterFile(fname, chunk_size = 10000) as f:
        for clusters in f:
            arr = np.array(clusters, copy = False)
            hist_aare.fill(arr['data'].flat)


    #Load the histogram from the pickle file
    with open(fname.with_suffix('.pkl'), 'rb') as f:
        hist_py = pickle.load(f)

    #Compare the two histograms
    assert hist_aare == hist_py
54 python/tests/test_ClusterVector.py Normal file
@ -0,0 +1,54 @@
import pytest
import numpy as np
import boost_histogram as bh
import time
from pathlib import Path
import pickle

from aare import ClusterFile
from aare import _aare
from conftest import test_data_path


def test_create_cluster_vector():
    cv = _aare.ClusterVector_Cluster3x3i()
    assert cv.cluster_size_x == 3
    assert cv.cluster_size_y == 3
    assert cv.size == 0


def test_push_back_on_cluster_vector():
    cv = _aare.ClusterVector_Cluster2x2i()
    assert cv.cluster_size_x == 2
    assert cv.cluster_size_y == 2
    assert cv.size == 0

    cluster = _aare.Cluster2x2i(19, 22, np.ones(4, dtype=np.int32))
    cv.push_back(cluster)
    assert cv.size == 1

    arr = np.array(cv, copy=False)
    assert arr[0]['x'] == 19
    assert arr[0]['y'] == 22


def test_make_a_hitmap_from_cluster_vector():
    cv = _aare.ClusterVector_Cluster3x3i()

    # Push back 4 clusters with different positions
    cv.push_back(_aare.Cluster3x3i(0, 0, np.ones(9, dtype=np.int32)))
    cv.push_back(_aare.Cluster3x3i(1, 1, np.ones(9, dtype=np.int32)))
    cv.push_back(_aare.Cluster3x3i(1, 1, np.ones(9, dtype=np.int32)))
    cv.push_back(_aare.Cluster3x3i(2, 2, np.ones(9, dtype=np.int32)))

    ref = np.zeros((5, 5), dtype=np.int32)
    ref[0,0] = 1
    ref[1,1] = 2
    ref[2,2] = 1


    img = _aare.hitmap((5,5), cv)
    # print(img)
    # print(ref)
    assert (img == ref).all()
127 src/CalculateEta.test.cpp Normal file
@ -0,0 +1,127 @@
/************************************************
 * @file CalculateEta.test.cpp
 * @short test case to calculate_eta2
 ***********************************************/

#include "aare/CalculateEta.hpp"
#include "aare/Cluster.hpp"
#include "aare/ClusterFile.hpp"

// #include "catch.hpp"
#include <array>
#include <catch2/catch_all.hpp>
#include <catch2/catch_test_macros.hpp>

using namespace aare;

using ClusterTypes =
    std::variant<Cluster<int, 2, 2>, Cluster<int, 3, 3>, Cluster<int, 5, 5>,
                 Cluster<int, 4, 2>, Cluster<int, 2, 3>>;

auto get_test_parameters() {
    return GENERATE(
        std::make_tuple(ClusterTypes{Cluster<int, 2, 2>{0, 0, {1, 2, 3, 1}}},
                        Eta2<int>{2. / 3, 3. / 4,
                                  static_cast<int>(corner::cBottomLeft), 7}),
        std::make_tuple(
            ClusterTypes{Cluster<int, 3, 3>{0, 0, {1, 2, 3, 4, 5, 6, 1, 2, 7}}},
            Eta2<int>{6. / 11, 2. / 7, static_cast<int>(corner::cTopRight),
                      20}),
        std::make_tuple(ClusterTypes{Cluster<int, 5, 5>{
                            0, 0, {1, 6, 7, 6, 5, 4, 3, 2, 1, 2, 8, 9, 8,
                                   1, 4, 5, 6, 7, 8, 4, 1, 1, 1, 1, 1}}},
                        Eta2<int>{8. / 17, 7. / 15, 9, 30}),
        std::make_tuple(
            ClusterTypes{Cluster<int, 4, 2>{0, 0, {1, 4, 7, 2, 5, 6, 4, 3}}},
            Eta2<int>{4. / 10, 4. / 11, 1, 21}),
        std::make_tuple(
            ClusterTypes{Cluster<int, 2, 3>{0, 0, {1, 3, 2, 3, 4, 2}}},
            Eta2<int>{3. / 5, 2. / 5, 1, 11}));
}

TEST_CASE("compute_largest_2x2_subcluster", "[eta_calculation]") {
    auto [cluster, expected_eta] = get_test_parameters();

    auto [sum, index] = std::visit(
        [](const auto &clustertype) { return clustertype.max_sum_2x2(); },
        cluster);
    CHECK(expected_eta.c == index);
    CHECK(expected_eta.sum == sum);
}

TEST_CASE("calculate_eta2", "[eta_calculation]") {

    auto [cluster, expected_eta] = get_test_parameters();

    auto eta = std::visit(
        [](const auto &clustertype) { return calculate_eta2(clustertype); },
        cluster);

    CHECK(eta.x == expected_eta.x);
    CHECK(eta.y == expected_eta.y);
    CHECK(eta.c == expected_eta.c);
    CHECK(eta.sum == expected_eta.sum);
}

// 3x3 cluster layout (rotated to match the cBottomLeft enum):
// 6, 7, 8
// 3, 4, 5
// 0, 1, 2

TEST_CASE("Calculate eta2 for a 3x3 int32 cluster with the largest 2x2 sum in "
          "the bottom left",
          "[eta_calculation]") {

    // Create a 3x3 cluster
    Cluster<int32_t, 3, 3> cl;
    cl.x = 0;
    cl.y = 0;
    cl.data[0] = 30;
    cl.data[1] = 23;
    cl.data[2] = 5;
    cl.data[3] = 20;
    cl.data[4] = 50;
    cl.data[5] = 3;
    cl.data[6] = 8;
    cl.data[7] = 2;
    cl.data[8] = 3;

    // 8, 2, 3
    // 20, 50, 3
    // 30, 23, 5

    auto eta = calculate_eta2(cl);
    CHECK(eta.c == static_cast<int>(corner::cBottomLeft));
    CHECK(eta.x == 50.0 / (20 + 50)); // 4/(3+4)
    CHECK(eta.y == 50.0 / (23 + 50)); // 4/(1+4)
    CHECK(eta.sum == 30 + 23 + 20 + 50);
}

TEST_CASE("Calculate eta2 for a 3x3 int32 cluster with the largest 2x2 sum in "
          "the top left",
          "[eta_calculation]") {

    // Create a 3x3 cluster
    Cluster<int32_t, 3, 3> cl;
    cl.x = 0;
    cl.y = 0;
    cl.data[0] = 8;
    cl.data[1] = 12;
    cl.data[2] = 5;
    cl.data[3] = 77;
    cl.data[4] = 80;
    cl.data[5] = 3;
    cl.data[6] = 82;
    cl.data[7] = 91;
    cl.data[8] = 3;

    // 82, 91, 3
    // 77, 80, 3
    // 8, 12, 5

    auto eta = calculate_eta2(cl);
    CHECK(eta.c == static_cast<int>(corner::cTopLeft));
    CHECK(eta.x == 80. / (77 + 80)); // 4/(3+4)
    CHECK(eta.y == 91.0 / (91 + 80)); // 7/(7+4)
    CHECK(eta.sum == 77 + 80 + 82 + 91);
}
21 src/Cluster.test.cpp Normal file
@ -0,0 +1,21 @@
/************************************************
 * @file test-Cluster.cpp
 * @short test case for generic Cluster, ClusterVector, and calculate_eta2
 ***********************************************/

#include "aare/Cluster.hpp"
#include "aare/CalculateEta.hpp"
#include "aare/ClusterFile.hpp"

// #include "catch.hpp"
#include <array>
#include <catch2/catch_all.hpp>
#include <catch2/catch_test_macros.hpp>

using namespace aare;

TEST_CASE("Test sum of Cluster", "[.cluster]") {
    Cluster<int, 2, 2> cluster{0, 0, {1, 2, 3, 4}};

    CHECK(cluster.sum() == 10);
}
@ -1,402 +0,0 @@
|
|||||||
#include "aare/ClusterFile.hpp"
|
|
||||||
|
|
||||||
#include <algorithm>
|
|
||||||
|
|
||||||
namespace aare {
|
|
||||||
|
|
||||||
ClusterFile::ClusterFile(const std::filesystem::path &fname, size_t chunk_size,
|
|
||||||
const std::string &mode)
|
|
||||||
: m_chunk_size(chunk_size), m_mode(mode) {
|
|
||||||
|
|
||||||
if (mode == "r") {
|
|
||||||
fp = fopen(fname.c_str(), "rb");
|
|
||||||
if (!fp) {
|
|
||||||
throw std::runtime_error("Could not open file for reading: " +
|
|
||||||
fname.string());
|
|
||||||
}
|
|
||||||
} else if (mode == "w") {
|
|
||||||
fp = fopen(fname.c_str(), "wb");
|
|
||||||
if (!fp) {
|
|
||||||
throw std::runtime_error("Could not open file for writing: " +
|
|
||||||
fname.string());
|
|
||||||
}
|
|
||||||
} else if (mode == "a") {
|
|
||||||
fp = fopen(fname.c_str(), "ab");
|
|
||||||
if (!fp) {
|
|
||||||
throw std::runtime_error("Could not open file for appending: " +
|
|
||||||
fname.string());
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
throw std::runtime_error("Unsupported mode: " + mode);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void ClusterFile::set_roi(ROI roi){
|
|
||||||
m_roi = roi;
|
|
||||||
}
|
|
||||||
|
|
||||||
void ClusterFile::set_noise_map(const NDView<int32_t, 2> noise_map){
|
|
||||||
m_noise_map = NDArray<int32_t, 2>(noise_map);
|
|
||||||
}
|
|
||||||
|
|
||||||
void ClusterFile::set_gain_map(const NDView<double, 2> gain_map){
|
|
||||||
m_gain_map = NDArray<double, 2>(gain_map);
|
|
||||||
|
|
||||||
// Gain map is passed as ADU/keV to avoid dividing in when applying the gain
|
|
||||||
// map we invert it here
|
|
||||||
for (auto &item : m_gain_map->view()) {
|
|
||||||
item = 1.0 / item;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
ClusterFile::~ClusterFile() { close(); }
|
|
||||||
|
|
||||||
void ClusterFile::close() {
|
|
||||||
if (fp) {
|
|
||||||
fclose(fp);
|
|
||||||
fp = nullptr;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void ClusterFile::write_frame(const ClusterVector<int32_t> &clusters) {
|
|
||||||
if (m_mode != "w" && m_mode != "a") {
|
|
||||||
throw std::runtime_error("File not opened for writing");
|
|
||||||
}
|
|
||||||
if (!(clusters.cluster_size_x() == 3) &&
|
|
||||||
!(clusters.cluster_size_y() == 3)) {
|
|
||||||
throw std::runtime_error("Only 3x3 clusters are supported");
|
|
||||||
}
|
|
||||||
//First write the frame number - 4 bytes
|
|
||||||
int32_t frame_number = clusters.frame_number();
|
|
||||||
if(fwrite(&frame_number, sizeof(frame_number), 1, fp)!=1){
|
|
||||||
throw std::runtime_error(LOCATION + "Could not write frame number");
|
|
||||||
}
|
|
||||||
|
|
||||||
//Then write the number of clusters - 4 bytes
|
|
||||||
uint32_t n_clusters = clusters.size();
|
|
||||||
if(fwrite(&n_clusters, sizeof(n_clusters), 1, fp)!=1){
|
|
||||||
throw std::runtime_error(LOCATION + "Could not write number of clusters");
|
|
||||||
}
|
|
||||||
|
|
||||||
//Now write the clusters in the frame
|
|
||||||
if(fwrite(clusters.data(), clusters.item_size(), clusters.size(), fp)!=clusters.size()){
|
|
||||||
throw std::runtime_error(LOCATION + "Could not write clusters");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
ClusterVector<int32_t> ClusterFile::read_clusters(size_t n_clusters){
|
|
||||||
if (m_mode != "r") {
|
|
||||||
throw std::runtime_error("File not opened for reading");
|
|
||||||
}
|
|
||||||
if (m_noise_map || m_roi){
|
|
||||||
return read_clusters_with_cut(n_clusters);
|
|
||||||
}else{
|
|
||||||
return read_clusters_without_cut(n_clusters);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
ClusterVector<int32_t> ClusterFile::read_clusters_without_cut(size_t n_clusters) {
|
|
||||||
if (m_mode != "r") {
|
|
||||||
throw std::runtime_error("File not opened for reading");
|
|
||||||
}
|
|
||||||
|
|
||||||
ClusterVector<int32_t> clusters(3,3, n_clusters);
|
|
||||||
|
|
||||||
int32_t iframe = 0; // frame number needs to be 4 bytes!
|
|
||||||
size_t nph_read = 0;
|
|
||||||
uint32_t nn = m_num_left;
|
|
||||||
uint32_t nph = m_num_left; // number of clusters in frame needs to be 4
|
|
||||||
|
|
||||||
// auto buf = reinterpret_cast<Cluster3x3 *>(clusters.data());
|
|
||||||
auto buf = clusters.data();
|
|
||||||
// if there are photons left from previous frame read them first
|
|
||||||
if (nph) {
|
|
||||||
if (nph > n_clusters) {
|
|
||||||
// if we have more photons left in the frame then photons to read we
|
|
||||||
// read directly the requested number
|
|
||||||
nn = n_clusters;
|
|
||||||
} else {
|
|
||||||
nn = nph;
|
|
||||||
}
|
|
||||||
nph_read += fread((buf + nph_read*clusters.item_size()),
|
|
||||||
clusters.item_size(), nn, fp);
|
|
||||||
m_num_left = nph - nn; // write back the number of photons left
|
|
||||||
}
|
|
||||||
|
|
||||||
if (nph_read < n_clusters) {
|
|
||||||
// keep on reading frames and photons until reaching n_clusters
|
|
||||||
while (fread(&iframe, sizeof(iframe), 1, fp)) {
|
|
||||||
clusters.set_frame_number(iframe);
|
|
||||||
// read number of clusters in frame
|
|
||||||
if (fread(&nph, sizeof(nph), 1, fp)) {
|
|
||||||
if (nph > (n_clusters - nph_read))
|
|
||||||
nn = n_clusters - nph_read;
|
|
||||||
else
|
|
||||||
nn = nph;
|
|
||||||
|
|
||||||
nph_read += fread((buf + nph_read*clusters.item_size()),
|
|
||||||
clusters.item_size(), nn, fp);
|
|
||||||
m_num_left = nph - nn;
|
|
||||||
}
|
|
||||||
if (nph_read >= n_clusters)
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Resize the vector to the number of clusters.
|
|
||||||
// No new allocation, only change bounds.
|
|
||||||
clusters.resize(nph_read);
|
|
||||||
if(m_gain_map)
|
|
||||||
clusters.apply_gain_map(m_gain_map->view());
|
|
||||||
return clusters;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
ClusterVector<int32_t> ClusterFile::read_clusters_with_cut(size_t n_clusters) {
|
|
||||||
ClusterVector<int32_t> clusters(3,3);
|
|
||||||
clusters.reserve(n_clusters);
|
|
||||||
|
|
||||||
// if there are photons left from previous frame read them first
|
|
||||||
if (m_num_left) {
|
|
||||||
while(m_num_left && clusters.size() < n_clusters){
|
|
||||||
Cluster3x3 c = read_one_cluster();
|
|
||||||
if(is_selected(c)){
|
|
||||||
clusters.push_back(c.x, c.y, reinterpret_cast<std::byte*>(c.data));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// we did not have enough clusters left in the previous frame
|
|
||||||
// keep on reading frames until reaching n_clusters
|
|
||||||
if (clusters.size() < n_clusters) {
|
|
||||||
// sanity check
|
|
||||||
if (m_num_left) {
|
|
||||||
throw std::runtime_error(LOCATION + "Entered second loop with clusters left\n");
|
|
||||||
}
|
|
||||||
|
|
||||||
int32_t frame_number = 0; // frame number needs to be 4 bytes!
|
|
||||||
while (fread(&frame_number, sizeof(frame_number), 1, fp)) {
|
|
||||||
if (fread(&m_num_left, sizeof(m_num_left), 1, fp)) {
|
|
||||||
clusters.set_frame_number(frame_number); //cluster vector will hold the last frame number
|
|
||||||
while(m_num_left && clusters.size() < n_clusters){
|
|
||||||
Cluster3x3 c = read_one_cluster();
|
|
||||||
if(is_selected(c)){
|
|
||||||
clusters.push_back(c.x, c.y, reinterpret_cast<std::byte*>(c.data));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// we have enough clusters, break out of the outer while loop
|
|
||||||
if (clusters.size() >= n_clusters)
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
if(m_gain_map)
|
|
||||||
clusters.apply_gain_map(m_gain_map->view());
|
|
||||||
|
|
||||||
return clusters;
|
|
||||||
}
|
|
||||||
|
|
||||||
Cluster3x3 ClusterFile::read_one_cluster(){
|
|
||||||
Cluster3x3 c;
|
|
||||||
auto rc = fread(&c, sizeof(c), 1, fp);
|
|
||||||
if (rc != 1) {
|
|
||||||
throw std::runtime_error(LOCATION + "Could not read cluster");
|
|
||||||
}
|
|
||||||
--m_num_left;
|
|
||||||
return c;
|
|
||||||
}
|
|
||||||
|
|
||||||
ClusterVector<int32_t> ClusterFile::read_frame(){
|
|
||||||
if (m_mode != "r") {
|
|
||||||
throw std::runtime_error(LOCATION + "File not opened for reading");
|
|
||||||
}
|
|
||||||
if (m_noise_map || m_roi){
|
|
||||||
return read_frame_with_cut();
|
|
||||||
}else{
|
|
||||||
return read_frame_without_cut();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
ClusterVector<int32_t> ClusterFile::read_frame_without_cut() {
|
|
||||||
if (m_mode != "r") {
|
|
||||||
throw std::runtime_error("File not opened for reading");
|
|
||||||
}
|
|
||||||
if (m_num_left) {
|
|
||||||
throw std::runtime_error(
|
|
||||||
"There are still photons left in the last frame");
|
|
||||||
}
|
|
||||||
int32_t frame_number;
|
|
||||||
if (fread(&frame_number, sizeof(frame_number), 1, fp) != 1) {
|
|
||||||
throw std::runtime_error(LOCATION + "Could not read frame number");
|
|
||||||
}
|
|
||||||
|
|
||||||
int32_t n_clusters; // Saved as 32bit integer in the cluster file
|
|
||||||
if (fread(&n_clusters, sizeof(n_clusters), 1, fp) != 1) {
|
|
||||||
throw std::runtime_error(LOCATION + "Could not read number of clusters");
|
|
||||||
}
|
|
||||||
|
|
||||||
ClusterVector<int32_t> clusters(3, 3, n_clusters);
|
|
||||||
clusters.set_frame_number(frame_number);
|
|
||||||
|
|
||||||
if (fread(clusters.data(), clusters.item_size(), n_clusters, fp) !=
|
|
||||||
static_cast<size_t>(n_clusters)) {
|
|
||||||
throw std::runtime_error(LOCATION + "Could not read clusters");
|
|
||||||
}
|
|
||||||
clusters.resize(n_clusters);
|
|
||||||
if (m_gain_map)
|
|
||||||
clusters.apply_gain_map(m_gain_map->view());
|
|
||||||
return clusters;
|
|
||||||
}
|
|
||||||
|
|
||||||
ClusterVector<int32_t> ClusterFile::read_frame_with_cut() {
|
|
||||||
if (m_mode != "r") {
|
|
||||||
throw std::runtime_error("File not opened for reading");
|
|
||||||
}
|
|
||||||
if (m_num_left) {
|
|
||||||
throw std::runtime_error(
|
|
||||||
"There are still photons left in the last frame");
|
|
||||||
}
|
|
||||||
int32_t frame_number;
|
|
||||||
if (fread(&frame_number, sizeof(frame_number), 1, fp) != 1) {
|
|
||||||
throw std::runtime_error("Could not read frame number");
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
if (fread(&m_num_left, sizeof(m_num_left), 1, fp) != 1) {
|
|
||||||
throw std::runtime_error("Could not read number of clusters");
|
|
||||||
}
|
|
||||||
|
|
||||||
ClusterVector<int32_t> clusters(3, 3);
|
|
||||||
clusters.reserve(m_num_left);
|
|
||||||
clusters.set_frame_number(frame_number);
|
|
||||||
while(m_num_left){
|
|
||||||
Cluster3x3 c = read_one_cluster();
|
|
||||||
if(is_selected(c)){
|
|
||||||
clusters.push_back(c.x, c.y, reinterpret_cast<std::byte*>(c.data));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if (m_gain_map)
|
|
||||||
clusters.apply_gain_map(m_gain_map->view());
|
|
||||||
return clusters;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
bool ClusterFile::is_selected(Cluster3x3 &cl) {
|
|
||||||
//Should fail fast
|
|
||||||
if (m_roi) {
|
|
||||||
if (!(m_roi->contains(cl.x, cl.y))) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if (m_noise_map){
|
|
||||||
int32_t sum_1x1 = cl.data[4]; // central pixel
|
|
||||||
int32_t sum_2x2 = cl.sum_2x2(); // highest sum of 2x2 subclusters
|
|
||||||
int32_t sum_3x3 = cl.sum(); // sum of all pixels
|
|
||||||
|
|
||||||
auto noise = (*m_noise_map)(cl.y, cl.x); //TODO! check if this is correct
|
|
||||||
if (sum_1x1 <= noise || sum_2x2 <= 2 * noise || sum_3x3 <= 3 * noise) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
//we passed all checks
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
NDArray<double, 2> calculate_eta2(ClusterVector<int> &clusters) {
|
|
||||||
//TOTO! make work with 2x2 clusters
|
|
||||||
NDArray<double, 2> eta2({static_cast<int64_t>(clusters.size()), 2});
|
|
||||||
|
|
||||||
if (clusters.cluster_size_x() == 3 || clusters.cluster_size_y() == 3) {
|
|
||||||
for (size_t i = 0; i < clusters.size(); i++) {
|
|
||||||
auto e = calculate_eta2(clusters.at<Cluster3x3>(i));
|
|
||||||
eta2(i, 0) = e.x;
|
|
||||||
eta2(i, 1) = e.y;
|
|
||||||
}
|
|
||||||
}else if(clusters.cluster_size_x() == 2 || clusters.cluster_size_y() == 2){
|
|
||||||
for (size_t i = 0; i < clusters.size(); i++) {
|
|
||||||
auto e = calculate_eta2(clusters.at<Cluster2x2>(i));
|
|
||||||
eta2(i, 0) = e.x;
|
|
||||||
eta2(i, 1) = e.y;
|
|
||||||
}
|
|
||||||
}else{
|
|
||||||
throw std::runtime_error("Only 3x3 and 2x2 clusters are supported");
|
|
||||||
}
|
|
||||||
|
|
||||||
return eta2;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Calculate the eta2 values for a 3x3 cluster and return them in a Eta2 struct
|
|
||||||
* containing etay, etax and the corner of the cluster.
|
|
||||||
*/
|
|
||||||
Eta2 calculate_eta2(Cluster3x3 &cl) {
|
|
||||||
Eta2 eta{};
|
|
||||||
|
|
||||||
std::array<int32_t, 4> tot2;
|
|
||||||
tot2[0] = cl.data[0] + cl.data[1] + cl.data[3] + cl.data[4];
|
|
||||||
tot2[1] = cl.data[1] + cl.data[2] + cl.data[4] + cl.data[5];
|
|
||||||
tot2[2] = cl.data[3] + cl.data[4] + cl.data[6] + cl.data[7];
|
|
||||||
tot2[3] = cl.data[4] + cl.data[5] + cl.data[7] + cl.data[8];
|
|
||||||
|
|
||||||
auto c = std::max_element(tot2.begin(), tot2.end()) - tot2.begin();
|
|
||||||
eta.sum = tot2[c];
|
|
||||||
switch (c) {
|
|
||||||
case cBottomLeft:
|
|
||||||
if ((cl.data[3] + cl.data[4]) != 0)
|
|
||||||
eta.x =
|
|
||||||
static_cast<double>(cl.data[4]) / (cl.data[3] + cl.data[4]);
|
|
||||||
if ((cl.data[1] + cl.data[4]) != 0)
|
|
||||||
eta.y =
|
|
||||||
static_cast<double>(cl.data[4]) / (cl.data[1] + cl.data[4]);
|
|
||||||
eta.c = cBottomLeft;
|
|
||||||
break;
|
|
||||||
case cBottomRight:
|
|
||||||
if ((cl.data[2] + cl.data[5]) != 0)
|
|
||||||
eta.x =
|
|
||||||
static_cast<double>(cl.data[5]) / (cl.data[4] + cl.data[5]);
|
|
||||||
if ((cl.data[1] + cl.data[4]) != 0)
|
|
||||||
eta.y =
|
|
||||||
static_cast<double>(cl.data[4]) / (cl.data[1] + cl.data[4]);
|
|
||||||
eta.c = cBottomRight;
|
|
||||||
break;
|
|
||||||
case cTopLeft:
|
|
||||||
if ((cl.data[7] + cl.data[4]) != 0)
|
|
||||||
eta.x =
|
|
||||||
static_cast<double>(cl.data[4]) / (cl.data[3] + cl.data[4]);
|
|
||||||
if ((cl.data[7] + cl.data[4]) != 0)
|
|
||||||
eta.y =
|
|
||||||
static_cast<double>(cl.data[7]) / (cl.data[7] + cl.data[4]);
|
|
||||||
eta.c = cTopLeft;
|
|
||||||
break;
|
|
||||||
case cTopRight:
|
|
||||||
if ((cl.data[5] + cl.data[4]) != 0)
|
|
||||||
eta.x =
|
|
||||||
static_cast<double>(cl.data[5]) / (cl.data[5] + cl.data[4]);
|
|
||||||
if ((cl.data[7] + cl.data[4]) != 0)
|
|
||||||
eta.y =
|
|
||||||
static_cast<double>(cl.data[7]) / (cl.data[7] + cl.data[4]);
|
|
||||||
eta.c = cTopRight;
|
|
||||||
break;
|
|
||||||
// no default to allow compiler to warn about missing cases
|
|
||||||
}
|
|
||||||
return eta;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
Eta2 calculate_eta2(Cluster2x2 &cl) {
|
|
||||||
Eta2 eta{};
|
|
||||||
if ((cl.data[0] + cl.data[1]) != 0)
|
|
||||||
eta.x = static_cast<double>(cl.data[1]) / (cl.data[0] + cl.data[1]);
|
|
||||||
if ((cl.data[0] + cl.data[2]) != 0)
|
|
||||||
eta.y = static_cast<double>(cl.data[2]) / (cl.data[0] + cl.data[2]);
|
|
||||||
eta.sum = cl.data[0] + cl.data[1] + cl.data[2]+ cl.data[3];
|
|
||||||
eta.c = cBottomLeft; //TODO! This is not correct, but need to put something
|
|
||||||
return eta;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
} // namespace aare
|
|
@ -1,35 +1,39 @@
|
|||||||
#include "aare/ClusterFile.hpp"
|
#include "aare/ClusterFile.hpp"
|
||||||
#include "test_config.hpp"
|
#include "test_config.hpp"
|
||||||
|
|
||||||
|
|
||||||
#include "aare/defs.hpp"
|
#include "aare/defs.hpp"
|
||||||
|
#include <algorithm>
|
||||||
#include <catch2/catch_test_macros.hpp>
|
#include <catch2/catch_test_macros.hpp>
|
||||||
#include <filesystem>
|
#include <filesystem>
|
||||||
|
|
||||||
|
using aare::Cluster;
|
||||||
|
|
||||||
|
|
||||||
using aare::ClusterFile;
|
using aare::ClusterFile;
|
||||||
|
using aare::ClusterVector;
|
||||||
|
|
||||||
|
|
||||||
TEST_CASE("Read one frame from a a cluster file", "[.files]") {
|
TEST_CASE("Read one frame from a cluster file", "[.files]") {
|
||||||
//We know that the frame has 97 clusters
|
//We know that the frame has 97 clusters
|
||||||
auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust";
|
auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust";
|
||||||
REQUIRE(std::filesystem::exists(fpath));
|
REQUIRE(std::filesystem::exists(fpath));
|
||||||
|
|
||||||
ClusterFile f(fpath);
|
ClusterFile<Cluster<int32_t, 3, 3>> f(fpath);
|
||||||
auto clusters = f.read_frame();
|
auto clusters = f.read_frame();
|
||||||
REQUIRE(clusters.size() == 97);
|
CHECK(clusters.size() == 97);
|
||||||
REQUIRE(clusters.frame_number() == 135);
|
CHECK(clusters.frame_number() == 135);
|
||||||
|
CHECK(clusters[0].x == 1);
|
||||||
|
CHECK(clusters[0].y == 200);
|
||||||
|
int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
|
||||||
|
CHECK(std::equal(std::begin(clusters[0].data), std::end(clusters[0].data),
|
||||||
|
std::begin(expected_cluster_data)));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
TEST_CASE("Read one frame using ROI", "[.files]") {
|
TEST_CASE("Read one frame using ROI", "[.files]") {
|
||||||
//We know that the frame has 97 clusters
|
// We know that the frame has 97 clusters
|
||||||
auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust";
|
auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust";
|
||||||
REQUIRE(std::filesystem::exists(fpath));
|
REQUIRE(std::filesystem::exists(fpath));
|
||||||
|
|
||||||
ClusterFile f(fpath);
|
ClusterFile<Cluster<int32_t, 3, 3>> f(fpath);
|
||||||
aare::ROI roi;
|
aare::ROI roi;
|
||||||
roi.xmin = 0;
|
roi.xmin = 0;
|
||||||
roi.xmax = 50;
|
roi.xmax = 50;
|
||||||
@ -40,45 +44,308 @@ TEST_CASE("Read one frame using ROI", "[.files]") {
|
|||||||
REQUIRE(clusters.size() == 49);
|
REQUIRE(clusters.size() == 49);
|
||||||
REQUIRE(clusters.frame_number() == 135);
|
REQUIRE(clusters.frame_number() == 135);
|
||||||
|
|
||||||
//Check that all clusters are within the ROI
|
// Check that all clusters are within the ROI
|
||||||
for (size_t i = 0; i < clusters.size(); i++) {
|
for (size_t i = 0; i < clusters.size(); i++) {
|
||||||
auto c = clusters.at<aare::Cluster3x3>(i);
|
auto c = clusters[i];
|
||||||
REQUIRE(c.x >= roi.xmin);
|
REQUIRE(c.x >= roi.xmin);
|
||||||
REQUIRE(c.x <= roi.xmax);
|
REQUIRE(c.x <= roi.xmax);
|
||||||
REQUIRE(c.y >= roi.ymin);
|
REQUIRE(c.y >= roi.ymin);
|
||||||
REQUIRE(c.y <= roi.ymax);
|
REQUIRE(c.y <= roi.ymax);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
CHECK(clusters[0].x == 1);
|
||||||
|
CHECK(clusters[0].y == 200);
|
||||||
|
int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
|
||||||
|
CHECK(std::equal(std::begin(clusters[0].data), std::end(clusters[0].data),
|
||||||
|
std::begin(expected_cluster_data)));
|
||||||
}
|
}
 
 TEST_CASE("Read clusters from single frame file", "[.files]") {
 
+    // frame_number, num_clusters [135] 97
+    // [ 1 200] [0 1 2 3 4 5 6 7 8]
+    // [ 2 201] [ 9 10 11 12 13 14 15 16 17]
+    // [ 3 202] [18 19 20 21 22 23 24 25 26]
+    // [ 4 203] [27 28 29 30 31 32 33 34 35]
+    // [ 5 204] [36 37 38 39 40 41 42 43 44]
+    // [ 6 205] [45 46 47 48 49 50 51 52 53]
+    // [ 7 206] [54 55 56 57 58 59 60 61 62]
+    // [ 8 207] [63 64 65 66 67 68 69 70 71]
+    // [ 9 208] [72 73 74 75 76 77 78 79 80]
+    // [ 10 209] [81 82 83 84 85 86 87 88 89]
+    // [ 11 210] [90 91 92 93 94 95 96 97 98]
+    // [ 12 211] [ 99 100 101 102 103 104 105 106 107]
+    // [ 13 212] [108 109 110 111 112 113 114 115 116]
+    // [ 14 213] [117 118 119 120 121 122 123 124 125]
+    // [ 15 214] [126 127 128 129 130 131 132 133 134]
+    // [ 16 215] [135 136 137 138 139 140 141 142 143]
+    // [ 17 216] [144 145 146 147 148 149 150 151 152]
+    // [ 18 217] [153 154 155 156 157 158 159 160 161]
+    // [ 19 218] [162 163 164 165 166 167 168 169 170]
+    // [ 20 219] [171 172 173 174 175 176 177 178 179]
+    // [ 21 220] [180 181 182 183 184 185 186 187 188]
+    // [ 22 221] [189 190 191 192 193 194 195 196 197]
+    // [ 23 222] [198 199 200 201 202 203 204 205 206]
+    // [ 24 223] [207 208 209 210 211 212 213 214 215]
+    // [ 25 224] [216 217 218 219 220 221 222 223 224]
+    // [ 26 225] [225 226 227 228 229 230 231 232 233]
+    // [ 27 226] [234 235 236 237 238 239 240 241 242]
+    // [ 28 227] [243 244 245 246 247 248 249 250 251]
+    // [ 29 228] [252 253 254 255 256 257 258 259 260]
+    // [ 30 229] [261 262 263 264 265 266 267 268 269]
+    // [ 31 230] [270 271 272 273 274 275 276 277 278]
+    // [ 32 231] [279 280 281 282 283 284 285 286 287]
+    // [ 33 232] [288 289 290 291 292 293 294 295 296]
+    // [ 34 233] [297 298 299 300 301 302 303 304 305]
+    // [ 35 234] [306 307 308 309 310 311 312 313 314]
+    // [ 36 235] [315 316 317 318 319 320 321 322 323]
+    // [ 37 236] [324 325 326 327 328 329 330 331 332]
+    // [ 38 237] [333 334 335 336 337 338 339 340 341]
+    // [ 39 238] [342 343 344 345 346 347 348 349 350]
+    // [ 40 239] [351 352 353 354 355 356 357 358 359]
+    // [ 41 240] [360 361 362 363 364 365 366 367 368]
+    // [ 42 241] [369 370 371 372 373 374 375 376 377]
+    // [ 43 242] [378 379 380 381 382 383 384 385 386]
+    // [ 44 243] [387 388 389 390 391 392 393 394 395]
+    // [ 45 244] [396 397 398 399 400 401 402 403 404]
+    // [ 46 245] [405 406 407 408 409 410 411 412 413]
+    // [ 47 246] [414 415 416 417 418 419 420 421 422]
+    // [ 48 247] [423 424 425 426 427 428 429 430 431]
+    // [ 49 248] [432 433 434 435 436 437 438 439 440]
+    // [ 50 249] [441 442 443 444 445 446 447 448 449]
+    // [ 51 250] [450 451 452 453 454 455 456 457 458]
+    // [ 52 251] [459 460 461 462 463 464 465 466 467]
+    // [ 53 252] [468 469 470 471 472 473 474 475 476]
+    // [ 54 253] [477 478 479 480 481 482 483 484 485]
+    // [ 55 254] [486 487 488 489 490 491 492 493 494]
+    // [ 56 255] [495 496 497 498 499 500 501 502 503]
+    // [ 57 256] [504 505 506 507 508 509 510 511 512]
+    // [ 58 257] [513 514 515 516 517 518 519 520 521]
+    // [ 59 258] [522 523 524 525 526 527 528 529 530]
+    // [ 60 259] [531 532 533 534 535 536 537 538 539]
+    // [ 61 260] [540 541 542 543 544 545 546 547 548]
+    // [ 62 261] [549 550 551 552 553 554 555 556 557]
+    // [ 63 262] [558 559 560 561 562 563 564 565 566]
+    // [ 64 263] [567 568 569 570 571 572 573 574 575]
+    // [ 65 264] [576 577 578 579 580 581 582 583 584]
+    // [ 66 265] [585 586 587 588 589 590 591 592 593]
+    // [ 67 266] [594 595 596 597 598 599 600 601 602]
+    // [ 68 267] [603 604 605 606 607 608 609 610 611]
+    // [ 69 268] [612 613 614 615 616 617 618 619 620]
+    // [ 70 269] [621 622 623 624 625 626 627 628 629]
+    // [ 71 270] [630 631 632 633 634 635 636 637 638]
+    // [ 72 271] [639 640 641 642 643 644 645 646 647]
+    // [ 73 272] [648 649 650 651 652 653 654 655 656]
+    // [ 74 273] [657 658 659 660 661 662 663 664 665]
+    // [ 75 274] [666 667 668 669 670 671 672 673 674]
+    // [ 76 275] [675 676 677 678 679 680 681 682 683]
+    // [ 77 276] [684 685 686 687 688 689 690 691 692]
+    // [ 78 277] [693 694 695 696 697 698 699 700 701]
+    // [ 79 278] [702 703 704 705 706 707 708 709 710]
+    // [ 80 279] [711 712 713 714 715 716 717 718 719]
+    // [ 81 280] [720 721 722 723 724 725 726 727 728]
+    // [ 82 281] [729 730 731 732 733 734 735 736 737]
+    // [ 83 282] [738 739 740 741 742 743 744 745 746]
+    // [ 84 283] [747 748 749 750 751 752 753 754 755]
+    // [ 85 284] [756 757 758 759 760 761 762 763 764]
+    // [ 86 285] [765 766 767 768 769 770 771 772 773]
+    // [ 87 286] [774 775 776 777 778 779 780 781 782]
+    // [ 88 287] [783 784 785 786 787 788 789 790 791]
+    // [ 89 288] [792 793 794 795 796 797 798 799 800]
+    // [ 90 289] [801 802 803 804 805 806 807 808 809]
+    // [ 91 290] [810 811 812 813 814 815 816 817 818]
+    // [ 92 291] [819 820 821 822 823 824 825 826 827]
+    // [ 93 292] [828 829 830 831 832 833 834 835 836]
+    // [ 94 293] [837 838 839 840 841 842 843 844 845]
+    // [ 95 294] [846 847 848 849 850 851 852 853 854]
+    // [ 96 295] [855 856 857 858 859 860 861 862 863]
+    // [ 97 296] [864 865 866 867 868 869 870 871 872]
+
     auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust";
     REQUIRE(std::filesystem::exists(fpath));
 
     SECTION("Read fewer clusters than available") {
-        ClusterFile f(fpath);
+        ClusterFile<Cluster<int32_t, 3, 3>> f(fpath);
         auto clusters = f.read_clusters(50);
         REQUIRE(clusters.size() == 50);
         REQUIRE(clusters.frame_number() == 135);
+        int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
+        REQUIRE(clusters[0].x == 1);
+        REQUIRE(clusters[0].y == 200);
+        CHECK(std::equal(std::begin(clusters[0].data),
+                         std::end(clusters[0].data),
+                         std::begin(expected_cluster_data)));
     }
     SECTION("Read more clusters than available") {
-        ClusterFile f(fpath);
+        ClusterFile<Cluster<int32_t, 3, 3>> f(fpath);
         // 100 is the maximum number of clusters read
         auto clusters = f.read_clusters(100);
         REQUIRE(clusters.size() == 97);
         REQUIRE(clusters.frame_number() == 135);
+        int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
+        REQUIRE(clusters[0].x == 1);
+        REQUIRE(clusters[0].y == 200);
+        CHECK(std::equal(std::begin(clusters[0].data),
+                         std::end(clusters[0].data),
+                         std::begin(expected_cluster_data)));
     }
     SECTION("Read all clusters") {
-        ClusterFile f(fpath);
+        ClusterFile<Cluster<int32_t, 3, 3>> f(fpath);
         auto clusters = f.read_clusters(97);
         REQUIRE(clusters.size() == 97);
         REQUIRE(clusters.frame_number() == 135);
+        REQUIRE(clusters[0].x == 1);
+        REQUIRE(clusters[0].y == 200);
+        int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
+        CHECK(std::equal(std::begin(clusters[0].data),
+                         std::end(clusters[0].data),
+                         std::begin(expected_cluster_data)));
     }
 }
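
The comment block above fully specifies the reference file, so it can be sanity-checked outside the test suite. Below is a minimal stand-alone sketch; the record layout (a frame header of frame number and cluster count followed by packed records of int16_t x, int16_t y and nine int32_t data values) is an assumption inferred from the comment and the Cluster<int32_t, 3, 3> type, not taken from the ClusterFile implementation itself.

// Hypothetical stand-alone dump of single_frame_97_clustrers.clust.
// Assumed layout: int32_t frame_number, int32_t n_clusters, then per cluster
// int16_t x, int16_t y, int32_t data[9]. Verify against ClusterFile before use.
#include <cstdint>
#include <fstream>
#include <iostream>

int main() {
    std::ifstream f("single_frame_97_clustrers.clust", std::ios::binary);
    int32_t frame_number = 0;
    int32_t n_clusters = 0;
    f.read(reinterpret_cast<char *>(&frame_number), sizeof(frame_number));
    f.read(reinterpret_cast<char *>(&n_clusters), sizeof(n_clusters));
    std::cout << frame_number << ' ' << n_clusters << '\n'; // expect 135 97
    for (int32_t i = 0; i < n_clusters; ++i) {
        int16_t x = 0, y = 0;
        int32_t data[9] = {};
        f.read(reinterpret_cast<char *>(&x), sizeof(x));
        f.read(reinterpret_cast<char *>(&y), sizeof(y));
        f.read(reinterpret_cast<char *>(data), sizeof(data));
        std::cout << '[' << x << ' ' << y << "] " << data[0] << " ... " << data[8] << '\n';
    }
}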
 
+TEST_CASE("Read clusters from single frame file with ROI", "[.files]") {
+    auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust";
+    REQUIRE(std::filesystem::exists(fpath));
+
+    ClusterFile<Cluster<int32_t, 3, 3>> f(fpath);
+
+    aare::ROI roi;
+    roi.xmin = 0;
+    roi.xmax = 50;
+    roi.ymin = 200;
+    roi.ymax = 249;
+    f.set_roi(roi);
+
+    auto clusters = f.read_clusters(10);
+
+    CHECK(clusters.size() == 10);
+    CHECK(clusters.frame_number() == 135);
+    CHECK(clusters[0].x == 1);
+    CHECK(clusters[0].y == 200);
+    int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
+    CHECK(std::equal(std::begin(clusters[0].data), std::end(clusters[0].data),
+                     std::begin(expected_cluster_data)));
+}
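
For reference, the ROI used here keeps a cluster only when its coordinates fall inside inclusive bounds, which is what the earlier ROI assertions check. A small illustrative sketch of that predicate; the ROI struct is a minimal stand-in for aare::ROI, not the library type itself.

#include <cstdint>

// Minimal stand-in for aare::ROI, for illustration only.
struct ROI {
    int64_t xmin, xmax, ymin, ymax;
};

// Inclusive containment, matching the bounds asserted in the ROI tests.
inline bool inside(const ROI &roi, int16_t x, int16_t y) {
    return x >= roi.xmin && x <= roi.xmax && y >= roi.ymin && y <= roi.ymax;
}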
 
+TEST_CASE("Read cluster from multiple frame file", "[.files]") {
+
+    using ClusterType = Cluster<double, 2, 2>;
+
+    auto fpath =
+        test_data_path() / "clust" / "Two_frames_2x2double_test_clusters.clust";
+
+    REQUIRE(std::filesystem::exists(fpath));
+
+    // Two_frames_2x2double_test_clusters.clust
+    // frame number, num_clusters 0, 4
+    //[10, 20], {0. ,0., 0., 0.}
+    //[11, 30], {1., 1., 1., 1.}
+    //[12, 40], {2., 2., 2., 2.}
+    //[13, 50], {3., 3., 3., 3.}
+    // 1,4
+    //[10, 20], {4., 4., 4., 4.}
+    //[11, 30], {5., 5., 5., 5.}
+    //[12, 40], {6., 6., 6., 6.}
+    //[13, 50], {7., 7., 7., 7.}
+
+    SECTION("Read clusters from both frames") {
+        ClusterFile<ClusterType> f(fpath);
+        auto clusters = f.read_clusters(2);
+        REQUIRE(clusters.size() == 2);
+        REQUIRE(clusters.frame_number() == 0);
+
+        auto clusters1 = f.read_clusters(3);
+
+        REQUIRE(clusters1.size() == 3);
+        REQUIRE(clusters1.frame_number() == 1);
+    }
+
+    SECTION("Read all clusters") {
+        ClusterFile<ClusterType> f(fpath);
+        auto clusters = f.read_clusters(8);
+        REQUIRE(clusters.size() == 8);
+        REQUIRE(clusters.frame_number() == 1);
+    }
+
+    SECTION("Read clusters from one frame") {
+        ClusterFile<ClusterType> f(fpath);
+        auto clusters = f.read_clusters(2);
+        REQUIRE(clusters.size() == 2);
+        REQUIRE(clusters.frame_number() == 0);
+
+        auto clusters1 = f.read_clusters(1);
+
+        REQUIRE(clusters1.size() == 1);
+        REQUIRE(clusters1.frame_number() == 0);
+    }
+}
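
Given the layout comment, the same file could also be consumed frame by frame. A hedged sketch, written as if it lived in this test file (it assumes read_frame() returns the next whole frame, as it does in the single-frame tests elsewhere in this file):

// Sketch only: read the two frames described in the comment above.
ClusterFile<Cluster<double, 2, 2>> f2(fpath);

auto frame0 = f2.read_frame(); // 4 clusters holding values 0..3
CHECK(frame0.size() == 4);
CHECK(frame0.frame_number() == 0);

auto frame1 = f2.read_frame(); // 4 clusters holding values 4..7
CHECK(frame1.size() == 4);
CHECK(frame1.frame_number() == 1);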
 
+TEST_CASE("Write cluster with potential padding", "[.files][.ClusterFile]") {
+
+    using ClusterType = Cluster<double, 3, 3>;
+
+    REQUIRE(std::filesystem::exists(test_data_path() / "clust"));
+
+    auto fpath = test_data_path() / "clust" / "single_frame_2_clusters.clust";
+
+    ClusterFile<ClusterType> file(fpath, 1000, "w");
+
+    ClusterVector<ClusterType> clustervec(2);
+    int16_t coordinate = 5;
+    clustervec.push_back(ClusterType{
+        coordinate, coordinate, {0., 0., 0., 0., 0., 0., 0., 0., 0.}});
+    clustervec.push_back(ClusterType{
+        coordinate, coordinate, {0., 0., 0., 0., 0., 0., 0., 0., 0.}});
+
+    file.write_frame(clustervec);
+
+    file.close();
+
+    file.open("r");
+
+    auto read_cluster_vector = file.read_frame();
+
+    CHECK(read_cluster_vector.size() == 2);
+    CHECK(read_cluster_vector.frame_number() == 0);
+
+    CHECK(read_cluster_vector[0].x == clustervec[0].x);
+    CHECK(read_cluster_vector[0].y == clustervec[0].y);
+    CHECK(std::equal(
+        clustervec[0].data.begin(), clustervec[0].data.end(),
+        read_cluster_vector[0].data.begin(), [](double a, double b) {
+            return std::abs(a - b) < std::numeric_limits<double>::epsilon();
+        }));
+
+    CHECK(read_cluster_vector[1].x == clustervec[1].x);
+    CHECK(read_cluster_vector[1].y == clustervec[1].y);
+    CHECK(std::equal(
+        clustervec[1].data.begin(), clustervec[1].data.end(),
+        read_cluster_vector[1].data.begin(), [](double a, double b) {
+            return std::abs(a - b) < std::numeric_limits<double>::epsilon();
+        }));
+}
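
The "potential padding" in the title presumably refers to struct layout: with int16_t coordinates, the fields of a Cluster<double, 3, 3> sum to 2 + 2 + 72 = 76 bytes, but because double wants 8-byte alignment most ABIs insert padding and report sizeof as 80, so a raw struct round-trip through a file has to tolerate those padding bytes. A stand-alone illustration with a mirror struct (the layout is assumed to match aare::Cluster, i.e. int16_t x, int16_t y, then the data array):

#include <cstdint>
#include <iostream>

// Mirror of the assumed aare::Cluster layout -- for illustration only.
template <typename T, int NX, int NY> struct MirrorCluster {
    int16_t x;
    int16_t y;
    T data[NX * NY];
};

int main() {
    // 2 + 2 + 2*2*4 = 20 bytes; int32_t only needs 4-byte alignment, so no padding.
    std::cout << sizeof(MirrorCluster<int32_t, 2, 2>) << '\n'; // typically 20
    // 2 + 2 + 72 = 76 bytes of fields, but 8-byte alignment of double usually
    // pushes the data array to offset 8, giving sizeof == 80.
    std::cout << sizeof(MirrorCluster<double, 3, 3>) << '\n';  // typically 80
}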
 
+TEST_CASE("Read frame and modify cluster data", "[.files][.ClusterFile]") {
+    auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust";
+    REQUIRE(std::filesystem::exists(fpath));
+
+    ClusterFile<Cluster<int32_t, 3, 3>> f(fpath);
+
+    auto clusters = f.read_frame();
+    CHECK(clusters.size() == 97);
+    CHECK(clusters.frame_number() == 135);
+
+    int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
+    clusters.push_back(
+        Cluster<int32_t, 3, 3>{0, 0, {0, 1, 2, 3, 4, 5, 6, 7, 8}});
+
+    CHECK(clusters.size() == 98);
+    CHECK(clusters[0].x == 1);
+    CHECK(clusters[0].y == 200);
+
+    CHECK(std::equal(std::begin(clusters[0].data), std::end(clusters[0].data),
+                     std::begin(expected_cluster_data)));
+}
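
A natural follow-up to the test above would be to persist the modified vector. Shown only as a hedged sketch reusing the APIs already exercised in this file (write_frame requires a file opened in "w" mode); the output path is made up:

// Sketch: write the 98-cluster vector to a new, hypothetical file.
auto out_path = test_data_path() / "clust" / "modified_97_plus_1.clust";
ClusterFile<Cluster<int32_t, 3, 3>> out(out_path, 1000, "w");
out.write_frame(clusters); // 'clusters' now holds 98 entries
out.close();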
 
@@ -1,19 +1,18 @@
 #include "aare/ClusterFinder.hpp"
 #include "aare/Pedestal.hpp"
-#include <catch2/matchers/catch_matchers_floating_point.hpp>
 #include <catch2/catch_test_macros.hpp>
+#include <catch2/matchers/catch_matchers_floating_point.hpp>
 #include <chrono>
 #include <random>
 
 using namespace aare;
 
-//TODO! Find a way to test the cluster finder
+// TODO! Find a way to test the cluster finder
 
 // class ClusterFinderUnitTest : public ClusterFinder {
 //   public:
-//     ClusterFinderUnitTest(int cluster_sizeX, int cluster_sizeY, double nSigma = 5.0, double threshold = 0.0)
+//     ClusterFinderUnitTest(int cluster_sizeX, int cluster_sizeY, double nSigma
+//     = 5.0, double threshold = 0.0)
 //         : ClusterFinder(cluster_sizeX, cluster_sizeY, nSigma, threshold) {}
 //     double get_c2() { return c2; }
 //     double get_c3() { return c3; }
@@ -37,8 +36,8 @@ using namespace aare;
 //     REQUIRE_THAT(cf.get_c3(), Catch::Matchers::WithinRel(c3, 1e-9));
 // }
 
-TEST_CASE("Construct a cluster finder"){
-    ClusterFinder clusterFinder({400,400}, {3,3});
+TEST_CASE("Construct a cluster finder") {
+    ClusterFinder clusterFinder({400, 400});
     // REQUIRE(clusterFinder.get_cluster_sizeX() == 3);
     // REQUIRE(clusterFinder.get_cluster_sizeY() == 3);
     // REQUIRE(clusterFinder.get_threshold() == 1);
@@ -49,16 +48,17 @@ TEST_CASE("Construct a cluster finder"){
 //     aare::Pedestal pedestal(10, 10, 5);
 //     NDArray<double, 2> frame({10, 10});
 //     frame = 0;
-//     ClusterFinder clusterFinder(3, 3, 1, 1); // 3x3 cluster, 1 nSigma, 1 threshold
+//     ClusterFinder clusterFinder(3, 3, 1, 1); // 3x3 cluster, 1 nSigma, 1
+//     threshold
 
-//     auto clusters = clusterFinder.find_clusters_without_threshold(frame.span(), pedestal);
+//     auto clusters =
+//     clusterFinder.find_clusters_without_threshold(frame.span(), pedestal);
 
 //     REQUIRE(clusters.size() == 0);
 
 //     frame(5, 5) = 10;
-//     clusters = clusterFinder.find_clusters_without_threshold(frame.span(), pedestal);
-//     REQUIRE(clusters.size() == 1);
-//     REQUIRE(clusters[0].x == 5);
+//     clusters = clusterFinder.find_clusters_without_threshold(frame.span(),
+//     pedestal); REQUIRE(clusters.size() == 1); REQUIRE(clusters[0].x == 5);
 //     REQUIRE(clusters[0].y == 5);
 //     for (int i = 0; i < 3; i++) {
 //         for (int j = 0; j < 3; j++) {
 
src/ClusterFinderMT.test.cpp (new file, 99 lines)
@@ -0,0 +1,99 @@
+
+#include "aare/ClusterFinderMT.hpp"
+#include "aare/Cluster.hpp"
+#include "aare/ClusterCollector.hpp"
+#include "aare/File.hpp"
+
+#include "test_config.hpp"
+
+#include <catch2/catch_test_macros.hpp>
+#include <filesystem>
+#include <memory>
+
+using namespace aare;
+
+// wrapper function to access private member variables for testing
+template <typename ClusterType, typename FRAME_TYPE = uint16_t,
+          typename PEDESTAL_TYPE = double>
+class ClusterFinderMTWrapper
+    : public ClusterFinderMT<ClusterType, FRAME_TYPE, PEDESTAL_TYPE> {
+
+  public:
+    ClusterFinderMTWrapper(Shape<2> image_size, PEDESTAL_TYPE nSigma = 5.0,
+                           size_t capacity = 2000, size_t n_threads = 3)
+        : ClusterFinderMT<ClusterType, FRAME_TYPE, PEDESTAL_TYPE>(
+              image_size, nSigma, capacity, n_threads) {}
+
+    size_t get_m_input_queues_size() const {
+        return this->m_input_queues.size();
+    }
+
+    size_t get_m_output_queues_size() const {
+        return this->m_output_queues.size();
+    }
+
+    size_t get_m_cluster_finders_size() const {
+        return this->m_cluster_finders.size();
+    }
+
+    bool m_output_queues_are_empty() const {
+        for (auto &queue : this->m_output_queues) {
+            if (!queue->isEmpty())
+                return false;
+        }
+        return true;
+    }
+
+    bool m_input_queues_are_empty() const {
+        for (auto &queue : this->m_input_queues) {
+            if (!queue->isEmpty())
+                return false;
+        }
+        return true;
+    }
+
+    bool m_sink_is_empty() const { return this->m_sink.isEmpty(); }
+
+    size_t m_sink_size() const { return this->m_sink.sizeGuess(); }
+};
+
+TEST_CASE("multithreaded cluster finder", "[.files][.ClusterFinder]") {
+    auto fpath = "/mnt/sls_det_storage/matterhorn_data/aare_test_data/"
+                 "Moench03new/cu_half_speed_master_4.json";
+
+    File file(fpath);
+
+    size_t n_threads = 2;
+    size_t n_frames_pd = 10;
+
+    using ClusterType = Cluster<int32_t, 3, 3>;
+
+    ClusterFinderMTWrapper<ClusterType> cf(
+        {static_cast<int64_t>(file.rows()), static_cast<int64_t>(file.cols())},
+        5, 2000, n_threads); // no idea what frame type is!!! default uint16_t
+
+    CHECK(cf.get_m_input_queues_size() == n_threads);
+    CHECK(cf.get_m_output_queues_size() == n_threads);
+    CHECK(cf.get_m_cluster_finders_size() == n_threads);
+    CHECK(cf.m_output_queues_are_empty() == true);
+    CHECK(cf.m_input_queues_are_empty() == true);
+
+    for (size_t i = 0; i < n_frames_pd; ++i) {
+        cf.find_clusters(file.read_frame().view<uint16_t>());
+    }
+
+    cf.stop();
+
+    CHECK(cf.m_output_queues_are_empty() == true);
+    CHECK(cf.m_input_queues_are_empty() == true);
+
+    CHECK(cf.m_sink_size() == n_frames_pd);
+    ClusterCollector<ClusterType> clustercollector(&cf);
+
+    clustercollector.stop();
+
+    CHECK(cf.m_sink_size() == 0);
+
+    auto clustervec = clustercollector.steal_clusters();
+    // CHECK(clustervec.size() == ) //dont know how many clusters to expect
+}
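
Regarding the "no idea what frame type is" comment: the wrapper already exposes the frame type as its second template parameter (defaulting to uint16_t), so the test could spell it out explicitly. A small hedged variant of the same construction:

// Same construction as above, with FRAME_TYPE stated instead of relying on the default.
ClusterFinderMTWrapper<ClusterType, uint16_t> cf_explicit(
    {static_cast<int64_t>(file.rows()), static_cast<int64_t>(file.cols())},
    5, 2000, n_threads);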
 
@@ -1,21 +1,52 @@
-#include <cstdint>
 #include "aare/ClusterVector.hpp"
+#include <cstdint>
 
-#include <catch2/matchers/catch_matchers_floating_point.hpp>
+#include <catch2/catch_all.hpp>
 #include <catch2/catch_test_macros.hpp>
+#include <catch2/matchers/catch_matchers_floating_point.hpp>
 
+using aare::Cluster;
 using aare::ClusterVector;
 
-struct Cluster_i2x2 {
-    int16_t x;
-    int16_t y;
-    int32_t data[4];
-};
-
-TEST_CASE("ClusterVector 2x2 int32_t capacity 4, push back then read") {
-
-    ClusterVector<int32_t> cv(2, 2, 4);
+TEST_CASE("item_size return the size of the cluster stored") {
+    using C1 = Cluster<int32_t, 2, 2>;
+    ClusterVector<C1> cv(4);
+    CHECK(cv.item_size() == sizeof(C1));
+
+    // Sanity check
+    // 2*2*4 = 16 bytes of data for the cluster
+    // 2*2 = 4 bytes for the x and y coordinates
+    REQUIRE(cv.item_size() == 20);
+
+    using C2 = Cluster<int32_t, 3, 3>;
+    ClusterVector<C2> cv2(4);
+    CHECK(cv2.item_size() == sizeof(C2));
+
+    using C3 = Cluster<double, 2, 3>;
+    ClusterVector<C3> cv3(4);
+    CHECK(cv3.item_size() == sizeof(C3));
+
+    using C4 = Cluster<char, 10, 5>;
+    ClusterVector<C4> cv4(4);
+    CHECK(cv4.item_size() == sizeof(C4));
+
+    using C5 = Cluster<int32_t, 2, 3>;
+    ClusterVector<C5> cv5(4);
+    CHECK(cv5.item_size() == sizeof(C5));
+
+    using C6 = Cluster<double, 5, 5>;
+    ClusterVector<C6> cv6(4);
+    CHECK(cv6.item_size() == sizeof(C6));
+
+    using C7 = Cluster<double, 3, 3>;
+    ClusterVector<C7> cv7(4);
+    CHECK(cv7.item_size() == sizeof(C7));
+}
+
+TEST_CASE("ClusterVector 2x2 int32_t capacity 4, push back then read",
+          "[.ClusterVector]") {
+
+    ClusterVector<Cluster<int32_t, 2, 2>> cv(4);
     REQUIRE(cv.capacity() == 4);
     REQUIRE(cv.size() == 0);
     REQUIRE(cv.cluster_size_x() == 2);
@@ -23,112 +54,102 @@ TEST_CASE("ClusterVector 2x2 int32_t capacity 4, push back then read") {
     // int16_t, int16_t, 2x2 int32_t = 20 bytes
     REQUIRE(cv.item_size() == 20);
 
-    //Create a cluster and push back into the vector
-    Cluster_i2x2 c1 = {1, 2, {3, 4, 5, 6}};
-    cv.push_back(c1.x, c1.y, reinterpret_cast<std::byte*>(&c1.data[0]));
+    // Create a cluster and push back into the vector
+    Cluster<int32_t, 2, 2> c1 = {1, 2, {3, 4, 5, 6}};
+    cv.push_back(c1);
     REQUIRE(cv.size() == 1);
     REQUIRE(cv.capacity() == 4);
 
-    //Read the cluster back out using copy. TODO! Can we improve the API?
-    Cluster_i2x2 c2;
-    std::byte *ptr = cv.element_ptr(0);
-    std::copy(ptr, ptr + cv.item_size(), reinterpret_cast<std::byte*>(&c2));
+    auto c2 = cv[0];
 
-    //Check that the data is the same
+    // Check that the data is the same
     REQUIRE(c1.x == c2.x);
     REQUIRE(c1.y == c2.y);
-    for(size_t i = 0; i < 4; i++) {
+    for (size_t i = 0; i < 4; i++) {
         REQUIRE(c1.data[i] == c2.data[i]);
     }
 }
 
-TEST_CASE("Summing 3x1 clusters of int64"){
-    struct Cluster_l3x1{
-        int16_t x;
-        int16_t y;
-        int32_t data[3];
-    };
-
-    ClusterVector<int32_t> cv(3, 1, 2);
+TEST_CASE("Summing 3x1 clusters of int64", "[.ClusterVector]") {
+    ClusterVector<Cluster<int32_t, 3, 1>> cv(2);
     REQUIRE(cv.capacity() == 2);
     REQUIRE(cv.size() == 0);
     REQUIRE(cv.cluster_size_x() == 3);
     REQUIRE(cv.cluster_size_y() == 1);
 
-    //Create a cluster and push back into the vector
-    Cluster_l3x1 c1 = {1, 2, {3, 4, 5}};
-    cv.push_back(c1.x, c1.y, reinterpret_cast<std::byte*>(&c1.data[0]));
+    // Create a cluster and push back into the vector
+    Cluster<int32_t, 3, 1> c1 = {1, 2, {3, 4, 5}};
+    cv.push_back(c1);
     REQUIRE(cv.capacity() == 2);
     REQUIRE(cv.size() == 1);
 
-    Cluster_l3x1 c2 = {6, 7, {8, 9, 10}};
-    cv.push_back(c2.x, c2.y, reinterpret_cast<std::byte*>(&c2.data[0]));
+    Cluster<int32_t, 3, 1> c2 = {6, 7, {8, 9, 10}};
+    cv.push_back(c2);
     REQUIRE(cv.capacity() == 2);
     REQUIRE(cv.size() == 2);
 
-    Cluster_l3x1 c3 = {11, 12, {13, 14, 15}};
-    cv.push_back(c3.x, c3.y, reinterpret_cast<std::byte*>(&c3.data[0]));
+    Cluster<int32_t, 3, 1> c3 = {11, 12, {13, 14, 15}};
+    cv.push_back(c3);
     REQUIRE(cv.capacity() == 4);
     REQUIRE(cv.size() == 3);
 
+    /*
     auto sums = cv.sum();
     REQUIRE(sums.size() == 3);
     REQUIRE(sums[0] == 12);
     REQUIRE(sums[1] == 27);
     REQUIRE(sums[2] == 42);
+    */
 }
 
-TEST_CASE("Storing floats"){
-    struct Cluster_f4x2{
-        int16_t x;
-        int16_t y;
-        float data[8];
-    };
-
-    ClusterVector<float> cv(2, 4, 10);
+TEST_CASE("Storing floats", "[.ClusterVector]") {
+    ClusterVector<Cluster<float, 2, 4>> cv(10);
     REQUIRE(cv.capacity() == 10);
     REQUIRE(cv.size() == 0);
     REQUIRE(cv.cluster_size_x() == 2);
     REQUIRE(cv.cluster_size_y() == 4);
 
-    //Create a cluster and push back into the vector
-    Cluster_f4x2 c1 = {1, 2, {3.0, 4.0, 5.0, 6.0,3.0, 4.0, 5.0, 6.0}};
-    cv.push_back(c1.x, c1.y, reinterpret_cast<std::byte*>(&c1.data[0]));
+    // Create a cluster and push back into the vector
+    Cluster<float, 2, 4> c1 = {1, 2, {3.0, 4.0, 5.0, 6.0, 3.0, 4.0, 5.0, 6.0}};
+    cv.push_back(c1);
     REQUIRE(cv.capacity() == 10);
     REQUIRE(cv.size() == 1);
 
-    Cluster_f4x2 c2 = {6, 7, {8.0, 9.0, 10.0, 11.0,8.0, 9.0, 10.0, 11.0}};
-    cv.push_back(c2.x, c2.y, reinterpret_cast<std::byte*>(&c2.data[0]));
+    Cluster<float, 2, 4> c2 = {
+        6, 7, {8.0, 9.0, 10.0, 11.0, 8.0, 9.0, 10.0, 11.0}};
+    cv.push_back(c2);
     REQUIRE(cv.capacity() == 10);
     REQUIRE(cv.size() == 2);
 
+    /*
     auto sums = cv.sum();
     REQUIRE(sums.size() == 2);
     REQUIRE_THAT(sums[0], Catch::Matchers::WithinAbs(36.0, 1e-6));
     REQUIRE_THAT(sums[1], Catch::Matchers::WithinAbs(76.0, 1e-6));
+    */
 }
 
-TEST_CASE("Push back more than initial capacity"){
-    ClusterVector<int32_t> cv(2, 2, 2);
+TEST_CASE("Push back more than initial capacity", "[.ClusterVector]") {
+    ClusterVector<Cluster<int32_t, 2, 2>> cv(2);
     auto initial_data = cv.data();
-    Cluster_i2x2 c1 = {1, 2, {3, 4, 5, 6}};
-    cv.push_back(c1.x, c1.y, reinterpret_cast<std::byte*>(&c1.data[0]));
+    Cluster<int32_t, 2, 2> c1 = {1, 2, {3, 4, 5, 6}};
+    cv.push_back(c1);
     REQUIRE(cv.size() == 1);
     REQUIRE(cv.capacity() == 2);
 
-    Cluster_i2x2 c2 = {6, 7, {8, 9, 10, 11}};
-    cv.push_back(c2.x, c2.y, reinterpret_cast<std::byte*>(&c2.data[0]));
+    Cluster<int32_t, 2, 2> c2 = {6, 7, {8, 9, 10, 11}};
+    cv.push_back(c2);
     REQUIRE(cv.size() == 2);
     REQUIRE(cv.capacity() == 2);
 
-    Cluster_i2x2 c3 = {11, 12, {13, 14, 15, 16}};
-    cv.push_back(c3.x, c3.y, reinterpret_cast<std::byte*>(&c3.data[0]));
+    Cluster<int32_t, 2, 2> c3 = {11, 12, {13, 14, 15, 16}};
+    cv.push_back(c3);
     REQUIRE(cv.size() == 3);
     REQUIRE(cv.capacity() == 4);
 
-    Cluster_i2x2* ptr = reinterpret_cast<Cluster_i2x2*>(cv.data());
+    Cluster<int32_t, 2, 2> *ptr =
+        reinterpret_cast<Cluster<int32_t, 2, 2> *>(cv.data());
     REQUIRE(ptr[0].x == 1);
     REQUIRE(ptr[0].y == 2);
     REQUIRE(ptr[1].x == 6);
@@ -136,29 +157,31 @@ TEST_CASE("Push back more than initial capacity"){
     REQUIRE(ptr[2].x == 11);
     REQUIRE(ptr[2].y == 12);
 
-    //We should have allocated a new buffer, since we outgrew the initial capacity
+    // We should have allocated a new buffer, since we outgrew the initial
+    // capacity
    REQUIRE(initial_data != cv.data());
 }
 
-TEST_CASE("Concatenate two cluster vectors where the first has enough capacity"){
-    ClusterVector<int32_t> cv1(2, 2, 12);
-    Cluster_i2x2 c1 = {1, 2, {3, 4, 5, 6}};
-    cv1.push_back(c1.x, c1.y, reinterpret_cast<std::byte*>(&c1.data[0]));
-    Cluster_i2x2 c2 = {6, 7, {8, 9, 10, 11}};
-    cv1.push_back(c2.x, c2.y, reinterpret_cast<std::byte*>(&c2.data[0]));
+TEST_CASE("Concatenate two cluster vectors where the first has enough capacity",
+          "[.ClusterVector]") {
+    ClusterVector<Cluster<int32_t, 2, 2>> cv1(12);
+    Cluster<int32_t, 2, 2> c1 = {1, 2, {3, 4, 5, 6}};
+    cv1.push_back(c1);
+    Cluster<int32_t, 2, 2> c2 = {6, 7, {8, 9, 10, 11}};
+    cv1.push_back(c2);
 
-    ClusterVector<int32_t> cv2(2, 2, 2);
-    Cluster_i2x2 c3 = {11, 12, {13, 14, 15, 16}};
-    cv2.push_back(c3.x, c3.y, reinterpret_cast<std::byte*>(&c3.data[0]));
-    Cluster_i2x2 c4 = {16, 17, {18, 19, 20, 21}};
-    cv2.push_back(c4.x, c4.y, reinterpret_cast<std::byte*>(&c4.data[0]));
+    ClusterVector<Cluster<int32_t, 2, 2>> cv2(2);
+    Cluster<int32_t, 2, 2> c3 = {11, 12, {13, 14, 15, 16}};
+    cv2.push_back(c3);
+    Cluster<int32_t, 2, 2> c4 = {16, 17, {18, 19, 20, 21}};
+    cv2.push_back(c4);
 
     cv1 += cv2;
     REQUIRE(cv1.size() == 4);
     REQUIRE(cv1.capacity() == 12);
 
-    Cluster_i2x2* ptr = reinterpret_cast<Cluster_i2x2*>(cv1.data());
+    Cluster<int32_t, 2, 2> *ptr =
+        reinterpret_cast<Cluster<int32_t, 2, 2> *>(cv1.data());
     REQUIRE(ptr[0].x == 1);
     REQUIRE(ptr[0].y == 2);
     REQUIRE(ptr[1].x == 6);
@@ -169,24 +192,26 @@ TEST_CASE("Concatenate two cluster vectors where the first has enough capacity")
     REQUIRE(ptr[3].y == 17);
 }
 
-TEST_CASE("Concatenate two cluster vectors where we need to allocate"){
-    ClusterVector<int32_t> cv1(2, 2, 2);
-    Cluster_i2x2 c1 = {1, 2, {3, 4, 5, 6}};
-    cv1.push_back(c1.x, c1.y, reinterpret_cast<std::byte*>(&c1.data[0]));
-    Cluster_i2x2 c2 = {6, 7, {8, 9, 10, 11}};
-    cv1.push_back(c2.x, c2.y, reinterpret_cast<std::byte*>(&c2.data[0]));
+TEST_CASE("Concatenate two cluster vectors where we need to allocate",
+          "[.ClusterVector]") {
+    ClusterVector<Cluster<int32_t, 2, 2>> cv1(2);
+    Cluster<int32_t, 2, 2> c1 = {1, 2, {3, 4, 5, 6}};
+    cv1.push_back(c1);
+    Cluster<int32_t, 2, 2> c2 = {6, 7, {8, 9, 10, 11}};
+    cv1.push_back(c2);
 
-    ClusterVector<int32_t> cv2(2, 2, 2);
-    Cluster_i2x2 c3 = {11, 12, {13, 14, 15, 16}};
-    cv2.push_back(c3.x, c3.y, reinterpret_cast<std::byte*>(&c3.data[0]));
-    Cluster_i2x2 c4 = {16, 17, {18, 19, 20, 21}};
-    cv2.push_back(c4.x, c4.y, reinterpret_cast<std::byte*>(&c4.data[0]));
+    ClusterVector<Cluster<int32_t, 2, 2>> cv2(2);
+    Cluster<int32_t, 2, 2> c3 = {11, 12, {13, 14, 15, 16}};
+    cv2.push_back(c3);
+    Cluster<int32_t, 2, 2> c4 = {16, 17, {18, 19, 20, 21}};
+    cv2.push_back(c4);
 
     cv1 += cv2;
     REQUIRE(cv1.size() == 4);
     REQUIRE(cv1.capacity() == 4);
 
-    Cluster_i2x2* ptr = reinterpret_cast<Cluster_i2x2*>(cv1.data());
+    Cluster<int32_t, 2, 2> *ptr =
+        reinterpret_cast<Cluster<int32_t, 2, 2> *>(cv1.data());
     REQUIRE(ptr[0].x == 1);
     REQUIRE(ptr[0].y == 2);
     REQUIRE(ptr[1].x == 6);
@@ -196,3 +221,48 @@ TEST_CASE("Concatenate two cluster vectors where we need to allocate"){
     REQUIRE(ptr[3].x == 16);
     REQUIRE(ptr[3].y == 17);
 }
+
+struct ClusterTestData {
+    uint8_t ClusterSizeX;
+    uint8_t ClusterSizeY;
+    std::vector<int64_t> index_map_x;
+    std::vector<int64_t> index_map_y;
+};
+
+TEST_CASE("Gain Map Calculation Index Map", "[.ClusterVector][.gain_map]") {
+
+    auto clustertestdata = GENERATE(
+        ClusterTestData{3,
+                        3,
+                        {-1, 0, 1, -1, 0, 1, -1, 0, 1},
+                        {-1, -1, -1, 0, 0, 0, 1, 1, 1}},
+        ClusterTestData{
+            4,
+            4,
+            {-2, -1, 0, 1, -2, -1, 0, 1, -2, -1, 0, 1, -2, -1, 0, 1},
+            {-2, -2, -2, -2, -1, -1, -1, -1, 0, 0, 0, 0, 1, 1, 1, 1}},
+        ClusterTestData{2, 2, {-1, 0, -1, 0}, {-1, -1, 0, 0}},
+        ClusterTestData{5,
+                        5,
+                        {-2, -1, 0, 1, 2, -2, -1, 0, 1, 2, -2, -1, 0,
+                         1, 2, -2, -1, 0, 1, 2, -2, -1, 0, 1, 2},
+                        {-2, -2, -2, -2, -2, -1, -1, -1, -1, -1, 0, 0, 0,
+                         0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2}});
+
+    uint8_t ClusterSizeX = clustertestdata.ClusterSizeX;
+    uint8_t ClusterSizeY = clustertestdata.ClusterSizeY;
+
+    std::vector<int64_t> index_map_x(ClusterSizeX * ClusterSizeY);
+    std::vector<int64_t> index_map_y(ClusterSizeX * ClusterSizeY);
+
+    int64_t index_cluster_center_x = ClusterSizeX / 2;
+    int64_t index_cluster_center_y = ClusterSizeY / 2;
+
+    for (size_t j = 0; j < ClusterSizeX * ClusterSizeY; j++) {
+        index_map_x[j] = j % ClusterSizeX - index_cluster_center_x;
+        index_map_y[j] = j / ClusterSizeX - index_cluster_center_y;
+    }
+
+    CHECK(index_map_x == clustertestdata.index_map_x);
+    CHECK(index_map_y == clustertestdata.index_map_y);
+}
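
A quick worked check of the index-map formula in the loop above, for the 3x3 entry of the GENERATE data (centre index = 3 / 2 = 1):

// j          :  0   1   2   3   4   5   6   7   8
// j % 3 - 1  : -1   0   1  -1   0   1  -1   0   1   -> index_map_x
// j / 3 - 1  : -1  -1  -1   0   0   0   1   1   1   -> index_map_y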
 
@@ -1,11 +1,11 @@
 #include "aare/Interpolator.hpp"
-#include "aare/algorithm.hpp"
 
 namespace aare {
 
 Interpolator::Interpolator(NDView<double, 3> etacube, NDView<double, 1> xbins,
                            NDView<double, 1> ybins, NDView<double, 1> ebins)
-    : m_ietax(etacube), m_ietay(etacube), m_etabinsx(xbins), m_etabinsy(ybins), m_energy_bins(ebins) {
+    : m_ietax(etacube), m_ietay(etacube), m_etabinsx(xbins), m_etabinsy(ybins),
+      m_energy_bins(ebins) {
     if (etacube.shape(0) != xbins.size() || etacube.shape(1) != ybins.size() ||
         etacube.shape(2) != ebins.size()) {
         throw std::invalid_argument(
@@ -53,87 +53,4 @@ Interpolator::Interpolator(NDView<double, 3> etacube, NDView<double, 1> xbins,
     }
 }
 
-std::vector<Photon> Interpolator::interpolate(const ClusterVector<int32_t>& clusters) {
-    std::vector<Photon> photons;
-    photons.reserve(clusters.size());
-
-    if (clusters.cluster_size_x() == 3 || clusters.cluster_size_y() == 3) {
-        for (size_t i = 0; i<clusters.size(); i++){
-
-            auto cluster = clusters.at<Cluster3x3>(i);
-            Eta2 eta= calculate_eta2(cluster);
-
-            Photon photon;
-            photon.x = cluster.x;
-            photon.y = cluster.y;
-            photon.energy = eta.sum;
-
-            //Finding the index of the last element that is smaller
-            //should work fine as long as we have many bins
-            auto ie = last_smaller(m_energy_bins, photon.energy);
-            auto ix = last_smaller(m_etabinsx, eta.x);
-            auto iy = last_smaller(m_etabinsy, eta.y);
-
-            double dX{}, dY{};
-            // cBottomLeft = 0,
-            // cBottomRight = 1,
-            // cTopLeft = 2,
-            // cTopRight = 3
-            switch (eta.c) {
-            case cTopLeft:
-                dX = -1.;
-                dY = 0.;
-                break;
-            case cTopRight:;
-                dX = 0.;
-                dY = 0.;
-                break;
-            case cBottomLeft:
-                dX = -1.;
-                dY = -1.;
-                break;
-            case cBottomRight:
-                dX = 0.;
-                dY = -1.;
-                break;
-            }
-            photon.x += m_ietax(ix, iy, ie)*2 + dX;
-            photon.y += m_ietay(ix, iy, ie)*2 + dY;
-            photons.push_back(photon);
-        }
-    }else if(clusters.cluster_size_x() == 2 || clusters.cluster_size_y() == 2){
-        for (size_t i = 0; i<clusters.size(); i++){
-            auto cluster = clusters.at<Cluster2x2>(i);
-            Eta2 eta= calculate_eta2(cluster);
-
-            Photon photon;
-            photon.x = cluster.x;
-            photon.y = cluster.y;
-            photon.energy = eta.sum;
-
-            //Now do some actual interpolation.
-            //Find which energy bin the cluster is in
-            // auto ie = nearest_index(m_energy_bins, photon.energy)-1;
-            // auto ix = nearest_index(m_etabinsx, eta.x)-1;
-            // auto iy = nearest_index(m_etabinsy, eta.y)-1;
-            //Finding the index of the last element that is smaller
-            //should work fine as long as we have many bins
-            auto ie = last_smaller(m_energy_bins, photon.energy);
-            auto ix = last_smaller(m_etabinsx, eta.x);
-            auto iy = last_smaller(m_etabinsy, eta.y);
-
-            photon.x += m_ietax(ix, iy, ie)*2; //eta goes between 0 and 1 but we could move the hit anywhere in the 2x2
-            photon.y += m_ietay(ix, iy, ie)*2;
-            photons.push_back(photon);
-        }
-
-    }else{
-        throw std::runtime_error("Only 3x3 and 2x2 clusters are supported for interpolation");
-    }
-
-    return photons;
-}
-
 } // namespace aare
@@ -1,8 +1,7 @@
-#include <catch2/catch_test_macros.hpp>
 #include <aare/algorithm.hpp>
+#include <catch2/catch_test_macros.hpp>
 
 TEST_CASE("Find the closed index in a 1D array", "[algorithm]") {
     aare::NDArray<double, 1> arr({5});
@@ -17,7 +16,7 @@ TEST_CASE("Find the closed index in a 1D array", "[algorithm]") {
     REQUIRE(aare::nearest_index(arr, -1.0) == 0);
 }
 
-TEST_CASE("Passing integers to nearest_index works", "[algorithm]"){
+TEST_CASE("Passing integers to nearest_index works", "[algorithm]") {
     aare::NDArray<int, 1> arr({5});
     for (ssize_t i = 0; i < arr.size(); i++) {
         arr[i] = i;
@@ -30,8 +29,7 @@ TEST_CASE("Passing integers to nearest_index works", "[algorithm]"){
     REQUIRE(aare::nearest_index(arr, -1) == 0);
 }
 
-TEST_CASE("nearest_index works with std::vector", "[algorithm]"){
+TEST_CASE("nearest_index works with std::vector", "[algorithm]") {
     std::vector<double> vec = {0, 1, 2, 3, 4};
     REQUIRE(aare::nearest_index(vec, 2.123) == 2);
     REQUIRE(aare::nearest_index(vec, 2.66) == 3);
@@ -40,7 +38,7 @@ TEST_CASE("nearest_index works with std::vector", "[algorithm]"){
     REQUIRE(aare::nearest_index(vec, -10.0) == 0);
 }
 
-TEST_CASE("nearest index works with std::array", "[algorithm]"){
+TEST_CASE("nearest index works with std::array", "[algorithm]") {
     std::array<double, 5> arr = {0, 1, 2, 3, 4};
     REQUIRE(aare::nearest_index(arr, 2.123) == 2);
     REQUIRE(aare::nearest_index(arr, 2.501) == 3);
@@ -49,18 +47,20 @@ TEST_CASE("nearest index works with std::array", "[algorithm]"){
     REQUIRE(aare::nearest_index(arr, -10.0) == 0);
 }
 
-TEST_CASE("nearest index when there is no different uses the first element", "[algorithm]"){
+TEST_CASE("nearest index when there is no different uses the first element",
+          "[algorithm]") {
     std::vector<int> vec = {5, 5, 5, 5, 5};
     REQUIRE(aare::nearest_index(vec, 5) == 0);
 }
 
-TEST_CASE("nearest index when there is no different uses the first element also when all smaller", "[algorithm]"){
+TEST_CASE("nearest index when there is no different uses the first element "
+          "also when all smaller",
+          "[algorithm]") {
     std::vector<int> vec = {5, 5, 5, 5, 5};
     REQUIRE(aare::nearest_index(vec, 10) == 0);
 }
 
-TEST_CASE("last smaller", "[algorithm]"){
+TEST_CASE("last smaller", "[algorithm]") {
     aare::NDArray<double, 1> arr({5});
     for (ssize_t i = 0; i < arr.size(); i++) {
         arr[i] = i;
@@ -72,17 +72,17 @@ TEST_CASE("last smaller", "[algorithm]"){
     REQUIRE(aare::last_smaller(arr, 253.) == 4);
 }
 
-TEST_CASE("returns last bin strictly smaller", "[algorithm]"){
+TEST_CASE("returns last bin strictly smaller", "[algorithm]") {
     aare::NDArray<double, 1> arr({5});
     for (ssize_t i = 0; i < arr.size(); i++) {
         arr[i] = i;
     }
     // arr 0, 1, 2, 3, 4
     REQUIRE(aare::last_smaller(arr, 2.0) == 1);
 
 }
 
-TEST_CASE("last_smaller with all elements smaller returns last element", "[algorithm]"){
+TEST_CASE("last_smaller with all elements smaller returns last element",
+          "[algorithm]") {
     aare::NDArray<double, 1> arr({5});
     for (ssize_t i = 0; i < arr.size(); i++) {
         arr[i] = i;
@@ -91,7 +91,8 @@ TEST_CASE("last_smaller with all elements smaller returns last element", "[algor
     REQUIRE(aare::last_smaller(arr, 50.) == 4);
 }
 
-TEST_CASE("last_smaller with all elements bigger returns first element", "[algorithm]"){
+TEST_CASE("last_smaller with all elements bigger returns first element",
+          "[algorithm]") {
     aare::NDArray<double, 1> arr({5});
     for (ssize_t i = 0; i < arr.size(); i++) {
         arr[i] = i;
@@ -100,38 +101,41 @@ TEST_CASE("last_smaller with all elements bigger returns first element", "[algor
     REQUIRE(aare::last_smaller(arr, -50.) == 0);
 }
 
-TEST_CASE("last smaller with all elements equal returns the first element", "[algorithm]"){
-    std::vector<int> vec = {5,5,5,5,5,5,5};
+TEST_CASE("last smaller with all elements equal returns the first element",
+          "[algorithm]") {
+    std::vector<int> vec = {5, 5, 5, 5, 5, 5, 5};
     REQUIRE(aare::last_smaller(vec, 5) == 0);
 }
 
-TEST_CASE("first_lager with vector", "[algorithm]"){
+TEST_CASE("first_lager with vector", "[algorithm]") {
     std::vector<double> vec = {0, 1, 2, 3, 4};
     REQUIRE(aare::first_larger(vec, 2.5) == 3);
 }
 
-TEST_CASE("first_lager with all elements smaller returns last element", "[algorithm]"){
+TEST_CASE("first_lager with all elements smaller returns last element",
+          "[algorithm]") {
     std::vector<double> vec = {0, 1, 2, 3, 4};
     REQUIRE(aare::first_larger(vec, 50.) == 4);
 }
 
-TEST_CASE("first_lager with all elements bigger returns first element", "[algorithm]"){
+TEST_CASE("first_lager with all elements bigger returns first element",
+          "[algorithm]") {
     std::vector<double> vec = {0, 1, 2, 3, 4};
     REQUIRE(aare::first_larger(vec, -50.) == 0);
 }
 
-TEST_CASE("first_lager with all elements the same as the check returns last", "[algorithm]"){
+TEST_CASE("first_lager with all elements the same as the check returns last",
+          "[algorithm]") {
     std::vector<int> vec = {14, 14, 14, 14, 14};
     REQUIRE(aare::first_larger(vec, 14) == 4);
 }
 
-TEST_CASE("first larger with the same element", "[algorithm]"){
-    std::vector<int> vec = {7,8,9,10,11};
+TEST_CASE("first larger with the same element", "[algorithm]") {
+    std::vector<int> vec = {7, 8, 9, 10, 11};
    REQUIRE(aare::first_larger(vec, 9) == 3);
 }
 
-TEST_CASE("cumsum works", "[algorithm]"){
+TEST_CASE("cumsum works", "[algorithm]") {
     std::vector<double> vec = {0, 1, 2, 3, 4};
     auto result = aare::cumsum(vec);
     REQUIRE(result.size() == vec.size());
@@ -141,12 +145,12 @@ TEST_CASE("cumsum works", "[algorithm]"){
     REQUIRE(result[3] == 6);
     REQUIRE(result[4] == 10);
 }
-TEST_CASE("cumsum works with empty vector", "[algorithm]"){
+TEST_CASE("cumsum works with empty vector", "[algorithm]") {
     std::vector<double> vec = {};
     auto result = aare::cumsum(vec);
     REQUIRE(result.size() == 0);
 }
-TEST_CASE("cumsum works with negative numbers", "[algorithm]"){
+TEST_CASE("cumsum works with negative numbers", "[algorithm]") {
     std::vector<double> vec = {0, -1, -2, -3, -4};
     auto result = aare::cumsum(vec);
     REQUIRE(result.size() == vec.size());
@@ -156,4 +160,3 @@ TEST_CASE("cumsum works with negative numbers", "[algorithm]"){
     REQUIRE(result[3] == -6);
     REQUIRE(result[4] == -10);
 }
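
The tests above pin down the edge-case behaviour of nearest_index, last_smaller and first_larger on sorted input: the strict comparisons, and the clamping to the first or last index when nothing matches. The following is a hedged reference sketch that reproduces that behaviour with the standard library; it is not the aare implementation, only one way to satisfy the same assertions.

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <iterator>
#include <vector>

// Last index whose element is strictly smaller than val; 0 if there is none.
template <typename C, typename T>
std::size_t last_smaller_ref(const C &c, T val) {
    auto it = std::lower_bound(std::begin(c), std::end(c), val);
    if (it == std::begin(c))
        return 0;
    return static_cast<std::size_t>(it - std::begin(c)) - 1;
}

// First index whose element is strictly larger than val; last index if none.
template <typename C, typename T>
std::size_t first_larger_ref(const C &c, T val) {
    auto it = std::upper_bound(std::begin(c), std::end(c), val);
    if (it == std::end(c))
        return std::size(c) - 1;
    return static_cast<std::size_t>(it - std::begin(c));
}

// Index of the element closest to val; ties resolve to the earliest index.
template <typename C, typename T>
std::size_t nearest_index_ref(const C &c, T val) {
    std::size_t best = 0;
    for (std::size_t i = 1; i < std::size(c); ++i) {
        if (std::abs(static_cast<double>(c[i]) - static_cast<double>(val)) <
            std::abs(static_cast<double>(c[best]) - static_cast<double>(val)))
            best = i;
    }
    return best;
}

For example, last_smaller_ref on {0, 1, 2, 3, 4} with 2.0 returns 1 and first_larger_ref with 2.5 returns 3, matching the assertions in the diff.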