Compare commits

..

1 Commits

Author SHA1 Message Date
root
c035491b63 Added ability to save a cluster larger than we search for 2025-11-25 14:20:55 +01:00
39 changed files with 224 additions and 2108 deletions

View File

@@ -388,7 +388,7 @@ set(SourceFiles
${CMAKE_CURRENT_SOURCE_DIR}/src/RawSubFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/RawSubFile.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/utils/task.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/utils/task.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/utils/ifstream_helpers.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/utils/ifstream_helpers.cpp
) )
add_library(aare_core STATIC ${SourceFiles}) add_library(aare_core STATIC ${SourceFiles})
target_include_directories(aare_core PUBLIC target_include_directories(aare_core PUBLIC
@@ -412,8 +412,6 @@ target_link_libraries(
) )
set_property(TARGET aare_core PROPERTY POSITION_INDEPENDENT_CODE ON)
if(AARE_TESTS) if(AARE_TESTS)
target_compile_definitions(aare_core PRIVATE AARE_TESTS) target_compile_definitions(aare_core PRIVATE AARE_TESTS)
endif() endif()
@@ -433,6 +431,10 @@ set_target_properties(aare_core PROPERTIES
PUBLIC_HEADER "${PUBLICHEADERS}" PUBLIC_HEADER "${PUBLICHEADERS}"
) )
if (AARE_PYTHON_BINDINGS)
set_property(TARGET aare_core PROPERTY POSITION_INDEPENDENT_CODE ON)
endif()
if(AARE_TESTS) if(AARE_TESTS)
set(TestSources set(TestSources
${CMAKE_CURRENT_SOURCE_DIR}/src/algorithm.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/algorithm.test.cpp
@@ -463,7 +465,6 @@ if(AARE_TESTS)
target_sources(tests PRIVATE ${TestSources} ) target_sources(tests PRIVATE ${TestSources} )
endif() endif()
if(AARE_MASTER_PROJECT) if(AARE_MASTER_PROJECT)
install(TARGETS aare_core aare_compiler_flags install(TARGETS aare_core aare_compiler_flags
EXPORT "${TARGETS_EXPORT_NAME}" EXPORT "${TARGETS_EXPORT_NAME}"
@@ -473,6 +474,7 @@ if(AARE_MASTER_PROJECT)
) )
endif() endif()
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
set(CMAKE_INSTALL_RPATH $ORIGIN) set(CMAKE_INSTALL_RPATH $ORIGIN)
set(CMAKE_BUILD_WITH_INSTALL_RPATH FALSE) set(CMAKE_BUILD_WITH_INSTALL_RPATH FALSE)

View File

@@ -1,27 +1,16 @@
# Release notes # Release notes
### 2025.10.1
Bugfixes: ### head
- File supports reading new master json file format (multiple ROI's not supported yet)
### 2025.8.22
Features: Features:
- Apply calibration works in G0 if passes a 2D calibration and pedestal - Apply calibration works in G0 if passes a 2D calibration and pedestal
- count pixels that switch - count pixels that switch
- calculate pedestal (also g0 version) - calculate pedestal (also g0 version)
- NDArray::view() needs an lvalue to reduce issues with the view outliving the array
Bugfixes: ### 2025.07.18
- Now using glibc 2.17 in conda builds (was using the host)
- Fixed shifted pixels in clusters close to the edge of a frame
### 2025.7.18
Features: Features:
@@ -35,7 +24,7 @@ Bugfixes:
- Removed unused file: ClusterFile.cpp - Removed unused file: ClusterFile.cpp
### 2025.5.22 ### 2025.05.22
Features: Features:
@@ -48,6 +37,3 @@ Bugfixes:

View File

@@ -1 +1 @@
2025.8.22 2025.7.18

View File

@@ -15,7 +15,7 @@ FetchContent_MakeAvailable(benchmark)
add_executable(benchmarks) add_executable(benchmarks)
target_sources(benchmarks PRIVATE ndarray_benchmark.cpp calculateeta_benchmark.cpp reduce_benchmark.cpp) target_sources(benchmarks PRIVATE ndarray_benchmark.cpp calculateeta_benchmark.cpp)
# Link Google Benchmark and other necessary libraries # Link Google Benchmark and other necessary libraries
target_link_libraries(benchmarks PRIVATE benchmark::benchmark aare_core aare_compiler_flags) target_link_libraries(benchmarks PRIVATE benchmark::benchmark aare_core aare_compiler_flags)

View File

@@ -1,168 +0,0 @@
#include "aare/Cluster.hpp"
#include <benchmark/benchmark.h>
using namespace aare;
// Benchmark fixture providing one 5x5 and one 3x3 test cluster, refilled
// with fixed data before every benchmark run.
class ClustersForReduceFixture : public benchmark::Fixture {
  public:
    Cluster<int, 5, 5> cluster_5x5{};
    Cluster<int, 3, 3> cluster_3x3{};

  private:
    using benchmark::Fixture::SetUp;

    void SetUp([[maybe_unused]] const benchmark::State &state) override {
        // Same values as before: 20 explicit entries followed by five
        // zeros (previously supplied implicitly by aggregate
        // initialization of the intermediate C array).
        cluster_5x5.data = {1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 2, 3,
                            1, 2, 1, 1, 1, 1, 2, 0, 0, 0, 0, 0};
        cluster_5x5.x = 5;
        cluster_5x5.y = 5;

        cluster_3x3.data = {1, 1, 1, 2, 3, 1, 2, 2, 1};
        cluster_3x3.x = 5;
        cluster_3x3.y = 5;
    }

    // void TearDown(::benchmark::State& state) {
    // }
};
/**
 * @brief Hand-unrolled reference implementation that reduces a 5x5 cluster
 * to the 3x3 subcluster with the highest sum (benchmarked against the
 * generic library reduce_to_3x3).
 *
 * Subcluster index layout: col = index % 3, row = index / 3, so the
 * coordinate shift of the winning subcluster is dx = col - 1 and
 * dy = 1 - row (y grows upwards, matching the generic implementation).
 *
 * @param c 5x5 cluster to reduce
 * @return 3x3 cluster centered on the winning subcluster
 */
template <typename T>
Cluster<T, 3, 3, int16_t> reduce_to_3x3(const Cluster<T, 5, 5, int16_t> &c) {
    Cluster<T, 3, 3, int16_t> result;
    std::array<T, 9> sum_3x3_subclusters;
    // Write out the sums in the hope that the compiler can optimize this
    sum_3x3_subclusters[0] = c.data[0] + c.data[1] + c.data[2] + c.data[5] +
                             c.data[6] + c.data[7] + c.data[10] + c.data[11] +
                             c.data[12];
    sum_3x3_subclusters[1] = c.data[1] + c.data[2] + c.data[3] + c.data[6] +
                             c.data[7] + c.data[8] + c.data[11] + c.data[12] +
                             c.data[13];
    sum_3x3_subclusters[2] = c.data[2] + c.data[3] + c.data[4] + c.data[7] +
                             c.data[8] + c.data[9] + c.data[12] + c.data[13] +
                             c.data[14];
    sum_3x3_subclusters[3] = c.data[5] + c.data[6] + c.data[7] + c.data[10] +
                             c.data[11] + c.data[12] + c.data[15] + c.data[16] +
                             c.data[17];
    sum_3x3_subclusters[4] = c.data[6] + c.data[7] + c.data[8] + c.data[11] +
                             c.data[12] + c.data[13] + c.data[16] + c.data[17] +
                             c.data[18];
    sum_3x3_subclusters[5] = c.data[7] + c.data[8] + c.data[9] + c.data[12] +
                             c.data[13] + c.data[14] + c.data[17] + c.data[18] +
                             c.data[19];
    sum_3x3_subclusters[6] = c.data[10] + c.data[11] + c.data[12] + c.data[15] +
                             c.data[16] + c.data[17] + c.data[20] + c.data[21] +
                             c.data[22];
    sum_3x3_subclusters[7] = c.data[11] + c.data[12] + c.data[13] + c.data[16] +
                             c.data[17] + c.data[18] + c.data[21] + c.data[22] +
                             c.data[23];
    sum_3x3_subclusters[8] = c.data[12] + c.data[13] + c.data[14] + c.data[17] +
                             c.data[18] + c.data[19] + c.data[22] + c.data[23] +
                             c.data[24];
    auto index = std::max_element(sum_3x3_subclusters.begin(),
                                  sum_3x3_subclusters.end()) -
                 sum_3x3_subclusters.begin();
    switch (index) {
    case 0: // top-left: dx = -1, dy = +1
        result.x = c.x - 1;
        result.y = c.y + 1;
        result.data = {c.data[0], c.data[1], c.data[2], c.data[5], c.data[6],
                       c.data[7], c.data[10], c.data[11], c.data[12]};
        break;
    case 1: // top-center: dx = 0, dy = +1
        result.x = c.x;
        result.y = c.y + 1;
        result.data = {c.data[1], c.data[2], c.data[3], c.data[6], c.data[7],
                       c.data[8], c.data[11], c.data[12], c.data[13]};
        break;
    case 2: // top-right: dx = +1, dy = +1
        result.x = c.x + 1;
        result.y = c.y + 1;
        result.data = {c.data[2], c.data[3], c.data[4], c.data[7], c.data[8],
                       c.data[9], c.data[12], c.data[13], c.data[14]};
        break;
    case 3: // middle-left: dx = -1, dy = 0
        result.x = c.x - 1;
        result.y = c.y;
        result.data = {c.data[5], c.data[6], c.data[7],
                       c.data[10], c.data[11], c.data[12],
                       c.data[15], c.data[16], c.data[17]};
        break;
    case 4: // center: dx = 0, dy = 0 (was erroneously c.x + 1)
        result.x = c.x;
        result.y = c.y;
        result.data = {c.data[6], c.data[7], c.data[8],
                       c.data[11], c.data[12], c.data[13],
                       c.data[16], c.data[17], c.data[18]};
        break;
    case 5: // middle-right: dx = +1, dy = 0
        result.x = c.x + 1;
        result.y = c.y;
        result.data = {c.data[7], c.data[8], c.data[9],
                       c.data[12], c.data[13], c.data[14],
                       c.data[17], c.data[18], c.data[19]};
        break;
    case 6: // bottom-left: dx = -1, dy = -1 (was erroneously c.x + 1)
        result.x = c.x - 1;
        result.y = c.y - 1;
        result.data = {c.data[10], c.data[11], c.data[12],
                       c.data[15], c.data[16], c.data[17],
                       c.data[20], c.data[21], c.data[22]};
        break;
    case 7: // bottom-center: dx = 0, dy = -1 (was erroneously c.x + 1)
        result.x = c.x;
        result.y = c.y - 1;
        result.data = {c.data[11], c.data[12], c.data[13],
                       c.data[16], c.data[17], c.data[18],
                       c.data[21], c.data[22], c.data[23]};
        break;
    case 8: // bottom-right: dx = +1, dy = -1
        result.x = c.x + 1;
        result.y = c.y - 1;
        result.data = {c.data[12], c.data[13], c.data[14],
                       c.data[17], c.data[18], c.data[19],
                       c.data[22], c.data[23], c.data[24]};
        break;
    }
    return result;
}
// Times the generic library reduce_to_2x2 on a 3x3 cluster.
BENCHMARK_F(ClustersForReduceFixture, Reduce2x2)(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        benchmark::DoNotOptimize(reduce_to_2x2<int, 3, 3, int16_t>(
            cluster_3x3)); // make sure compiler evaluates the expression
    }
}

// Times the 3x3-specialized reduce_to_2x2 overload on the same input, for
// comparison against the generic version above.
BENCHMARK_F(ClustersForReduceFixture, SpecificReduce2x2)(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        benchmark::DoNotOptimize(reduce_to_2x2<int>(cluster_3x3));
    }
}

// Times the generic library reduce_to_3x3 on a 5x5 cluster.
BENCHMARK_F(ClustersForReduceFixture, Reduce3x3)(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        benchmark::DoNotOptimize(
            reduce_to_3x3<int, 5, 5, int16_t>(cluster_5x5));
    }
}

// Times the hand-unrolled reduce_to_3x3 defined above in this file
// (single template parameter selects the local overload).
BENCHMARK_F(ClustersForReduceFixture, SpecificReduce3x3)(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        benchmark::DoNotOptimize(reduce_to_3x3<int>(cluster_5x5));
    }
}

View File

@@ -1,17 +1,5 @@
python: python:
# - 3.11 - 3.11
# - 3.12 - 3.12
# - 3.13 - 3.13
- 3.14
c_compiler:
- gcc # [linux]
c_stdlib:
- sysroot # [linux]
cxx_compiler:
- gxx # [linux]
c_stdlib_version: # [linux]
- 2.17 # [linux]

View File

@@ -16,8 +16,6 @@ build:
requirements: requirements:
build: build:
- {{ compiler('c') }}
- {{ stdlib("c") }}
- {{ compiler('cxx') }} - {{ compiler('cxx') }}
- cmake - cmake
- ninja - ninja
@@ -25,7 +23,7 @@ requirements:
host: host:
- python - python
- pip - pip
- numpy=2.3 - numpy=2.1
- scikit-build-core - scikit-build-core
- pybind11 >=2.13.0 - pybind11 >=2.13.0
- matplotlib # needed in host to solve the environment for run - matplotlib # needed in host to solve the environment for run
@@ -42,11 +40,11 @@ test:
- aare - aare
requires: requires:
- pytest - pytest
# - boost-histogram - boost-histogram
source_files: source_files:
# - python/tests - python/tests
commands: commands:
# - python -m pytest python/tests - python -m pytest python/tests
about: about:
summary: Data analysis library for hybrid pixel detectors from PSI summary: Data analysis library for hybrid pixel detectors from PSI

View File

@@ -12,11 +12,4 @@ ClusterVector
:members: :members:
:undoc-members: :undoc-members:
:private-members: :private-members:
**Free Functions:**
.. doxygenfunction:: aare::reduce_to_3x3(const ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>>&)
.. doxygenfunction:: aare::reduce_to_2x2(const ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>>&)

View File

@@ -33,17 +33,4 @@ C++ functions that support the ClusterVector or to view it as a numpy array.
:members: :members:
:undoc-members: :undoc-members:
:show-inheritance: :show-inheritance:
:inherited-members: :inherited-members:
**Free Functions:**
.. autofunction:: reduce_to_3x3
:noindex:
Reduce a single Cluster to 3x3 by taking the 3x3 subcluster with highest photon energy.
.. autofunction:: reduce_to_2x2
:noindex:
Reduce a single Cluster to 2x2 by taking the 2x2 subcluster with highest photon energy.

View File

@@ -13,5 +13,4 @@ dependencies:
- pybind11 - pybind11
- numpy - numpy
- matplotlib - matplotlib
- nlohmann_json

View File

@@ -7,10 +7,10 @@
namespace aare { namespace aare {
enum class corner : int { enum class corner : int {
cTopLeft = 0, cBottomLeft = 0,
cTopRight = 1, cBottomRight = 1,
cBottomLeft = 2, cTopLeft = 2,
cBottomRight = 3 cTopRight = 3
}; };
enum class pixel : int { enum class pixel : int {
@@ -28,7 +28,7 @@ enum class pixel : int {
template <typename T> struct Eta2 { template <typename T> struct Eta2 {
double x; double x;
double y; double y;
int c{0}; int c;
T sum; T sum;
}; };
@@ -58,126 +58,85 @@ template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
typename CoordType> typename CoordType>
Eta2<T> Eta2<T>
calculate_eta2(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cl) { calculate_eta2(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cl) {
static_assert(ClusterSizeX > 1 && ClusterSizeY > 1);
Eta2<T> eta{}; Eta2<T> eta{};
auto max_sum = cl.max_sum_2x2();
eta.sum = max_sum.first;
auto c = max_sum.second;
size_t cluster_center_index = size_t cluster_center_index =
(ClusterSizeX / 2) + (ClusterSizeY / 2) * ClusterSizeX; (ClusterSizeX / 2) + (ClusterSizeY / 2) * ClusterSizeX;
auto max_sum = cl.max_sum_2x2(); size_t index_bottom_left_max_2x2_subcluster =
eta.sum = max_sum.first; (int(c / (ClusterSizeX - 1))) * ClusterSizeX + c % (ClusterSizeX - 1);
int c = max_sum.second;
// subcluster top right from center // check that cluster center is in max subcluster
switch (static_cast<corner>(c)) { if (cluster_center_index != index_bottom_left_max_2x2_subcluster &&
case (corner::cTopLeft): cluster_center_index != index_bottom_left_max_2x2_subcluster + 1 &&
if ((cl.data[cluster_center_index - 1] + cluster_center_index !=
cl.data[cluster_center_index]) != 0) index_bottom_left_max_2x2_subcluster + ClusterSizeX &&
eta.x = static_cast<double>(cl.data[cluster_center_index - 1]) / cluster_center_index !=
static_cast<double>(cl.data[cluster_center_index - 1] + index_bottom_left_max_2x2_subcluster + ClusterSizeX + 1)
cl.data[cluster_center_index]); throw std::runtime_error("Photon center is not in max 2x2_subcluster");
if ((cl.data[cluster_center_index - ClusterSizeX] +
cl.data[cluster_center_index]) != 0) if ((cluster_center_index - index_bottom_left_max_2x2_subcluster) %
eta.y = static_cast<double>( ClusterSizeX ==
cl.data[cluster_center_index - ClusterSizeX]) / 0) {
static_cast<double>( if ((cl.data[cluster_center_index + 1] +
cl.data[cluster_center_index - ClusterSizeX] + cl.data[cluster_center_index]) != 0)
cl.data[cluster_center_index]);
eta.x = static_cast<double>(cl.data[cluster_center_index + 1]) /
static_cast<double>((cl.data[cluster_center_index + 1] +
cl.data[cluster_center_index]));
} else {
if ((cl.data[cluster_center_index] +
cl.data[cluster_center_index - 1]) != 0)
// dx = 0
// dy = 0
break;
case (corner::cTopRight):
if (cl.data[cluster_center_index] + cl.data[cluster_center_index + 1] !=
0)
eta.x = static_cast<double>(cl.data[cluster_center_index]) / eta.x = static_cast<double>(cl.data[cluster_center_index]) /
static_cast<double>(cl.data[cluster_center_index] + static_cast<double>((cl.data[cluster_center_index - 1] +
cl.data[cluster_center_index + 1]); cl.data[cluster_center_index]));
if ((cl.data[cluster_center_index - ClusterSizeX] + }
cl.data[cluster_center_index]) != 0) if ((cluster_center_index - index_bottom_left_max_2x2_subcluster) /
eta.y = static_cast<double>( ClusterSizeX <
cl.data[cluster_center_index - ClusterSizeX]) / 1) {
static_cast<double>( assert(cluster_center_index + ClusterSizeX <
cl.data[cluster_center_index - ClusterSizeX] + ClusterSizeX * ClusterSizeY); // suppress warning
cl.data[cluster_center_index]);
// dx = 1
// dy = 0
break;
case (corner::cBottomLeft):
if ((cl.data[cluster_center_index - 1] +
cl.data[cluster_center_index]) != 0)
eta.x = static_cast<double>(cl.data[cluster_center_index - 1]) /
static_cast<double>(cl.data[cluster_center_index - 1] +
cl.data[cluster_center_index]);
if ((cl.data[cluster_center_index] + if ((cl.data[cluster_center_index] +
cl.data[cluster_center_index + ClusterSizeX]) != 0) cl.data[cluster_center_index + ClusterSizeX]) != 0)
eta.y = static_cast<double>(cl.data[cluster_center_index]) / eta.y = static_cast<double>(
cl.data[cluster_center_index + ClusterSizeX]) /
static_cast<double>( static_cast<double>(
cl.data[cluster_center_index] + (cl.data[cluster_center_index] +
cl.data[cluster_center_index + ClusterSizeX]); cl.data[cluster_center_index + ClusterSizeX]));
// dx = 0 } else {
// dy = 1
break;
case (corner::cBottomRight):
if (cl.data[cluster_center_index] + cl.data[cluster_center_index + 1] !=
0)
eta.x = static_cast<double>(cl.data[cluster_center_index]) /
static_cast<double>(cl.data[cluster_center_index] +
cl.data[cluster_center_index + 1]);
if ((cl.data[cluster_center_index] + if ((cl.data[cluster_center_index] +
cl.data[cluster_center_index + ClusterSizeX]) != 0) cl.data[cluster_center_index - ClusterSizeX]) != 0)
eta.y = static_cast<double>(cl.data[cluster_center_index]) / eta.y = static_cast<double>(cl.data[cluster_center_index]) /
static_cast<double>( static_cast<double>(
cl.data[cluster_center_index] + (cl.data[cluster_center_index] +
cl.data[cluster_center_index + ClusterSizeX]); cl.data[cluster_center_index - ClusterSizeX]));
// dx = 1
// dy = 1
break;
} }
eta.c = c; eta.c = c; // TODO only supported for 2x2 and 3x3 clusters -> at least no
// underyling enum class
return eta; return eta;
} }
// TODO! Look up eta2 calculation - photon center should be bottom right corner // TODO! Look up eta2 calculation - photon center should be top right corner
template <typename T> template <typename T>
Eta2<T> calculate_eta2(const Cluster<T, 2, 2, int16_t> &cl) { Eta2<T> calculate_eta2(const Cluster<T, 2, 2, int16_t> &cl) {
Eta2<T> eta{}; Eta2<T> eta{};
if ((cl.data[0] + cl.data[1]) != 0) if ((cl.data[0] + cl.data[1]) != 0)
eta.x = static_cast<double>(cl.data[2]) / eta.x = static_cast<double>(cl.data[1]) / (cl.data[0] + cl.data[1]);
(cl.data[2] + cl.data[3]); // between (0,1) the closer to zero
// left value probably larger
if ((cl.data[0] + cl.data[2]) != 0) if ((cl.data[0] + cl.data[2]) != 0)
eta.y = static_cast<double>(cl.data[1]) / eta.y = static_cast<double>(cl.data[2]) / (cl.data[0] + cl.data[2]);
(cl.data[1] + cl.data[3]); // between (0,1) the closer to zero
// bottom value probably larger
eta.sum = cl.sum(); eta.sum = cl.sum();
eta.c = static_cast<int>(corner::cBottomLeft); // TODO! This is not correct,
// but need to put something
return eta; return eta;
} }
// TODO generalize
template <typename T>
Eta2<T> calculate_eta2(const Cluster<T, 1, 2, int16_t> &cl) {
Eta2<T> eta{};
eta.x = 0;
eta.y = static_cast<double>(cl.data[0]) / cl.data[1];
eta.sum = cl.sum();
}
template <typename T>
Eta2<T> calculate_eta2(const Cluster<T, 2, 1, int16_t> &cl) {
Eta2<T> eta{};
eta.x = static_cast<double>(cl.data[0]) / cl.data[1];
eta.y = 0;
eta.sum = cl.sum();
}
// calculates Eta3 for 3x3 cluster based on code from analyze_cluster // calculates Eta3 for 3x3 cluster based on code from analyze_cluster
// TODO only supported for 3x3 Clusters // TODO only supported for 3x3 Clusters
template <typename T> Eta2<T> calculate_eta3(const Cluster<T, 3, 3> &cl) { template <typename T> Eta2<T> calculate_eta3(const Cluster<T, 3, 3> &cl) {
@@ -191,11 +150,13 @@ template <typename T> Eta2<T> calculate_eta3(const Cluster<T, 3, 3> &cl) {
eta.sum = sum; eta.sum = sum;
eta.c = corner::cBottomLeft;
if ((cl.data[3] + cl.data[4] + cl.data[5]) != 0) if ((cl.data[3] + cl.data[4] + cl.data[5]) != 0)
eta.x = static_cast<double>(-cl.data[3] + cl.data[3 + 2]) / eta.x = static_cast<double>(-cl.data[3] + cl.data[3 + 2]) /
(cl.data[3] + cl.data[4] + cl.data[5]); // (-1,1) (cl.data[3] + cl.data[4] + cl.data[5]);
if ((cl.data[1] + cl.data[4] + cl.data[7]) != 0) if ((cl.data[1] + cl.data[4] + cl.data[7]) != 0)

207
include/aare/Cluster.hpp Executable file → Normal file
View File

@@ -18,7 +18,7 @@ namespace aare {
// requires clause c++20 maybe update // requires clause c++20 maybe update
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY, template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
typename CoordType = uint16_t> typename CoordType = int16_t>
struct Cluster { struct Cluster {
static_assert(std::is_arithmetic_v<T>, "T needs to be an arithmetic type"); static_assert(std::is_arithmetic_v<T>, "T needs to be an arithmetic type");
@@ -38,13 +38,6 @@ struct Cluster {
T sum() const { return std::accumulate(data.begin(), data.end(), T{}); } T sum() const { return std::accumulate(data.begin(), data.end(), T{}); }
// TODO: handle 1 dimensional clusters
// TODO: change int to corner
/**
* @brief sum of 2x2 subcluster with highest energy
* @return photon energy of subcluster, 2x2 subcluster index relative to
* cluster center
*/
std::pair<T, int> max_sum_2x2() const { std::pair<T, int> max_sum_2x2() const {
if constexpr (cluster_size_x == 3 && cluster_size_y == 3) { if constexpr (cluster_size_x == 3 && cluster_size_y == 3) {
@@ -60,38 +53,17 @@ struct Cluster {
} else if constexpr (cluster_size_x == 2 && cluster_size_y == 2) { } else if constexpr (cluster_size_x == 2 && cluster_size_y == 2) {
return std::make_pair(data[0] + data[1] + data[2] + data[3], 0); return std::make_pair(data[0] + data[1] + data[2] + data[3], 0);
} else { } else {
constexpr size_t cluster_center_index = constexpr size_t num_2x2_subclusters =
(ClusterSizeX / 2) + (ClusterSizeY / 2) * ClusterSizeX; (ClusterSizeX - 1) * (ClusterSizeY - 1);
std::array<T, 4> sum_2x2_subcluster{0}; std::array<T, num_2x2_subclusters> sum_2x2_subcluster;
// subcluster top left from center for (size_t i = 0; i < ClusterSizeY - 1; ++i) {
sum_2x2_subcluster[0] = for (size_t j = 0; j < ClusterSizeX - 1; ++j)
data[cluster_center_index] + data[cluster_center_index - 1] + sum_2x2_subcluster[i * (ClusterSizeX - 1) + j] =
data[cluster_center_index - ClusterSizeX] + data[i * ClusterSizeX + j] +
data[cluster_center_index - 1 - ClusterSizeX]; data[i * ClusterSizeX + j + 1] +
// subcluster top right from center data[(i + 1) * ClusterSizeX + j] +
if (ClusterSizeX > 2) { data[(i + 1) * ClusterSizeX + j + 1];
sum_2x2_subcluster[1] =
data[cluster_center_index] +
data[cluster_center_index + 1] +
data[cluster_center_index - ClusterSizeX] +
data[cluster_center_index - ClusterSizeX + 1];
}
// subcluster bottom left from center
if (ClusterSizeY > 2) {
sum_2x2_subcluster[2] =
data[cluster_center_index] +
data[cluster_center_index - 1] +
data[cluster_center_index + ClusterSizeX] +
data[cluster_center_index + ClusterSizeX - 1];
}
// subcluster bottom right from center
if (ClusterSizeX > 2 && ClusterSizeY > 2) {
sum_2x2_subcluster[3] =
data[cluster_center_index] +
data[cluster_center_index + 1] +
data[cluster_center_index + ClusterSizeX] +
data[cluster_center_index + ClusterSizeX + 1];
} }
int index = std::max_element(sum_2x2_subcluster.begin(), int index = std::max_element(sum_2x2_subcluster.begin(),
@@ -102,163 +74,6 @@ struct Cluster {
} }
}; };
/**
* @brief Reduce a cluster to a 2x2 cluster by selecting the 2x2 block with the
* highest sum.
* @param c Cluster to reduce
* @return reduced cluster
*/
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
typename CoordType = int16_t>
Cluster<T, 2, 2, CoordType>
reduce_to_2x2(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &c) {
static_assert(ClusterSizeX >= 2 && ClusterSizeY >= 2,
"Cluster sizes must be at least 2x2 for reduction to 2x2");
// TODO maybe add sanity check and check that center is in max subcluster
Cluster<T, 2, 2, CoordType> result;
auto [sum, index] = c.max_sum_2x2();
int16_t cluster_center_index =
(ClusterSizeX / 2) + (ClusterSizeY / 2) * ClusterSizeX;
int16_t index_bottom_left_max_2x2_subcluster =
(int(index / (ClusterSizeX - 1))) * ClusterSizeX +
index % (ClusterSizeX - 1);
result.x =
c.x + (index_bottom_left_max_2x2_subcluster - cluster_center_index) %
ClusterSizeX;
result.y =
c.y - (index_bottom_left_max_2x2_subcluster - cluster_center_index) /
ClusterSizeX;
result.data = {
c.data[index_bottom_left_max_2x2_subcluster],
c.data[index_bottom_left_max_2x2_subcluster + 1],
c.data[index_bottom_left_max_2x2_subcluster + ClusterSizeX],
c.data[index_bottom_left_max_2x2_subcluster + ClusterSizeX + 1]};
return result;
}
template <typename T>
Cluster<T, 2, 2, int16_t> reduce_to_2x2(const Cluster<T, 3, 3, int16_t> &c) {
Cluster<T, 2, 2, int16_t> result;
auto [s, i] = c.max_sum_2x2();
switch (i) {
case 0:
result.x = c.x - 1;
result.y = c.y + 1;
result.data = {c.data[0], c.data[1], c.data[3], c.data[4]};
break;
case 1:
result.x = c.x;
result.y = c.y + 1;
result.data = {c.data[1], c.data[2], c.data[4], c.data[5]};
break;
case 2:
result.x = c.x - 1;
result.y = c.y;
result.data = {c.data[3], c.data[4], c.data[6], c.data[7]};
break;
case 3:
result.x = c.x;
result.y = c.y;
result.data = {c.data[4], c.data[5], c.data[7], c.data[8]};
break;
}
return result;
}
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
typename CoordType = int16_t>
inline std::pair<T, uint16_t>
max_3x3_sum(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cluster) {
if constexpr (ClusterSizeX == 3 && ClusterSizeY == 3) {
return std::make_pair(cluster.sum(), 0);
} else {
size_t index = 0;
T max_3x3_subcluster_sum = 0;
for (size_t i = 0; i < ClusterSizeY - 2; ++i) {
for (size_t j = 0; j < ClusterSizeX - 2; ++j) {
T sum = cluster.data[i * ClusterSizeX + j] +
cluster.data[i * ClusterSizeX + j + 1] +
cluster.data[i * ClusterSizeX + j + 2] +
cluster.data[(i + 1) * ClusterSizeX + j] +
cluster.data[(i + 1) * ClusterSizeX + j + 1] +
cluster.data[(i + 1) * ClusterSizeX + j + 2] +
cluster.data[(i + 2) * ClusterSizeX + j] +
cluster.data[(i + 2) * ClusterSizeX + j + 1] +
cluster.data[(i + 2) * ClusterSizeX + j + 2];
if (sum > max_3x3_subcluster_sum) {
max_3x3_subcluster_sum = sum;
index = i * (ClusterSizeX - 2) + j;
}
}
}
return std::make_pair(max_3x3_subcluster_sum, index);
}
}
/**
* @brief Reduce a cluster to a 3x3 cluster by selecting the 3x3 block with the
* highest sum.
* @param c Cluster to reduce
* @return reduced cluster
*/
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
typename CoordType = int16_t>
Cluster<T, 3, 3, CoordType>
reduce_to_3x3(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &c) {
static_assert(ClusterSizeX >= 3 && ClusterSizeY >= 3,
"Cluster sizes must be at least 3x3 for reduction to 3x3");
Cluster<T, 3, 3, CoordType> result;
// TODO maybe add sanity check and check that center is in max subcluster
auto [sum, index] = max_3x3_sum(c);
int16_t cluster_center_index =
(ClusterSizeX / 2) + (ClusterSizeY / 2) * ClusterSizeX;
int16_t index_center_max_3x3_subcluster =
(int(index / (ClusterSizeX - 2))) * ClusterSizeX + ClusterSizeX +
index % (ClusterSizeX - 2) + 1;
int16_t index_3x3_subcluster_cluster_center =
int((cluster_center_index - 1 - ClusterSizeX) / ClusterSizeX) *
(ClusterSizeX - 2) +
(cluster_center_index - 1 - ClusterSizeX) % ClusterSizeX;
result.x =
c.x + (index % (ClusterSizeX - 2) -
(index_3x3_subcluster_cluster_center % (ClusterSizeX - 2)));
result.y =
c.y - (index / (ClusterSizeX - 2) -
(index_3x3_subcluster_cluster_center / (ClusterSizeX - 2)));
result.data = {c.data[index_center_max_3x3_subcluster - ClusterSizeX - 1],
c.data[index_center_max_3x3_subcluster - ClusterSizeX],
c.data[index_center_max_3x3_subcluster - ClusterSizeX + 1],
c.data[index_center_max_3x3_subcluster - 1],
c.data[index_center_max_3x3_subcluster],
c.data[index_center_max_3x3_subcluster + 1],
c.data[index_center_max_3x3_subcluster + ClusterSizeX - 1],
c.data[index_center_max_3x3_subcluster + ClusterSizeX],
c.data[index_center_max_3x3_subcluster + ClusterSizeX + 1]};
return result;
}
// Type Traits for is_cluster_type // Type Traits for is_cluster_type
template <typename T> template <typename T>
struct is_cluster : std::false_type {}; // Default case: Not a Cluster struct is_cluster : std::false_type {}; // Default case: Not a Cluster

View File

@@ -19,9 +19,11 @@ class ClusterFinder {
const PEDESTAL_TYPE c3; const PEDESTAL_TYPE c3;
Pedestal<PEDESTAL_TYPE> m_pedestal; Pedestal<PEDESTAL_TYPE> m_pedestal;
ClusterVector<ClusterType> m_clusters; ClusterVector<ClusterType> m_clusters;
const uint32_t ClusterSizeX;
const uint32_t ClusterSizeY;
static const uint8_t ClusterSizeX = ClusterType::cluster_size_x; static const uint8_t SavedClusterSizeX = ClusterType::cluster_size_x;
static const uint8_t ClusterSizeY = ClusterType::cluster_size_y; static const uint8_t SavedClusterSizeY = ClusterType::cluster_size_y;
using CT = typename ClusterType::value_type; using CT = typename ClusterType::value_type;
public: public:
@@ -34,10 +36,12 @@ class ClusterFinder {
* *
*/ */
ClusterFinder(Shape<2> image_size, PEDESTAL_TYPE nSigma = 5.0, ClusterFinder(Shape<2> image_size, PEDESTAL_TYPE nSigma = 5.0,
size_t capacity = 1000000) size_t capacity = 1000000,
uint32_t cluster_size_x = 3, uint32_t cluster_size_y = 3)
: m_image_size(image_size), m_nSigma(nSigma), : m_image_size(image_size), m_nSigma(nSigma),
c2(sqrt((ClusterSizeY + 1) / 2 * (ClusterSizeX + 1) / 2)), c2(sqrt((cluster_size_y + 1) / 2 * (cluster_size_x + 1) / 2)),
c3(sqrt(ClusterSizeX * ClusterSizeY)), c3(sqrt(cluster_size_x * cluster_size_y)),
ClusterSizeX(cluster_size_x), ClusterSizeY(cluster_size_y),
m_pedestal(image_size[0], image_size[1]), m_clusters(capacity) { m_pedestal(image_size[0], image_size[1]), m_clusters(capacity) {
LOG(logDEBUG) << "ClusterFinder: " LOG(logDEBUG) << "ClusterFinder: "
<< "image_size: " << image_size[0] << "x" << image_size[1] << "image_size: " << image_size[0] << "x" << image_size[1]
@@ -74,6 +78,9 @@ class ClusterFinder {
// // 4,4 -> +/- 2 // // 4,4 -> +/- 2
int dy = ClusterSizeY / 2; int dy = ClusterSizeY / 2;
int dx = ClusterSizeX / 2; int dx = ClusterSizeX / 2;
int dy2 = SavedClusterSizeY / 2;
int dx2 = SavedClusterSizeX / 2;
int has_center_pixel_x = int has_center_pixel_x =
ClusterSizeX % ClusterSizeX %
2; // for even sized clusters there is no proper cluster center and 2; // for even sized clusters there is no proper cluster center and
@@ -135,16 +142,14 @@ class ClusterFinder {
// It's worth redoing the look since most of the time we // It's worth redoing the look since most of the time we
// don't have a photon // don't have a photon
int i = 0; int i = 0;
for (int ir = -dy; ir < dy + has_center_pixel_y; ir++) { for (int ir = -dy2; ir < dy2 + has_center_pixel_y; ir++) {
for (int ic = -dx; ic < dx + has_center_pixel_x; ic++) { for (int ic = -dx2; ic < dx2 + has_center_pixel_y; ic++) {
if (ix + ic >= 0 && ix + ic < frame.shape(1) && if (ix + ic >= 0 && ix + ic < frame.shape(1) &&
iy + ir >= 0 && iy + ir < frame.shape(0)) { iy + ir >= 0 && iy + ir < frame.shape(0)) {
CT tmp =
static_cast<CT>(frame(iy + ir, ix + ic)) - CT tmp = static_cast<CT>(frame(iy + ir, ix + ic)) - static_cast<CT>(m_pedestal.mean(iy + ir, ix + ic));
static_cast<CT>( cluster.data[i] = tmp; // Watch for out of bounds access
m_pedestal.mean(iy + ir, ix + ic));
cluster.data[i] =
tmp; // Watch for out of bounds access
} }
i++; i++;
} }

View File

@@ -121,7 +121,8 @@ class ClusterFinderMT {
* @param n_threads number of threads to use * @param n_threads number of threads to use
*/ */
ClusterFinderMT(Shape<2> image_size, PEDESTAL_TYPE nSigma = 5.0, ClusterFinderMT(Shape<2> image_size, PEDESTAL_TYPE nSigma = 5.0,
size_t capacity = 2000, size_t n_threads = 3) size_t capacity = 2000, size_t n_threads = 3,
uint32_t cluster_size_x = 3, uint32_t cluster_size_y = 3)
: m_n_threads(n_threads) { : m_n_threads(n_threads) {
LOG(logDEBUG1) << "ClusterFinderMT: " LOG(logDEBUG1) << "ClusterFinderMT: "
@@ -134,7 +135,7 @@ class ClusterFinderMT {
m_cluster_finders.push_back( m_cluster_finders.push_back(
std::make_unique< std::make_unique<
ClusterFinder<ClusterType, FRAME_TYPE, PEDESTAL_TYPE>>( ClusterFinder<ClusterType, FRAME_TYPE, PEDESTAL_TYPE>>(
image_size, nSigma, capacity)); image_size, nSigma, capacity, cluster_size_x, cluster_size_y));
} }
for (size_t i = 0; i < n_threads; i++) { for (size_t i = 0; i < n_threads; i++) {
m_input_queues.emplace_back(std::make_unique<InputQueue>(200)); m_input_queues.emplace_back(std::make_unique<InputQueue>(200));

View File

@@ -32,7 +32,8 @@ class ClusterVector; // Forward declaration
*/ */
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY, template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
typename CoordType> typename CoordType>
class ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> { class ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>>
{
std::vector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> m_data{}; std::vector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> m_data{};
int32_t m_frame_number{0}; // TODO! Check frame number size and type int32_t m_frame_number{0}; // TODO! Check frame number size and type
@@ -172,40 +173,4 @@ class ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> {
} }
}; };
/**
* @brief Reduce a cluster to a 2x2 cluster by selecting the 2x2 block with the
* highest sum.
* @param cv Clustervector containing clusters to reduce
* @return Clustervector with reduced clusters
*/
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
typename CoordType = uint16_t>
ClusterVector<Cluster<T, 2, 2, CoordType>> reduce_to_2x2(
const ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>>
&cv) {
ClusterVector<Cluster<T, 2, 2, CoordType>> result;
for (const auto &c : cv) {
result.push_back(reduce_to_2x2(c));
}
return result;
}
/**
* @brief Reduce a cluster to a 3x3 cluster by selecting the 3x3 block with the
* highest sum.
* @param cv Clustervector containing clusters to reduce
* @return Clustervector with reduced clusters
*/
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
typename CoordType = uint16_t>
ClusterVector<Cluster<T, 3, 3, CoordType>> reduce_to_3x3(
const ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>>
&cv) {
ClusterVector<Cluster<T, 3, 3, CoordType>> result;
for (const auto &c : cv) {
result.push_back(reduce_to_3x3(c));
}
return result;
}
} // namespace aare } // namespace aare

View File

@@ -105,7 +105,7 @@ class Frame {
* @tparam T type of the pixels * @tparam T type of the pixels
* @return NDView<T, 2> * @return NDView<T, 2>
*/ */
template <typename T> NDView<T, 2> view() & { template <typename T> NDView<T, 2> view() {
std::array<ssize_t, 2> shape = {static_cast<ssize_t>(m_rows), std::array<ssize_t, 2> shape = {static_cast<ssize_t>(m_rows),
static_cast<ssize_t>(m_cols)}; static_cast<ssize_t>(m_cols)};
T *data = reinterpret_cast<T *>(m_data); T *data = reinterpret_cast<T *>(m_data);

View File

@@ -69,27 +69,26 @@ Interpolator::interpolate(const ClusterVector<ClusterType> &clusters) {
// cBottomRight = 1, // cBottomRight = 1,
// cTopLeft = 2, // cTopLeft = 2,
// cTopRight = 3 // cTopRight = 3
// TODO: could also chaneg the sign of the eta calculation
switch (static_cast<corner>(eta.c)) { switch (static_cast<corner>(eta.c)) {
case corner::cTopLeft: case corner::cTopLeft:
dX = 0.0; dX = -1.;
dY = 0.0; dY = 0;
break; break;
case corner::cTopRight:; case corner::cTopRight:;
dX = 1.0; dX = 0;
dY = 0.0; dY = 0;
break; break;
case corner::cBottomLeft: case corner::cBottomLeft:
dX = 0.0; dX = -1.;
dY = 1.0; dY = -1.;
break; break;
case corner::cBottomRight: case corner::cBottomRight:
dX = 1.0; dX = 0.;
dY = 1.0; dY = -1.;
break; break;
} }
photon.x -= m_ietax(ix, iy, ie) - dX; photon.x += m_ietax(ix, iy, ie) * 2 + dX;
photon.y -= m_ietay(ix, iy, ie) - dY; photon.y += m_ietay(ix, iy, ie) * 2 + dY;
photons.push_back(photon); photons.push_back(photon);
} }
} else if (clusters.cluster_size_x() == 2 || } else if (clusters.cluster_size_x() == 2 ||
@@ -113,11 +112,10 @@ Interpolator::interpolate(const ClusterVector<ClusterType> &clusters) {
auto ix = last_smaller(m_etabinsx, eta.x); auto ix = last_smaller(m_etabinsx, eta.x);
auto iy = last_smaller(m_etabinsy, eta.y); auto iy = last_smaller(m_etabinsy, eta.y);
// TODO: why 2? photon.x += m_ietax(ix, iy, ie) *
photon.x -= 2; // eta goes between 0 and 1 but we could move the hit
m_ietax(ix, iy, ie); // eta goes between 0 and 1 but we could // anywhere in the 2x2
// move the hit anywhere in the 2x2 photon.y += m_ietay(ix, iy, ie) * 2;
photon.y -= m_ietay(ix, iy, ie);
photons.push_back(photon); photons.push_back(photon);
} }

View File

@@ -42,16 +42,14 @@ class RawFileNameComponents {
class ScanParameters { class ScanParameters {
bool m_enabled = false; bool m_enabled = false;
DACIndex m_dac{}; std::string m_dac;
int m_start = 0; int m_start = 0;
int m_stop = 0; int m_stop = 0;
int m_step = 0; int m_step = 0;
int64_t m_settleTime = 0; // [ns] // TODO! add settleTime, requires string to time conversion
public: public:
ScanParameters(const std::string &par); ScanParameters(const std::string &par);
ScanParameters(const bool enabled, const DACIndex dac, const int start,
const int stop, const int step, const int64_t settleTime);
ScanParameters() = default; ScanParameters() = default;
ScanParameters(const ScanParameters &) = default; ScanParameters(const ScanParameters &) = default;
ScanParameters &operator=(const ScanParameters &) = default; ScanParameters &operator=(const ScanParameters &) = default;
@@ -59,9 +57,8 @@ class ScanParameters {
int start() const; int start() const;
int stop() const; int stop() const;
int step() const; int step() const;
DACIndex dac() const; const std::string &dac() const;
bool enabled() const; bool enabled() const;
int64_t settleTime() const;
void increment_stop(); void increment_stop();
}; };

View File

@@ -215,122 +215,6 @@ enum class DetectorType {
Unknown Unknown
}; };
/**
* @brief Enum class to define the Digital to Analog converter
* The values are the same as in slsDetectorPackage
*/
enum DACIndex {
DAC_0,
DAC_1,
DAC_2,
DAC_3,
DAC_4,
DAC_5,
DAC_6,
DAC_7,
DAC_8,
DAC_9,
DAC_10,
DAC_11,
DAC_12,
DAC_13,
DAC_14,
DAC_15,
DAC_16,
DAC_17,
VSVP,
VTRIM,
VRPREAMP,
VRSHAPER,
VSVN,
VTGSTV,
VCMP_LL,
VCMP_LR,
VCAL,
VCMP_RL,
RXB_RB,
RXB_LB,
VCMP_RR,
VCP,
VCN,
VISHAPER,
VTHRESHOLD,
IO_DELAY,
VREF_DS,
VOUT_CM,
VIN_CM,
VREF_COMP,
VB_COMP,
VDD_PROT,
VIN_COM,
VREF_PRECH,
VB_PIXBUF,
VB_DS,
VREF_H_ADC,
VB_COMP_FE,
VB_COMP_ADC,
VCOM_CDS,
VREF_RSTORE,
VB_OPA_1ST,
VREF_COMP_FE,
VCOM_ADC1,
VREF_L_ADC,
VREF_CDS,
VB_CS,
VB_OPA_FD,
VCOM_ADC2,
VCASSH,
VTH2,
VRSHAPER_N,
VIPRE_OUT,
VTH3,
VTH1,
VICIN,
VCAS,
VCAL_N,
VIPRE,
VCAL_P,
VDCSH,
VBP_COLBUF,
VB_SDA,
VCASC_SFP,
VIPRE_CDS,
IBIAS_SFP,
ADC_VPP,
HIGH_VOLTAGE,
TEMPERATURE_ADC,
TEMPERATURE_FPGA,
TEMPERATURE_FPGAEXT,
TEMPERATURE_10GE,
TEMPERATURE_DCDC,
TEMPERATURE_SODL,
TEMPERATURE_SODR,
TEMPERATURE_FPGA2,
TEMPERATURE_FPGA3,
TRIMBIT_SCAN,
V_POWER_A = 100,
V_POWER_B = 101,
V_POWER_C = 102,
V_POWER_D = 103,
V_POWER_IO = 104,
V_POWER_CHIP = 105,
I_POWER_A = 106,
I_POWER_B = 107,
I_POWER_C = 108,
I_POWER_D = 109,
I_POWER_IO = 110,
V_LIMIT = 111,
SLOW_ADC0 = 1000,
SLOW_ADC1,
SLOW_ADC2,
SLOW_ADC3,
SLOW_ADC4,
SLOW_ADC5,
SLOW_ADC6,
SLOW_ADC7,
SLOW_ADC_TEMP
};
enum class TimingMode { Auto, Trigger }; enum class TimingMode { Auto, Trigger };
enum class FrameDiscardPolicy { NoDiscard, Discard, DiscardPartial }; enum class FrameDiscardPolicy { NoDiscard, Discard, DiscardPartial };
@@ -347,15 +231,6 @@ template <> FrameDiscardPolicy StringTo(const std::string & /*mode*/);
using DataTypeVariants = std::variant<uint16_t, uint32_t>; using DataTypeVariants = std::variant<uint16_t, uint32_t>;
constexpr uint16_t ADC_MASK = constexpr uint16_t ADC_MASK = 0x3FFF; // used to mask out the gain bits in Jungfrau
0x3FFF; // used to mask out the gain bits in Jungfrau
/**
* @brief Convert a string to a DACIndex
* @param arg string representation of the dacIndex
* @return DACIndex
* @throw invalid argument error if the string does not match any DACIndex
*/
template <> DACIndex StringTo(const std::string &arg);
} // namespace aare } // namespace aare

View File

@@ -26,33 +26,34 @@ def _get_class(name, cluster_size, dtype):
def ClusterFinder(image_size, cluster_size, n_sigma=5, dtype = np.int32, capacity = 1024): def ClusterFinder(image_size, saved_cluster_size, checked_cluster_size, n_sigma=5, dtype = np.int32, capacity = 1024):
""" """
Factory function to create a ClusterFinder object. Provides a cleaner syntax for Factory function to create a ClusterFinder object. Provides a cleaner syntax for
the templated ClusterFinder in C++. the templated ClusterFinder in C++.
""" """
cls = _get_class("ClusterFinder", cluster_size, dtype) cls = _get_class("ClusterFinder", saved_cluster_size, dtype)
return cls(image_size, n_sigma=n_sigma, capacity=capacity) return cls(image_size, n_sigma=n_sigma, capacity=capacity, cluster_size_x=checked_cluster_size[0], cluster_size_y=checked_cluster_size[1])
def ClusterFinderMT(image_size, cluster_size = (3,3), dtype=np.int32, n_sigma=5, capacity = 1024, n_threads = 3): def ClusterFinderMT(image_size, saved_cluster_size = (3,3), checked_cluster_size = (3,3), dtype=np.int32, n_sigma=5, capacity = 1024, n_threads = 3):
""" """
Factory function to create a ClusterFinderMT object. Provides a cleaner syntax for Factory function to create a ClusterFinderMT object. Provides a cleaner syntax for
the templated ClusterFinderMT in C++. the templated ClusterFinderMT in C++.
""" """
cls = _get_class("ClusterFinderMT", cluster_size, dtype) cls = _get_class("ClusterFinderMT", saved_cluster_size, dtype)
return cls(image_size, n_sigma=n_sigma, capacity=capacity, n_threads=n_threads) return cls(image_size, n_sigma=n_sigma, capacity=capacity, n_threads=n_threads, cluster_size_x=checked_cluster_size[0], cluster_size_y=checked_cluster_size[1])
def ClusterCollector(clusterfindermt, dtype=np.int32):
def ClusterCollector(clusterfindermt, cluster_size = (3,3), dtype=np.int32):
""" """
Factory function to create a ClusterCollector object. Provides a cleaner syntax for Factory function to create a ClusterCollector object. Provides a cleaner syntax for
the templated ClusterCollector in C++. the templated ClusterCollector in C++.
""" """
cls = _get_class("ClusterCollector", clusterfindermt.cluster_size, dtype) cls = _get_class("ClusterCollector", cluster_size, dtype)
return cls(clusterfindermt) return cls(clusterfindermt)
def ClusterFileSink(clusterfindermt, cluster_file, dtype=np.int32): def ClusterFileSink(clusterfindermt, cluster_file, dtype=np.int32):

View File

@@ -17,7 +17,7 @@ from .ClusterVector import ClusterVector
from ._aare import fit_gaus, fit_pol1, fit_scurve, fit_scurve2 from ._aare import fit_gaus, fit_pol1, fit_scurve, fit_scurve2
from ._aare import Interpolator from ._aare import Interpolator
from ._aare import calculate_eta2 from ._aare import calculate_eta2
from ._aare import reduce_to_2x2, reduce_to_3x3
from ._aare import apply_custom_weights from ._aare import apply_custom_weights

View File

@@ -24,8 +24,7 @@ void define_Cluster(py::module &m, const std::string &typestr) {
py::class_<Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>>( py::class_<Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>>(
m, class_name.c_str(), py::buffer_protocol()) m, class_name.c_str(), py::buffer_protocol())
.def(py::init([](CoordType x, CoordType y, .def(py::init([](uint8_t x, uint8_t y, py::array_t<Type> data) {
py::array_t<Type, py::array::forcecast> data) {
py::buffer_info buf_info = data.request(); py::buffer_info buf_info = data.request();
Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType> cluster; Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType> cluster;
cluster.x = x; cluster.x = x;
@@ -35,58 +34,31 @@ void define_Cluster(py::module &m, const std::string &typestr) {
cluster.data[i] = r(i); cluster.data[i] = r(i);
} }
return cluster; return cluster;
})) }));
// TODO! Review if to keep or not /*
.def_property_readonly( //TODO! Review if to keep or not
"data", .def_property(
[](Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType> &c) "data",
-> py::array { [](ClusterType &c) -> py::array {
return py::array(py::buffer_info( return py::array(py::buffer_info(
c.data.data(), sizeof(Type), c.data, sizeof(Type),
py::format_descriptor<Type>::format(), // Type py::format_descriptor<Type>::format(), // Type
// format // format
2, // Number of dimensions 1, // Number of dimensions
{static_cast<ssize_t>(ClusterSizeX), {static_cast<ssize_t>(ClusterSizeX *
static_cast<ssize_t>(ClusterSizeY)}, // Shape (flattened) ClusterSizeY)}, // Shape (flattened)
{sizeof(Type) * ClusterSizeY, sizeof(Type)} {sizeof(Type)} // Stride (step size between elements)
// Stride (step size between elements) ));
));
})
.def_readonly("x",
&Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>::x)
.def_readonly("y",
&Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>::y);
}
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
typename CoordType = int16_t>
void reduce_to_3x3(py::module &m) {
m.def(
"reduce_to_3x3",
[](const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cl) {
return reduce_to_3x3(cl);
}, },
py::return_value_policy::move, [](ClusterType &c, py::array_t<Type> arr) {
"Reduce cluster to 3x3 subcluster by taking the 3x3 subcluster with " py::buffer_info buf_info = arr.request();
"the highest photon energy."); Type *ptr = static_cast<Type *>(buf_info.ptr);
} std::copy(ptr, ptr + ClusterSizeX * ClusterSizeY,
c.data); // TODO dont iterate over centers!!!
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY, });
typename CoordType = int16_t> */
void reduce_to_2x2(py::module &m) {
m.def(
"reduce_to_2x2",
[](const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cl) {
return reduce_to_2x2(cl);
},
py::return_value_policy::move,
"Reduce cluster to 2x2 subcluster by taking the 2x2 subcluster with "
"the highest photon energy.");
} }
#pragma GCC diagnostic pop #pragma GCC diagnostic pop

View File

@@ -44,14 +44,14 @@ void define_ClusterFile(py::module &m, const std::string &typestr) {
auto v = new ClusterVector<ClusterType>(self.read_frame()); auto v = new ClusterVector<ClusterType>(self.read_frame());
return v; return v;
}) })
.def("set_roi", &ClusterFile<ClusterType>::set_roi, py::arg("roi")) .def("set_roi", &ClusterFile<ClusterType>::set_roi,
py::arg("roi"))
.def( .def(
"set_noise_map", "set_noise_map",
[](ClusterFile<ClusterType> &self, py::array_t<int32_t> noise_map) { [](ClusterFile<ClusterType> &self, py::array_t<int32_t> noise_map) {
auto view = make_view_2d(noise_map); auto view = make_view_2d(noise_map);
self.set_noise_map(view); self.set_noise_map(view);
}, }, py::arg("noise_map"))
py::arg("noise_map"))
.def("set_gain_map", .def("set_gain_map",
[](ClusterFile<ClusterType> &self, py::array_t<double> gain_map) { [](ClusterFile<ClusterType> &self, py::array_t<double> gain_map) {
@@ -84,19 +84,11 @@ template <typename Type, uint8_t CoordSizeX, uint8_t CoordSizeY,
typename CoordType = uint16_t> typename CoordType = uint16_t>
void register_calculate_eta(py::module &m) { void register_calculate_eta(py::module &m) {
using ClusterType = Cluster<Type, CoordSizeX, CoordSizeY, CoordType>; using ClusterType = Cluster<Type, CoordSizeX, CoordSizeY, CoordType>;
m.def("calculate_eta2", m.def("calculate_eta2",
[](const aare::ClusterVector<ClusterType> &clusters) { [](const aare::ClusterVector<ClusterType> &clusters) {
auto eta2 = new NDArray<double, 2>(calculate_eta2(clusters)); auto eta2 = new NDArray<double, 2>(calculate_eta2(clusters));
return return_image_data(eta2); return return_image_data(eta2);
}); });
m.def("calculate_eta2", [](const aare::Cluster<Type, CoordSizeX, CoordSizeY,
CoordType> &cluster) {
auto eta2 = calculate_eta2(cluster);
// TODO return proper eta class
return py::make_tuple(eta2.x, eta2.y, eta2.sum);
});
} }
#pragma GCC diagnostic pop #pragma GCC diagnostic pop

View File

@@ -30,8 +30,9 @@ void define_ClusterFinder(py::module &m, const std::string &typestr) {
py::class_<ClusterFinder<ClusterType, uint16_t, pd_type>>( py::class_<ClusterFinder<ClusterType, uint16_t, pd_type>>(
m, class_name.c_str()) m, class_name.c_str())
.def(py::init<Shape<2>, pd_type, size_t>(), py::arg("image_size"), .def(py::init<Shape<2>, pd_type, size_t, uint32_t, uint32_t>(), py::arg("image_size"),
py::arg("n_sigma") = 5.0, py::arg("capacity") = 1'000'000) py::arg("n_sigma") = 5.0, py::arg("capacity") = 1'000'000,
py::arg("cluster_size_x") = 3, py::arg("cluster_size_y") = 3)
.def("push_pedestal_frame", .def("push_pedestal_frame",
[](ClusterFinder<ClusterType, uint16_t, pd_type> &self, [](ClusterFinder<ClusterType, uint16_t, pd_type> &self,
py::array_t<uint16_t> frame) { py::array_t<uint16_t> frame) {

View File

@@ -30,9 +30,10 @@ void define_ClusterFinderMT(py::module &m, const std::string &typestr) {
py::class_<ClusterFinderMT<ClusterType, uint16_t, pd_type>>( py::class_<ClusterFinderMT<ClusterType, uint16_t, pd_type>>(
m, class_name.c_str()) m, class_name.c_str())
.def(py::init<Shape<2>, pd_type, size_t, size_t>(), .def(py::init<Shape<2>, pd_type, size_t, size_t, uint32_t, uint32_t>(),
py::arg("image_size"), py::arg("n_sigma") = 5.0, py::arg("image_size"), py::arg("n_sigma") = 5.0,
py::arg("capacity") = 2048, py::arg("n_threads") = 3) py::arg("capacity") = 2048, py::arg("n_threads") = 3,
py::arg("cluster_size_x") = 3, py::arg("cluster_size_y") = 3)
.def("push_pedestal_frame", .def("push_pedestal_frame",
[](ClusterFinderMT<ClusterType, uint16_t, pd_type> &self, [](ClusterFinderMT<ClusterType, uint16_t, pd_type> &self,
py::array_t<uint16_t> frame) { py::array_t<uint16_t> frame) {

View File

@@ -104,47 +104,4 @@ void define_ClusterVector(py::module &m, const std::string &typestr) {
}); });
} }
template <typename Type, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
typename CoordType = uint16_t>
void define_2x2_reduction(py::module &m) {
m.def(
"reduce_to_2x2",
[](const ClusterVector<
Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>> &cv) {
return new ClusterVector<Cluster<Type, 2, 2, CoordType>>(
reduce_to_2x2(cv));
},
R"(
Reduce cluster to 2x2 subcluster by taking the 2x2 subcluster with
the highest photon energy."
Parameters
----------
cv : ClusterVector
)",
py::arg("clustervector"));
}
template <typename Type, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
typename CoordType = uint16_t>
void define_3x3_reduction(py::module &m) {
m.def(
"reduce_to_3x3",
[](const ClusterVector<
Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>> &cv) {
return new ClusterVector<Cluster<Type, 3, 3, CoordType>>(
reduce_to_3x3(cv));
},
R"(
Reduce cluster to 3x3 subcluster by taking the 3x3 subcluster with
the highest photon energy."
Parameters
----------
cv : ClusterVector
)",
py::arg("clustervector"));
}
#pragma GCC diagnostic pop #pragma GCC diagnostic pop

View File

@@ -47,9 +47,7 @@ double, 'f' for float)
define_ClusterFileSink<T, N, M, U>(m, "Cluster" #N "x" #M #TYPE_CODE); \ define_ClusterFileSink<T, N, M, U>(m, "Cluster" #N "x" #M #TYPE_CODE); \
define_ClusterCollector<T, N, M, U>(m, "Cluster" #N "x" #M #TYPE_CODE); \ define_ClusterCollector<T, N, M, U>(m, "Cluster" #N "x" #M #TYPE_CODE); \
define_Cluster<T, N, M, U>(m, #N "x" #M #TYPE_CODE); \ define_Cluster<T, N, M, U>(m, #N "x" #M #TYPE_CODE); \
register_calculate_eta<T, N, M, U>(m); \ register_calculate_eta<T, N, M, U>(m);
define_2x2_reduction<T, N, M, U>(m); \
reduce_to_2x2<T, N, M, U>(m);
PYBIND11_MODULE(_aare, m) { PYBIND11_MODULE(_aare, m) {
define_file_io_bindings(m); define_file_io_bindings(m);
@@ -87,31 +85,8 @@ PYBIND11_MODULE(_aare, m) {
DEFINE_CLUSTER_BINDINGS(double, 9, 9, uint16_t, d); DEFINE_CLUSTER_BINDINGS(double, 9, 9, uint16_t, d);
DEFINE_CLUSTER_BINDINGS(float, 9, 9, uint16_t, f); DEFINE_CLUSTER_BINDINGS(float, 9, 9, uint16_t, f);
// DEFINE_CLUSTER_BINDINGS(double, 2, 1, uint16_t, d); DEFINE_CLUSTER_BINDINGS(int, 21, 21, uint16_t, i);
DEFINE_CLUSTER_BINDINGS(double, 21, 21, uint16_t, d);
define_3x3_reduction<int, 3, 3, uint16_t>(m); DEFINE_CLUSTER_BINDINGS(float, 21, 21, uint16_t, f);
define_3x3_reduction<double, 3, 3, uint16_t>(m);
define_3x3_reduction<float, 3, 3, uint16_t>(m);
define_3x3_reduction<int, 5, 5, uint16_t>(m);
define_3x3_reduction<double, 5, 5, uint16_t>(m);
define_3x3_reduction<float, 5, 5, uint16_t>(m);
define_3x3_reduction<int, 7, 7, uint16_t>(m);
define_3x3_reduction<double, 7, 7, uint16_t>(m);
define_3x3_reduction<float, 7, 7, uint16_t>(m);
define_3x3_reduction<int, 9, 9, uint16_t>(m);
define_3x3_reduction<double, 9, 9, uint16_t>(m);
define_3x3_reduction<float, 9, 9, uint16_t>(m);
reduce_to_3x3<int, 3, 3, uint16_t>(m);
reduce_to_3x3<double, 3, 3, uint16_t>(m);
reduce_to_3x3<float, 3, 3, uint16_t>(m);
reduce_to_3x3<int, 5, 5, uint16_t>(m);
reduce_to_3x3<double, 5, 5, uint16_t>(m);
reduce_to_3x3<float, 5, 5, uint16_t>(m);
reduce_to_3x3<int, 7, 7, uint16_t>(m);
reduce_to_3x3<double, 7, 7, uint16_t>(m);
reduce_to_3x3<float, 7, 7, uint16_t>(m);
reduce_to_3x3<int, 9, 9, uint16_t>(m);
reduce_to_3x3<double, 9, 9, uint16_t>(m);
reduce_to_3x3<float, 9, 9, uint16_t>(m);
} }

View File

@@ -53,8 +53,8 @@ def test_Interpolator():
assert interpolated_photons.size == 1 assert interpolated_photons.size == 1
assert interpolated_photons[0]["x"] == 0 assert interpolated_photons[0]["x"] == -1
assert interpolated_photons[0]["y"] == 0 assert interpolated_photons[0]["y"] == -1
assert interpolated_photons[0]["energy"] == 4 #eta_sum = 4, dx, dy = -1,-1 m_ietax = 0, m_ietay = 0 assert interpolated_photons[0]["energy"] == 4 #eta_sum = 4, dx, dy = -1,-1 m_ietax = 0, m_ietay = 0
clustervector = _aare.ClusterVector_Cluster2x2i() clustervector = _aare.ClusterVector_Cluster2x2i()
@@ -84,7 +84,7 @@ def test_calculate_eta():
assert eta2[0,0] == 0.5 assert eta2[0,0] == 0.5
assert eta2[0,1] == 0.5 assert eta2[0,1] == 0.5
assert eta2[1,0] == 0.5 assert eta2[1,0] == 0.5
assert eta2[1,1] == 0.4 #2/5 assert eta2[1,1] == 0.6 #1/5
def test_cluster_finder(): def test_cluster_finder():
"""Test ClusterFinder""" """Test ClusterFinder"""
@@ -101,27 +101,6 @@ def test_cluster_finder():
assert clusters.size == 0 assert clusters.size == 0
def test_2x2_reduction():
"""Test 2x2 Reduction"""
cluster = _aare.Cluster3x3i(5,5,np.array([1, 1, 1, 2, 3, 1, 2, 2, 1], dtype=np.int32))
reduced_cluster = _aare.reduce_to_2x2(cluster)
assert reduced_cluster.x == 4
assert reduced_cluster.y == 5
assert (reduced_cluster.data == np.array([[2, 3], [2, 2]], dtype=np.int32)).all()
def test_3x3_reduction():
"""Test 3x3 Reduction"""
cluster = _aare.Cluster5x5d(5,5,np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 2.0, 2.0, 3.0,
1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], dtype=np.double))
reduced_cluster = _aare.reduce_to_3x3(cluster)
assert reduced_cluster.x == 4
assert reduced_cluster.y == 5
assert (reduced_cluster.data == np.array([[1.0, 2.0, 1.0], [2.0, 2.0, 3.0], [1.0, 2.0, 1.0]], dtype=np.double)).all()

View File

@@ -5,7 +5,7 @@ import time
from pathlib import Path from pathlib import Path
import pickle import pickle
from aare import ClusterFile, ClusterVector from aare import ClusterFile
from aare import _aare from aare import _aare
from conftest import test_data_path from conftest import test_data_path
@@ -51,36 +51,4 @@ def test_make_a_hitmap_from_cluster_vector():
# print(img) # print(img)
# print(ref) # print(ref)
assert (img == ref).all() assert (img == ref).all()
def test_2x2_reduction():
cv = ClusterVector((3,3))
cv.push_back(_aare.Cluster3x3i(5, 5, np.array([1, 1, 1, 2, 3, 1, 2, 2, 1], dtype=np.int32)))
cv.push_back(_aare.Cluster3x3i(5, 5, np.array([2, 2, 1, 2, 3, 1, 1, 1, 1], dtype=np.int32)))
reduced_cv = np.array(_aare.reduce_to_2x2(cv), copy=False)
assert reduced_cv.size == 2
assert reduced_cv[0]["x"] == 4
assert reduced_cv[0]["y"] == 5
assert (reduced_cv[0]["data"] == np.array([[2, 3], [2, 2]], dtype=np.int32)).all()
assert reduced_cv[1]["x"] == 4
assert reduced_cv[1]["y"] == 6
assert (reduced_cv[1]["data"] == np.array([[2, 2], [2, 3]], dtype=np.int32)).all()
def test_3x3_reduction():
cv = _aare.ClusterVector_Cluster5x5d()
cv.push_back(_aare.Cluster5x5d(5,5,np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 2.0, 2.0, 3.0,
1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], dtype=np.double)))
cv.push_back(_aare.Cluster5x5d(5,5,np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 2.0, 2.0, 3.0,
1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], dtype=np.double)))
reduced_cv = np.array(_aare.reduce_to_3x3(cv), copy=False)
assert reduced_cv.size == 2
assert reduced_cv[0]["x"] == 4
assert reduced_cv[0]["y"] == 5
assert (reduced_cv[0]["data"] == np.array([[1.0, 2.0, 1.0], [2.0, 2.0, 3.0], [1.0, 2.0, 1.0]], dtype=np.double)).all()

View File

@@ -1,148 +0,0 @@
import pytest
import numpy as np
import boost_histogram as bh
import pickle
from scipy.stats import multivariate_normal
from aare import Interpolator, calculate_eta2
from aare._aare import ClusterVector_Cluster2x2d, Cluster2x2d, Cluster3x3d, ClusterVector_Cluster3x3d
from conftest import test_data_path
pixel_width = 1e-4
values = np.arange(0.5*pixel_width, 0.1, pixel_width)
num_pixels = values.size
X, Y = np.meshgrid(values, values)
data_points = np.stack([X.ravel(), Y.ravel()], axis=1)
variance = 10*pixel_width
covariance_matrix = np.array([[variance, 0],[0, variance]])
def create_photon_hit_with_gaussian_distribution(mean, covariance_matrix, data_points):
gaussian = multivariate_normal(mean=mean, cov=covariance_matrix)
probability_values = gaussian.pdf(data_points)
return (probability_values.reshape(X.shape)).round() #python bindings only support frame types of uint16_t
def photon_hit_in_euclidean_space(cluster_center, pixels_per_superpixel, photon_hit):
scaled_photon_hit_x = cluster_center - (1 - photon_hit[0][0])*pixels_per_superpixel*pixel_width
scaled_photon_hit_y = cluster_center - (1 - photon_hit[0][1])*pixels_per_superpixel*pixel_width
return (scaled_photon_hit_x, scaled_photon_hit_y)
def create_2x2cluster_from_frame(frame, pixels_per_superpixel):
return Cluster2x2d(1, 1, np.array([frame[0:pixels_per_superpixel, 0:pixels_per_superpixel].sum(),
frame[0:pixels_per_superpixel, pixels_per_superpixel:2*pixels_per_superpixel].sum(),
frame[pixels_per_superpixel:2*pixels_per_superpixel, 0:pixels_per_superpixel].sum(),
frame[pixels_per_superpixel:2*pixels_per_superpixel, pixels_per_superpixel:2*pixels_per_superpixel].sum()], dtype=np.float64))
def create_3x3cluster_from_frame(frame, pixels_per_superpixel):
return Cluster3x3d(1, 1, np.array([frame[0:pixels_per_superpixel, 0:pixels_per_superpixel].sum(),
frame[0:pixels_per_superpixel, pixels_per_superpixel:2*pixels_per_superpixel].sum(),
frame[0:pixels_per_superpixel, 2*pixels_per_superpixel:3*pixels_per_superpixel].sum(),
frame[pixels_per_superpixel:2*pixels_per_superpixel, 0:pixels_per_superpixel].sum(),
frame[pixels_per_superpixel:2*pixels_per_superpixel, pixels_per_superpixel:2*pixels_per_superpixel].sum(),
frame[pixels_per_superpixel:2*pixels_per_superpixel, 2*pixels_per_superpixel:3*pixels_per_superpixel].sum(),
frame[2*pixels_per_superpixel:3*pixels_per_superpixel, 0:pixels_per_superpixel].sum(),
frame[2*pixels_per_superpixel:3*pixels_per_superpixel, pixels_per_superpixel:2*pixels_per_superpixel].sum(),
frame[2*pixels_per_superpixel:3*pixels_per_superpixel, 2*pixels_per_superpixel:3*pixels_per_superpixel].sum()], dtype=np.float64))
def calculate_eta_distribution(num_frames, pixels_per_superpixel, random_number_generator, cluster_2x2 = True):
hist = bh.Histogram(
bh.axis.Regular(100, -0.2, 1.2),
bh.axis.Regular(100, -0.2, 1.2), bh.axis.Regular(1, 0, num_pixels*num_pixels*1/(variance*2*np.pi)))
for _ in range(0, num_frames):
mean_x = random_number_generator.uniform(pixels_per_superpixel*pixel_width, 2*pixels_per_superpixel*pixel_width)
mean_y = random_number_generator.uniform(pixels_per_superpixel*pixel_width, 2*pixels_per_superpixel*pixel_width)
frame = create_photon_hit_with_gaussian_distribution(np.array([mean_x, mean_y]), variance, data_points)
cluster = None
if cluster_2x2:
cluster = create_2x2cluster_from_frame(frame, pixels_per_superpixel)
else:
cluster = create_3x3cluster_from_frame(frame, pixels_per_superpixel)
eta2 = calculate_eta2(cluster)
hist.fill(eta2[0], eta2[1], eta2[2])
return hist
@pytest.mark.withdata
def test_interpolation_of_2x2_cluster(test_data_path):
"""Test Interpolation of 2x2 cluster from Photon hit with Gaussian Distribution"""
#TODO maybe better to compute in test instead of loading - depends on eta
"""
filename = test_data_path/"eta_distributions"/"eta_distribution_2x2cluster_gaussian.pkl"
with open(filename, "rb") as f:
eta_distribution = pickle.load(f)
"""
num_frames = 1000
pixels_per_superpixel = int(num_pixels*0.5)
random_number_generator = np.random.default_rng(42)
eta_distribution = calculate_eta_distribution(num_frames, pixels_per_superpixel, random_number_generator)
interpolation = Interpolator(eta_distribution, eta_distribution.axes[0].edges[:-1], eta_distribution.axes[1].edges[:-1], eta_distribution.axes[2].edges[:-1])
#actual photon hit
mean = 1.2*pixels_per_superpixel*pixel_width
mean = np.array([mean, mean])
frame = create_photon_hit_with_gaussian_distribution(mean, covariance_matrix, data_points)
cluster = create_2x2cluster_from_frame(frame, pixels_per_superpixel)
clustervec = ClusterVector_Cluster2x2d()
clustervec.push_back(cluster)
interpolated_photon = interpolation.interpolate(clustervec)
assert interpolated_photon.size == 1
cluster_center = 1.5*pixels_per_superpixel*pixel_width
scaled_photon_hit = photon_hit_in_euclidean_space(cluster_center, pixels_per_superpixel, interpolated_photon)
assert (np.linalg.norm(scaled_photon_hit - mean) < np.linalg.norm(np.array([cluster_center, cluster_center] - mean)))
@pytest.mark.withdata
def test_interpolation_of_3x3_cluster(test_data_path):
"""Test Interpolation of 3x3 Cluster from Photon hit with Gaussian Distribution"""
#TODO maybe better to compute in test instead of loading - depends on eta
"""
filename = test_data_path/"eta_distributions"/"eta_distribution_3x3cluster_gaussian.pkl"
with open(filename, "rb") as f:
eta_distribution = pickle.load(f)
"""
num_frames = 1000
pixels_per_superpixel = int(num_pixels/3)
random_number_generator = np.random.default_rng(42)
eta_distribution = calculate_eta_distribution(num_frames, pixels_per_superpixel, random_number_generator, False)
interpolation = Interpolator(eta_distribution, eta_distribution.axes[0].edges[:-1], eta_distribution.axes[1].edges[:-1], eta_distribution.axes[2].edges[:-1])
#actual photon hit
mean = 1.2*pixels_per_superpixel*pixel_width
mean = np.array([mean, mean])
frame = create_photon_hit_with_gaussian_distribution(mean, covariance_matrix, data_points)
cluster = create_3x3cluster_from_frame(frame, pixels_per_superpixel)
clustervec = ClusterVector_Cluster3x3d()
clustervec.push_back(cluster)
interpolated_photon = interpolation.interpolate(clustervec)
assert interpolated_photon.size == 1
cluster_center = 1.5*pixels_per_superpixel*pixel_width
scaled_photon_hit = photon_hit_in_euclidean_space(cluster_center, pixels_per_superpixel, interpolated_photon)
assert (np.linalg.norm(scaled_photon_hit - mean) < np.linalg.norm(np.array([cluster_center, cluster_center] - mean)))

File diff suppressed because one or more lines are too long

View File

@@ -18,86 +18,4 @@ TEST_CASE("Test sum of Cluster", "[.cluster]") {
Cluster<int, 2, 2> cluster{0, 0, {1, 2, 3, 4}}; Cluster<int, 2, 2> cluster{0, 0, {1, 2, 3, 4}};
CHECK(cluster.sum() == 10); CHECK(cluster.sum() == 10);
}
using ClusterTypes = std::variant<Cluster<int, 2, 2>, Cluster<int, 3, 3>,
Cluster<int, 5, 5>, Cluster<int, 2, 3>>;
using ClusterTypesLargerThan2x2 =
std::variant<Cluster<int, 3, 3>, Cluster<int, 4, 4>, Cluster<int, 5, 5>>;
TEST_CASE("Test reduce to 2x2 Cluster", "[.cluster]") {
auto [cluster, expected_reduced_cluster] = GENERATE(
std::make_tuple(ClusterTypes{Cluster<int, 2, 2>{5, 5, {1, 2, 3, 4}}},
Cluster<int, 2, 2>{4, 6, {1, 2, 3, 4}}),
std::make_tuple(
ClusterTypes{Cluster<int, 3, 3>{5, 5, {1, 1, 1, 1, 3, 2, 1, 2, 2}}},
Cluster<int, 2, 2>{5, 5, {3, 2, 2, 2}}),
std::make_tuple(
ClusterTypes{Cluster<int, 3, 3>{5, 5, {1, 1, 1, 2, 3, 1, 2, 2, 1}}},
Cluster<int, 2, 2>{4, 5, {2, 3, 2, 2}}),
std::make_tuple(
ClusterTypes{Cluster<int, 3, 3>{5, 5, {2, 2, 1, 2, 3, 1, 1, 1, 1}}},
Cluster<int, 2, 2>{4, 6, {2, 2, 2, 3}}),
std::make_tuple(
ClusterTypes{Cluster<int, 3, 3>{5, 5, {1, 2, 2, 1, 3, 2, 1, 1, 1}}},
Cluster<int, 2, 2>{5, 6, {2, 2, 3, 2}}),
std::make_tuple(ClusterTypes{Cluster<int, 5, 5>{
5, 5, {1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 3,
2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}}},
Cluster<int, 2, 2>{5, 6, {2, 2, 3, 2}}),
std::make_tuple(ClusterTypes{Cluster<int, 5, 5>{
5, 5, {1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 2, 3,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}}},
Cluster<int, 2, 2>{4, 6, {2, 2, 2, 3}}),
std::make_tuple(
ClusterTypes{Cluster<int, 2, 3>{5, 5, {2, 2, 3, 2, 1, 1}}},
Cluster<int, 2, 2>{4, 6, {2, 2, 3, 2}}));
auto reduced_cluster = std::visit(
[](const auto &clustertype) { return reduce_to_2x2(clustertype); },
cluster);
CHECK(reduced_cluster.x == expected_reduced_cluster.x);
CHECK(reduced_cluster.y == expected_reduced_cluster.y);
CHECK(std::equal(reduced_cluster.data.begin(),
reduced_cluster.data.begin() + 4,
expected_reduced_cluster.data.begin()));
}
TEST_CASE("Test reduce to 3x3 Cluster", "[.cluster]") {
auto [cluster, expected_reduced_cluster] = GENERATE(
std::make_tuple(ClusterTypesLargerThan2x2{Cluster<int, 3, 3>{
5, 5, {1, 1, 1, 1, 3, 1, 1, 1, 1}}},
Cluster<int, 3, 3>{5, 5, {1, 1, 1, 1, 3, 1, 1, 1, 1}}),
std::make_tuple(
ClusterTypesLargerThan2x2{Cluster<int, 4, 4>{
5, 5, {2, 2, 1, 1, 2, 2, 1, 1, 1, 1, 3, 1, 1, 1, 1, 1}}},
Cluster<int, 3, 3>{4, 6, {2, 2, 1, 2, 2, 1, 1, 1, 3}}),
std::make_tuple(
ClusterTypesLargerThan2x2{Cluster<int, 4, 4>{
5, 5, {1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 3, 1, 1, 1, 1, 1}}},
Cluster<int, 3, 3>{5, 6, {1, 2, 2, 1, 2, 2, 1, 3, 1}}),
std::make_tuple(
ClusterTypesLargerThan2x2{Cluster<int, 4, 4>{
5, 5, {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 2, 1, 1, 2, 2}}},
Cluster<int, 3, 3>{5, 5, {1, 1, 1, 1, 3, 2, 1, 2, 2}}),
std::make_tuple(
ClusterTypesLargerThan2x2{Cluster<int, 4, 4>{
5, 5, {1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 1, 2, 2, 1, 1}}},
Cluster<int, 3, 3>{4, 5, {1, 1, 1, 2, 2, 3, 2, 2, 1}}),
std::make_tuple(ClusterTypesLargerThan2x2{Cluster<int, 5, 5>{
5, 5, {1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 2, 2, 3,
1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1}}},
Cluster<int, 3, 3>{4, 5, {1, 2, 1, 2, 2, 3, 1, 2, 1}}));
auto reduced_cluster = std::visit(
[](const auto &clustertype) { return reduce_to_3x3(clustertype); },
cluster);
CHECK(reduced_cluster.x == expected_reduced_cluster.x);
CHECK(reduced_cluster.y == expected_reduced_cluster.y);
CHECK(std::equal(reduced_cluster.data.begin(),
reduced_cluster.data.begin() + 9,
expected_reduced_cluster.data.begin()));
} }

View File

@@ -279,8 +279,7 @@ TEST_CASE("Read cluster from multiple frame file", "[.with-data]") {
} }
} }
TEST_CASE("Write cluster with potential padding", TEST_CASE("Write cluster with potential padding", "[.with-data][.ClusterFile]") {
"[.with-data][.ClusterFile]") {
using ClusterType = Cluster<double, 3, 3>; using ClusterType = Cluster<double, 3, 3>;
@@ -291,7 +290,7 @@ TEST_CASE("Write cluster with potential padding",
ClusterFile<ClusterType> file(fpath, 1000, "w"); ClusterFile<ClusterType> file(fpath, 1000, "w");
ClusterVector<ClusterType> clustervec(2); ClusterVector<ClusterType> clustervec(2);
uint16_t coordinate = 5; int16_t coordinate = 5;
clustervec.push_back(ClusterType{ clustervec.push_back(ClusterType{
coordinate, coordinate, {0., 0., 0., 0., 0., 0., 0., 0., 0.}}); coordinate, coordinate, {0., 0., 0., 0., 0., 0., 0., 0., 0.}});
clustervec.push_back(ClusterType{ clustervec.push_back(ClusterType{

View File

@@ -57,7 +57,6 @@ class ClusterFinderMTWrapper
size_t m_sink_size() const { return this->m_sink.sizeGuess(); } size_t m_sink_size() const { return this->m_sink.sizeGuess(); }
}; };
TEST_CASE("multithreaded cluster finder", "[.with-data]") { TEST_CASE("multithreaded cluster finder", "[.with-data]") {
auto fpath = auto fpath =
test_data_path() / "raw/moench03/cu_half_speed_master_4.json"; test_data_path() / "raw/moench03/cu_half_speed_master_4.json";
@@ -82,8 +81,7 @@ TEST_CASE("multithreaded cluster finder", "[.with-data]") {
CHECK(cf.m_input_queues_are_empty() == true); CHECK(cf.m_input_queues_are_empty() == true);
for (size_t i = 0; i < n_frames_pd; ++i) { for (size_t i = 0; i < n_frames_pd; ++i) {
auto frame = file.read_frame(); cf.find_clusters(file.read_frame().view<uint16_t>());
cf.find_clusters(frame.view<uint16_t>());
} }
cf.stop(); cf.stop();

View File

@@ -99,8 +99,7 @@ TEST_CASE("Read data from a jungfrau 500k single port raw file",
} }
TEST_CASE("Read frame numbers from a raw file", "[.with-data]") { TEST_CASE("Read frame numbers from a raw file", "[.with-data]") {
auto fpath = auto fpath = test_data_path() / "raw/eiger" / "eiger_500k_16bit_master_0.json";
test_data_path() / "raw/eiger" / "eiger_500k_16bit_master_0.json";
REQUIRE(std::filesystem::exists(fpath)); REQUIRE(std::filesystem::exists(fpath));
// we know this file has 3 frames with frame numbers 14, 15, 16 // we know this file has 3 frames with frame numbers 14, 15, 16
@@ -289,7 +288,8 @@ TEST_CASE("check find_geometry", "[.with-data]") {
} }
} }
TEST_CASE("Open multi module file with ROI", "[.with-data]") { TEST_CASE("Open multi module file with ROI",
"[.with-data]") {
auto fpath = test_data_path() / "raw/SingleChipROI/Data_master_0.json"; auto fpath = test_data_path() / "raw/SingleChipROI/Data_master_0.json";
REQUIRE(std::filesystem::exists(fpath)); REQUIRE(std::filesystem::exists(fpath));
@@ -319,4 +319,4 @@ TEST_CASE("Read file with unordered frames", "[.with-data]") {
REQUIRE(std::filesystem::exists(fpath)); REQUIRE(std::filesystem::exists(fpath));
File f(fpath); File f(fpath);
REQUIRE_THROWS((f.read_frame())); REQUIRE_THROWS((f.read_frame()));
} }

View File

@@ -64,12 +64,6 @@ const std::string &RawFileNameComponents::base_name() const {
const std::string &RawFileNameComponents::ext() const { return m_ext; } const std::string &RawFileNameComponents::ext() const { return m_ext; }
int RawFileNameComponents::file_index() const { return m_file_index; } int RawFileNameComponents::file_index() const { return m_file_index; }
ScanParameters::ScanParameters(const bool enabled, const DACIndex dac,
const int start, const int stop, const int step,
const int64_t settleTime)
: m_enabled(enabled), m_dac(dac), m_start(start), m_stop(stop),
m_step(step), m_settleTime(settleTime){};
// "[enabled\ndac dac 4\nstart 500\nstop 2200\nstep 5\nsettleTime 100us\n]" // "[enabled\ndac dac 4\nstart 500\nstop 2200\nstep 5\nsettleTime 100us\n]"
ScanParameters::ScanParameters(const std::string &par) { ScanParameters::ScanParameters(const std::string &par) {
std::istringstream iss(par.substr(1, par.size() - 2)); std::istringstream iss(par.substr(1, par.size() - 2));
@@ -78,7 +72,7 @@ ScanParameters::ScanParameters(const std::string &par) {
if (line == "enabled") { if (line == "enabled") {
m_enabled = true; m_enabled = true;
} else if (line.find("dac") != std::string::npos) { } else if (line.find("dac") != std::string::npos) {
m_dac = StringTo<DACIndex>(line.substr(4)); m_dac = line.substr(4);
} else if (line.find("start") != std::string::npos) { } else if (line.find("start") != std::string::npos) {
m_start = std::stoi(line.substr(6)); m_start = std::stoi(line.substr(6));
} else if (line.find("stop") != std::string::npos) { } else if (line.find("stop") != std::string::npos) {
@@ -93,9 +87,8 @@ int ScanParameters::start() const { return m_start; }
int ScanParameters::stop() const { return m_stop; } int ScanParameters::stop() const { return m_stop; }
void ScanParameters::increment_stop() { m_stop += 1; } void ScanParameters::increment_stop() { m_stop += 1; }
int ScanParameters::step() const { return m_step; } int ScanParameters::step() const { return m_step; }
DACIndex ScanParameters::dac() const { return m_dac; } const std::string &ScanParameters::dac() const { return m_dac; }
bool ScanParameters::enabled() const { return m_enabled; } bool ScanParameters::enabled() const { return m_enabled; }
int64_t ScanParameters::settleTime() const { return m_settleTime; }
RawMasterFile::RawMasterFile(const std::filesystem::path &fpath) RawMasterFile::RawMasterFile(const std::filesystem::path &fpath)
: m_fnc(fpath) { : m_fnc(fpath) {
@@ -177,7 +170,6 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) {
std::ifstream ifs(fpath); std::ifstream ifs(fpath);
json j; json j;
ifs >> j; ifs >> j;
double v = j["Version"]; double v = j["Version"];
m_version = fmt::format("{:.1f}", v); m_version = fmt::format("{:.1f}", v);
@@ -189,9 +181,7 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) {
j["Geometry"]["x"]}; // TODO: isnt it only available for version > 7.1? j["Geometry"]["x"]}; // TODO: isnt it only available for version > 7.1?
// - try block default should be 1x1 // - try block default should be 1x1
m_image_size_in_bytes = m_image_size_in_bytes = j["Image Size in bytes"];
v < 8.0 ? j["Image Size in bytes"] : j["Image Size"];
m_frames_in_file = j["Frames in File"]; m_frames_in_file = j["Frames in File"];
m_pixels_y = j["Pixels"]["y"]; m_pixels_y = j["Pixels"]["y"];
m_pixels_x = j["Pixels"]["x"]; m_pixels_x = j["Pixels"]["x"];
@@ -216,6 +206,7 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) {
} catch (const json::out_of_range &e) { } catch (const json::out_of_range &e) {
// keep the optional empty // keep the optional empty
} }
// ---------------------------------------------------------------- // ----------------------------------------------------------------
// Special treatment of analog flag because of Moench03 // Special treatment of analog flag because of Moench03
try { try {
@@ -236,6 +227,7 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) {
m_analog_flag = 0; m_analog_flag = 0;
} }
//----------------------------------------------------------------- //-----------------------------------------------------------------
try { try {
m_quad = j.at("Quad"); m_quad = j.at("Quad");
} catch (const json::out_of_range &e) { } catch (const json::out_of_range &e) {
@@ -247,6 +239,7 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) {
// }catch (const json::out_of_range &e) { // }catch (const json::out_of_range &e) {
// m_adc_mask = 0; // m_adc_mask = 0;
// } // }
try { try {
int digital_flag = j.at("Digital Flag"); int digital_flag = j.at("Digital Flag");
if (digital_flag) { if (digital_flag) {
@@ -255,6 +248,7 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) {
} catch (const json::out_of_range &e) { } catch (const json::out_of_range &e) {
// keep the optional empty // keep the optional empty
} }
try { try {
m_transceiver_flag = j.at("Transceiver Flag"); m_transceiver_flag = j.at("Transceiver Flag");
if (m_transceiver_flag) { if (m_transceiver_flag) {
@@ -263,20 +257,10 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) {
} catch (const json::out_of_range &e) { } catch (const json::out_of_range &e) {
// keep the optional empty // keep the optional empty
} }
try { try {
if (v < 8.0) { std::string scan_parameters = j.at("Scan Parameters");
std::string scan_parameters = j.at("Scan Parameters"); m_scan_parameters = ScanParameters(scan_parameters);
m_scan_parameters = ScanParameters(scan_parameters);
} else {
auto json_obj = j.at("Scan Parameters");
m_scan_parameters = ScanParameters(
json_obj.at("enable").get<int>(),
static_cast<DACIndex>(json_obj.at("dacInd").get<int>()),
json_obj.at("start offset").get<int>(),
json_obj.at("stop offset").get<int>(),
json_obj.at("step size").get<int>(),
json_obj.at("dac settle time ns").get<int>());
}
if (v < 7.21) { if (v < 7.21) {
m_scan_parameters m_scan_parameters
.increment_stop(); // adjust for endpoint being included .increment_stop(); // adjust for endpoint being included
@@ -284,7 +268,6 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) {
} catch (const json::out_of_range &e) { } catch (const json::out_of_range &e) {
// not a scan // not a scan
} }
try { try {
m_udp_interfaces_per_module = {j.at("Number of UDP Interfaces"), 1}; m_udp_interfaces_per_module = {j.at("Number of UDP Interfaces"), 1};
} catch (const json::out_of_range &e) { } catch (const json::out_of_range &e) {
@@ -294,36 +277,35 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) {
m_udp_interfaces_per_module = {1, 2}; m_udp_interfaces_per_module = {1, 2};
} }
} }
try { try {
ROI tmp_roi; ROI tmp_roi;
if (v < 8.0) { auto obj = j.at("Receiver Roi");
auto obj = j.at("Receiver Roi"); tmp_roi.xmin = obj.at("xmin");
tmp_roi.xmin = obj.at("xmin"); tmp_roi.xmax = obj.at("xmax");
tmp_roi.xmax = obj.at("xmax"); tmp_roi.ymin = obj.at("ymin");
tmp_roi.ymin = obj.at("ymin"); tmp_roi.ymax = obj.at("ymax");
tmp_roi.ymax = obj.at("ymax");
} else {
// TODO: for now only handle single ROI
auto obj = j.at("Receiver Rois");
tmp_roi.xmin = obj[0].at("xmin");
tmp_roi.xmax = obj[0].at("xmax");
tmp_roi.ymin = obj[0].at("ymin");
tmp_roi.ymax = obj[0].at("ymax");
}
// if any of the values are set update the roi // if any of the values are set update the roi
if (tmp_roi.xmin != 4294967295 || tmp_roi.xmax != 4294967295 || if (tmp_roi.xmin != 4294967295 || tmp_roi.xmax != 4294967295 ||
tmp_roi.ymin != 4294967295 || tmp_roi.ymax != 4294967295) { tmp_roi.ymin != 4294967295 || tmp_roi.ymax != 4294967295) {
tmp_roi.xmax++;
tmp_roi.ymax++; if (v < 7.21) {
tmp_roi.xmax++; // why is it updated
tmp_roi.ymax++;
}
m_roi = tmp_roi; m_roi = tmp_roi;
} }
} catch (const json::out_of_range &e) { } catch (const json::out_of_range &e) {
LOG(TLogLevel::logERROR) << e.what() << std::endl; std::cout << e.what() << std::endl;
// leave the optional empty // leave the optional empty
} }
// if we have an roi we need to update the geometry for the subfiles
if (m_roi) {
}
// Update detector type for Moench // Update detector type for Moench
// TODO! How does this work with old .raw master files? // TODO! How does this work with old .raw master files?
#ifdef AARE_VERBOSE #ifdef AARE_VERBOSE

View File

@@ -51,7 +51,7 @@ TEST_CASE("Parse scan parameters") {
ScanParameters s("[enabled\ndac dac 4\nstart 500\nstop 2200\nstep " ScanParameters s("[enabled\ndac dac 4\nstart 500\nstop 2200\nstep "
"5\nsettleTime 100us\n]"); "5\nsettleTime 100us\n]");
REQUIRE(s.enabled()); REQUIRE(s.enabled());
REQUIRE(s.dac() == DACIndex::DAC_4); REQUIRE(s.dac() == "dac 4");
REQUIRE(s.start() == 500); REQUIRE(s.start() == 500);
REQUIRE(s.stop() == 2200); REQUIRE(s.stop() == 2200);
REQUIRE(s.step() == 5); REQUIRE(s.step() == 5);
@@ -60,7 +60,7 @@ TEST_CASE("Parse scan parameters") {
TEST_CASE("A disabled scan") { TEST_CASE("A disabled scan") {
ScanParameters s("[disabled]"); ScanParameters s("[disabled]");
REQUIRE_FALSE(s.enabled()); REQUIRE_FALSE(s.enabled());
REQUIRE(s.dac() == DACIndex::DAC_0); REQUIRE(s.dac() == "");
REQUIRE(s.start() == 0); REQUIRE(s.start() == 0);
REQUIRE(s.stop() == 0); REQUIRE(s.stop() == 0);
REQUIRE(s.step() == 0); REQUIRE(s.step() == 0);
@@ -68,7 +68,7 @@ TEST_CASE("A disabled scan") {
TEST_CASE("Parse a master file in .json format", "[.integration]") { TEST_CASE("Parse a master file in .json format", "[.integration]") {
auto fpath = auto fpath =
test_data_path() / "raw" / "jungfrau" / "jungfrau_single_master_0.json"; test_data_path() / "jungfrau" / "jungfrau_single_master_0.json";
REQUIRE(std::filesystem::exists(fpath)); REQUIRE(std::filesystem::exists(fpath));
RawMasterFile f(fpath); RawMasterFile f(fpath);
@@ -224,41 +224,6 @@ TEST_CASE("Parse a master file in .raw format", "[.integration]") {
// Packets Caught Mask : 64 bytes // Packets Caught Mask : 64 bytes
} }
TEST_CASE("Parse a master file in new .json format",
"[.integration][.width-data]") {
auto file_path =
test_data_path() / "raw" / "newmythen03" / "run_87_master_0.json";
REQUIRE(std::filesystem::exists(file_path));
RawMasterFile f(file_path);
// Version : 8.0
REQUIRE(f.version() == "8.0");
REQUIRE(f.detector_type() == DetectorType::Mythen3);
// Timing Mode : auto
REQUIRE(f.timing_mode() == TimingMode::Auto);
// Geometry : [2, 1]
REQUIRE(f.geometry().col == 2);
REQUIRE(f.geometry().row == 1);
// Image Size : 5120 bytes
REQUIRE(f.image_size_in_bytes() == 5120);
REQUIRE(f.scan_parameters().enabled() == false);
REQUIRE(f.scan_parameters().dac() == DACIndex::DAC_0);
REQUIRE(f.scan_parameters().start() == 0);
REQUIRE(f.scan_parameters().stop() == 0);
REQUIRE(f.scan_parameters().step() == 0);
REQUIRE(f.scan_parameters().settleTime() == 0);
auto roi = f.roi().value();
REQUIRE(roi.xmin == 0);
REQUIRE(roi.xmax == 2559);
REQUIRE(roi.ymin == -1);
REQUIRE(roi.ymax == -1);
}
TEST_CASE("Read eiger master file", "[.integration]") { TEST_CASE("Read eiger master file", "[.integration]") {
auto fpath = test_data_path() / "eiger" / "eiger_500k_32bit_master_0.json"; auto fpath = test_data_path() / "eiger" / "eiger_500k_32bit_master_0.json";
REQUIRE(std::filesystem::exists(fpath)); REQUIRE(std::filesystem::exists(fpath));
@@ -327,4 +292,4 @@ TEST_CASE("Read eiger master file", "[.integration]") {
// "Packets Caught Mask": "64 bytes" // "Packets Caught Mask": "64 bytes"
// } // }
// } // }
} }

View File

@@ -115,186 +115,4 @@ template <> FrameDiscardPolicy StringTo(const std::string &arg) {
// template <> TimingMode StringTo<TimingMode>(std::string mode); // template <> TimingMode StringTo<TimingMode>(std::string mode);
template <> DACIndex StringTo(const std::string &arg) {
if (arg == "dac 0")
return DACIndex::DAC_0;
else if (arg == "dac 1")
return DACIndex::DAC_1;
else if (arg == "dac 2")
return DACIndex::DAC_2;
else if (arg == "dac 3")
return DACIndex::DAC_3;
else if (arg == "dac 4")
return DACIndex::DAC_4;
else if (arg == "dac 5")
return DACIndex::DAC_5;
else if (arg == "dac 6")
return DACIndex::DAC_6;
else if (arg == "dac 7")
return DACIndex::DAC_7;
else if (arg == "dac 8")
return DACIndex::DAC_8;
else if (arg == "dac 9")
return DACIndex::DAC_9;
else if (arg == "dac 10")
return DACIndex::DAC_10;
else if (arg == "dac 11")
return DACIndex::DAC_11;
else if (arg == "dac 12")
return DACIndex::DAC_12;
else if (arg == "dac 13")
return DACIndex::DAC_13;
else if (arg == "dac 14")
return DACIndex::DAC_14;
else if (arg == "dac 15")
return DACIndex::DAC_15;
else if (arg == "dac 16")
return DACIndex::DAC_16;
else if (arg == "dac 17")
return DACIndex::DAC_17;
else if (arg == "vsvp")
return DACIndex::VSVP;
else if (arg == "vtrim")
return DACIndex::VTRIM;
else if (arg == "vrpreamp")
return DACIndex::VRPREAMP;
else if (arg == "vrshaper")
return DACIndex::VRSHAPER;
else if (arg == "vsvn")
return DACIndex::VSVN;
else if (arg == "vtgstv")
return DACIndex::VTGSTV;
else if (arg == "vcmp_ll")
return DACIndex::VCMP_LL;
else if (arg == "vcmp_lr")
return DACIndex::VCMP_LR;
else if (arg == "vcal")
return DACIndex::VCAL;
else if (arg == "vcmp_rl")
return DACIndex::VCMP_RL;
else if (arg == "rxb_rb")
return DACIndex::RXB_RB;
else if (arg == "rxb_lb")
return DACIndex::RXB_LB;
else if (arg == "vcmp_rr")
return DACIndex::VCMP_RR;
else if (arg == "vcp")
return DACIndex::VCP;
else if (arg == "vcn")
return DACIndex::VCN;
else if (arg == "vishaper")
return DACIndex::VISHAPER;
else if (arg == "vthreshold")
return DACIndex::VTHRESHOLD;
else if (arg == "vref_ds")
return DACIndex::VREF_DS;
else if (arg == "vout_cm")
return DACIndex::VOUT_CM;
else if (arg == "vin_cm")
return DACIndex::VIN_CM;
else if (arg == "vref_comp")
return DACIndex::VREF_COMP;
else if (arg == "vb_comp")
return DACIndex::VB_COMP;
else if (arg == "vdd_prot")
return DACIndex::VDD_PROT;
else if (arg == "vin_com")
return DACIndex::VIN_COM;
else if (arg == "vref_prech")
return DACIndex::VREF_PRECH;
else if (arg == "vb_pixbuf")
return DACIndex::VB_PIXBUF;
else if (arg == "vb_ds")
return DACIndex::VB_DS;
else if (arg == "vref_h_adc")
return DACIndex::VREF_H_ADC;
else if (arg == "vb_comp_fe")
return DACIndex::VB_COMP_FE;
else if (arg == "vb_comp_adc")
return DACIndex::VB_COMP_ADC;
else if (arg == "vcom_cds")
return DACIndex::VCOM_CDS;
else if (arg == "vref_rstore")
return DACIndex::VREF_RSTORE;
else if (arg == "vb_opa_1st")
return DACIndex::VB_OPA_1ST;
else if (arg == "vref_comp_fe")
return DACIndex::VREF_COMP_FE;
else if (arg == "vcom_adc1")
return DACIndex::VCOM_ADC1;
else if (arg == "vref_l_adc")
return DACIndex::VREF_L_ADC;
else if (arg == "vref_cds")
return DACIndex::VREF_CDS;
else if (arg == "vb_cs")
return DACIndex::VB_CS;
else if (arg == "vb_opa_fd")
return DACIndex::VB_OPA_FD;
else if (arg == "vcom_adc2")
return DACIndex::VCOM_ADC2;
else if (arg == "vcassh")
return DACIndex::VCASSH;
else if (arg == "vth2")
return DACIndex::VTH2;
else if (arg == "vrshaper_n")
return DACIndex::VRSHAPER_N;
else if (arg == "vipre_out")
return DACIndex::VIPRE_OUT;
else if (arg == "vth3")
return DACIndex::VTH3;
else if (arg == "vth1")
return DACIndex::VTH1;
else if (arg == "vicin")
return DACIndex::VICIN;
else if (arg == "vcas")
return DACIndex::VCAS;
else if (arg == "vcal_n")
return DACIndex::VCAL_N;
else if (arg == "vipre")
return DACIndex::VIPRE;
else if (arg == "vcal_p")
return DACIndex::VCAL_P;
else if (arg == "vdcsh")
return DACIndex::VDCSH;
else if (arg == "vbp_colbuf")
return DACIndex::VBP_COLBUF;
else if (arg == "vb_sda")
return DACIndex::VB_SDA;
else if (arg == "vcasc_sfp")
return DACIndex::VCASC_SFP;
else if (arg == "vipre_cds")
return DACIndex::VIPRE_CDS;
else if (arg == "ibias_sfp")
return DACIndex::IBIAS_SFP;
else if (arg == "trimbits")
return DACIndex::TRIMBIT_SCAN;
else if (arg == "highvoltage")
return DACIndex::HIGH_VOLTAGE;
else if (arg == "iodelay")
return DACIndex::IO_DELAY;
else if (arg == "temp_adc")
return DACIndex::TEMPERATURE_ADC;
else if (arg == "temp_fpga")
return DACIndex::TEMPERATURE_FPGA;
else if (arg == "temp_fpgaext")
return DACIndex::TEMPERATURE_FPGAEXT;
else if (arg == "temp_10ge")
return DACIndex::TEMPERATURE_10GE;
else if (arg == "temp_dcdc")
return DACIndex::TEMPERATURE_DCDC;
else if (arg == "temp_sodl")
return DACIndex::TEMPERATURE_SODL;
else if (arg == "temp_sodr")
return DACIndex::TEMPERATURE_SODR;
else if (arg == "temp_fpgafl")
return DACIndex::TEMPERATURE_FPGA2;
else if (arg == "temp_fpgafr")
return DACIndex::TEMPERATURE_FPGA3;
else if (arg == "temp_slowadc")
return DACIndex::SLOW_ADC_TEMP;
else
throw std::invalid_argument("Could not decode DACIndex from: \"" + arg +
"\"");
}
} // namespace aare } // namespace aare

View File

@@ -7,7 +7,6 @@ Script to update VERSION file with semantic versioning if provided as an argumen
import sys import sys
import os import os
import re import re
from datetime import datetime
from packaging.version import Version, InvalidVersion from packaging.version import Version, InvalidVersion
@@ -27,9 +26,9 @@ def get_version():
# Check at least one argument is passed # Check at least one argument is passed
if len(sys.argv) < 2: if len(sys.argv) < 2:
version = datetime.today().strftime('%Y.%-m.%-d') return "0.0.0"
else:
version = sys.argv[1] version = sys.argv[1]
try: try:
v = Version(version) # normalize check if version follows PEP 440 specification v = Version(version) # normalize check if version follows PEP 440 specification
@@ -55,4 +54,4 @@ def write_version_to_file(version):
if __name__ == "__main__": if __name__ == "__main__":
version = get_version() version = get_version()
write_version_to_file(version) write_version_to_file(version)