Mirror of https://github.com/slsdetectorgroup/aare.git (synced 2026-01-01 01:41:29 +01:00)

Compare commits: feature_id ... dev/highz/ (10 commits)

| SHA1 |
|---|
| 1b8657c524 |
| de1fd62e66 |
| 6b894a5083 |
| faaa831238 |
| 12498dacaa |
| 7ea20c6b9d |
| 29a2374446 |
| efb16ea8c1 |
| 7aa3fcfcd0 |
| 836dddbc26 |
@@ -388,7 +388,7 @@ set(SourceFiles
     ${CMAKE_CURRENT_SOURCE_DIR}/src/RawSubFile.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/utils/task.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/utils/ifstream_helpers.cpp
 )
 
 add_library(aare_core STATIC ${SourceFiles})
 target_include_directories(aare_core PUBLIC
@@ -412,8 +412,6 @@ target_link_libraries(
 
 )
 
-set_property(TARGET aare_core PROPERTY POSITION_INDEPENDENT_CODE ON)
-
 if(AARE_TESTS)
     target_compile_definitions(aare_core PRIVATE AARE_TESTS)
 endif()
@@ -433,6 +431,10 @@ set_target_properties(aare_core PROPERTIES
     PUBLIC_HEADER "${PUBLICHEADERS}"
 )
 
+if (AARE_PYTHON_BINDINGS)
+    set_property(TARGET aare_core PROPERTY POSITION_INDEPENDENT_CODE ON)
+endif()
+
 if(AARE_TESTS)
     set(TestSources
         ${CMAKE_CURRENT_SOURCE_DIR}/src/algorithm.test.cpp
@@ -463,7 +465,6 @@ if(AARE_TESTS)
     target_sources(tests PRIVATE ${TestSources} )
 endif()
 
-
 if(AARE_MASTER_PROJECT)
     install(TARGETS aare_core aare_compiler_flags
         EXPORT "${TARGETS_EXPORT_NAME}"
@@ -473,6 +474,7 @@ if(AARE_MASTER_PROJECT)
     )
 endif()
 
+set(CMAKE_POSITION_INDEPENDENT_CODE ON)
 set(CMAKE_INSTALL_RPATH $ORIGIN)
 set(CMAKE_BUILD_WITH_INSTALL_RPATH FALSE)
 
RELEASE.md (20 changed lines)
@@ -1,27 +1,16 @@
 # Release notes
 
-### 2025.10.1
-
-Bugfixes:
-
-- File supports reading new master json file format (multiple ROI's not supported yet)
-
-### 2025.8.22
+### head
 
 Features:
 
 - Apply calibration works in G0 if passes a 2D calibration and pedestal
 - count pixels that switch
 - calculate pedestal (also g0 version)
-- NDArray::view() needs an lvalue to reduce issues with the view outliving the array
 
 
-Bugfixes:
+### 2025.07.18
 
-- Now using glibc 2.17 in conda builds (was using the host)
-- Fixed shifted pixels in clusters close to the edge of a frame
-
-### 2025.7.18
-
 Features:
 
@@ -35,7 +24,7 @@ Bugfixes:
 - Removed unused file: ClusterFile.cpp
 
 
-### 2025.5.22
+### 2025.05.22
 
 Features:
 
@@ -48,6 +37,3 @@ Bugfixes:
 
 
 
-
-
-
@@ -15,7 +15,7 @@ FetchContent_MakeAvailable(benchmark)
 
 add_executable(benchmarks)
 
-target_sources(benchmarks PRIVATE ndarray_benchmark.cpp calculateeta_benchmark.cpp reduce_benchmark.cpp)
+target_sources(benchmarks PRIVATE ndarray_benchmark.cpp calculateeta_benchmark.cpp)
 
 # Link Google Benchmark and other necessary libraries
 target_link_libraries(benchmarks PRIVATE benchmark::benchmark aare_core aare_compiler_flags)
@@ -1,168 +0,0 @@ (file deleted)
#include "aare/Cluster.hpp"
#include <benchmark/benchmark.h>

using namespace aare;

class ClustersForReduceFixture : public benchmark::Fixture {
  public:
    Cluster<int, 5, 5> cluster_5x5{};
    Cluster<int, 3, 3> cluster_3x3{};

  private:
    using benchmark::Fixture::SetUp;

    void SetUp([[maybe_unused]] const benchmark::State &state) override {
        int temp_data[25] = {1, 1, 1, 1, 1, 1, 1, 2, 1, 1,
                             1, 2, 3, 1, 2, 1, 1, 1, 1, 2};
        std::copy(std::begin(temp_data), std::end(temp_data),
                  std::begin(cluster_5x5.data));

        cluster_5x5.x = 5;
        cluster_5x5.y = 5;

        int temp_data2[9] = {1, 1, 1, 2, 3, 1, 2, 2, 1};
        std::copy(std::begin(temp_data2), std::end(temp_data2),
                  std::begin(cluster_3x3.data));

        cluster_3x3.x = 5;
        cluster_3x3.y = 5;
    }

    // void TearDown(::benchmark::State& state) {
    // }
};

template <typename T>
Cluster<T, 3, 3, int16_t> reduce_to_3x3(const Cluster<T, 5, 5, int16_t> &c) {
    Cluster<T, 3, 3, int16_t> result;

    // Write out the sums in the hope that the compiler can optimize this
    std::array<T, 9> sum_3x3_subclusters;

    // Write out the sums in the hope that the compiler can optimize this
    sum_3x3_subclusters[0] = c.data[0] + c.data[1] + c.data[2] + c.data[5] +
                             c.data[6] + c.data[7] + c.data[10] + c.data[11] +
                             c.data[12];
    sum_3x3_subclusters[1] = c.data[1] + c.data[2] + c.data[3] + c.data[6] +
                             c.data[7] + c.data[8] + c.data[11] + c.data[12] +
                             c.data[13];
    sum_3x3_subclusters[2] = c.data[2] + c.data[3] + c.data[4] + c.data[7] +
                             c.data[8] + c.data[9] + c.data[12] + c.data[13] +
                             c.data[14];
    sum_3x3_subclusters[3] = c.data[5] + c.data[6] + c.data[7] + c.data[10] +
                             c.data[11] + c.data[12] + c.data[15] + c.data[16] +
                             c.data[17];
    sum_3x3_subclusters[4] = c.data[6] + c.data[7] + c.data[8] + c.data[11] +
                             c.data[12] + c.data[13] + c.data[16] + c.data[17] +
                             c.data[18];
    sum_3x3_subclusters[5] = c.data[7] + c.data[8] + c.data[9] + c.data[12] +
                             c.data[13] + c.data[14] + c.data[17] + c.data[18] +
                             c.data[19];
    sum_3x3_subclusters[6] = c.data[10] + c.data[11] + c.data[12] + c.data[15] +
                             c.data[16] + c.data[17] + c.data[20] + c.data[21] +
                             c.data[22];
    sum_3x3_subclusters[7] = c.data[11] + c.data[12] + c.data[13] + c.data[16] +
                             c.data[17] + c.data[18] + c.data[21] + c.data[22] +
                             c.data[23];
    sum_3x3_subclusters[8] = c.data[12] + c.data[13] + c.data[14] + c.data[17] +
                             c.data[18] + c.data[19] + c.data[22] + c.data[23] +
                             c.data[24];

    auto index = std::max_element(sum_3x3_subclusters.begin(),
                                  sum_3x3_subclusters.end()) -
                 sum_3x3_subclusters.begin();

    switch (index) {
    case 0:
        result.x = c.x - 1;
        result.y = c.y + 1;
        result.data = {c.data[0], c.data[1], c.data[2], c.data[5], c.data[6],
                       c.data[7], c.data[10], c.data[11], c.data[12]};
        break;
    case 1:
        result.x = c.x;
        result.y = c.y + 1;
        result.data = {c.data[1], c.data[2], c.data[3], c.data[6], c.data[7],
                       c.data[8], c.data[11], c.data[12], c.data[13]};
        break;
    case 2:
        result.x = c.x + 1;
        result.y = c.y + 1;
        result.data = {c.data[2], c.data[3], c.data[4], c.data[7], c.data[8],
                       c.data[9], c.data[12], c.data[13], c.data[14]};
        break;
    case 3:
        result.x = c.x - 1;
        result.y = c.y;
        result.data = {c.data[5], c.data[6], c.data[7],
                       c.data[10], c.data[11], c.data[12],
                       c.data[15], c.data[16], c.data[17]};
        break;
    case 4:
        result.x = c.x + 1;
        result.y = c.y;
        result.data = {c.data[6], c.data[7], c.data[8],
                       c.data[11], c.data[12], c.data[13],
                       c.data[16], c.data[17], c.data[18]};
        break;
    case 5:
        result.x = c.x + 1;
        result.y = c.y;
        result.data = {c.data[7], c.data[8], c.data[9],
                       c.data[12], c.data[13], c.data[14],
                       c.data[17], c.data[18], c.data[19]};
        break;
    case 6:
        result.x = c.x + 1;
        result.y = c.y - 1;
        result.data = {c.data[10], c.data[11], c.data[12],
                       c.data[15], c.data[16], c.data[17],
                       c.data[20], c.data[21], c.data[22]};
        break;
    case 7:
        result.x = c.x + 1;
        result.y = c.y - 1;
        result.data = {c.data[11], c.data[12], c.data[13],
                       c.data[16], c.data[17], c.data[18],
                       c.data[21], c.data[22], c.data[23]};
        break;
    case 8:
        result.x = c.x + 1;
        result.y = c.y - 1;
        result.data = {c.data[12], c.data[13], c.data[14],
                       c.data[17], c.data[18], c.data[19],
                       c.data[22], c.data[23], c.data[24]};
        break;
    }
    return result;
}

BENCHMARK_F(ClustersForReduceFixture, Reduce2x2)(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        benchmark::DoNotOptimize(reduce_to_2x2<int, 3, 3, int16_t>(
            cluster_3x3)); // make sure compiler evaluates the expression
    }
}

BENCHMARK_F(ClustersForReduceFixture, SpecificReduce2x2)(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        benchmark::DoNotOptimize(reduce_to_2x2<int>(cluster_3x3));
    }
}

BENCHMARK_F(ClustersForReduceFixture, Reduce3x3)(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        benchmark::DoNotOptimize(
            reduce_to_3x3<int, 5, 5, int16_t>(cluster_5x5));
    }
}

BENCHMARK_F(ClustersForReduceFixture, SpecificReduce3x3)(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        benchmark::DoNotOptimize(reduce_to_3x3<int>(cluster_5x5));
    }
}
@@ -3,14 +3,3 @@ python:
   - 3.12
   - 3.13
 
-c_compiler:
-  - gcc # [linux]
-
-c_stdlib:
-  - sysroot # [linux]
-
-cxx_compiler:
-  - gxx # [linux]
-
-c_stdlib_version: # [linux]
-  - 2.17 # [linux]
@@ -16,8 +16,6 @@ build:
 
 requirements:
   build:
-    - {{ compiler('c') }}
-    - {{ stdlib("c") }}
     - {{ compiler('cxx') }}
     - cmake
     - ninja
@@ -12,11 +12,4 @@ ClusterVector
    :members:
    :undoc-members:
    :private-members:
 
-
-**Free Functions:**
-
-.. doxygenfunction:: aare::reduce_to_3x3(const ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>>&)
-
-.. doxygenfunction:: aare::reduce_to_2x2(const ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>>&)
-
@@ -33,17 +33,4 @@ C++ functions that support the ClusterVector or to view it as a numpy array.
    :members:
    :undoc-members:
    :show-inheritance:
    :inherited-members:
-
-
-**Free Functions:**
-
-.. autofunction:: reduce_to_3x3
-   :noindex:
-
-   Reduce a single Cluster to 3x3 by taking the 3x3 subcluster with highest photon energy.
-
-.. autofunction:: reduce_to_2x2
-   :noindex:
-
-   Reduce a single Cluster to 2x2 by taking the 2x2 subcluster with highest photon energy.
@@ -28,7 +28,7 @@ enum class pixel : int {
 template <typename T> struct Eta2 {
     double x;
     double y;
-    int c{0};
+    int c;
     T sum;
 };
 
@@ -70,8 +70,6 @@ calculate_eta2(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cl) {
     size_t index_bottom_left_max_2x2_subcluster =
         (int(c / (ClusterSizeX - 1))) * ClusterSizeX + c % (ClusterSizeX - 1);
 
-    // calculate direction of gradient
-
     // check that cluster center is in max subcluster
     if (cluster_center_index != index_bottom_left_max_2x2_subcluster &&
         cluster_center_index != index_bottom_left_max_2x2_subcluster + 1 &&
@@ -130,15 +128,12 @@ Eta2<T> calculate_eta2(const Cluster<T, 2, 2, int16_t> &cl) {
     Eta2<T> eta{};
 
     if ((cl.data[0] + cl.data[1]) != 0)
-        eta.x = static_cast<double>(cl.data[1]) /
-                (cl.data[0] + cl.data[1]); // between (0,1) the closer to zero
-                                           // left value probably larger
+        eta.x = static_cast<double>(cl.data[1]) / (cl.data[0] + cl.data[1]);
     if ((cl.data[0] + cl.data[2]) != 0)
-        eta.y = static_cast<double>(cl.data[2]) /
-                (cl.data[0] + cl.data[2]); // between (0,1) the closer to zero
-                                           // bottom value probably larger
+        eta.y = static_cast<double>(cl.data[2]) / (cl.data[0] + cl.data[2]);
     eta.sum = cl.sum();
+    eta.c = static_cast<int>(corner::cBottomLeft); // TODO! This is not correct,
+                                                   // but need to put something
     return eta;
 }
 
@@ -155,11 +150,13 @@ template <typename T> Eta2<T> calculate_eta3(const Cluster<T, 3, 3> &cl) {
 
     eta.sum = sum;
 
+    eta.c = corner::cBottomLeft;
+
     if ((cl.data[3] + cl.data[4] + cl.data[5]) != 0)
         eta.x = static_cast<double>(-cl.data[3] + cl.data[3 + 2]) /
-                (cl.data[3] + cl.data[4] + cl.data[5]); // (-1,1)
+                (cl.data[3] + cl.data[4] + cl.data[5]);
 
     if ((cl.data[1] + cl.data[4] + cl.data[7]) != 0)
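The reflowed 2x2 overload above keeps the same definition of eta, only folded onto single lines. As a quick numeric check of that formula (a sketch with made-up pixel values, not code from the repository; it only assumes that data[0] and data[1] share one direction and data[0] and data[2] the other, as the removed inline comments suggest):

    // Hypothetical 2x2 cluster values, in the data[] order used by the overload above.
    double d[4] = {10, 30, 20, 40};
    double eta_x = d[1] / (d[0] + d[1]); // 30 / 40 = 0.75
    double eta_y = d[2] / (d[0] + d[2]); // 20 / 30 ≈ 0.667
    // Both values land in (0,1); the closer to zero, the larger d[0] is relative to its neighbour.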
include/aare/ChunkedPedestal.hpp (new file, 152 lines)
@@ -0,0 +1,152 @@
#pragma once
#include "aare/Frame.hpp"
#include "aare/NDArray.hpp"
#include "aare/NDView.hpp"
#include <cstddef>

// JMulvey
// This is a new way to do pedestals (inspired by Dominic's cluster finder).
// Instead of pedestal tracking, we split the data (photon data) up into chunks (say 50K frames).
// For each chunk, we look at the spectra and fit to the noise peak. When we run the cluster finder, we then use this chunked pedestal data.
// The smaller the chunk size, the more accurate, but also the longer it takes to process.
// It is essentially a pre-processing step.
// Ideally this new class will do that processing.
// But for now we will just implement a method to pass in the chunked pedestal values directly (I have my own script which does it for now).
// I've cut this down a lot, knowing full well it'll need changing if we want to merge it with main (happy to do that once I get it working for what I need).

namespace aare {

/**
 * @brief Calculate the pedestal of a series of frames. Can be used as
 * standalone but mostly used in the ClusterFinder.
 *
 * @tparam SUM_TYPE type of the sum
 */
template <typename SUM_TYPE = double> class ChunkedPedestal {
    uint32_t m_rows;
    uint32_t m_cols;
    uint32_t m_n_chunks;
    uint64_t m_current_frame_number;
    uint64_t m_current_chunk_number;

    NDArray<SUM_TYPE, 3> m_mean;
    NDArray<SUM_TYPE, 3> m_std;
    uint32_t m_chunk_size;

  public:
    ChunkedPedestal(uint32_t rows, uint32_t cols, uint32_t chunk_size = 50000, uint32_t n_chunks = 10)
        : m_rows(rows), m_cols(cols), m_chunk_size(chunk_size), m_n_chunks(n_chunks),
          m_mean(NDArray<SUM_TYPE, 3>({n_chunks, rows, cols})), m_std(NDArray<SUM_TYPE, 3>({n_chunks, rows, cols})) {
        assert(rows > 0 && cols > 0 && chunk_size > 0);
        m_mean = 0;
        m_std = 0;
        m_current_frame_number = 0;
        m_current_chunk_number = 0;
    }
    ~ChunkedPedestal() = default;

    NDArray<SUM_TYPE, 3> mean() { return m_mean; }
    NDArray<SUM_TYPE, 3> std() { return m_std; }

    void set_frame_number(uint64_t frame_number) {
        m_current_frame_number = frame_number;
        m_current_chunk_number = std::floor(frame_number / m_chunk_size);

        // Debug
        // if (frame_number % 10000 == 0)
        // {
        //     std::cout << "frame_number: " << frame_number << " -> chunk_number: " << m_current_chunk_number << " pedestal at (100, 100): " << m_mean(m_current_chunk_number, 100, 100) << std::endl;
        // }

        if (m_current_chunk_number >= m_n_chunks) {
            m_current_chunk_number = 0;
            throw std::runtime_error(
                "Chunk number exceeds the number of chunks");
        }
    }

    SUM_TYPE mean(const uint32_t row, const uint32_t col) const {
        return m_mean(m_current_chunk_number, row, col);
    }

    SUM_TYPE std(const uint32_t row, const uint32_t col) const {
        return m_std(m_current_chunk_number, row, col);
    }

    SUM_TYPE *get_mean_chunk_ptr() {
        return &m_mean(m_current_chunk_number, 0, 0);
    }

    SUM_TYPE *get_std_chunk_ptr() {
        return &m_std(m_current_chunk_number, 0, 0);
    }

    void clear() {
        m_mean = 0;
        m_std = 0;
        m_n_chunks = 0;
    }

    // Probably don't need to do this one at a time, but let's keep it simple for now
    template <typename T> void push_mean(NDView<T, 2> frame, uint32_t chunk_number) {
        assert(frame.size() == m_rows * m_cols);

        if (chunk_number >= m_n_chunks)
            throw std::runtime_error(
                "Chunk number is larger than the number of chunks");

        // TODO! move away from m_rows, m_cols
        if (frame.shape() != std::array<ssize_t, 2>{m_rows, m_cols}) {
            throw std::runtime_error(
                "Frame shape does not match pedestal shape");
        }

        for (size_t row = 0; row < m_rows; row++) {
            for (size_t col = 0; col < m_cols; col++) {
                push_mean<T>(row, col, chunk_number, frame(row, col));
            }
        }
    }

    template <typename T> void push_std(NDView<T, 2> frame, uint32_t chunk_number) {
        assert(frame.size() == m_rows * m_cols);

        if (chunk_number >= m_n_chunks)
            throw std::runtime_error(
                "Chunk number is larger than the number of chunks");

        // TODO! move away from m_rows, m_cols
        if (frame.shape() != std::array<ssize_t, 2>{m_rows, m_cols}) {
            throw std::runtime_error(
                "Frame shape does not match pedestal shape");
        }

        for (size_t row = 0; row < m_rows; row++) {
            for (size_t col = 0; col < m_cols; col++) {
                push_std<T>(row, col, chunk_number, frame(row, col));
            }
        }
    }

    // pixel level operations (should be refactored to allow users to implement
    // their own pixel level operations)
    template <typename T>
    void push_mean(const uint32_t row, const uint32_t col, const uint32_t chunk_number, const T val_) {
        m_mean(chunk_number, row, col) = val_;
    }

    template <typename T>
    void push_std(const uint32_t row, const uint32_t col, const uint32_t chunk_number, const T val_) {
        m_std(chunk_number, row, col) = val_;
    }

    // getter functions
    uint32_t rows() const { return m_rows; }
    uint32_t cols() const { return m_cols; }
};

} // namespace aare
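The comment block at the top of the new header describes the intended workflow: compute per-chunk mean and noise maps offline, load them in, then let the finder pick the right chunk per frame. A minimal sketch of how the class could be fed pre-computed statistics (the geometry, chunk count and fill values here are invented for illustration):

    #include "aare/ChunkedPedestal.hpp"
    #include "aare/NDArray.hpp"

    using namespace aare;

    int main() {
        // Hypothetical geometry: 256x256 pixels, 2 chunks of 50000 frames each.
        ChunkedPedestal<double> pedestal(256, 256, 50000, 2);

        // Per-chunk mean and noise maps computed elsewhere (constants here just for illustration).
        NDArray<double, 2> mean({256, 256});
        NDArray<double, 2> noise({256, 256});
        mean = 1000.0;
        noise = 15.0;

        // Load the same maps into both chunks.
        for (uint32_t chunk = 0; chunk < 2; ++chunk) {
            pedestal.push_mean(mean.view(), chunk);
            pedestal.push_std(noise.view(), chunk);
        }

        // Selecting a frame picks the chunk it belongs to; lookups then read from that chunk.
        pedestal.set_frame_number(60000);   // frame 60000 -> chunk 1
        double m = pedestal.mean(100, 100); // 1000.0
        double s = pedestal.std(100, 100);  // 15.0
        return m > s ? 0 : 1;
    }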
include/aare/Cluster.hpp (158 changed lines, Executable file → Normal file)
@@ -8,7 +8,6 @@
 
 #pragma once
 
-#include "logger.hpp"
 #include <algorithm>
 #include <array>
 #include <cstdint>
@@ -75,163 +74,6 @@ struct Cluster {
     }
 };
 
-/**
- * @brief Reduce a cluster to a 2x2 cluster by selecting the 2x2 block with the
- * highest sum.
- * @param c Cluster to reduce
- * @return reduced cluster
- */
-template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
-          typename CoordType = int16_t>
-Cluster<T, 2, 2, CoordType>
-reduce_to_2x2(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &c) {
-
-    static_assert(ClusterSizeX >= 2 && ClusterSizeY >= 2,
-                  "Cluster sizes must be at least 2x2 for reduction to 2x2");
-
-    // TODO maybe add sanity check and check that center is in max subcluster
-    Cluster<T, 2, 2, CoordType> result;
-
-    auto [sum, index] = c.max_sum_2x2();
-
-    int16_t cluster_center_index =
-        (ClusterSizeX / 2) + (ClusterSizeY / 2) * ClusterSizeX;
-
-    int16_t index_bottom_left_max_2x2_subcluster =
-        (int(index / (ClusterSizeX - 1))) * ClusterSizeX +
-        index % (ClusterSizeX - 1);
-
-    result.x =
-        c.x + (index_bottom_left_max_2x2_subcluster - cluster_center_index) %
-                  ClusterSizeX;
-
-    result.y =
-        c.y - (index_bottom_left_max_2x2_subcluster - cluster_center_index) /
-                  ClusterSizeX;
-    result.data = {
-        c.data[index_bottom_left_max_2x2_subcluster],
-        c.data[index_bottom_left_max_2x2_subcluster + 1],
-        c.data[index_bottom_left_max_2x2_subcluster + ClusterSizeX],
-        c.data[index_bottom_left_max_2x2_subcluster + ClusterSizeX + 1]};
-    return result;
-}
-
-template <typename T>
-Cluster<T, 2, 2, int16_t> reduce_to_2x2(const Cluster<T, 3, 3, int16_t> &c) {
-    Cluster<T, 2, 2, int16_t> result;
-
-    auto [s, i] = c.max_sum_2x2();
-    switch (i) {
-    case 0:
-        result.x = c.x - 1;
-        result.y = c.y + 1;
-        result.data = {c.data[0], c.data[1], c.data[3], c.data[4]};
-        break;
-    case 1:
-        result.x = c.x;
-        result.y = c.y + 1;
-        result.data = {c.data[1], c.data[2], c.data[4], c.data[5]};
-        break;
-    case 2:
-        result.x = c.x - 1;
-        result.y = c.y;
-        result.data = {c.data[3], c.data[4], c.data[6], c.data[7]};
-        break;
-    case 3:
-        result.x = c.x;
-        result.y = c.y;
-        result.data = {c.data[4], c.data[5], c.data[7], c.data[8]};
-        break;
-    }
-
-    return result;
-}
-
-template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
-          typename CoordType = int16_t>
-inline std::pair<T, uint16_t>
-max_3x3_sum(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cluster) {
-
-    if constexpr (ClusterSizeX == 3 && ClusterSizeY == 3) {
-        return std::make_pair(cluster.sum(), 0);
-    } else {
-
-        size_t index = 0;
-        T max_3x3_subcluster_sum = 0;
-        for (size_t i = 0; i < ClusterSizeY - 2; ++i) {
-            for (size_t j = 0; j < ClusterSizeX - 2; ++j) {
-
-                T sum = cluster.data[i * ClusterSizeX + j] +
-                        cluster.data[i * ClusterSizeX + j + 1] +
-                        cluster.data[i * ClusterSizeX + j + 2] +
-                        cluster.data[(i + 1) * ClusterSizeX + j] +
-                        cluster.data[(i + 1) * ClusterSizeX + j + 1] +
-                        cluster.data[(i + 1) * ClusterSizeX + j + 2] +
-                        cluster.data[(i + 2) * ClusterSizeX + j] +
-                        cluster.data[(i + 2) * ClusterSizeX + j + 1] +
-                        cluster.data[(i + 2) * ClusterSizeX + j + 2];
-                if (sum > max_3x3_subcluster_sum) {
-                    max_3x3_subcluster_sum = sum;
-                    index = i * (ClusterSizeX - 2) + j;
-                }
-            }
-        }
-
-        return std::make_pair(max_3x3_subcluster_sum, index);
-    }
-}
-
-/**
- * @brief Reduce a cluster to a 3x3 cluster by selecting the 3x3 block with the
- * highest sum.
- * @param c Cluster to reduce
- * @return reduced cluster
- */
-template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
-          typename CoordType = int16_t>
-Cluster<T, 3, 3, CoordType>
-reduce_to_3x3(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &c) {
-
-    static_assert(ClusterSizeX >= 3 && ClusterSizeY >= 3,
-                  "Cluster sizes must be at least 3x3 for reduction to 3x3");
-
-    Cluster<T, 3, 3, CoordType> result;
-
-    // TODO maybe add sanity check and check that center is in max subcluster
-
-    auto [sum, index] = max_3x3_sum(c);
-
-    int16_t cluster_center_index =
-        (ClusterSizeX / 2) + (ClusterSizeY / 2) * ClusterSizeX;
-
-    int16_t index_center_max_3x3_subcluster =
-        (int(index / (ClusterSizeX - 2))) * ClusterSizeX + ClusterSizeX +
-        index % (ClusterSizeX - 2) + 1;
-
-    int16_t index_3x3_subcluster_cluster_center =
-        int((cluster_center_index - 1 - ClusterSizeX) / ClusterSizeX) *
-            (ClusterSizeX - 2) +
-        (cluster_center_index - 1 - ClusterSizeX) % ClusterSizeX;
-
-    result.x =
-        c.x + (index % (ClusterSizeX - 2) -
-               (index_3x3_subcluster_cluster_center % (ClusterSizeX - 2)));
-    result.y =
-        c.y - (index / (ClusterSizeX - 2) -
-               (index_3x3_subcluster_cluster_center / (ClusterSizeX - 2)));
-
-    result.data = {c.data[index_center_max_3x3_subcluster - ClusterSizeX - 1],
-                   c.data[index_center_max_3x3_subcluster - ClusterSizeX],
-                   c.data[index_center_max_3x3_subcluster - ClusterSizeX + 1],
-                   c.data[index_center_max_3x3_subcluster - 1],
-                   c.data[index_center_max_3x3_subcluster],
-                   c.data[index_center_max_3x3_subcluster + 1],
-                   c.data[index_center_max_3x3_subcluster + ClusterSizeX - 1],
-                   c.data[index_center_max_3x3_subcluster + ClusterSizeX],
-                   c.data[index_center_max_3x3_subcluster + ClusterSizeX + 1]};
-    return result;
-}
-
 // Type Traits for is_cluster_type
 template <typename T>
 struct is_cluster : std::false_type {}; // Default case: Not a Cluster
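The generic reduce_to_2x2 removed above maps the subcluster index returned by max_sum_2x2() back to pixel coordinates. A short worked check of that arithmetic against the hand-written 3x3 specialization removed in the same hunk (index value chosen for illustration):

    // For a 3x3 cluster (ClusterSizeX = 3), suppose max_sum_2x2() reports subcluster index 2.
    // cluster_center_index                = 3/2 + (3/2)*3          = 1 + 3 = 4
    // index_bottom_left_max_2x2_subcluster = int(2 / 2) * 3 + 2 % 2 = 3
    // result.x = c.x + (3 - 4) % 3         = c.x - 1   (C++ % keeps the sign of the dividend)
    // result.y = c.y - (3 - 4) / 3         = c.y - 0   = c.y
    // which matches case 2 of the removed 3x3 specialization: result.x = c.x - 1; result.y = c.y;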
@@ -4,9 +4,11 @@
 #include "aare/Dtype.hpp"
 #include "aare/NDArray.hpp"
 #include "aare/NDView.hpp"
-#include "aare/Pedestal.hpp"
+// #include "aare/Pedestal.hpp"
+#include "aare/ChunkedPedestal.hpp"
 #include "aare/defs.hpp"
 #include <cstddef>
+#include <iostream>
 
 namespace aare {
 
@@ -17,11 +19,13 @@ class ClusterFinder {
     const PEDESTAL_TYPE m_nSigma;
     const PEDESTAL_TYPE c2;
     const PEDESTAL_TYPE c3;
-    Pedestal<PEDESTAL_TYPE> m_pedestal;
+    ChunkedPedestal<PEDESTAL_TYPE> m_pedestal;
     ClusterVector<ClusterType> m_clusters;
+    const uint32_t ClusterSizeX;
+    const uint32_t ClusterSizeY;
 
-    static const uint8_t ClusterSizeX = ClusterType::cluster_size_x;
-    static const uint8_t ClusterSizeY = ClusterType::cluster_size_y;
+    static const uint8_t SavedClusterSizeX = ClusterType::cluster_size_x;
+    static const uint8_t SavedClusterSizeY = ClusterType::cluster_size_y;
     using CT = typename ClusterType::value_type;
 
   public:
@@ -34,25 +38,36 @@ class ClusterFinder {
      *
      */
     ClusterFinder(Shape<2> image_size, PEDESTAL_TYPE nSigma = 5.0,
-                  size_t capacity = 1000000)
+                  size_t capacity = 1000000,
+                  uint32_t chunk_size = 50000, uint32_t n_chunks = 10,
+                  uint32_t cluster_size_x = 3, uint32_t cluster_size_y = 3)
         : m_image_size(image_size), m_nSigma(nSigma),
-          c2(sqrt((ClusterSizeY + 1) / 2 * (ClusterSizeX + 1) / 2)),
-          c3(sqrt(ClusterSizeX * ClusterSizeY)),
-          m_pedestal(image_size[0], image_size[1]), m_clusters(capacity) {
+          c2(sqrt((cluster_size_y + 1) / 2 * (cluster_size_x + 1) / 2)),
+          c3(sqrt(cluster_size_x * cluster_size_y)),
+          ClusterSizeX(cluster_size_x), ClusterSizeY(cluster_size_y),
+          m_pedestal(image_size[0], image_size[1], chunk_size, n_chunks), m_clusters(capacity) {
         LOG(logDEBUG) << "ClusterFinder: "
                       << "image_size: " << image_size[0] << "x" << image_size[1]
                       << ", nSigma: " << nSigma << ", capacity: " << capacity;
     }
 
-    void push_pedestal_frame(NDView<FRAME_TYPE, 2> frame) {
-        m_pedestal.push(frame);
+    // void push_pedestal_frame(NDView<FRAME_TYPE, 2> frame) {
+    //     m_pedestal.push(frame);
+    // }
+    void push_pedestal_mean(NDView<PEDESTAL_TYPE, 2> frame, uint32_t chunk_number) {
+        m_pedestal.push_mean(frame, chunk_number);
     }
+    void push_pedestal_std(NDView<PEDESTAL_TYPE, 2> frame, uint32_t chunk_number) {
+        m_pedestal.push_std(frame, chunk_number);
+    }
+    // This is here purely to keep the compiler happy for now
+    void push_pedestal_frame(NDView<FRAME_TYPE, 2> frame) {}
 
     NDArray<PEDESTAL_TYPE, 2> pedestal() { return m_pedestal.mean(); }
     NDArray<PEDESTAL_TYPE, 2> noise() { return m_pedestal.std(); }
     void clear_pedestal() { m_pedestal.clear(); }
 
     /**
      * @brief Move the clusters from the ClusterVector in the ClusterFinder to a
      * new ClusterVector and return it.
      * @param realloc_same_capacity if true the new ClusterVector will have the
@@ -69,11 +84,13 @@ class ClusterFinder {
         return tmp;
     }
     void find_clusters(NDView<FRAME_TYPE, 2> frame, uint64_t frame_number = 0) {
-        // // TODO! deal with even size clusters
         // // currently 3,3 -> +/- 1
         // // 4,4 -> +/- 2
         int dy = ClusterSizeY / 2;
         int dx = ClusterSizeX / 2;
+        int dy2 = SavedClusterSizeY / 2;
+        int dx2 = SavedClusterSizeX / 2;
+
         int has_center_pixel_x =
             ClusterSizeX %
             2; // for even sized clusters there is no proper cluster center and
@@ -81,27 +98,39 @@ class ClusterFinder {
         int has_center_pixel_y = ClusterSizeY % 2;
 
         m_clusters.set_frame_number(frame_number);
+        m_pedestal.set_frame_number(frame_number);
+        auto mean_ptr = m_pedestal.get_mean_chunk_ptr();
+        auto std_ptr = m_pedestal.get_std_chunk_ptr();
+
         for (int iy = 0; iy < frame.shape(0); iy++) {
+            size_t row_offset = iy * frame.shape(1);
             for (int ix = 0; ix < frame.shape(1); ix++) {
 
+                // PEDESTAL_TYPE rms = m_pedestal.std(iy, ix);
+                PEDESTAL_TYPE rms = std_ptr[row_offset + ix];
+                if (rms == 0) continue;
+
                 PEDESTAL_TYPE max = std::numeric_limits<FRAME_TYPE>::min();
                 PEDESTAL_TYPE total = 0;
 
                 // What can we short circuit here?
-                PEDESTAL_TYPE rms = m_pedestal.std(iy, ix);
-                PEDESTAL_TYPE value = (frame(iy, ix) - m_pedestal.mean(iy, ix));
+                // PEDESTAL_TYPE value = (frame(iy, ix) - m_pedestal.mean(iy, ix));
+                PEDESTAL_TYPE value = (frame(iy, ix) - mean_ptr[row_offset + ix]);
 
                 if (value < -m_nSigma * rms)
                     continue; // NEGATIVE_PEDESTAL go to next pixel
                 // TODO! No pedestal update???
 
                 for (int ir = -dy; ir < dy + has_center_pixel_y; ir++) {
+                    size_t inner_row_offset = row_offset + (ir * frame.shape(1));
                     for (int ic = -dx; ic < dx + has_center_pixel_x; ic++) {
                         if (ix + ic >= 0 && ix + ic < frame.shape(1) &&
                             iy + ir >= 0 && iy + ir < frame.shape(0)) {
-                            PEDESTAL_TYPE val =
-                                frame(iy + ir, ix + ic) -
-                                m_pedestal.mean(iy + ir, ix + ic);
+                            // if (m_pedestal.std(iy + ir, ix + ic) == 0) continue;
+                            if (std_ptr[inner_row_offset + ix + ic] == 0) continue;
+
+                            // PEDESTAL_TYPE val = frame(iy + ir, ix + ic) - m_pedestal.mean(iy + ir, ix + ic);
+                            PEDESTAL_TYPE val = frame(iy + ir, ix + ic) - mean_ptr[inner_row_offset + ix + ic];
 
                             total += val;
                             max = std::max(max, val);
@@ -109,24 +138,64 @@ class ClusterFinder {
                         }
                     }
                 }
 
-                if ((max > m_nSigma * rms)) {
-                    if (value < max)
-                        continue; // Not max go to the next pixel
-                                  // but also no pedestal update
-                } else if (total > c3 * m_nSigma * rms) {
+                // if (frame_number < 1)
+                // if ( (ix == 115 && iy == 122) )
+                // if ( (ix == 175 && iy == 175) )
+                // {
+                //     // std::cout << std::endl;
+                //     // std::cout << std::endl;
+                //     // std::cout << "frame_number: " << frame_number << std::endl;
+                //     // std::cout << "(" << ix << ", " << iy << "): " << std::endl;
+                //     // std::cout << "frame.shape: (" << frame.shape(0) << ", " << frame.shape(1) << "): " << std::endl;
+                //     // std::cout << "frame(175, 175): " << frame(175, 175) << std::endl;
+                //     // std::cout << "frame(77, 98): " << frame(77, 98) << std::endl;
+                //     // std::cout << "frame(82, 100): " << frame(82, 100) << std::endl;
+                //     // std::cout << "frame(iy, ix): " << frame(iy, ix) << std::endl;
+                //     // std::cout << "mean_ptr[row_offset + ix]: " << mean_ptr[row_offset + ix] << std::endl;
+                //     // std::cout << "total: " << total << std::endl;
+
+                //     std::cout << "(" << ix << ", " << iy << "): " << frame(iy, ix) << std::endl;
+                // }
+
+                // if ((max > m_nSigma * rms)) {
+                //     if (value < max)
+                //         continue; // Not max go to the next pixel
+                //         // but also no pedestal update
+                // } else
+                if (total > c3 * m_nSigma * rms) {
                     // pass
                 } else {
                     // m_pedestal.push(iy, ix, frame(iy, ix)); // Safe option
-                    m_pedestal.push_fast(
-                        iy, ix,
-                        frame(iy,
-                              ix)); // Assume we have reached n_samples in the
-                                    // pedestal, slight performance improvement
+                    // Not needed for chunked pedestal
+                    // m_pedestal.push_fast(
+                    //     iy, ix,
+                    //     frame(iy,
+                    //           ix)); // Assume we have reached n_samples in the
+                    //                 // pedestal, slight performance improvement
                     continue; // It was a pedestal value nothing to store
                 }
 
                 // Store cluster
                 if (value == max) {
+
+                    // if (total < 0)
+                    // {
+                    //     std::cout << "" << std::endl;
+                    //     std::cout << "frame_number: " << frame_number << std::endl;
+                    //     std::cout << "ix: " << ix << std::endl;
+                    //     std::cout << "iy: " << iy << std::endl;
+                    //     std::cout << "frame(iy, ix): " << frame(iy, ix) << std::endl;
+                    //     std::cout << "m_pedestal.mean(iy, ix): " << m_pedestal.mean(iy, ix) << std::endl;
+                    //     std::cout << "m_pedestal.std(iy, ix): " << m_pedestal.std(iy, ix) << std::endl;
+                    //     std::cout << "max: " << max << std::endl;
+                    //     std::cout << "value: " << value << std::endl;
+                    //     std::cout << "m_nSigma * rms: " << (m_nSigma * rms) << std::endl;
+                    //     std::cout << "total: " << total << std::endl;
+                    //     std::cout << "c3 * m_nSigma * rms: " << (c3 * m_nSigma * rms) << std::endl;
+                    // }
+
                     ClusterType cluster{};
                     cluster.x = ix;
                     cluster.y = iy;
@@ -135,16 +204,22 @@ class ClusterFinder {
                     // It's worth redoing the look since most of the time we
                     // don't have a photon
                     int i = 0;
-                    for (int ir = -dy; ir < dy + has_center_pixel_y; ir++) {
-                        for (int ic = -dx; ic < dx + has_center_pixel_y; ic++) {
+                    for (int ir = -dy2; ir < dy2 + has_center_pixel_y; ir++) {
+                        size_t inner_row_offset = row_offset + (ir * frame.shape(1));
+                        for (int ic = -dx2; ic < dx2 + has_center_pixel_y; ic++) {
                             if (ix + ic >= 0 && ix + ic < frame.shape(1) &&
                                 iy + ir >= 0 && iy + ir < frame.shape(0)) {
-                                CT tmp =
-                                    static_cast<CT>(frame(iy + ir, ix + ic)) -
-                                    static_cast<CT>(
-                                        m_pedestal.mean(iy + ir, ix + ic));
-                                cluster.data[i] =
-                                    tmp; // Watch for out of bounds access
+                                // if (m_pedestal.std(iy + ir, ix + ic) == 0) continue;
+                                // if (std_ptr[inner_row_offset + ix + ic] == 0) continue;
+
+                                // CT tmp = static_cast<CT>(frame(iy + ir, ix + ic)) - static_cast<CT>(m_pedestal.mean(iy + ir, ix + ic));
+                                // CT tmp = 0;
+                                if (std_ptr[inner_row_offset + ix + ic] != 0)
+                                {
+                                    CT tmp = static_cast<CT>(frame(iy + ir, ix + ic)) - static_cast<CT>(mean_ptr[inner_row_offset + ix + ic]);
+                                    cluster.data[i] = tmp; // Watch for out of bounds access
+                                }
                             }
                             i++;
                         }
@@ -158,4 +233,4 @@ class ClusterFinder {
     }
 };
 
 } // namespace aare
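Putting the pieces of these hunks together, the single-threaded finder is now driven roughly as follows. This is a sketch, not code from the branch: the geometry, dtypes, chunk layout and dummy data are invented, and only the calls shown in the diff above (constructor, push_pedestal_mean, push_pedestal_std, find_clusters) are used.

    #include "aare/ClusterFinder.hpp"
    #include "aare/NDArray.hpp"

    using namespace aare;

    int main() {
        // Hypothetical setup: 256x256 uint16 frames, 3x3 clusters stored,
        // pedestal split into 2 chunks of 50000 frames each.
        using MyCluster = Cluster<int32_t, 3, 3>;
        ClusterFinder<MyCluster, uint16_t, double> finder(
            {256, 256}, /*nSigma=*/5.0, /*capacity=*/1000000,
            /*chunk_size=*/50000, /*n_chunks=*/2,
            /*cluster_size_x=*/3, /*cluster_size_y=*/3);

        // Per-chunk pedestal statistics computed offline (e.g. by fitting the noise peak).
        NDArray<double, 2> chunk_mean({256, 256});
        NDArray<double, 2> chunk_std({256, 256});
        chunk_mean = 1000.0;
        chunk_std = 15.0;
        for (uint32_t chunk = 0; chunk < 2; ++chunk) {
            finder.push_pedestal_mean(chunk_mean.view(), chunk);
            finder.push_pedestal_std(chunk_std.view(), chunk);
        }

        // Photon data: find_clusters() now selects the pedestal chunk from the frame number.
        NDArray<uint16_t, 2> frame({256, 256});
        frame = 1000; // dummy data at the pedestal level
        for (uint64_t frame_number = 0; frame_number < 100000; ++frame_number) {
            finder.find_clusters(frame.view(), frame_number);
        }
        return 0;
    }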
@@ -20,9 +20,15 @@ enum class FrameType {
 struct FrameWrapper {
     FrameType type;
     uint64_t frame_number;
+    // NDArray<T, 2> data;
     NDArray<uint16_t, 2> data;
+    // NDArray<double, 2> data;
+    // void* data_ptr;
+    // std::type_index data_type;
+    uint32_t chunk_number;
 };
 
 
 /**
  * @brief ClusterFinderMT is a multi-threaded version of ClusterFinder. It uses
  * a producer-consumer queue to distribute the frames to the threads. The
@@ -68,6 +74,7 @@ class ClusterFinderMT {
         while (!m_stop_requested || !q->isEmpty()) {
             if (FrameWrapper *frame = q->frontPtr(); frame != nullptr) {
 
+
                 switch (frame->type) {
                 case FrameType::DATA:
                     cf->find_clusters(frame->data.view(), frame->frame_number);
@@ -121,7 +128,9 @@ class ClusterFinderMT {
      * @param n_threads number of threads to use
      */
     ClusterFinderMT(Shape<2> image_size, PEDESTAL_TYPE nSigma = 5.0,
-                    size_t capacity = 2000, size_t n_threads = 3)
+                    size_t capacity = 2000, size_t n_threads = 3,
+                    uint32_t chunk_size = 50000, uint32_t n_chunks = 10,
+                    uint32_t cluster_size_x = 3, uint32_t cluster_size_y = 3)
         : m_n_threads(n_threads) {
 
         LOG(logDEBUG1) << "ClusterFinderMT: "
@@ -134,7 +143,7 @@ class ClusterFinderMT {
             m_cluster_finders.push_back(
                 std::make_unique<
                     ClusterFinder<ClusterType, FRAME_TYPE, PEDESTAL_TYPE>>(
-                    image_size, nSigma, capacity));
+                    image_size, nSigma, capacity, chunk_size, n_chunks, cluster_size_x, cluster_size_y));
         }
         for (size_t i = 0; i < n_threads; i++) {
             m_input_queues.emplace_back(std::make_unique<InputQueue>(200));
@@ -208,7 +217,7 @@ class ClusterFinderMT {
      */
     void push_pedestal_frame(NDView<FRAME_TYPE, 2> frame) {
         FrameWrapper fw{FrameType::PEDESTAL, 0,
-                        NDArray(frame)}; // TODO! copies the data!
+                        NDArray(frame), 0}; // TODO! copies the data!
 
         for (auto &queue : m_input_queues) {
             while (!queue->write(fw)) {
@@ -217,6 +226,23 @@ class ClusterFinderMT {
             }
         }
     }
+    void push_pedestal_mean(NDView<PEDESTAL_TYPE, 2> frame, uint32_t chunk_number) {
+        if (!m_processing_threads_stopped) {
+            throw std::runtime_error("ClusterFinderMT is still running");
+        }
+        for (auto &cf : m_cluster_finders) {
+            cf->push_pedestal_mean(frame, chunk_number);
+        }
+    }
+    void push_pedestal_std(NDView<PEDESTAL_TYPE, 2> frame, uint32_t chunk_number) {
+        if (!m_processing_threads_stopped) {
+            throw std::runtime_error("ClusterFinderMT is still running");
+        }
+        for (auto &cf : m_cluster_finders) {
+            cf->push_pedestal_std(frame, chunk_number);
+        }
+    }
 
     /**
      * @brief Push the frame to the queue of the next available thread. Function
      * returns once the frame is in a queue.
@@ -224,7 +250,10 @@ class ClusterFinderMT {
      */
     void find_clusters(NDView<FRAME_TYPE, 2> frame, uint64_t frame_number = 0) {
         FrameWrapper fw{FrameType::DATA, frame_number,
-                        NDArray(frame)}; // TODO! copies the data!
+                        NDArray(frame), 0}; // TODO! copies the data!
 
+        // std::cout << "frame(122, 115): " << frame(122, 115) << std::endl;
+
         while (!m_input_queues[m_current_thread % m_n_threads]->write(fw)) {
             std::this_thread::sleep_for(m_default_wait);
         }
@@ -281,4 +310,4 @@ class ClusterFinderMT {
     // }
 };
 
 } // namespace aare
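Because the new push_pedestal_mean/push_pedestal_std forward the maps to every per-thread ClusterFinder and refuse to run while the processing threads are active, the chunked statistics have to be loaded while the finder is stopped. A sketch of that ordering, kept as comments since neither the ClusterFinderMT template parameter order nor its start/stop helpers appear in this hunk:

    // Sketch only; parameter order and lifecycle calls are assumptions.
    //
    //   ClusterFinderMT<Cluster<int32_t, 3, 3>, uint16_t, double> mt(
    //       {256, 256}, 5.0, /*capacity=*/2000, /*n_threads=*/3,
    //       /*chunk_size=*/50000, /*n_chunks=*/2,
    //       /*cluster_size_x=*/3, /*cluster_size_y=*/3);
    //
    //   // 1. With the processing threads stopped: load the per-chunk statistics.
    //   //    Otherwise push_pedestal_mean/push_pedestal_std throw
    //   //    "ClusterFinderMT is still running".
    //   mt.push_pedestal_mean(chunk_mean.view(), /*chunk_number=*/0);
    //   mt.push_pedestal_std(chunk_std.view(), /*chunk_number=*/0);
    //
    //   // 2. With the threads running: feed data frames; the frame number selects the chunk.
    //   mt.find_clusters(frame.view(), frame_number);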
@@ -32,7 +32,8 @@ class ClusterVector; // Forward declaration
  */
 template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
           typename CoordType>
-class ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> {
+class ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>>
+{
 
     std::vector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> m_data{};
     int32_t m_frame_number{0}; // TODO! Check frame number size and type
@@ -172,40 +173,4 @@ class ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> {
     }
 };
 
-/**
- * @brief Reduce a cluster to a 2x2 cluster by selecting the 2x2 block with the
- * highest sum.
- * @param cv Clustervector containing clusters to reduce
- * @return Clustervector with reduced clusters
- */
-template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
-          typename CoordType = uint16_t>
-ClusterVector<Cluster<T, 2, 2, CoordType>> reduce_to_2x2(
-    const ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>>
-        &cv) {
-    ClusterVector<Cluster<T, 2, 2, CoordType>> result;
-    for (const auto &c : cv) {
-        result.push_back(reduce_to_2x2(c));
-    }
-    return result;
-}
-
-/**
- * @brief Reduce a cluster to a 3x3 cluster by selecting the 3x3 block with the
- * highest sum.
- * @param cv Clustervector containing clusters to reduce
- * @return Clustervector with reduced clusters
- */
-template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
-          typename CoordType = uint16_t>
-ClusterVector<Cluster<T, 3, 3, CoordType>> reduce_to_3x3(
-    const ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>>
-        &cv) {
-    ClusterVector<Cluster<T, 3, 3, CoordType>> result;
-    for (const auto &c : cv) {
-        result.push_back(reduce_to_3x3(c));
-    }
-    return result;
-}
-
 } // namespace aare
@@ -105,7 +105,7 @@ class Frame {
      * @tparam T type of the pixels
      * @return NDView<T, 2>
      */
-    template <typename T> NDView<T, 2> view() & {
+    template <typename T> NDView<T, 2> view() {
         std::array<ssize_t, 2> shape = {static_cast<ssize_t>(m_rows),
                                         static_cast<ssize_t>(m_cols)};
         T *data = reinterpret_cast<T *>(m_data);
@@ -93,12 +93,6 @@ class NDView : public ArrayExpr<NDView<T, Ndim>, Ndim> {
         : buffer_(buffer), strides_(c_strides<Ndim>(shape)), shape_(shape),
           size_(std::accumulate(std::begin(shape), std::end(shape), 1,
                                 std::multiplies<>())) {}
 
-    // stride-aware constructor
-    NDView(T* buffer, std::array<ssize_t, Ndim> shape, std::array<ssize_t, Ndim> strides)
-        : buffer_(buffer), shape_(shape), strides_(strides),
-          size_(std::accumulate(std::begin(shape), std::end(shape), 1,
-                                std::multiplies<>())) {}
-
     template <typename... Ix>
     std::enable_if_t<sizeof...(Ix) == Ndim, T &> operator()(Ix... index) {
@@ -42,16 +42,14 @@ class RawFileNameComponents {
 
 class ScanParameters {
     bool m_enabled = false;
-    DACIndex m_dac{};
+    std::string m_dac;
     int m_start = 0;
     int m_stop = 0;
     int m_step = 0;
-    int64_t m_settleTime = 0; // [ns]
+    // TODO! add settleTime, requires string to time conversion
 
   public:
     ScanParameters(const std::string &par);
-    ScanParameters(const bool enabled, const DACIndex dac, const int start,
-                   const int stop, const int step, const int64_t settleTime);
     ScanParameters() = default;
     ScanParameters(const ScanParameters &) = default;
     ScanParameters &operator=(const ScanParameters &) = default;
@@ -59,9 +57,8 @@ class ScanParameters {
     int start() const;
     int stop() const;
     int step() const;
-    DACIndex dac() const;
+    const std::string &dac() const;
     bool enabled() const;
-    int64_t settleTime() const;
     void increment_stop();
 };
 
@@ -215,122 +215,6 @@ enum class DetectorType {
     Unknown
 };
 
-/**
- * @brief Enum class to define the Digital to Analog converter
- * The values are the same as in slsDetectorPackage
- */
-enum DACIndex {
-    DAC_0, DAC_1, DAC_2, DAC_3, DAC_4, DAC_5, DAC_6, DAC_7, DAC_8,
-    DAC_9, DAC_10, DAC_11, DAC_12, DAC_13, DAC_14, DAC_15, DAC_16, DAC_17,
-    VSVP, VTRIM, VRPREAMP, VRSHAPER, VSVN, VTGSTV, VCMP_LL, VCMP_LR, VCAL,
-    VCMP_RL, RXB_RB, RXB_LB, VCMP_RR, VCP, VCN, VISHAPER, VTHRESHOLD,
-    IO_DELAY, VREF_DS, VOUT_CM, VIN_CM, VREF_COMP, VB_COMP, VDD_PROT,
-    VIN_COM, VREF_PRECH, VB_PIXBUF, VB_DS, VREF_H_ADC, VB_COMP_FE,
-    VB_COMP_ADC, VCOM_CDS, VREF_RSTORE, VB_OPA_1ST, VREF_COMP_FE, VCOM_ADC1,
-    VREF_L_ADC, VREF_CDS, VB_CS, VB_OPA_FD, VCOM_ADC2, VCASSH, VTH2,
-    VRSHAPER_N, VIPRE_OUT, VTH3, VTH1, VICIN, VCAS, VCAL_N, VIPRE, VCAL_P,
-    VDCSH, VBP_COLBUF, VB_SDA, VCASC_SFP, VIPRE_CDS, IBIAS_SFP, ADC_VPP,
-    HIGH_VOLTAGE, TEMPERATURE_ADC, TEMPERATURE_FPGA, TEMPERATURE_FPGAEXT,
-    TEMPERATURE_10GE, TEMPERATURE_DCDC, TEMPERATURE_SODL, TEMPERATURE_SODR,
-    TEMPERATURE_FPGA2, TEMPERATURE_FPGA3, TRIMBIT_SCAN,
-    V_POWER_A = 100, V_POWER_B = 101, V_POWER_C = 102, V_POWER_D = 103,
-    V_POWER_IO = 104, V_POWER_CHIP = 105, I_POWER_A = 106, I_POWER_B = 107,
-    I_POWER_C = 108, I_POWER_D = 109, I_POWER_IO = 110, V_LIMIT = 111,
-    SLOW_ADC0 = 1000, SLOW_ADC1, SLOW_ADC2, SLOW_ADC3, SLOW_ADC4, SLOW_ADC5,
-    SLOW_ADC6, SLOW_ADC7, SLOW_ADC_TEMP
-};
-
 enum class TimingMode { Auto, Trigger };
 enum class FrameDiscardPolicy { NoDiscard, Discard, DiscardPartial };
 
@@ -347,15 +231,6 @@ template <> FrameDiscardPolicy StringTo(const std::string & /*mode*/);
 
 using DataTypeVariants = std::variant<uint16_t, uint32_t>;
 
-constexpr uint16_t ADC_MASK =
-    0x3FFF; // used to mask out the gain bits in Jungfrau
+constexpr uint16_t ADC_MASK = 0x3FFF; // used to mask out the gain bits in Jungfrau
 
-/**
- * @brief Convert a string to a DACIndex
- * @param arg string representation of the dacIndex
- * @return DACIndex
- * @throw invalid argument error if the string does not match any DACIndex
- */
-template <> DACIndex StringTo(const std::string &arg);
-
 } // namespace aare
@@ -26,33 +26,34 @@ def _get_class(name, cluster_size, dtype):



- def ClusterFinder(image_size, cluster_size, n_sigma=5, dtype = np.int32, capacity = 1024):
+ def ClusterFinder(image_size, saved_cluster_size, checked_cluster_size, n_sigma=5, dtype = np.int32, capacity = 1024, chunk_size=50000, n_chunks = 10):
"""
Factory function to create a ClusterFinder object. Provides a cleaner syntax for
the templated ClusterFinder in C++.
"""
- cls = _get_class("ClusterFinder", cluster_size, dtype)
- return cls(image_size, n_sigma=n_sigma, capacity=capacity)
+ cls = _get_class("ClusterFinder", saved_cluster_size, dtype)
+ return cls(image_size, n_sigma=n_sigma, capacity=capacity, chunk_size=chunk_size, n_chunks=n_chunks, cluster_size_x=checked_cluster_size[0], cluster_size_y=checked_cluster_size[1])



- def ClusterFinderMT(image_size, cluster_size = (3,3), dtype=np.int32, n_sigma=5, capacity = 1024, n_threads = 3):
+ def ClusterFinderMT(image_size, saved_cluster_size = (3,3), checked_cluster_size = (3,3), dtype=np.int32, n_sigma=5, capacity = 1024, n_threads = 3, chunk_size=50000, n_chunks = 10):
"""
Factory function to create a ClusterFinderMT object. Provides a cleaner syntax for
the templated ClusterFinderMT in C++.
"""

- cls = _get_class("ClusterFinderMT", cluster_size, dtype)
- return cls(image_size, n_sigma=n_sigma, capacity=capacity, n_threads=n_threads)
+ cls = _get_class("ClusterFinderMT", saved_cluster_size, dtype)
+ return cls(image_size, n_sigma=n_sigma, capacity=capacity, n_threads=n_threads, chunk_size=chunk_size, n_chunks=n_chunks, cluster_size_x=checked_cluster_size[0], cluster_size_y=checked_cluster_size[1])


- def ClusterCollector(clusterfindermt, dtype=np.int32):
+ def ClusterCollector(clusterfindermt, cluster_size = (3,3), dtype=np.int32):
"""
Factory function to create a ClusterCollector object. Provides a cleaner syntax for
the templated ClusterCollector in C++.
"""

- cls = _get_class("ClusterCollector", clusterfindermt.cluster_size, dtype)
+ cls = _get_class("ClusterCollector", cluster_size, dtype)
return cls(clusterfindermt)

def ClusterFileSink(clusterfindermt, cluster_file, dtype=np.int32):
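With this change the Python factory functions split the cluster size into two arguments: `saved_cluster_size` still selects the templated C++ class via `_get_class`, while `checked_cluster_size` is forwarded to the constructor as `cluster_size_x`/`cluster_size_y` together with the new `chunk_size` and `n_chunks` keywords. A minimal usage sketch, assuming the `aare` package is built from this branch; the image shape, dtype and concrete sizes are example values, and the meaning of "saved" versus "checked" size is read off only from how the arguments are forwarded here:

```python
import numpy as np
from aare import ClusterFinder, ClusterFinderMT

# Single-threaded finder: the bound class is chosen from saved_cluster_size,
# while checked_cluster_size ends up as cluster_size_x / cluster_size_y.
cf = ClusterFinder(
    image_size=(400, 400),   # example image shape
    saved_cluster_size=(3, 3),
    checked_cluster_size=(3, 3),
    n_sigma=5,
    dtype=np.int32,
    chunk_size=50000,        # defaults taken from this diff
    n_chunks=10,
)

# Multi-threaded variant with the same split of cluster sizes.
cf_mt = ClusterFinderMT((400, 400), saved_cluster_size=(3, 3),
                        checked_cluster_size=(3, 3), n_threads=3)
```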
@@ -17,7 +17,7 @@ from .ClusterVector import ClusterVector
from ._aare import fit_gaus, fit_pol1, fit_scurve, fit_scurve2
from ._aare import Interpolator
from ._aare import calculate_eta2
- from ._aare import reduce_to_2x2, reduce_to_3x3
+

from ._aare import apply_custom_weights

@@ -24,8 +24,7 @@ void define_Cluster(py::module &m, const std::string &typestr) {
py::class_<Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>>(
m, class_name.c_str(), py::buffer_protocol())

- .def(py::init([](uint8_t x, uint8_t y,
- py::array_t<Type, py::array::forcecast> data) {
+ .def(py::init([](uint8_t x, uint8_t y, py::array_t<Type> data) {
py::buffer_info buf_info = data.request();
Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType> cluster;
cluster.x = x;
@@ -35,58 +34,31 @@ void define_Cluster(py::module &m, const std::string &typestr) {
cluster.data[i] = r(i);
}
return cluster;
- }))
+ }));

- // TODO! Review if to keep or not
- .def_property_readonly(
- "data",
- [](Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType> &c)
- -> py::array {
+ /*
+ //TODO! Review if to keep or not
+ .def_property(
+ "data",
+ [](ClusterType &c) -> py::array {
return py::array(py::buffer_info(
- c.data.data(), sizeof(Type),
+ c.data, sizeof(Type),
py::format_descriptor<Type>::format(), // Type
// format
- 2, // Number of dimensions
- {static_cast<ssize_t>(ClusterSizeX),
- static_cast<ssize_t>(ClusterSizeY)}, // Shape (flattened)
- {sizeof(Type) * ClusterSizeY, sizeof(Type)}
- // Stride (step size between elements)
- ));
- })
-
- .def_readonly("x",
- &Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>::x)
-
- .def_readonly("y",
- &Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>::y);
- }
-
- template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
- typename CoordType = int16_t>
- void reduce_to_3x3(py::module &m) {
-
- m.def(
- "reduce_to_3x3",
- [](const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cl) {
- return reduce_to_3x3(cl);
+ 1, // Number of dimensions
+ {static_cast<ssize_t>(ClusterSizeX *
+ ClusterSizeY)}, // Shape (flattened)
+ {sizeof(Type)} // Stride (step size between elements)
+ ));
},
- py::return_value_policy::move,
- "Reduce cluster to 3x3 subcluster by taking the 3x3 subcluster with "
- "the highest photon energy.");
- }
+ [](ClusterType &c, py::array_t<Type> arr) {
+ py::buffer_info buf_info = arr.request();
+ Type *ptr = static_cast<Type *>(buf_info.ptr);
+ std::copy(ptr, ptr + ClusterSizeX * ClusterSizeY,
+ c.data); // TODO dont iterate over centers!!!

- template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
- typename CoordType = int16_t>
- void reduce_to_2x2(py::module &m) {
-
- m.def(
- "reduce_to_2x2",
- [](const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cl) {
- return reduce_to_2x2(cl);
- },
- py::return_value_policy::move,
- "Reduce cluster to 2x2 subcluster by taking the 2x2 subcluster with "
- "the highest photon energy.");
+ });
+ */
}

#pragma GCC diagnostic pop
@@ -30,14 +30,30 @@ void define_ClusterFinder(py::module &m, const std::string &typestr) {

py::class_<ClusterFinder<ClusterType, uint16_t, pd_type>>(
m, class_name.c_str())
- .def(py::init<Shape<2>, pd_type, size_t>(), py::arg("image_size"),
- py::arg("n_sigma") = 5.0, py::arg("capacity") = 1'000'000)
+ .def(py::init<Shape<2>, pd_type, size_t, uint32_t, uint32_t, uint32_t, uint32_t>(),
+ py::arg("image_size"), py::arg("n_sigma") = 5.0, py::arg("capacity") = 1'000'000,
+ py::arg("chunk_size") = 50'000, py::arg("n_chunks") = 10,
+ py::arg("cluster_size_x") = 3, py::arg("cluster_size_y") = 3)
.def("push_pedestal_frame",
[](ClusterFinder<ClusterType, uint16_t, pd_type> &self,
py::array_t<uint16_t> frame) {
auto view = make_view_2d(frame);
self.push_pedestal_frame(view);
})

+ .def("push_pedestal_mean",
+ [](ClusterFinder<ClusterType, uint16_t, pd_type> &self,
+ py::array_t<double> frame, uint32_t chunk_number) {
+ auto view = make_view_2d(frame);
+ self.push_pedestal_mean(view, chunk_number);
+ })
+ .def("push_pedestal_std",
+ [](ClusterFinder<ClusterType, uint16_t, pd_type> &self,
+ py::array_t<double> frame, uint32_t chunk_number) {
+ auto view = make_view_2d(frame);
+ self.push_pedestal_std(view, chunk_number);
+ })

.def("clear_pedestal",
&ClusterFinder<ClusterType, uint16_t, pd_type>::clear_pedestal)
.def_property_readonly(
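The new `push_pedestal_mean` and `push_pedestal_std` bindings each take a 2D `double` array and a `chunk_number`, so pedestal statistics can be loaded per chunk instead of being accumulated frame by frame with `push_pedestal_frame`. A rough sketch of how this could be driven from Python; the per-chunk arrays are hypothetical placeholders and the chunked-pedestal semantics are inferred from the argument names only:

```python
import numpy as np

n_chunks = 10        # should match n_chunks passed to the finder
shape = (400, 400)   # example image shape

# Hypothetical precomputed pedestal statistics, one 2D array per chunk.
mean_per_chunk = np.zeros((n_chunks, *shape), dtype=np.float64)
std_per_chunk = np.ones((n_chunks, *shape), dtype=np.float64)

# cf is a ClusterFinder created as in the factory example above.
for chunk in range(n_chunks):
    cf.push_pedestal_mean(mean_per_chunk[chunk], chunk)
    cf.push_pedestal_std(std_per_chunk[chunk], chunk)
```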
@@ -30,15 +30,31 @@ void define_ClusterFinderMT(py::module &m, const std::string &typestr) {

py::class_<ClusterFinderMT<ClusterType, uint16_t, pd_type>>(
m, class_name.c_str())
- .def(py::init<Shape<2>, pd_type, size_t, size_t>(),
+ .def(py::init<Shape<2>, pd_type, size_t, size_t, uint32_t, uint32_t, uint32_t, uint32_t>(),
py::arg("image_size"), py::arg("n_sigma") = 5.0,
- py::arg("capacity") = 2048, py::arg("n_threads") = 3)
+ py::arg("capacity") = 2048, py::arg("n_threads") = 3,
+ py::arg("chunk_size") = 50'000, py::arg("n_chunks") = 10,
+ py::arg("cluster_size_x") = 3, py::arg("cluster_size_y") = 3)
.def("push_pedestal_frame",
[](ClusterFinderMT<ClusterType, uint16_t, pd_type> &self,
py::array_t<uint16_t> frame) {
auto view = make_view_2d(frame);
self.push_pedestal_frame(view);
})

+ .def("push_pedestal_mean",
+ [](ClusterFinderMT<ClusterType, uint16_t, pd_type> &self,
+ py::array_t<double> frame, uint32_t chunk_number) {
+ auto view = make_view_2d(frame);
+ self.push_pedestal_mean(view, chunk_number);
+ })
+ .def("push_pedestal_std",
+ [](ClusterFinderMT<ClusterType, uint16_t, pd_type> &self,
+ py::array_t<double> frame, uint32_t chunk_number) {
+ auto view = make_view_2d(frame);
+ self.push_pedestal_std(view, chunk_number);
+ })

.def(
"find_clusters",
[](ClusterFinderMT<ClusterType, uint16_t, pd_type> &self,
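On the multi-threaded side the constructor gains the same chunked-pedestal and cluster-size keywords, and `ClusterCollector` now takes the cluster size explicitly rather than reading it from the finder. A short sketch of how the pieces fit together from Python; the frame used here is a placeholder and the exact Python-side `find_clusters` signature is outside this hunk:

```python
import numpy as np
from aare import ClusterFinderMT, ClusterCollector

cf_mt = ClusterFinderMT((400, 400), saved_cluster_size=(3, 3),
                        checked_cluster_size=(3, 3), dtype=np.int32,
                        n_threads=3, chunk_size=50000, n_chunks=10)

# The collector must now be told the cluster size; it has to match the
# saved_cluster_size used for the finder.
collector = ClusterCollector(cf_mt, cluster_size=(3, 3), dtype=np.int32)

# Feed frames to the worker threads (placeholder frame, uint16 as in the
# push_pedestal_frame binding above).
frame = np.zeros((400, 400), dtype=np.uint16)
cf_mt.find_clusters(frame)
```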
@@ -104,47 +104,4 @@ void define_ClusterVector(py::module &m, const std::string &typestr) {
});
}

- template <typename Type, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
- typename CoordType = uint16_t>
- void define_2x2_reduction(py::module &m) {
- m.def(
- "reduce_to_2x2",
- [](const ClusterVector<
- Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>> &cv) {
- return new ClusterVector<Cluster<Type, 2, 2, CoordType>>(
- reduce_to_2x2(cv));
- },
- R"(
-
- Reduce cluster to 2x2 subcluster by taking the 2x2 subcluster with
- the highest photon energy."
- Parameters
- ----------
- cv : ClusterVector
- )",
- py::arg("clustervector"));
- }
-
- template <typename Type, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
- typename CoordType = uint16_t>
- void define_3x3_reduction(py::module &m) {
-
- m.def(
- "reduce_to_3x3",
- [](const ClusterVector<
- Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>> &cv) {
- return new ClusterVector<Cluster<Type, 3, 3, CoordType>>(
- reduce_to_3x3(cv));
- },
- R"(
-
- Reduce cluster to 3x3 subcluster by taking the 3x3 subcluster with
- the highest photon energy."
- Parameters
- ----------
- cv : ClusterVector
- )",
- py::arg("clustervector"));
- }
-
#pragma GCC diagnostic pop
@@ -47,9 +47,7 @@ double, 'f' for float)
define_ClusterFileSink<T, N, M, U>(m, "Cluster" #N "x" #M #TYPE_CODE); \
define_ClusterCollector<T, N, M, U>(m, "Cluster" #N "x" #M #TYPE_CODE); \
define_Cluster<T, N, M, U>(m, #N "x" #M #TYPE_CODE); \
- register_calculate_eta<T, N, M, U>(m); \
- define_2x2_reduction<T, N, M, U>(m); \
- reduce_to_2x2<T, N, M, U>(m);
+ register_calculate_eta<T, N, M, U>(m);

PYBIND11_MODULE(_aare, m) {
define_file_io_bindings(m);
@@ -86,30 +84,4 @@ PYBIND11_MODULE(_aare, m) {
DEFINE_CLUSTER_BINDINGS(int, 9, 9, uint16_t, i);
DEFINE_CLUSTER_BINDINGS(double, 9, 9, uint16_t, d);
DEFINE_CLUSTER_BINDINGS(float, 9, 9, uint16_t, f);
-
- define_3x3_reduction<int, 3, 3, uint16_t>(m);
- define_3x3_reduction<double, 3, 3, uint16_t>(m);
- define_3x3_reduction<float, 3, 3, uint16_t>(m);
- define_3x3_reduction<int, 5, 5, uint16_t>(m);
- define_3x3_reduction<double, 5, 5, uint16_t>(m);
- define_3x3_reduction<float, 5, 5, uint16_t>(m);
- define_3x3_reduction<int, 7, 7, uint16_t>(m);
- define_3x3_reduction<double, 7, 7, uint16_t>(m);
- define_3x3_reduction<float, 7, 7, uint16_t>(m);
- define_3x3_reduction<int, 9, 9, uint16_t>(m);
- define_3x3_reduction<double, 9, 9, uint16_t>(m);
- define_3x3_reduction<float, 9, 9, uint16_t>(m);
-
- reduce_to_3x3<int, 3, 3, uint16_t>(m);
- reduce_to_3x3<double, 3, 3, uint16_t>(m);
- reduce_to_3x3<float, 3, 3, uint16_t>(m);
- reduce_to_3x3<int, 5, 5, uint16_t>(m);
- reduce_to_3x3<double, 5, 5, uint16_t>(m);
- reduce_to_3x3<float, 5, 5, uint16_t>(m);
- reduce_to_3x3<int, 7, 7, uint16_t>(m);
- reduce_to_3x3<double, 7, 7, uint16_t>(m);
- reduce_to_3x3<float, 7, 7, uint16_t>(m);
- reduce_to_3x3<int, 9, 9, uint16_t>(m);
- reduce_to_3x3<double, 9, 9, uint16_t>(m);
- reduce_to_3x3<float, 9, 9, uint16_t>(m);
}
@@ -101,27 +101,6 @@ def test_cluster_finder():
assert clusters.size == 0


- def test_2x2_reduction():
- """Test 2x2 Reduction"""
- cluster = _aare.Cluster3x3i(5,5,np.array([1, 1, 1, 2, 3, 1, 2, 2, 1], dtype=np.int32))
-
- reduced_cluster = _aare.reduce_to_2x2(cluster)
-
- assert reduced_cluster.x == 4
- assert reduced_cluster.y == 5
- assert (reduced_cluster.data == np.array([[2, 3], [2, 2]], dtype=np.int32)).all()
-
-
- def test_3x3_reduction():
- """Test 3x3 Reduction"""
- cluster = _aare.Cluster5x5d(5,5,np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 2.0, 2.0, 3.0,
- 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], dtype=np.double))
-
- reduced_cluster = _aare.reduce_to_3x3(cluster)
-
- assert reduced_cluster.x == 4
- assert reduced_cluster.y == 5
- assert (reduced_cluster.data == np.array([[1.0, 2.0, 1.0], [2.0, 2.0, 3.0], [1.0, 2.0, 1.0]], dtype=np.double)).all()



@@ -5,7 +5,7 @@ import time
from pathlib import Path
import pickle

- from aare import ClusterFile, ClusterVector
+ from aare import ClusterFile
from aare import _aare
from conftest import test_data_path

@@ -51,36 +51,4 @@ def test_make_a_hitmap_from_cluster_vector():
# print(img)
# print(ref)
assert (img == ref).all()

-
- def test_2x2_reduction():
- cv = ClusterVector((3,3))
-
- cv.push_back(_aare.Cluster3x3i(5, 5, np.array([1, 1, 1, 2, 3, 1, 2, 2, 1], dtype=np.int32)))
- cv.push_back(_aare.Cluster3x3i(5, 5, np.array([2, 2, 1, 2, 3, 1, 1, 1, 1], dtype=np.int32)))
-
- reduced_cv = np.array(_aare.reduce_to_2x2(cv), copy=False)
-
- assert reduced_cv.size == 2
- assert reduced_cv[0]["x"] == 4
- assert reduced_cv[0]["y"] == 5
- assert (reduced_cv[0]["data"] == np.array([[2, 3], [2, 2]], dtype=np.int32)).all()
- assert reduced_cv[1]["x"] == 4
- assert reduced_cv[1]["y"] == 6
- assert (reduced_cv[1]["data"] == np.array([[2, 2], [2, 3]], dtype=np.int32)).all()
-
-
- def test_3x3_reduction():
- cv = _aare.ClusterVector_Cluster5x5d()
-
- cv.push_back(_aare.Cluster5x5d(5,5,np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 2.0, 2.0, 3.0,
- 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], dtype=np.double)))
- cv.push_back(_aare.Cluster5x5d(5,5,np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 2.0, 2.0, 3.0,
- 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], dtype=np.double)))
-
- reduced_cv = np.array(_aare.reduce_to_3x3(cv), copy=False)
-
- assert reduced_cv.size == 2
- assert reduced_cv[0]["x"] == 4
- assert reduced_cv[0]["y"] == 5
- assert (reduced_cv[0]["data"] == np.array([[1.0, 2.0, 1.0], [2.0, 2.0, 3.0], [1.0, 2.0, 1.0]], dtype=np.double)).all()
@@ -18,86 +18,4 @@ TEST_CASE("Test sum of Cluster", "[.cluster]") {
Cluster<int, 2, 2> cluster{0, 0, {1, 2, 3, 4}};

CHECK(cluster.sum() == 10);
- }
-
- using ClusterTypes = std::variant<Cluster<int, 2, 2>, Cluster<int, 3, 3>,
- Cluster<int, 5, 5>, Cluster<int, 2, 3>>;
-
- using ClusterTypesLargerThan2x2 =
- std::variant<Cluster<int, 3, 3>, Cluster<int, 4, 4>, Cluster<int, 5, 5>>;
-
- TEST_CASE("Test reduce to 2x2 Cluster", "[.cluster]") {
- auto [cluster, expected_reduced_cluster] = GENERATE(
- std::make_tuple(ClusterTypes{Cluster<int, 2, 2>{5, 5, {1, 2, 3, 4}}},
- Cluster<int, 2, 2>{4, 6, {1, 2, 3, 4}}),
- std::make_tuple(
- ClusterTypes{Cluster<int, 3, 3>{5, 5, {1, 1, 1, 1, 3, 2, 1, 2, 2}}},
- Cluster<int, 2, 2>{5, 5, {3, 2, 2, 2}}),
- std::make_tuple(
- ClusterTypes{Cluster<int, 3, 3>{5, 5, {1, 1, 1, 2, 3, 1, 2, 2, 1}}},
- Cluster<int, 2, 2>{4, 5, {2, 3, 2, 2}}),
- std::make_tuple(
- ClusterTypes{Cluster<int, 3, 3>{5, 5, {2, 2, 1, 2, 3, 1, 1, 1, 1}}},
- Cluster<int, 2, 2>{4, 6, {2, 2, 2, 3}}),
- std::make_tuple(
- ClusterTypes{Cluster<int, 3, 3>{5, 5, {1, 2, 2, 1, 3, 2, 1, 1, 1}}},
- Cluster<int, 2, 2>{5, 6, {2, 2, 3, 2}}),
- std::make_tuple(ClusterTypes{Cluster<int, 5, 5>{
- 5, 5, {1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 3,
- 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}}},
- Cluster<int, 2, 2>{5, 6, {2, 2, 3, 2}}),
- std::make_tuple(ClusterTypes{Cluster<int, 5, 5>{
- 5, 5, {1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 2, 3,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}}},
- Cluster<int, 2, 2>{4, 6, {2, 2, 2, 3}}),
- std::make_tuple(
- ClusterTypes{Cluster<int, 2, 3>{5, 5, {2, 2, 3, 2, 1, 1}}},
- Cluster<int, 2, 2>{4, 6, {2, 2, 3, 2}}));
-
- auto reduced_cluster = std::visit(
- [](const auto &clustertype) { return reduce_to_2x2(clustertype); },
- cluster);
-
- CHECK(reduced_cluster.x == expected_reduced_cluster.x);
- CHECK(reduced_cluster.y == expected_reduced_cluster.y);
- CHECK(std::equal(reduced_cluster.data.begin(),
- reduced_cluster.data.begin() + 4,
- expected_reduced_cluster.data.begin()));
- }
-
- TEST_CASE("Test reduce to 3x3 Cluster", "[.cluster]") {
- auto [cluster, expected_reduced_cluster] = GENERATE(
- std::make_tuple(ClusterTypesLargerThan2x2{Cluster<int, 3, 3>{
- 5, 5, {1, 1, 1, 1, 3, 1, 1, 1, 1}}},
- Cluster<int, 3, 3>{5, 5, {1, 1, 1, 1, 3, 1, 1, 1, 1}}),
- std::make_tuple(
- ClusterTypesLargerThan2x2{Cluster<int, 4, 4>{
- 5, 5, {2, 2, 1, 1, 2, 2, 1, 1, 1, 1, 3, 1, 1, 1, 1, 1}}},
- Cluster<int, 3, 3>{4, 6, {2, 2, 1, 2, 2, 1, 1, 1, 3}}),
- std::make_tuple(
- ClusterTypesLargerThan2x2{Cluster<int, 4, 4>{
- 5, 5, {1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 3, 1, 1, 1, 1, 1}}},
- Cluster<int, 3, 3>{5, 6, {1, 2, 2, 1, 2, 2, 1, 3, 1}}),
- std::make_tuple(
- ClusterTypesLargerThan2x2{Cluster<int, 4, 4>{
- 5, 5, {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 2, 1, 1, 2, 2}}},
- Cluster<int, 3, 3>{5, 5, {1, 1, 1, 1, 3, 2, 1, 2, 2}}),
- std::make_tuple(
- ClusterTypesLargerThan2x2{Cluster<int, 4, 4>{
- 5, 5, {1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 1, 2, 2, 1, 1}}},
- Cluster<int, 3, 3>{4, 5, {1, 1, 1, 2, 2, 3, 2, 2, 1}}),
- std::make_tuple(ClusterTypesLargerThan2x2{Cluster<int, 5, 5>{
- 5, 5, {1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 2, 2, 3,
- 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1}}},
- Cluster<int, 3, 3>{4, 5, {1, 2, 1, 2, 2, 3, 1, 2, 1}}));
-
- auto reduced_cluster = std::visit(
- [](const auto &clustertype) { return reduce_to_3x3(clustertype); },
- cluster);
-
- CHECK(reduced_cluster.x == expected_reduced_cluster.x);
- CHECK(reduced_cluster.y == expected_reduced_cluster.y);
- CHECK(std::equal(reduced_cluster.data.begin(),
- reduced_cluster.data.begin() + 9,
- expected_reduced_cluster.data.begin()));
}
@@ -57,7 +57,6 @@ class ClusterFinderMTWrapper
size_t m_sink_size() const { return this->m_sink.sizeGuess(); }
};

-
TEST_CASE("multithreaded cluster finder", "[.with-data]") {
auto fpath =
test_data_path() / "raw/moench03/cu_half_speed_master_4.json";
@@ -82,8 +81,7 @@ TEST_CASE("multithreaded cluster finder", "[.with-data]") {
CHECK(cf.m_input_queues_are_empty() == true);

for (size_t i = 0; i < n_frames_pd; ++i) {
- auto frame = file.read_frame();
- cf.find_clusters(frame.view<uint16_t>());
+ cf.find_clusters(file.read_frame().view<uint16_t>());
}

cf.stop();
@@ -99,8 +99,7 @@ TEST_CASE("Read data from a jungfrau 500k single port raw file",
}

TEST_CASE("Read frame numbers from a raw file", "[.with-data]") {
- auto fpath =
- test_data_path() / "raw/eiger" / "eiger_500k_16bit_master_0.json";
+ auto fpath = test_data_path() / "raw/eiger" / "eiger_500k_16bit_master_0.json";
REQUIRE(std::filesystem::exists(fpath));

// we know this file has 3 frames with frame numbers 14, 15, 16
@@ -289,7 +288,8 @@ TEST_CASE("check find_geometry", "[.with-data]") {
}
}

- TEST_CASE("Open multi module file with ROI", "[.with-data]") {
+ TEST_CASE("Open multi module file with ROI",
+ "[.with-data]") {

auto fpath = test_data_path() / "raw/SingleChipROI/Data_master_0.json";
REQUIRE(std::filesystem::exists(fpath));
@@ -319,4 +319,4 @@ TEST_CASE("Read file with unordered frames", "[.with-data]") {
REQUIRE(std::filesystem::exists(fpath));
File f(fpath);
REQUIRE_THROWS((f.read_frame()));
}
@@ -64,12 +64,6 @@ const std::string &RawFileNameComponents::base_name() const {
const std::string &RawFileNameComponents::ext() const { return m_ext; }
int RawFileNameComponents::file_index() const { return m_file_index; }

- ScanParameters::ScanParameters(const bool enabled, const DACIndex dac,
- const int start, const int stop, const int step,
- const int64_t settleTime)
- : m_enabled(enabled), m_dac(dac), m_start(start), m_stop(stop),
- m_step(step), m_settleTime(settleTime){};
-
// "[enabled\ndac dac 4\nstart 500\nstop 2200\nstep 5\nsettleTime 100us\n]"
ScanParameters::ScanParameters(const std::string &par) {
std::istringstream iss(par.substr(1, par.size() - 2));
@@ -78,7 +72,7 @@ ScanParameters::ScanParameters(const std::string &par) {
if (line == "enabled") {
m_enabled = true;
} else if (line.find("dac") != std::string::npos) {
- m_dac = StringTo<DACIndex>(line.substr(4));
+ m_dac = line.substr(4);
} else if (line.find("start") != std::string::npos) {
m_start = std::stoi(line.substr(6));
} else if (line.find("stop") != std::string::npos) {
@@ -93,9 +87,8 @@ int ScanParameters::start() const { return m_start; }
int ScanParameters::stop() const { return m_stop; }
void ScanParameters::increment_stop() { m_stop += 1; }
int ScanParameters::step() const { return m_step; }
- DACIndex ScanParameters::dac() const { return m_dac; }
+ const std::string &ScanParameters::dac() const { return m_dac; }
bool ScanParameters::enabled() const { return m_enabled; }
- int64_t ScanParameters::settleTime() const { return m_settleTime; }

RawMasterFile::RawMasterFile(const std::filesystem::path &fpath)
    : m_fnc(fpath) {
@@ -177,7 +170,6 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) {
std::ifstream ifs(fpath);
json j;
ifs >> j;

double v = j["Version"];
m_version = fmt::format("{:.1f}", v);
-
@@ -189,9 +181,7 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) {
j["Geometry"]["x"]}; // TODO: isnt it only available for version > 7.1?
// - try block default should be 1x1

- m_image_size_in_bytes =
- v < 8.0 ? j["Image Size in bytes"] : j["Image Size"];
+ m_image_size_in_bytes = j["Image Size in bytes"];

m_frames_in_file = j["Frames in File"];
m_pixels_y = j["Pixels"]["y"];
m_pixels_x = j["Pixels"]["x"];
@@ -216,6 +206,7 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) {
} catch (const json::out_of_range &e) {
// keep the optional empty
}
+
// ----------------------------------------------------------------
// Special treatment of analog flag because of Moench03
try {
@@ -236,6 +227,7 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) {
m_analog_flag = 0;
}
//-----------------------------------------------------------------
+
try {
m_quad = j.at("Quad");
} catch (const json::out_of_range &e) {
@@ -247,6 +239,7 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) {
// }catch (const json::out_of_range &e) {
// m_adc_mask = 0;
// }
+
try {
int digital_flag = j.at("Digital Flag");
if (digital_flag) {
@@ -255,6 +248,7 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) {
} catch (const json::out_of_range &e) {
// keep the optional empty
}
+
try {
m_transceiver_flag = j.at("Transceiver Flag");
if (m_transceiver_flag) {
@@ -263,20 +257,10 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) {
} catch (const json::out_of_range &e) {
// keep the optional empty
}

try {
- if (v < 8.0) {
- std::string scan_parameters = j.at("Scan Parameters");
- m_scan_parameters = ScanParameters(scan_parameters);
- } else {
- auto json_obj = j.at("Scan Parameters");
- m_scan_parameters = ScanParameters(
- json_obj.at("enable").get<int>(),
- static_cast<DACIndex>(json_obj.at("dacInd").get<int>()),
- json_obj.at("start offset").get<int>(),
- json_obj.at("stop offset").get<int>(),
- json_obj.at("step size").get<int>(),
- json_obj.at("dac settle time ns").get<int>());
- }
+ std::string scan_parameters = j.at("Scan Parameters");
+ m_scan_parameters = ScanParameters(scan_parameters);
if (v < 7.21) {
m_scan_parameters
.increment_stop(); // adjust for endpoint being included
@@ -284,7 +268,6 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) {
} catch (const json::out_of_range &e) {
// not a scan
}
-
try {
m_udp_interfaces_per_module = {j.at("Number of UDP Interfaces"), 1};
} catch (const json::out_of_range &e) {
@@ -294,22 +277,14 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) {
m_udp_interfaces_per_module = {1, 2};
}
}

try {
ROI tmp_roi;
- if (v < 8.0) {
- auto obj = j.at("Receiver Roi");
- tmp_roi.xmin = obj.at("xmin");
- tmp_roi.xmax = obj.at("xmax");
- tmp_roi.ymin = obj.at("ymin");
- tmp_roi.ymax = obj.at("ymax");
- } else {
- // TODO: for now only handle single ROI
- auto obj = j.at("Receiver Rois");
- tmp_roi.xmin = obj[0].at("xmin");
- tmp_roi.xmax = obj[0].at("xmax");
- tmp_roi.ymin = obj[0].at("ymin");
- tmp_roi.ymax = obj[0].at("ymax");
- }
+ auto obj = j.at("Receiver Roi");
+ tmp_roi.xmin = obj.at("xmin");
+ tmp_roi.xmax = obj.at("xmax");
+ tmp_roi.ymin = obj.at("ymin");
+ tmp_roi.ymax = obj.at("ymax");

// if any of the values are set update the roi
if (tmp_roi.xmin != 4294967295 || tmp_roi.xmax != 4294967295 ||
@@ -323,10 +298,14 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) {
}

} catch (const json::out_of_range &e) {
- LOG(TLogLevel::logERROR) << e.what() << std::endl;
+ std::cout << e.what() << std::endl;
// leave the optional empty
}

+ // if we have an roi we need to update the geometry for the subfiles
+ if (m_roi) {
+ }
+
// Update detector type for Moench
// TODO! How does this work with old .raw master files?
#ifdef AARE_VERBOSE
@@ -51,7 +51,7 @@ TEST_CASE("Parse scan parameters") {
ScanParameters s("[enabled\ndac dac 4\nstart 500\nstop 2200\nstep "
"5\nsettleTime 100us\n]");
REQUIRE(s.enabled());
- REQUIRE(s.dac() == DACIndex::DAC_4);
+ REQUIRE(s.dac() == "dac 4");
REQUIRE(s.start() == 500);
REQUIRE(s.stop() == 2200);
REQUIRE(s.step() == 5);
@@ -60,7 +60,7 @@ TEST_CASE("A disabled scan") {
TEST_CASE("A disabled scan") {
ScanParameters s("[disabled]");
REQUIRE_FALSE(s.enabled());
- REQUIRE(s.dac() == DACIndex::DAC_0);
+ REQUIRE(s.dac() == "");
REQUIRE(s.start() == 0);
REQUIRE(s.stop() == 0);
REQUIRE(s.step() == 0);
@@ -68,7 +68,7 @@ TEST_CASE("A disabled scan") {

TEST_CASE("Parse a master file in .json format", "[.integration]") {
auto fpath =
- test_data_path() / "raw" / "jungfrau" / "jungfrau_single_master_0.json";
+ test_data_path() / "jungfrau" / "jungfrau_single_master_0.json";
REQUIRE(std::filesystem::exists(fpath));
RawMasterFile f(fpath);

@@ -224,41 +224,6 @@ TEST_CASE("Parse a master file in .raw format", "[.integration]") {
// Packets Caught Mask : 64 bytes
}

- TEST_CASE("Parse a master file in new .json format",
- "[.integration][.width-data]") {
-
- auto file_path =
- test_data_path() / "raw" / "newmythen03" / "run_87_master_0.json";
- REQUIRE(std::filesystem::exists(file_path));
-
- RawMasterFile f(file_path);
-
- // Version : 8.0
- REQUIRE(f.version() == "8.0");
-
- REQUIRE(f.detector_type() == DetectorType::Mythen3);
- // Timing Mode : auto
- REQUIRE(f.timing_mode() == TimingMode::Auto);
- // Geometry : [2, 1]
- REQUIRE(f.geometry().col == 2);
- REQUIRE(f.geometry().row == 1);
- // Image Size : 5120 bytes
- REQUIRE(f.image_size_in_bytes() == 5120);
-
- REQUIRE(f.scan_parameters().enabled() == false);
- REQUIRE(f.scan_parameters().dac() == DACIndex::DAC_0);
- REQUIRE(f.scan_parameters().start() == 0);
- REQUIRE(f.scan_parameters().stop() == 0);
- REQUIRE(f.scan_parameters().step() == 0);
- REQUIRE(f.scan_parameters().settleTime() == 0);
-
- auto roi = f.roi().value();
- REQUIRE(roi.xmin == 0);
- REQUIRE(roi.xmax == 2559);
- REQUIRE(roi.ymin == -1);
- REQUIRE(roi.ymax == -1);
- }
-
TEST_CASE("Read eiger master file", "[.integration]") {
auto fpath = test_data_path() / "eiger" / "eiger_500k_32bit_master_0.json";
REQUIRE(std::filesystem::exists(fpath));
@@ -327,4 +292,4 @@ TEST_CASE("Read eiger master file", "[.integration]") {
// "Packets Caught Mask": "64 bytes"
// }
// }
}
src/defs.cpp (182 changed lines)
@@ -115,186 +115,4 @@ template <> FrameDiscardPolicy StringTo(const std::string &arg) {

// template <> TimingMode StringTo<TimingMode>(std::string mode);

- template <> DACIndex StringTo(const std::string &arg) {
- if (arg == "dac 0")
- return DACIndex::DAC_0;
- else if (arg == "dac 1")
- return DACIndex::DAC_1;
- else if (arg == "dac 2")
- return DACIndex::DAC_2;
- else if (arg == "dac 3")
- return DACIndex::DAC_3;
- else if (arg == "dac 4")
- return DACIndex::DAC_4;
- else if (arg == "dac 5")
- return DACIndex::DAC_5;
- else if (arg == "dac 6")
- return DACIndex::DAC_6;
- else if (arg == "dac 7")
- return DACIndex::DAC_7;
- else if (arg == "dac 8")
- return DACIndex::DAC_8;
- else if (arg == "dac 9")
- return DACIndex::DAC_9;
- else if (arg == "dac 10")
- return DACIndex::DAC_10;
- else if (arg == "dac 11")
- return DACIndex::DAC_11;
- else if (arg == "dac 12")
- return DACIndex::DAC_12;
- else if (arg == "dac 13")
- return DACIndex::DAC_13;
- else if (arg == "dac 14")
- return DACIndex::DAC_14;
- else if (arg == "dac 15")
- return DACIndex::DAC_15;
- else if (arg == "dac 16")
- return DACIndex::DAC_16;
- else if (arg == "dac 17")
- return DACIndex::DAC_17;
- else if (arg == "vsvp")
- return DACIndex::VSVP;
- else if (arg == "vtrim")
- return DACIndex::VTRIM;
- else if (arg == "vrpreamp")
- return DACIndex::VRPREAMP;
- else if (arg == "vrshaper")
- return DACIndex::VRSHAPER;
- else if (arg == "vsvn")
- return DACIndex::VSVN;
- else if (arg == "vtgstv")
- return DACIndex::VTGSTV;
- else if (arg == "vcmp_ll")
- return DACIndex::VCMP_LL;
- else if (arg == "vcmp_lr")
- return DACIndex::VCMP_LR;
- else if (arg == "vcal")
- return DACIndex::VCAL;
- else if (arg == "vcmp_rl")
- return DACIndex::VCMP_RL;
- else if (arg == "rxb_rb")
- return DACIndex::RXB_RB;
- else if (arg == "rxb_lb")
- return DACIndex::RXB_LB;
- else if (arg == "vcmp_rr")
- return DACIndex::VCMP_RR;
- else if (arg == "vcp")
- return DACIndex::VCP;
- else if (arg == "vcn")
- return DACIndex::VCN;
- else if (arg == "vishaper")
- return DACIndex::VISHAPER;
- else if (arg == "vthreshold")
- return DACIndex::VTHRESHOLD;
- else if (arg == "vref_ds")
- return DACIndex::VREF_DS;
- else if (arg == "vout_cm")
- return DACIndex::VOUT_CM;
- else if (arg == "vin_cm")
- return DACIndex::VIN_CM;
- else if (arg == "vref_comp")
- return DACIndex::VREF_COMP;
- else if (arg == "vb_comp")
- return DACIndex::VB_COMP;
- else if (arg == "vdd_prot")
- return DACIndex::VDD_PROT;
- else if (arg == "vin_com")
- return DACIndex::VIN_COM;
- else if (arg == "vref_prech")
- return DACIndex::VREF_PRECH;
- else if (arg == "vb_pixbuf")
- return DACIndex::VB_PIXBUF;
- else if (arg == "vb_ds")
- return DACIndex::VB_DS;
- else if (arg == "vref_h_adc")
- return DACIndex::VREF_H_ADC;
- else if (arg == "vb_comp_fe")
- return DACIndex::VB_COMP_FE;
- else if (arg == "vb_comp_adc")
- return DACIndex::VB_COMP_ADC;
- else if (arg == "vcom_cds")
- return DACIndex::VCOM_CDS;
- else if (arg == "vref_rstore")
- return DACIndex::VREF_RSTORE;
- else if (arg == "vb_opa_1st")
- return DACIndex::VB_OPA_1ST;
- else if (arg == "vref_comp_fe")
- return DACIndex::VREF_COMP_FE;
- else if (arg == "vcom_adc1")
- return DACIndex::VCOM_ADC1;
- else if (arg == "vref_l_adc")
- return DACIndex::VREF_L_ADC;
- else if (arg == "vref_cds")
- return DACIndex::VREF_CDS;
- else if (arg == "vb_cs")
- return DACIndex::VB_CS;
- else if (arg == "vb_opa_fd")
- return DACIndex::VB_OPA_FD;
- else if (arg == "vcom_adc2")
- return DACIndex::VCOM_ADC2;
- else if (arg == "vcassh")
- return DACIndex::VCASSH;
- else if (arg == "vth2")
- return DACIndex::VTH2;
- else if (arg == "vrshaper_n")
- return DACIndex::VRSHAPER_N;
- else if (arg == "vipre_out")
- return DACIndex::VIPRE_OUT;
- else if (arg == "vth3")
- return DACIndex::VTH3;
- else if (arg == "vth1")
- return DACIndex::VTH1;
- else if (arg == "vicin")
- return DACIndex::VICIN;
- else if (arg == "vcas")
- return DACIndex::VCAS;
- else if (arg == "vcal_n")
- return DACIndex::VCAL_N;
- else if (arg == "vipre")
- return DACIndex::VIPRE;
- else if (arg == "vcal_p")
- return DACIndex::VCAL_P;
- else if (arg == "vdcsh")
- return DACIndex::VDCSH;
- else if (arg == "vbp_colbuf")
- return DACIndex::VBP_COLBUF;
- else if (arg == "vb_sda")
- return DACIndex::VB_SDA;
- else if (arg == "vcasc_sfp")
- return DACIndex::VCASC_SFP;
- else if (arg == "vipre_cds")
- return DACIndex::VIPRE_CDS;
- else if (arg == "ibias_sfp")
- return DACIndex::IBIAS_SFP;
- else if (arg == "trimbits")
- return DACIndex::TRIMBIT_SCAN;
- else if (arg == "highvoltage")
- return DACIndex::HIGH_VOLTAGE;
- else if (arg == "iodelay")
- return DACIndex::IO_DELAY;
- else if (arg == "temp_adc")
- return DACIndex::TEMPERATURE_ADC;
- else if (arg == "temp_fpga")
- return DACIndex::TEMPERATURE_FPGA;
- else if (arg == "temp_fpgaext")
- return DACIndex::TEMPERATURE_FPGAEXT;
- else if (arg == "temp_10ge")
- return DACIndex::TEMPERATURE_10GE;
- else if (arg == "temp_dcdc")
- return DACIndex::TEMPERATURE_DCDC;
- else if (arg == "temp_sodl")
- return DACIndex::TEMPERATURE_SODL;
- else if (arg == "temp_sodr")
- return DACIndex::TEMPERATURE_SODR;
- else if (arg == "temp_fpgafl")
- return DACIndex::TEMPERATURE_FPGA2;
- else if (arg == "temp_fpgafr")
- return DACIndex::TEMPERATURE_FPGA3;
- else if (arg == "temp_slowadc")
- return DACIndex::SLOW_ADC_TEMP;
- else
- throw std::invalid_argument("Could not decode DACIndex from: \"" + arg +
- "\"");
- }
-
} // namespace aare
@@ -7,7 +7,6 @@ Script to update VERSION file with semantic versioning if provided as an argumen
import sys
import os
import re
- from datetime import datetime

from packaging.version import Version, InvalidVersion

@@ -27,9 +26,9 @@ def get_version():

# Check at least one argument is passed
if len(sys.argv) < 2:
- version = datetime.today().strftime('%Y.%-m.%-d')
- else:
+ return "0.0.0"
version = sys.argv[1]

try:
v = Version(version) # normalize check if version follows PEP 440 specification
@@ -55,4 +54,4 @@ def write_version_to_file(version):
if __name__ == "__main__":

version = get_version()
write_version_to_file(version)
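With the `datetime` import gone, `get_version()` falls back to a fixed placeholder when no argument is given and otherwise still normalizes the argument through `packaging`. A condensed sketch of the resulting behaviour; the handling after the `try` is outside the shown hunks and is an assumption here:

```python
import sys
from packaging.version import Version, InvalidVersion

def get_version():
    # No argument passed: fall back to a fixed placeholder version.
    if len(sys.argv) < 2:
        return "0.0.0"
    version = sys.argv[1]
    try:
        # Normalize and check that the version follows PEP 440.
        return str(Version(version))
    except InvalidVersion:
        # How the real script reports this is not shown in the diff.
        raise SystemExit(f"Invalid version string: {version!r}")
```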