Mirror of https://github.com/slsdetectorgroup/aare.git (synced 2025-12-15 17:41:25 +01:00)

Compare commits: 50 commits, 2025.7.18 ... feature_id
Commits (SHA1):
e290c8f820, c0ee17275e, ad3ef88607, f814b3f4e7, 1f46266183,
d3d9f760b3, 0891ffb1ee, 0b74bc25d5, 3ec40fa809, 74280379ce,
474c35cc6b, e2a97d3c45, bce8e9d5fc, 4c1e276e2c, 12114e7275,
7926993bb2, ed7fb1f1f9, 8ab98b356b, d908ad3636, 8733a1d66f,
437f7cec89, 6c3524298f, b59277c4bf, cb163c79b4, a0fb4900f0,
91d74110fa, f54e76e6bf, c6da36d10b, 5107513ff5, f7aa66a2c9,
3ac94641e3, 89bb8776ea, 1527a45cf3, 3d6858ad33, d6222027d0,
1195a5e100, 1347158235, 8c4d8b687e, b8e91d0282, 46876bfa73,
348fd0f937, 0fea0f5b0e, cb439efb48, 5de402f91b, 9a7713e98a,
9a3694b980, 85c3bf7bed, 8eb7fec435, 83717571c8, 5a9c3b717e
@@ -368,6 +368,7 @@ set(PUBLICHEADERS

set(SourceFiles
    ${CMAKE_CURRENT_SOURCE_DIR}/src/calibration.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/CtbRawFile.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/decode.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/defs.cpp

@@ -387,7 +388,7 @@ set(SourceFiles
    ${CMAKE_CURRENT_SOURCE_DIR}/src/RawSubFile.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/utils/task.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/utils/ifstream_helpers.cpp
)
)

add_library(aare_core STATIC ${SourceFiles})
target_include_directories(aare_core PUBLIC

@@ -411,6 +412,8 @@ target_link_libraries(
)

set_property(TARGET aare_core PROPERTY POSITION_INDEPENDENT_CODE ON)

if(AARE_TESTS)
    target_compile_definitions(aare_core PRIVATE AARE_TESTS)
endif()

@@ -430,13 +433,10 @@ set_target_properties(aare_core PROPERTIES
    PUBLIC_HEADER "${PUBLICHEADERS}"
)

if (AARE_PYTHON_BINDINGS)
    set_property(TARGET aare_core PROPERTY POSITION_INDEPENDENT_CODE ON)
endif()

if(AARE_TESTS)
    set(TestSources
        ${CMAKE_CURRENT_SOURCE_DIR}/src/algorithm.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/calibration.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/defs.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/decode.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/Dtype.test.cpp

@@ -463,6 +463,7 @@ if(AARE_TESTS)
    target_sources(tests PRIVATE ${TestSources} )
endif()

if(AARE_MASTER_PROJECT)
    install(TARGETS aare_core aare_compiler_flags
        EXPORT "${TARGETS_EXPORT_NAME}"

@@ -472,7 +473,6 @@ if(AARE_MASTER_PROJECT)
    )
endif()

set(CMAKE_POSITION_INDEPENDENT_CODE ON)
set(CMAKE_INSTALL_RPATH $ORIGIN)
set(CMAKE_BUILD_WITH_INSTALL_RPATH FALSE)
27 RELEASE.md
@@ -1,7 +1,27 @@
# Release notes

### 2025.10.1

### 2025.07.18
Bugfixes:

- File supports reading the new master json file format (multiple ROIs not supported yet)

### 2025.8.22

Features:

- Apply calibration works in G0 if passed a 2D calibration and pedestal
- Count pixels that switch
- Calculate pedestal (also a G0-only version)
- NDArray::view() needs an lvalue, to reduce issues with the view outliving the array

Bugfixes:

- Now using glibc 2.17 in conda builds (was using the host's)
- Fixed shifted pixels in clusters close to the edge of a frame

### 2025.7.18

Features:

@@ -15,7 +35,7 @@ Bugfixes:
- Removed unused file: ClusterFile.cpp

### 2025.05.22
### 2025.5.22

Features:

@@ -28,3 +48,6 @@ Bugfixes:
@@ -15,7 +15,7 @@ FetchContent_MakeAvailable(benchmark)

add_executable(benchmarks)

target_sources(benchmarks PRIVATE ndarray_benchmark.cpp calculateeta_benchmark.cpp)
target_sources(benchmarks PRIVATE ndarray_benchmark.cpp calculateeta_benchmark.cpp reduce_benchmark.cpp)

# Link Google Benchmark and other necessary libraries
target_link_libraries(benchmarks PRIVATE benchmark::benchmark aare_core aare_compiler_flags)

168 benchmarks/reduce_benchmark.cpp Normal file
@@ -0,0 +1,168 @@
|
||||
#include "aare/Cluster.hpp"
|
||||
#include <benchmark/benchmark.h>
|
||||
|
||||
using namespace aare;
|
||||
|
||||
class ClustersForReduceFixture : public benchmark::Fixture {
|
||||
public:
|
||||
Cluster<int, 5, 5> cluster_5x5{};
|
||||
Cluster<int, 3, 3> cluster_3x3{};
|
||||
|
||||
private:
|
||||
using benchmark::Fixture::SetUp;
|
||||
|
||||
void SetUp([[maybe_unused]] const benchmark::State &state) override {
|
||||
int temp_data[25] = {1, 1, 1, 1, 1, 1, 1, 2, 1, 1,
|
||||
1, 2, 3, 1, 2, 1, 1, 1, 1, 2};
|
||||
std::copy(std::begin(temp_data), std::end(temp_data),
|
||||
std::begin(cluster_5x5.data));
|
||||
|
||||
cluster_5x5.x = 5;
|
||||
cluster_5x5.y = 5;
|
||||
|
||||
int temp_data2[9] = {1, 1, 1, 2, 3, 1, 2, 2, 1};
|
||||
std::copy(std::begin(temp_data2), std::end(temp_data2),
|
||||
std::begin(cluster_3x3.data));
|
||||
|
||||
cluster_3x3.x = 5;
|
||||
cluster_3x3.y = 5;
|
||||
}
|
||||
|
||||
// void TearDown(::benchmark::State& state) {
|
||||
// }
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
Cluster<T, 3, 3, int16_t> reduce_to_3x3(const Cluster<T, 5, 5, int16_t> &c) {
|
||||
Cluster<T, 3, 3, int16_t> result;
|
||||
|
||||
// Write out the sums in the hope that the compiler can optimize this
|
||||
std::array<T, 9> sum_3x3_subclusters;
|
||||
|
||||
// Write out the sums in the hope that the compiler can optimize this
|
||||
sum_3x3_subclusters[0] = c.data[0] + c.data[1] + c.data[2] + c.data[5] +
|
||||
c.data[6] + c.data[7] + c.data[10] + c.data[11] +
|
||||
c.data[12];
|
||||
sum_3x3_subclusters[1] = c.data[1] + c.data[2] + c.data[3] + c.data[6] +
|
||||
c.data[7] + c.data[8] + c.data[11] + c.data[12] +
|
||||
c.data[13];
|
||||
sum_3x3_subclusters[2] = c.data[2] + c.data[3] + c.data[4] + c.data[7] +
|
||||
c.data[8] + c.data[9] + c.data[12] + c.data[13] +
|
||||
c.data[14];
|
||||
sum_3x3_subclusters[3] = c.data[5] + c.data[6] + c.data[7] + c.data[10] +
|
||||
c.data[11] + c.data[12] + c.data[15] + c.data[16] +
|
||||
c.data[17];
|
||||
sum_3x3_subclusters[4] = c.data[6] + c.data[7] + c.data[8] + c.data[11] +
|
||||
c.data[12] + c.data[13] + c.data[16] + c.data[17] +
|
||||
c.data[18];
|
||||
sum_3x3_subclusters[5] = c.data[7] + c.data[8] + c.data[9] + c.data[12] +
|
||||
c.data[13] + c.data[14] + c.data[17] + c.data[18] +
|
||||
c.data[19];
|
||||
sum_3x3_subclusters[6] = c.data[10] + c.data[11] + c.data[12] + c.data[15] +
|
||||
c.data[16] + c.data[17] + c.data[20] + c.data[21] +
|
||||
c.data[22];
|
||||
sum_3x3_subclusters[7] = c.data[11] + c.data[12] + c.data[13] + c.data[16] +
|
||||
c.data[17] + c.data[18] + c.data[21] + c.data[22] +
|
||||
c.data[23];
|
||||
sum_3x3_subclusters[8] = c.data[12] + c.data[13] + c.data[14] + c.data[17] +
|
||||
c.data[18] + c.data[19] + c.data[22] + c.data[23] +
|
||||
c.data[24];
|
||||
|
||||
auto index = std::max_element(sum_3x3_subclusters.begin(),
|
||||
sum_3x3_subclusters.end()) -
|
||||
sum_3x3_subclusters.begin();
|
||||
|
||||
switch (index) {
|
||||
case 0:
|
||||
result.x = c.x - 1;
|
||||
result.y = c.y + 1;
|
||||
result.data = {c.data[0], c.data[1], c.data[2], c.data[5], c.data[6],
|
||||
c.data[7], c.data[10], c.data[11], c.data[12]};
|
||||
break;
|
||||
case 1:
|
||||
result.x = c.x;
|
||||
result.y = c.y + 1;
|
||||
result.data = {c.data[1], c.data[2], c.data[3], c.data[6], c.data[7],
|
||||
c.data[8], c.data[11], c.data[12], c.data[13]};
|
||||
break;
|
||||
case 2:
|
||||
result.x = c.x + 1;
|
||||
result.y = c.y + 1;
|
||||
result.data = {c.data[2], c.data[3], c.data[4], c.data[7], c.data[8],
|
||||
c.data[9], c.data[12], c.data[13], c.data[14]};
|
||||
break;
|
||||
case 3:
|
||||
result.x = c.x - 1;
|
||||
result.y = c.y;
|
||||
result.data = {c.data[5], c.data[6], c.data[7],
|
||||
c.data[10], c.data[11], c.data[12],
|
||||
c.data[15], c.data[16], c.data[17]};
|
||||
break;
|
||||
case 4:
|
||||
result.x = c.x + 1;
|
||||
result.y = c.y;
|
||||
result.data = {c.data[6], c.data[7], c.data[8],
|
||||
c.data[11], c.data[12], c.data[13],
|
||||
c.data[16], c.data[17], c.data[18]};
|
||||
break;
|
||||
case 5:
|
||||
result.x = c.x + 1;
|
||||
result.y = c.y;
|
||||
result.data = {c.data[7], c.data[8], c.data[9],
|
||||
c.data[12], c.data[13], c.data[14],
|
||||
c.data[17], c.data[18], c.data[19]};
|
||||
break;
|
||||
case 6:
|
||||
result.x = c.x + 1;
|
||||
result.y = c.y - 1;
|
||||
result.data = {c.data[10], c.data[11], c.data[12],
|
||||
c.data[15], c.data[16], c.data[17],
|
||||
c.data[20], c.data[21], c.data[22]};
|
||||
break;
|
||||
case 7:
|
||||
result.x = c.x + 1;
|
||||
result.y = c.y - 1;
|
||||
result.data = {c.data[11], c.data[12], c.data[13],
|
||||
c.data[16], c.data[17], c.data[18],
|
||||
c.data[21], c.data[22], c.data[23]};
|
||||
break;
|
||||
case 8:
|
||||
result.x = c.x + 1;
|
||||
result.y = c.y - 1;
|
||||
result.data = {c.data[12], c.data[13], c.data[14],
|
||||
c.data[17], c.data[18], c.data[19],
|
||||
c.data[22], c.data[23], c.data[24]};
|
||||
break;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
BENCHMARK_F(ClustersForReduceFixture, Reduce2x2)(benchmark::State &st) {
|
||||
for (auto _ : st) {
|
||||
// This code gets timed
|
||||
benchmark::DoNotOptimize(reduce_to_2x2<int, 3, 3, int16_t>(
|
||||
cluster_3x3)); // make sure compiler evaluates the expression
|
||||
}
|
||||
}
|
||||
|
||||
BENCHMARK_F(ClustersForReduceFixture, SpecificReduce2x2)(benchmark::State &st) {
|
||||
for (auto _ : st) {
|
||||
// This code gets timed
|
||||
benchmark::DoNotOptimize(reduce_to_2x2<int>(cluster_3x3));
|
||||
}
|
||||
}
|
||||
|
||||
BENCHMARK_F(ClustersForReduceFixture, Reduce3x3)(benchmark::State &st) {
|
||||
for (auto _ : st) {
|
||||
// This code gets timed
|
||||
benchmark::DoNotOptimize(
|
||||
reduce_to_3x3<int, 5, 5, int16_t>(cluster_5x5));
|
||||
}
|
||||
}
|
||||
|
||||
BENCHMARK_F(ClustersForReduceFixture, SpecificReduce3x3)(benchmark::State &st) {
|
||||
for (auto _ : st) {
|
||||
// This code gets timed
|
||||
benchmark::DoNotOptimize(reduce_to_3x3<int>(cluster_5x5));
|
||||
}
|
||||
}
|
||||
@@ -3,3 +3,14 @@ python:
  - 3.12
  - 3.13

c_compiler:
  - gcc        # [linux]

c_stdlib:
  - sysroot    # [linux]

cxx_compiler:
  - gxx        # [linux]

c_stdlib_version:  # [linux]
  - 2.17           # [linux]

@@ -16,6 +16,8 @@ build:

requirements:
  build:
    - {{ compiler('c') }}
    - {{ stdlib("c") }}
    - {{ compiler('cxx') }}
    - cmake
    - ninja
@@ -12,4 +12,11 @@ ClusterVector
   :members:
   :undoc-members:
   :private-members:


**Free Functions:**

.. doxygenfunction:: aare::reduce_to_3x3(const ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>>&)

.. doxygenfunction:: aare::reduce_to_2x2(const ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>>&)

@@ -33,4 +33,17 @@ C++ functions that support the ClusterVector or to view it as a numpy array.
   :members:
   :undoc-members:
   :show-inheritance:
   :inherited-members:
   :inherited-members:


**Free Functions:**

.. autofunction:: reduce_to_3x3
   :noindex:

   Reduce a single Cluster to 3x3 by taking the 3x3 subcluster with the highest photon energy.

.. autofunction:: reduce_to_2x2
   :noindex:

   Reduce a single Cluster to 2x2 by taking the 2x2 subcluster with the highest photon energy.
@@ -17,8 +17,24 @@ Functions for applying calibration to data.

    # Apply calibration to raw data to convert from raw ADC values to keV
    data = aare.apply_calibration(raw_data, pd=pedestal, cal=calibration)

    # If you pass a 2D pedestal and calibration, only G0 will be used for the conversion.
    # Pixels that switched to G1 or G2 will be set to 0.
    data = aare.apply_calibration(raw_data, pd=pedestal[0], cal=calibration[0])


.. py:currentmodule:: aare

.. autofunction:: apply_calibration

.. autofunction:: load_calibration

.. autofunction:: calculate_pedestal

.. autofunction:: calculate_pedestal_float

.. autofunction:: calculate_pedestal_g0

.. autofunction:: calculate_pedestal_g0_float

.. autofunction:: count_switching_pixels
@@ -28,7 +28,7 @@ enum class pixel : int {
|
||||
template <typename T> struct Eta2 {
|
||||
double x;
|
||||
double y;
|
||||
int c;
|
||||
int c{0};
|
||||
T sum;
|
||||
};
|
||||
|
||||
@@ -70,6 +70,8 @@ calculate_eta2(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cl) {
|
||||
size_t index_bottom_left_max_2x2_subcluster =
|
||||
(int(c / (ClusterSizeX - 1))) * ClusterSizeX + c % (ClusterSizeX - 1);
|
||||
|
||||
// calculate direction of gradient
|
||||
|
||||
// check that cluster center is in max subcluster
|
||||
if (cluster_center_index != index_bottom_left_max_2x2_subcluster &&
|
||||
cluster_center_index != index_bottom_left_max_2x2_subcluster + 1 &&
|
||||
@@ -128,12 +130,15 @@ Eta2<T> calculate_eta2(const Cluster<T, 2, 2, int16_t> &cl) {
|
||||
Eta2<T> eta{};
|
||||
|
||||
if ((cl.data[0] + cl.data[1]) != 0)
|
||||
eta.x = static_cast<double>(cl.data[1]) / (cl.data[0] + cl.data[1]);
|
||||
eta.x = static_cast<double>(cl.data[1]) /
|
||||
(cl.data[0] + cl.data[1]); // between (0,1) the closer to zero
|
||||
// left value probably larger
|
||||
if ((cl.data[0] + cl.data[2]) != 0)
|
||||
eta.y = static_cast<double>(cl.data[2]) / (cl.data[0] + cl.data[2]);
|
||||
eta.y = static_cast<double>(cl.data[2]) /
|
||||
(cl.data[0] + cl.data[2]); // between (0,1) the closer to zero
|
||||
// bottom value probably larger
|
||||
eta.sum = cl.sum();
|
||||
eta.c = static_cast<int>(corner::cBottomLeft); // TODO! This is not correct,
|
||||
// but need to put something
|
||||
|
||||
return eta;
|
||||
}
|
||||
|
||||
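A quick worked example of the 2x2 eta above (illustrative values only, not from any test data):

// Given cl.data = {10, 30, 20, 40}:
//   eta.x = cl.data[1] / (cl.data[0] + cl.data[1]) = 30 / 40  = 0.75
//   eta.y = cl.data[2] / (cl.data[0] + cl.data[2]) = 20 / 30 ≈ 0.667
//   eta.sum = cl.sum(), eta.c = cBottomLeft (placeholder, as noted above)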
@@ -150,13 +155,11 @@ template <typename T> Eta2<T> calculate_eta3(const Cluster<T, 3, 3> &cl) {
|
||||
|
||||
eta.sum = sum;
|
||||
|
||||
eta.c = corner::cBottomLeft;
|
||||
|
||||
if ((cl.data[3] + cl.data[4] + cl.data[5]) != 0)
|
||||
|
||||
eta.x = static_cast<double>(-cl.data[3] + cl.data[3 + 2]) /
|
||||
|
||||
(cl.data[3] + cl.data[4] + cl.data[5]);
|
||||
(cl.data[3] + cl.data[4] + cl.data[5]); // (-1,1)
|
||||
|
||||
if ((cl.data[1] + cl.data[4] + cl.data[7]) != 0)
|
||||
|
||||
|
||||
158 include/aare/Cluster.hpp Normal file → Executable file
@@ -8,6 +8,7 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "logger.hpp"
|
||||
#include <algorithm>
|
||||
#include <array>
|
||||
#include <cstdint>
|
||||
@@ -74,6 +75,163 @@ struct Cluster {
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Reduce a cluster to a 2x2 cluster by selecting the 2x2 block with the
|
||||
* highest sum.
|
||||
* @param c Cluster to reduce
|
||||
* @return reduced cluster
|
||||
*/
|
||||
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType = int16_t>
|
||||
Cluster<T, 2, 2, CoordType>
|
||||
reduce_to_2x2(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &c) {
|
||||
|
||||
static_assert(ClusterSizeX >= 2 && ClusterSizeY >= 2,
|
||||
"Cluster sizes must be at least 2x2 for reduction to 2x2");
|
||||
|
||||
// TODO maybe add sanity check and check that center is in max subcluster
|
||||
Cluster<T, 2, 2, CoordType> result;
|
||||
|
||||
auto [sum, index] = c.max_sum_2x2();
|
||||
|
||||
int16_t cluster_center_index =
|
||||
(ClusterSizeX / 2) + (ClusterSizeY / 2) * ClusterSizeX;
|
||||
|
||||
int16_t index_bottom_left_max_2x2_subcluster =
|
||||
(int(index / (ClusterSizeX - 1))) * ClusterSizeX +
|
||||
index % (ClusterSizeX - 1);
|
||||
|
||||
result.x =
|
||||
c.x + (index_bottom_left_max_2x2_subcluster - cluster_center_index) %
|
||||
ClusterSizeX;
|
||||
|
||||
result.y =
|
||||
c.y - (index_bottom_left_max_2x2_subcluster - cluster_center_index) /
|
||||
ClusterSizeX;
|
||||
result.data = {
|
||||
c.data[index_bottom_left_max_2x2_subcluster],
|
||||
c.data[index_bottom_left_max_2x2_subcluster + 1],
|
||||
c.data[index_bottom_left_max_2x2_subcluster + ClusterSizeX],
|
||||
c.data[index_bottom_left_max_2x2_subcluster + ClusterSizeX + 1]};
|
||||
return result;
|
||||
}
|
||||
|
||||
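A minimal usage sketch for the reduction above (illustrative values; assumes the Cluster fields x, y and data as defined earlier in this header):

#include "aare/Cluster.hpp"

void example_reduce_to_2x2() {
    aare::Cluster<int, 3, 3> cl{};
    cl.x = 10;
    cl.y = 20;
    cl.data = {1, 1, 1,
               2, 3, 1,
               2, 2, 1};
    // Picks the 2x2 block with the highest sum; x/y are shifted accordingly.
    auto small = aare::reduce_to_2x2(cl);
    static_cast<void>(small);
}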
template <typename T>
|
||||
Cluster<T, 2, 2, int16_t> reduce_to_2x2(const Cluster<T, 3, 3, int16_t> &c) {
|
||||
Cluster<T, 2, 2, int16_t> result;
|
||||
|
||||
auto [s, i] = c.max_sum_2x2();
|
||||
switch (i) {
|
||||
case 0:
|
||||
result.x = c.x - 1;
|
||||
result.y = c.y + 1;
|
||||
result.data = {c.data[0], c.data[1], c.data[3], c.data[4]};
|
||||
break;
|
||||
case 1:
|
||||
result.x = c.x;
|
||||
result.y = c.y + 1;
|
||||
result.data = {c.data[1], c.data[2], c.data[4], c.data[5]};
|
||||
break;
|
||||
case 2:
|
||||
result.x = c.x - 1;
|
||||
result.y = c.y;
|
||||
result.data = {c.data[3], c.data[4], c.data[6], c.data[7]};
|
||||
break;
|
||||
case 3:
|
||||
result.x = c.x;
|
||||
result.y = c.y;
|
||||
result.data = {c.data[4], c.data[5], c.data[7], c.data[8]};
|
||||
break;
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType = int16_t>
|
||||
inline std::pair<T, uint16_t>
|
||||
max_3x3_sum(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cluster) {
|
||||
|
||||
if constexpr (ClusterSizeX == 3 && ClusterSizeY == 3) {
|
||||
return std::make_pair(cluster.sum(), 0);
|
||||
} else {
|
||||
|
||||
size_t index = 0;
|
||||
T max_3x3_subcluster_sum = 0;
|
||||
for (size_t i = 0; i < ClusterSizeY - 2; ++i) {
|
||||
for (size_t j = 0; j < ClusterSizeX - 2; ++j) {
|
||||
|
||||
T sum = cluster.data[i * ClusterSizeX + j] +
|
||||
cluster.data[i * ClusterSizeX + j + 1] +
|
||||
cluster.data[i * ClusterSizeX + j + 2] +
|
||||
cluster.data[(i + 1) * ClusterSizeX + j] +
|
||||
cluster.data[(i + 1) * ClusterSizeX + j + 1] +
|
||||
cluster.data[(i + 1) * ClusterSizeX + j + 2] +
|
||||
cluster.data[(i + 2) * ClusterSizeX + j] +
|
||||
cluster.data[(i + 2) * ClusterSizeX + j + 1] +
|
||||
cluster.data[(i + 2) * ClusterSizeX + j + 2];
|
||||
if (sum > max_3x3_subcluster_sum) {
|
||||
max_3x3_subcluster_sum = sum;
|
||||
index = i * (ClusterSizeX - 2) + j;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return std::make_pair(max_3x3_subcluster_sum, index);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Reduce a cluster to a 3x3 cluster by selecting the 3x3 block with the
|
||||
* highest sum.
|
||||
* @param c Cluster to reduce
|
||||
* @return reduced cluster
|
||||
*/
|
||||
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType = int16_t>
|
||||
Cluster<T, 3, 3, CoordType>
|
||||
reduce_to_3x3(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &c) {
|
||||
|
||||
static_assert(ClusterSizeX >= 3 && ClusterSizeY >= 3,
|
||||
"Cluster sizes must be at least 3x3 for reduction to 3x3");
|
||||
|
||||
Cluster<T, 3, 3, CoordType> result;
|
||||
|
||||
// TODO maybe add sanity check and check that center is in max subcluster
|
||||
|
||||
auto [sum, index] = max_3x3_sum(c);
|
||||
|
||||
int16_t cluster_center_index =
|
||||
(ClusterSizeX / 2) + (ClusterSizeY / 2) * ClusterSizeX;
|
||||
|
||||
int16_t index_center_max_3x3_subcluster =
|
||||
(int(index / (ClusterSizeX - 2))) * ClusterSizeX + ClusterSizeX +
|
||||
index % (ClusterSizeX - 2) + 1;
|
||||
|
||||
int16_t index_3x3_subcluster_cluster_center =
|
||||
int((cluster_center_index - 1 - ClusterSizeX) / ClusterSizeX) *
|
||||
(ClusterSizeX - 2) +
|
||||
(cluster_center_index - 1 - ClusterSizeX) % ClusterSizeX;
|
||||
|
||||
result.x =
|
||||
c.x + (index % (ClusterSizeX - 2) -
|
||||
(index_3x3_subcluster_cluster_center % (ClusterSizeX - 2)));
|
||||
result.y =
|
||||
c.y - (index / (ClusterSizeX - 2) -
|
||||
(index_3x3_subcluster_cluster_center / (ClusterSizeX - 2)));
|
||||
|
||||
result.data = {c.data[index_center_max_3x3_subcluster - ClusterSizeX - 1],
|
||||
c.data[index_center_max_3x3_subcluster - ClusterSizeX],
|
||||
c.data[index_center_max_3x3_subcluster - ClusterSizeX + 1],
|
||||
c.data[index_center_max_3x3_subcluster - 1],
|
||||
c.data[index_center_max_3x3_subcluster],
|
||||
c.data[index_center_max_3x3_subcluster + 1],
|
||||
c.data[index_center_max_3x3_subcluster + ClusterSizeX - 1],
|
||||
c.data[index_center_max_3x3_subcluster + ClusterSizeX],
|
||||
c.data[index_center_max_3x3_subcluster + ClusterSizeX + 1]};
|
||||
return result;
|
||||
}
|
||||
|
||||
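A minimal usage sketch of reduce_to_3x3 (illustrative values, loosely mirroring the Python test added further down in this change set):

#include "aare/Cluster.hpp"

void example_reduce_to_3x3() {
    aare::Cluster<int, 5, 5> big{};
    big.x = 10;
    big.y = 20;
    big.data = {1, 1, 1, 1, 1,
                1, 2, 1, 1, 1,
                2, 2, 3, 1, 1,
                1, 2, 1, 1, 1,
                1, 1, 1, 1, 1};
    // max_3x3_sum scans every 3x3 sub-block; the block with the highest sum
    // becomes the new cluster, with x/y shifted to its centre.
    auto small = aare::reduce_to_3x3(big);
    static_cast<void>(small);
}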
// Type Traits for is_cluster_type
|
||||
template <typename T>
|
||||
struct is_cluster : std::false_type {}; // Default case: Not a Cluster
|
||||
|
||||
@@ -144,9 +144,9 @@ class ClusterFinder {
|
||||
static_cast<CT>(
|
||||
m_pedestal.mean(iy + ir, ix + ic));
|
||||
cluster.data[i] =
|
||||
tmp; // Watch for out of bounds access
|
||||
i++;
|
||||
tmp; // Watch for out of bounds access
|
||||
}
|
||||
i++;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -32,8 +32,7 @@ class ClusterVector; // Forward declaration
|
||||
*/
|
||||
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType>
|
||||
class ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>>
|
||||
{
|
||||
class ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> {
|
||||
|
||||
std::vector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> m_data{};
|
||||
int32_t m_frame_number{0}; // TODO! Check frame number size and type
|
||||
@@ -173,4 +172,40 @@ class ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>>
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Reduce a cluster to a 2x2 cluster by selecting the 2x2 block with the
|
||||
* highest sum.
|
||||
* @param cv Clustervector containing clusters to reduce
|
||||
* @return Clustervector with reduced clusters
|
||||
*/
|
||||
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType = uint16_t>
|
||||
ClusterVector<Cluster<T, 2, 2, CoordType>> reduce_to_2x2(
|
||||
const ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>>
|
||||
&cv) {
|
||||
ClusterVector<Cluster<T, 2, 2, CoordType>> result;
|
||||
for (const auto &c : cv) {
|
||||
result.push_back(reduce_to_2x2(c));
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Reduce a cluster to a 3x3 cluster by selecting the 3x3 block with the
|
||||
* highest sum.
|
||||
* @param cv Clustervector containing clusters to reduce
|
||||
* @return Clustervector with reduced clusters
|
||||
*/
|
||||
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType = uint16_t>
|
||||
ClusterVector<Cluster<T, 3, 3, CoordType>> reduce_to_3x3(
|
||||
const ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>>
|
||||
&cv) {
|
||||
ClusterVector<Cluster<T, 3, 3, CoordType>> result;
|
||||
for (const auto &c : cv) {
|
||||
result.push_back(reduce_to_3x3(c));
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
} // namespace aare
|
||||
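A short sketch of the vector-level reductions above (a sketch only; the include path and the way cv is filled are assumptions):

#include "aare/ClusterVector.hpp"
#include <cstdint>

void example_reduce_vector(
    const aare::ClusterVector<aare::Cluster<int, 5, 5, uint16_t>> &cv) {
    // Each cluster in cv is reduced independently; the result preserves order.
    auto cv3x3 = aare::reduce_to_3x3(cv); // ClusterVector<Cluster<int, 3, 3, uint16_t>>
    auto cv2x2 = aare::reduce_to_2x2(cv); // ClusterVector<Cluster<int, 2, 2, uint16_t>>
    static_cast<void>(cv3x3);
    static_cast<void>(cv2x2);
}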
@@ -105,7 +105,7 @@ class Frame {
|
||||
* @tparam T type of the pixels
|
||||
* @return NDView<T, 2>
|
||||
*/
|
||||
template <typename T> NDView<T, 2> view() {
|
||||
template <typename T> NDView<T, 2> view() & {
|
||||
std::array<ssize_t, 2> shape = {static_cast<ssize_t>(m_rows),
|
||||
static_cast<ssize_t>(m_cols)};
|
||||
T *data = reinterpret_cast<T *>(m_data);
|
||||
|
||||
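The trailing `&` ref-qualifier added above is what the release note about view() requiring an lvalue refers to. A minimal stand-alone sketch of the idiom (hypothetical Owner type, not part of aare):

struct Owner {
    int data[4]{};
    int *view() & { return data; } // only callable on lvalues
};

void example_ref_qualifier() {
    Owner o;
    int *v = o.view();            // ok: o outlives the view
    static_cast<void>(v);
    // int *bad = Owner{}.view(); // does not compile: temporary would dangle
}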
@@ -25,7 +25,7 @@ template <typename T, ssize_t Ndim = 2>
|
||||
class NDArray : public ArrayExpr<NDArray<T, Ndim>, Ndim> {
|
||||
std::array<ssize_t, Ndim> shape_;
|
||||
std::array<ssize_t, Ndim> strides_;
|
||||
size_t size_{};
|
||||
size_t size_{}; //TODO! do we need to store size when we have shape?
|
||||
T *data_;
|
||||
|
||||
public:
|
||||
@@ -33,7 +33,7 @@ class NDArray : public ArrayExpr<NDArray<T, Ndim>, Ndim> {
|
||||
* @brief Default constructor. Will construct an empty NDArray.
|
||||
*
|
||||
*/
|
||||
NDArray() : shape_(), strides_(c_strides<Ndim>(shape_)), data_(nullptr){};
|
||||
NDArray() : shape_(), strides_(c_strides<Ndim>(shape_)), data_(nullptr) {};
|
||||
|
||||
/**
|
||||
* @brief Construct a new NDArray object with a given shape.
|
||||
@@ -43,8 +43,7 @@ class NDArray : public ArrayExpr<NDArray<T, Ndim>, Ndim> {
|
||||
*/
|
||||
explicit NDArray(std::array<ssize_t, Ndim> shape)
|
||||
: shape_(shape), strides_(c_strides<Ndim>(shape_)),
|
||||
size_(std::accumulate(shape_.begin(), shape_.end(), 1,
|
||||
std::multiplies<>())),
|
||||
size_(num_elements(shape_)),
|
||||
data_(new T[size_]) {}
|
||||
|
||||
/**
|
||||
@@ -79,6 +78,24 @@ class NDArray : public ArrayExpr<NDArray<T, Ndim>, Ndim> {
|
||||
other.reset(); // TODO! is this necessary?
|
||||
}
|
||||
|
||||
|
||||
// Move constructor from an array with Ndim + 1
|
||||
template <ssize_t M, typename = std::enable_if_t<(M == Ndim + 1)>>
|
||||
NDArray(NDArray<T, M> &&other)
|
||||
: shape_(drop_first_dim(other.shape())),
|
||||
strides_(c_strides<Ndim>(shape_)), size_(num_elements(shape_)),
|
||||
data_(other.data()) {
|
||||
|
||||
// For now only allow move if the size matches, to avoid unreachable data
|
||||
// if the use case arises we can remove this check
|
||||
if(size() != other.size()) {
|
||||
data_ = nullptr; // avoid double free, other will clean up the memory in its destructor
|
||||
throw std::runtime_error(LOCATION +
|
||||
"Size mismatch in move constructor of NDArray<T, Ndim-1>");
|
||||
}
|
||||
other.reset();
|
||||
}
|
||||
|
||||
// Copy constructor
|
||||
NDArray(const NDArray &other)
|
||||
: shape_(other.shape_), strides_(c_strides<Ndim>(shape_)),
|
||||
@@ -380,12 +397,6 @@ NDArray<T, Ndim> NDArray<T, Ndim>::operator*(const T &value) {
|
||||
result *= value;
|
||||
return result;
|
||||
}
|
||||
// template <typename T, ssize_t Ndim> void NDArray<T, Ndim>::Print() {
|
||||
// if (shape_[0] < 20 && shape_[1] < 20)
|
||||
// Print_all();
|
||||
// else
|
||||
// Print_some();
|
||||
// }
|
||||
|
||||
template <typename T, ssize_t Ndim>
|
||||
std::ostream &operator<<(std::ostream &os, const NDArray<T, Ndim> &arr) {
|
||||
@@ -437,4 +448,23 @@ NDArray<T, Ndim> load(const std::string &pathname,
|
||||
return img;
|
||||
}
|
||||
|
||||
template <typename RT, typename NT, typename DT, ssize_t Ndim>
|
||||
NDArray<RT, Ndim> safe_divide(const NDArray<NT, Ndim> &numerator,
|
||||
const NDArray<DT, Ndim> &denominator) {
|
||||
if (numerator.shape() != denominator.shape()) {
|
||||
throw std::runtime_error(
|
||||
"Shapes of numerator and denominator must match");
|
||||
}
|
||||
NDArray<RT, Ndim> result(numerator.shape());
|
||||
for (ssize_t i = 0; i < numerator.size(); ++i) {
|
||||
if (denominator[i] != 0) {
|
||||
result[i] =
|
||||
static_cast<RT>(numerator[i]) / static_cast<RT>(denominator[i]);
|
||||
} else {
|
||||
result[i] = RT{0}; // or handle division by zero as needed
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
} // namespace aare
|
||||
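A small usage sketch for safe_divide (this is essentially how calculate_pedestal in calibration.hpp turns per-gain sums and counts into means; shapes are illustrative):

#include "aare/NDArray.hpp"
#include <array>

void example_safe_divide() {
    aare::NDArray<size_t, 3> sum(std::array<ssize_t, 3>{3, 512, 1024}, 0);
    aare::NDArray<size_t, 3> count(std::array<ssize_t, 3>{3, 512, 1024}, 0);
    // ... accumulate raw values into sum and occurrences into count ...
    // Element-wise sum / count, with 0 wherever count is 0.
    auto pedestal = aare::safe_divide<double>(sum, count); // NDArray<double, 3>
    static_cast<void>(pedestal);
}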
@@ -26,6 +26,33 @@ Shape<Ndim> make_shape(const std::vector<size_t> &shape) {
|
||||
return arr;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* @brief Helper function to drop the first dimension of a shape.
|
||||
* This is useful when you want to create a 2D view from a 3D array.
|
||||
* @param shape The shape to drop the first dimension from.
|
||||
* @return A new shape with the first dimension dropped.
|
||||
*/
|
||||
template<size_t Ndim>
|
||||
Shape<Ndim-1> drop_first_dim(const Shape<Ndim> &shape) {
|
||||
static_assert(Ndim > 1, "Cannot drop first dimension from a 1D shape");
|
||||
Shape<Ndim - 1> new_shape;
|
||||
std::copy(shape.begin() + 1, shape.end(), new_shape.begin());
|
||||
return new_shape;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Helper function when constructing NDArray/NDView. Calculates the number
|
||||
* of elements in the resulting array from a shape.
|
||||
* @param shape The shape to calculate the number of elements for.
|
||||
* @return The number of elements in an NDArray/NDView of that shape.
|
||||
*/
|
||||
template <size_t Ndim>
|
||||
size_t num_elements(const Shape<Ndim> &shape) {
|
||||
return std::accumulate(shape.begin(), shape.end(), 1,
|
||||
std::multiplies<size_t>());
|
||||
}
|
||||
|
||||
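A quick illustration of the two helpers above (assuming Shape<N> is the std::array-based shape alias that make_shape returns):

void example_shape_helpers() {
    aare::Shape<3> s{10, 512, 1024};
    auto row_shape = aare::drop_first_dim(s);  // Shape<2>{512, 1024}
    auto n = aare::num_elements(row_shape);    // 512 * 1024 = 524288
    static_cast<void>(n);
}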
template <ssize_t Dim = 0, typename Strides>
|
||||
ssize_t element_offset(const Strides & /*unused*/) {
|
||||
return 0;
|
||||
@@ -67,16 +94,33 @@ class NDView : public ArrayExpr<NDView<T, Ndim>, Ndim> {
|
||||
size_(std::accumulate(std::begin(shape), std::end(shape), 1,
|
||||
std::multiplies<>())) {}
|
||||
|
||||
// stride-aware constructor
|
||||
NDView(T* buffer, std::array<ssize_t, Ndim> shape, std::array<ssize_t, Ndim> strides)
|
||||
: buffer_(buffer), shape_(shape), strides_(strides),
|
||||
size_(std::accumulate(std::begin(shape), std::end(shape), 1,
|
||||
std::multiplies<>())) {}
|
||||
|
||||
template <typename... Ix>
|
||||
std::enable_if_t<sizeof...(Ix) == Ndim, T &> operator()(Ix... index) {
|
||||
return buffer_[element_offset(strides_, index...)];
|
||||
}
|
||||
|
||||
template <typename... Ix>
|
||||
const std::enable_if_t<sizeof...(Ix) == Ndim, T &> operator()(Ix... index) const {
|
||||
std::enable_if_t<sizeof...(Ix) == 1 && (Ndim > 1), NDView<T, Ndim - 1>> operator()(Ix... index) {
|
||||
// return a view of the next dimension
|
||||
std::array<ssize_t, Ndim - 1> new_shape{};
|
||||
std::copy_n(shape_.begin() + 1, Ndim - 1, new_shape.begin());
|
||||
return NDView<T, Ndim - 1>(&buffer_[element_offset(strides_, index...)],
|
||||
new_shape);
|
||||
|
||||
}
|
||||
|
||||
template <typename... Ix>
|
||||
std::enable_if_t<sizeof...(Ix) == Ndim, const T &> operator()(Ix... index) const {
|
||||
return buffer_[element_offset(strides_, index...)];
|
||||
}
|
||||
|
||||
|
||||
ssize_t size() const { return static_cast<ssize_t>(size_); }
|
||||
size_t total_bytes() const { return size_ * sizeof(T); }
|
||||
std::array<ssize_t, Ndim> strides() const noexcept { return strides_; }
|
||||
@@ -85,9 +129,19 @@ class NDView : public ArrayExpr<NDView<T, Ndim>, Ndim> {
|
||||
T *end() { return buffer_ + size_; }
|
||||
T const *begin() const { return buffer_; }
|
||||
T const *end() const { return buffer_ + size_; }
|
||||
T &operator()(ssize_t i) { return buffer_[i]; }
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* @brief Access element at index i.
|
||||
*/
|
||||
T &operator[](ssize_t i) { return buffer_[i]; }
|
||||
const T &operator()(ssize_t i) const { return buffer_[i]; }
|
||||
|
||||
/**
|
||||
* @brief Access element at index i.
|
||||
*/
|
||||
const T &operator[](ssize_t i) const { return buffer_[i]; }
|
||||
|
||||
bool operator==(const NDView &other) const {
|
||||
@@ -157,6 +211,22 @@ class NDView : public ArrayExpr<NDView<T, Ndim>, Ndim> {
|
||||
const T *data() const { return buffer_; }
|
||||
void print_all() const;
|
||||
|
||||
/**
|
||||
* @brief Create a subview of a range of the first dimension.
|
||||
* This is useful for splitting a batch of frames for parallel processing.
|
||||
* @param first The first index of the subview (inclusive).
|
||||
* @param last The last index of the subview (exclusive).
|
||||
* @return A new NDView that is a subview of the current view.
|
||||
* @throws std::runtime_error if the range is invalid.
|
||||
*/
|
||||
NDView sub_view(ssize_t first, ssize_t last) const {
|
||||
if (first < 0 || last > shape_[0] || first >= last)
|
||||
throw std::runtime_error(LOCATION + "Invalid sub_view range");
|
||||
auto new_shape = shape_;
|
||||
new_shape[0] = last - first;
|
||||
return NDView(buffer_ + first * strides_[0], new_shape);
|
||||
}
|
||||
|
||||
private:
|
||||
T *buffer_{nullptr};
|
||||
std::array<ssize_t, Ndim> strides_{};
|
||||
|
||||
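A small sketch of sub_view used to chunk frames (shapes illustrative; this is the pattern make_subviews in utils/par.hpp builds on):

#include "aare/NDView.hpp"
#include <cstdint>
#include <vector>

void example_sub_view() {
    // View over a raw buffer of 100 frames of 512 x 1024 pixels.
    std::vector<uint16_t> buffer(100 * 512 * 1024);
    aare::NDView<uint16_t, 3> all(buffer.data(), {100, 512, 1024});
    auto first_half  = all.sub_view(0, 50);   // frames [0, 50)
    auto second_half = all.sub_view(50, 100); // frames [50, 100)
    static_cast<void>(first_half);
    static_cast<void>(second_half);
}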
@@ -42,14 +42,16 @@ class RawFileNameComponents {
|
||||
|
||||
class ScanParameters {
|
||||
bool m_enabled = false;
|
||||
std::string m_dac;
|
||||
DACIndex m_dac{};
|
||||
int m_start = 0;
|
||||
int m_stop = 0;
|
||||
int m_step = 0;
|
||||
// TODO! add settleTime, requires string to time conversion
|
||||
int64_t m_settleTime = 0; // [ns]
|
||||
|
||||
public:
|
||||
ScanParameters(const std::string &par);
|
||||
ScanParameters(const bool enabled, const DACIndex dac, const int start,
|
||||
const int stop, const int step, const int64_t settleTime);
|
||||
ScanParameters() = default;
|
||||
ScanParameters(const ScanParameters &) = default;
|
||||
ScanParameters &operator=(const ScanParameters &) = default;
|
||||
@@ -57,8 +59,9 @@ class ScanParameters {
|
||||
int start() const;
|
||||
int stop() const;
|
||||
int step() const;
|
||||
const std::string &dac() const;
|
||||
DACIndex dac() const;
|
||||
bool enabled() const;
|
||||
int64_t settleTime() const;
|
||||
void increment_stop();
|
||||
};
|
||||
|
||||
|
||||
@@ -240,14 +240,14 @@ template <typename T> void VarClusterFinder<T>::first_pass() {
|
||||
|
||||
for (ssize_t i = 0; i < original_.size(); ++i) {
|
||||
if (use_noise_map)
|
||||
threshold_ = 5 * noiseMap(i);
|
||||
binary_(i) = (original_(i) > threshold_);
|
||||
threshold_ = 5 * noiseMap[i];
|
||||
binary_[i] = (original_[i] > threshold_);
|
||||
}
|
||||
|
||||
for (int i = 0; i < shape_[0]; ++i) {
|
||||
for (int j = 0; j < shape_[1]; ++j) {
|
||||
|
||||
// do we have someting to process?
|
||||
// do we have something to process?
|
||||
if (binary_(i, j)) {
|
||||
auto tmp = check_neighbours(i, j);
|
||||
if (tmp != 0) {
|
||||
|
||||
@@ -1,6 +1,9 @@
|
||||
#pragma once
|
||||
|
||||
#include "aare/NDArray.hpp"
|
||||
#include "aare/NDView.hpp"
|
||||
#include "aare/defs.hpp"
|
||||
#include "aare/utils/par.hpp"
|
||||
#include "aare/utils/task.hpp"
|
||||
#include <cstdint>
|
||||
#include <future>
|
||||
@@ -55,32 +58,152 @@ ALWAYS_INLINE std::pair<uint16_t, int16_t> get_value_and_gain(uint16_t raw) {
|
||||
|
||||
template <class T>
|
||||
void apply_calibration_impl(NDView<T, 3> res, NDView<uint16_t, 3> raw_data,
|
||||
NDView<T, 3> ped, NDView<T, 3> cal, int start,
|
||||
int stop) {
|
||||
NDView<T, 3> ped, NDView<T, 3> cal, int start,
|
||||
int stop) {
|
||||
|
||||
for (int frame_nr = start; frame_nr != stop; ++frame_nr) {
|
||||
for (int row = 0; row != raw_data.shape(1); ++row) {
|
||||
for (int col = 0; col != raw_data.shape(2); ++col) {
|
||||
auto [value, gain] = get_value_and_gain(raw_data(frame_nr, row, col));
|
||||
auto [value, gain] =
|
||||
get_value_and_gain(raw_data(frame_nr, row, col));
|
||||
|
||||
// Using multiplication does not seem to speed up the code here
|
||||
// ADU/keV is the standard unit for the calibration which
|
||||
// means rewriting the formula is not worth it.
|
||||
res(frame_nr, row, col) =
|
||||
(value - ped(gain, row, col)) / cal(gain, row, col); //TODO! use multiplication
|
||||
(value - ped(gain, row, col)) / cal(gain, row, col);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template <class T>
|
||||
void apply_calibration_impl(NDView<T, 3> res, NDView<uint16_t, 3> raw_data,
|
||||
NDView<T, 2> ped, NDView<T, 2> cal, int start,
|
||||
int stop) {
|
||||
|
||||
for (int frame_nr = start; frame_nr != stop; ++frame_nr) {
|
||||
for (int row = 0; row != raw_data.shape(1); ++row) {
|
||||
for (int col = 0; col != raw_data.shape(2); ++col) {
|
||||
auto [value, gain] =
|
||||
get_value_and_gain(raw_data(frame_nr, row, col));
|
||||
|
||||
// Using multiplication does not seem to speed up the code here
|
||||
// ADU/keV is the standard unit for the calibration which
|
||||
// means rewriting the formula is not worth it.
|
||||
|
||||
// Set the value to 0 if the gain is not 0
|
||||
if (gain == 0)
|
||||
res(frame_nr, row, col) =
|
||||
(value - ped(row, col)) / cal(row, col);
|
||||
else
|
||||
res(frame_nr, row, col) = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template <class T, ssize_t Ndim = 3>
|
||||
void apply_calibration(NDView<T, 3> res, NDView<uint16_t, 3> raw_data,
|
||||
NDView<T, 3> ped, NDView<T, 3> cal,
|
||||
NDView<T, Ndim> ped, NDView<T, Ndim> cal,
|
||||
ssize_t n_threads = 4) {
|
||||
std::vector<std::future<void>> futures;
|
||||
futures.reserve(n_threads);
|
||||
auto limits = split_task(0, raw_data.shape(0), n_threads);
|
||||
for (const auto &lim : limits)
|
||||
futures.push_back(std::async(&apply_calibration_impl<T>, res, raw_data, ped, cal,
|
||||
lim.first, lim.second));
|
||||
futures.push_back(std::async(
|
||||
static_cast<void (*)(NDView<T, 3>, NDView<uint16_t, 3>,
|
||||
NDView<T, Ndim>, NDView<T, Ndim>, int, int)>(
|
||||
apply_calibration_impl),
|
||||
res, raw_data, ped, cal, lim.first, lim.second));
|
||||
for (auto &f : futures)
|
||||
f.get();
|
||||
}
|
||||
|
||||
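A minimal call sketch for the threaded dispatcher above (a sketch only; the calibration.hpp include path and NDArray::view() are assumed from the rest of this change):

#include "aare/NDArray.hpp"
#include "aare/calibration.hpp"
#include <cstdint>

void example_apply_calibration() {
    aare::NDArray<uint16_t, 3> raw({100, 512, 1024}); // frames x rows x cols
    aare::NDArray<double, 3> ped({3, 512, 1024});     // per-gain pedestal
    aare::NDArray<double, 3> cal({3, 512, 1024});     // per-gain calibration (ADU/keV)
    aare::NDArray<double, 3> res(raw.shape());

    // Full three-gain calibration on 4 threads; passing 2D ped/cal views
    // instead selects the G0-only overload and zeroes switched pixels.
    aare::apply_calibration<double>(res.view(), raw.view(), ped.view(),
                                    cal.view(), 4);
}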
template <bool only_gain0>
|
||||
std::pair<NDArray<size_t, 3>, NDArray<size_t, 3>>
|
||||
sum_and_count_per_gain(NDView<uint16_t, 3> raw_data) {
|
||||
constexpr ssize_t num_gains = only_gain0 ? 1 : 3;
|
||||
NDArray<size_t, 3> accumulator(
|
||||
std::array<ssize_t, 3>{num_gains, raw_data.shape(1), raw_data.shape(2)},
|
||||
0);
|
||||
NDArray<size_t, 3> count(
|
||||
std::array<ssize_t, 3>{num_gains, raw_data.shape(1), raw_data.shape(2)},
|
||||
0);
|
||||
for (int frame_nr = 0; frame_nr != raw_data.shape(0); ++frame_nr) {
|
||||
for (int row = 0; row != raw_data.shape(1); ++row) {
|
||||
for (int col = 0; col != raw_data.shape(2); ++col) {
|
||||
auto [value, gain] =
|
||||
get_value_and_gain(raw_data(frame_nr, row, col));
|
||||
if (gain != 0 && only_gain0)
|
||||
continue;
|
||||
accumulator(gain, row, col) += value;
|
||||
count(gain, row, col) += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return {std::move(accumulator), std::move(count)};
|
||||
}
|
||||
|
||||
template <typename T, bool only_gain0 = false>
|
||||
NDArray<T, 3 - static_cast<ssize_t>(only_gain0)>
|
||||
calculate_pedestal(NDView<uint16_t, 3> raw_data, ssize_t n_threads) {
|
||||
|
||||
constexpr ssize_t num_gains = only_gain0 ? 1 : 3;
|
||||
std::vector<std::future<std::pair<NDArray<size_t, 3>, NDArray<size_t, 3>>>>
|
||||
futures;
|
||||
futures.reserve(n_threads);
|
||||
|
||||
auto subviews = make_subviews(raw_data, n_threads);
|
||||
|
||||
for (auto view : subviews) {
|
||||
futures.push_back(std::async(
|
||||
static_cast<std::pair<NDArray<size_t, 3>, NDArray<size_t, 3>> (*)(
|
||||
NDView<uint16_t, 3>)>(&sum_and_count_per_gain<only_gain0>),
|
||||
view));
|
||||
}
|
||||
Shape<3> shape{num_gains, raw_data.shape(1), raw_data.shape(2)};
|
||||
NDArray<size_t, 3> accumulator(shape, 0);
|
||||
NDArray<size_t, 3> count(shape, 0);
|
||||
|
||||
// Combine the results from the futures
|
||||
for (auto &f : futures) {
|
||||
auto [acc, cnt] = f.get();
|
||||
accumulator += acc;
|
||||
count += cnt;
|
||||
}
|
||||
|
||||
|
||||
// Will move to a NDArray<T, 3 - static_cast<ssize_t>(only_gain0)>
|
||||
// if only_gain0 is true
|
||||
return safe_divide<T>(accumulator, count);
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Count the number of switching pixels in the raw data.
|
||||
 * This function counts, for each pixel, how many times it switches to G1 or G2 gain.
 * It returns an NDArray with the switch count per pixel.
|
||||
* @param raw_data The NDView containing the raw data
|
||||
* @return An NDArray with the number of switching pixels per pixel
|
||||
*/
|
||||
NDArray<int, 2> count_switching_pixels(NDView<uint16_t, 3> raw_data);
|
||||
|
||||
/**
|
||||
* @brief Count the number of switching pixels in the raw data.
|
||||
 * This function counts, for each pixel, how many times it switches to G1 or G2 gain.
 * It returns an NDArray with the switch count per pixel.
|
||||
* @param raw_data The NDView containing the raw data
|
||||
* @param n_threads The number of threads to use for parallel processing
|
||||
* @return An NDArray with the number of switching pixels per pixel
|
||||
*/
|
||||
NDArray<int, 2> count_switching_pixels(NDView<uint16_t, 3> raw_data,
|
||||
ssize_t n_threads);
|
||||
|
||||
template <typename T>
|
||||
auto calculate_pedestal_g0(NDView<uint16_t, 3> raw_data, ssize_t n_threads) {
|
||||
return calculate_pedestal<T, true>(raw_data, n_threads);
|
||||
}
|
||||
|
||||
} // namespace aare
|
||||
@@ -215,6 +215,122 @@ enum class DetectorType {
|
||||
Unknown
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Enum class to define the Digital to Analog converter
|
||||
* The values are the same as in slsDetectorPackage
|
||||
*/
|
||||
enum DACIndex {
|
||||
DAC_0,
|
||||
DAC_1,
|
||||
DAC_2,
|
||||
DAC_3,
|
||||
DAC_4,
|
||||
DAC_5,
|
||||
DAC_6,
|
||||
DAC_7,
|
||||
DAC_8,
|
||||
DAC_9,
|
||||
DAC_10,
|
||||
DAC_11,
|
||||
DAC_12,
|
||||
DAC_13,
|
||||
DAC_14,
|
||||
DAC_15,
|
||||
DAC_16,
|
||||
DAC_17,
|
||||
VSVP,
|
||||
VTRIM,
|
||||
VRPREAMP,
|
||||
VRSHAPER,
|
||||
VSVN,
|
||||
VTGSTV,
|
||||
VCMP_LL,
|
||||
VCMP_LR,
|
||||
VCAL,
|
||||
VCMP_RL,
|
||||
RXB_RB,
|
||||
RXB_LB,
|
||||
VCMP_RR,
|
||||
VCP,
|
||||
VCN,
|
||||
VISHAPER,
|
||||
VTHRESHOLD,
|
||||
IO_DELAY,
|
||||
VREF_DS,
|
||||
VOUT_CM,
|
||||
VIN_CM,
|
||||
VREF_COMP,
|
||||
VB_COMP,
|
||||
VDD_PROT,
|
||||
VIN_COM,
|
||||
VREF_PRECH,
|
||||
VB_PIXBUF,
|
||||
VB_DS,
|
||||
VREF_H_ADC,
|
||||
VB_COMP_FE,
|
||||
VB_COMP_ADC,
|
||||
VCOM_CDS,
|
||||
VREF_RSTORE,
|
||||
VB_OPA_1ST,
|
||||
VREF_COMP_FE,
|
||||
VCOM_ADC1,
|
||||
VREF_L_ADC,
|
||||
VREF_CDS,
|
||||
VB_CS,
|
||||
VB_OPA_FD,
|
||||
VCOM_ADC2,
|
||||
VCASSH,
|
||||
VTH2,
|
||||
VRSHAPER_N,
|
||||
VIPRE_OUT,
|
||||
VTH3,
|
||||
VTH1,
|
||||
VICIN,
|
||||
VCAS,
|
||||
VCAL_N,
|
||||
VIPRE,
|
||||
VCAL_P,
|
||||
VDCSH,
|
||||
VBP_COLBUF,
|
||||
VB_SDA,
|
||||
VCASC_SFP,
|
||||
VIPRE_CDS,
|
||||
IBIAS_SFP,
|
||||
ADC_VPP,
|
||||
HIGH_VOLTAGE,
|
||||
TEMPERATURE_ADC,
|
||||
TEMPERATURE_FPGA,
|
||||
TEMPERATURE_FPGAEXT,
|
||||
TEMPERATURE_10GE,
|
||||
TEMPERATURE_DCDC,
|
||||
TEMPERATURE_SODL,
|
||||
TEMPERATURE_SODR,
|
||||
TEMPERATURE_FPGA2,
|
||||
TEMPERATURE_FPGA3,
|
||||
TRIMBIT_SCAN,
|
||||
V_POWER_A = 100,
|
||||
V_POWER_B = 101,
|
||||
V_POWER_C = 102,
|
||||
V_POWER_D = 103,
|
||||
V_POWER_IO = 104,
|
||||
V_POWER_CHIP = 105,
|
||||
I_POWER_A = 106,
|
||||
I_POWER_B = 107,
|
||||
I_POWER_C = 108,
|
||||
I_POWER_D = 109,
|
||||
I_POWER_IO = 110,
|
||||
V_LIMIT = 111,
|
||||
SLOW_ADC0 = 1000,
|
||||
SLOW_ADC1,
|
||||
SLOW_ADC2,
|
||||
SLOW_ADC3,
|
||||
SLOW_ADC4,
|
||||
SLOW_ADC5,
|
||||
SLOW_ADC6,
|
||||
SLOW_ADC7,
|
||||
SLOW_ADC_TEMP
|
||||
};
|
||||
|
||||
enum class TimingMode { Auto, Trigger };
|
||||
enum class FrameDiscardPolicy { NoDiscard, Discard, DiscardPartial };
|
||||
|
||||
@@ -231,6 +347,15 @@ template <> FrameDiscardPolicy StringTo(const std::string & /*mode*/);
|
||||
|
||||
using DataTypeVariants = std::variant<uint16_t, uint32_t>;
|
||||
|
||||
constexpr uint16_t ADC_MASK = 0x3FFF; // used to mask out the gain bits in Jungfrau
|
||||
constexpr uint16_t ADC_MASK =
|
||||
0x3FFF; // used to mask out the gain bits in Jungfrau
|
||||
|
||||
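For reference, a sketch of how this mask is typically used when unpacking a raw 16-bit Jungfrau sample (the actual decoding, including the gain-bit mapping, lives in get_value_and_gain in calibration.hpp):

void example_adc_mask() {
    uint16_t raw = 0xC123;                 // illustrative raw sample
    uint16_t adc = raw & aare::ADC_MASK;   // lower 14 bits -> ADC value (0x0123)
    uint16_t gain_bits = raw >> 14;        // upper 2 bits  -> gain bits (0x3 here)
    static_cast<void>(adc);
    static_cast<void>(gain_bits);
}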
/**
|
||||
* @brief Convert a string to a DACIndex
|
||||
* @param arg string representation of the dacIndex
|
||||
* @return DACIndex
|
||||
* @throw invalid argument error if the string does not match any DACIndex
|
||||
*/
|
||||
template <> DACIndex StringTo(const std::string &arg);
|
||||
|
||||
} // namespace aare
|
||||
@@ -1,7 +1,10 @@
|
||||
#pragma once
|
||||
#include <thread>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
#include "aare/utils/task.hpp"
|
||||
|
||||
namespace aare {
|
||||
|
||||
template <typename F>
|
||||
@@ -15,4 +18,17 @@ void RunInParallel(F func, const std::vector<std::pair<int, int>> &tasks) {
|
||||
thread.join();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
template <typename T>
|
||||
std::vector<NDView<T,3>> make_subviews(NDView<T, 3> &data, ssize_t n_threads) {
|
||||
std::vector<NDView<T, 3>> subviews;
|
||||
subviews.reserve(n_threads);
|
||||
auto limits = split_task(0, data.shape(0), n_threads);
|
||||
for (const auto &lim : limits) {
|
||||
subviews.push_back(data.sub_view(lim.first, lim.second));
|
||||
}
|
||||
return subviews;
|
||||
}
|
||||
|
||||
} // namespace aare
|
||||
@@ -1,4 +1,4 @@
|
||||
|
||||
#pragma once
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
|
||||
@@ -46,14 +46,13 @@ def ClusterFinderMT(image_size, cluster_size = (3,3), dtype=np.int32, n_sigma=5,
|
||||
return cls(image_size, n_sigma=n_sigma, capacity=capacity, n_threads=n_threads)
|
||||
|
||||
|
||||
|
||||
def ClusterCollector(clusterfindermt, cluster_size = (3,3), dtype=np.int32):
|
||||
def ClusterCollector(clusterfindermt, dtype=np.int32):
|
||||
"""
|
||||
Factory function to create a ClusterCollector object. Provides a cleaner syntax for
|
||||
the templated ClusterCollector in C++.
|
||||
"""
|
||||
|
||||
cls = _get_class("ClusterCollector", cluster_size, dtype)
|
||||
|
||||
cls = _get_class("ClusterCollector", clusterfindermt.cluster_size, dtype)
|
||||
return cls(clusterfindermt)
|
||||
|
||||
def ClusterFileSink(clusterfindermt, cluster_file, dtype=np.int32):
|
||||
|
||||
@@ -17,7 +17,7 @@ from .ClusterVector import ClusterVector
|
||||
from ._aare import fit_gaus, fit_pol1, fit_scurve, fit_scurve2
|
||||
from ._aare import Interpolator
|
||||
from ._aare import calculate_eta2
|
||||
|
||||
from ._aare import reduce_to_2x2, reduce_to_3x3
|
||||
|
||||
from ._aare import apply_custom_weights
|
||||
|
||||
@@ -32,6 +32,7 @@ from .utils import random_pixels, random_pixel, flat_list, add_colorbar
|
||||
from .func import *
|
||||
|
||||
from .calibration import *
|
||||
from ._aare import apply_calibration
|
||||
from ._aare import apply_calibration, count_switching_pixels
|
||||
from ._aare import calculate_pedestal, calculate_pedestal_float, calculate_pedestal_g0, calculate_pedestal_g0_float
|
||||
|
||||
from ._aare import VarClusterFinder
|
||||
|
||||
@@ -24,7 +24,8 @@ void define_Cluster(py::module &m, const std::string &typestr) {
|
||||
py::class_<Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>>(
|
||||
m, class_name.c_str(), py::buffer_protocol())
|
||||
|
||||
.def(py::init([](uint8_t x, uint8_t y, py::array_t<Type> data) {
|
||||
.def(py::init([](uint8_t x, uint8_t y,
|
||||
py::array_t<Type, py::array::forcecast> data) {
|
||||
py::buffer_info buf_info = data.request();
|
||||
Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType> cluster;
|
||||
cluster.x = x;
|
||||
@@ -34,31 +35,58 @@ void define_Cluster(py::module &m, const std::string &typestr) {
|
||||
cluster.data[i] = r(i);
|
||||
}
|
||||
return cluster;
|
||||
}));
|
||||
}))
|
||||
|
||||
/*
|
||||
//TODO! Review if to keep or not
|
||||
.def_property(
|
||||
"data",
|
||||
[](ClusterType &c) -> py::array {
|
||||
return py::array(py::buffer_info(
|
||||
c.data, sizeof(Type),
|
||||
py::format_descriptor<Type>::format(), // Type
|
||||
// format
|
||||
1, // Number of dimensions
|
||||
{static_cast<ssize_t>(ClusterSizeX *
|
||||
ClusterSizeY)}, // Shape (flattened)
|
||||
{sizeof(Type)} // Stride (step size between elements)
|
||||
));
|
||||
// TODO! Review if to keep or not
|
||||
.def_property_readonly(
|
||||
"data",
|
||||
[](Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType> &c)
|
||||
-> py::array {
|
||||
return py::array(py::buffer_info(
|
||||
c.data.data(), sizeof(Type),
|
||||
py::format_descriptor<Type>::format(), // Type
|
||||
// format
|
||||
2, // Number of dimensions
|
||||
{static_cast<ssize_t>(ClusterSizeX),
|
||||
static_cast<ssize_t>(ClusterSizeY)}, // Shape (flattened)
|
||||
{sizeof(Type) * ClusterSizeY, sizeof(Type)}
|
||||
// Stride (step size between elements)
|
||||
));
|
||||
})
|
||||
|
||||
.def_readonly("x",
|
||||
&Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>::x)
|
||||
|
||||
.def_readonly("y",
|
||||
&Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>::y);
|
||||
}
|
||||
|
||||
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType = int16_t>
|
||||
void reduce_to_3x3(py::module &m) {
|
||||
|
||||
m.def(
|
||||
"reduce_to_3x3",
|
||||
[](const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cl) {
|
||||
return reduce_to_3x3(cl);
|
||||
},
|
||||
[](ClusterType &c, py::array_t<Type> arr) {
|
||||
py::buffer_info buf_info = arr.request();
|
||||
Type *ptr = static_cast<Type *>(buf_info.ptr);
|
||||
std::copy(ptr, ptr + ClusterSizeX * ClusterSizeY,
|
||||
c.data); // TODO dont iterate over centers!!!
|
||||
py::return_value_policy::move,
|
||||
"Reduce cluster to 3x3 subcluster by taking the 3x3 subcluster with "
|
||||
"the highest photon energy.");
|
||||
}
|
||||
|
||||
});
|
||||
*/
|
||||
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType = int16_t>
|
||||
void reduce_to_2x2(py::module &m) {
|
||||
|
||||
m.def(
|
||||
"reduce_to_2x2",
|
||||
[](const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cl) {
|
||||
return reduce_to_2x2(cl);
|
||||
},
|
||||
py::return_value_policy::move,
|
||||
"Reduce cluster to 2x2 subcluster by taking the 2x2 subcluster with "
|
||||
"the highest photon energy.");
|
||||
}
|
||||
|
||||
#pragma GCC diagnostic pop
|
||||
@@ -104,4 +104,47 @@ void define_ClusterVector(py::module &m, const std::string &typestr) {
|
||||
});
|
||||
}
|
||||
|
||||
template <typename Type, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType = uint16_t>
|
||||
void define_2x2_reduction(py::module &m) {
|
||||
m.def(
|
||||
"reduce_to_2x2",
|
||||
[](const ClusterVector<
|
||||
Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>> &cv) {
|
||||
return new ClusterVector<Cluster<Type, 2, 2, CoordType>>(
|
||||
reduce_to_2x2(cv));
|
||||
},
|
||||
R"(
|
||||
|
||||
Reduce cluster to 2x2 subcluster by taking the 2x2 subcluster with
|
||||
the highest photon energy.
|
||||
Parameters
|
||||
----------
|
||||
cv : ClusterVector
|
||||
)",
|
||||
py::arg("clustervector"));
|
||||
}
|
||||
|
||||
template <typename Type, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType = uint16_t>
|
||||
void define_3x3_reduction(py::module &m) {
|
||||
|
||||
m.def(
|
||||
"reduce_to_3x3",
|
||||
[](const ClusterVector<
|
||||
Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>> &cv) {
|
||||
return new ClusterVector<Cluster<Type, 3, 3, CoordType>>(
|
||||
reduce_to_3x3(cv));
|
||||
},
|
||||
R"(
|
||||
|
||||
Reduce cluster to 3x3 subcluster by taking the 3x3 subcluster with
|
||||
the highest photon energy.
|
||||
Parameters
|
||||
----------
|
||||
cv : ClusterVector
|
||||
)",
|
||||
py::arg("clustervector"));
|
||||
}
|
||||
|
||||
#pragma GCC diagnostic pop
|
||||
@@ -17,27 +17,137 @@ py::array_t<DataType> pybind_apply_calibration(
|
||||
calibration,
|
||||
int n_threads = 4) {
|
||||
|
||||
auto data_span = make_view_3d(data);
|
||||
auto ped = make_view_3d(pedestal);
|
||||
auto cal = make_view_3d(calibration);
|
||||
|
||||
auto data_span = make_view_3d(data); // data is always 3D
|
||||
/* No pointer is passed, so NumPy will allocate the buffer */
|
||||
auto result = py::array_t<DataType>(data_span.shape());
|
||||
auto res = make_view_3d(result);
|
||||
|
||||
aare::apply_calibration<DataType>(res, data_span, ped, cal, n_threads);
|
||||
|
||||
if (data.ndim() == 3 && pedestal.ndim() == 3 && calibration.ndim() == 3) {
|
||||
auto ped = make_view_3d(pedestal);
|
||||
auto cal = make_view_3d(calibration);
|
||||
aare::apply_calibration<DataType, 3>(res, data_span, ped, cal,
|
||||
n_threads);
|
||||
} else if (data.ndim() == 3 && pedestal.ndim() == 2 &&
|
||||
calibration.ndim() == 2) {
|
||||
auto ped = make_view_2d(pedestal);
|
||||
auto cal = make_view_2d(calibration);
|
||||
aare::apply_calibration<DataType, 2>(res, data_span, ped, cal,
|
||||
n_threads);
|
||||
} else {
|
||||
throw std::runtime_error(
|
||||
"Invalid number of dimensions for data, pedestal or calibration");
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
py::array_t<int> pybind_count_switching_pixels(
|
||||
py::array_t<uint16_t, py::array::c_style | py::array::forcecast> data,
|
||||
ssize_t n_threads = 4) {
|
||||
|
||||
auto data_span = make_view_3d(data);
|
||||
auto arr = new NDArray<int, 2>{};
|
||||
*arr = aare::count_switching_pixels(data_span, n_threads);
|
||||
return return_image_data(arr);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
py::array_t<T> pybind_calculate_pedestal(
|
||||
py::array_t<uint16_t, py::array::c_style | py::array::forcecast> data,
|
||||
ssize_t n_threads) {
|
||||
|
||||
auto data_span = make_view_3d(data);
|
||||
auto arr = new NDArray<T, 3>{};
|
||||
*arr = aare::calculate_pedestal<T, false>(data_span, n_threads);
|
||||
return return_image_data(arr);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
py::array_t<T> pybind_calculate_pedestal_g0(
|
||||
py::array_t<uint16_t, py::array::c_style | py::array::forcecast> data,
|
||||
ssize_t n_threads) {
|
||||
|
||||
auto data_span = make_view_3d(data);
|
||||
auto arr = new NDArray<T, 2>{};
|
||||
*arr = aare::calculate_pedestal<T, true>(data_span, n_threads);
|
||||
return return_image_data(arr);
|
||||
}
|
||||
|
||||
void bind_calibration(py::module &m) {
|
||||
m.def("apply_calibration", &pybind_apply_calibration<double>,
|
||||
py::arg("raw_data").noconvert(), py::kw_only(),
|
||||
py::arg("pd").noconvert(), py::arg("cal").noconvert(),
|
||||
py::arg("n_threads") = 4);
|
||||
|
||||
m.def("apply_calibration", &pybind_apply_calibration<float>,
|
||||
py::arg("raw_data").noconvert(), py::kw_only(),
|
||||
py::arg("pd").noconvert(), py::arg("cal").noconvert(),
|
||||
py::arg("n_threads") = 4);
|
||||
|
||||
m.def("apply_calibration", &pybind_apply_calibration<double>,
|
||||
m.def("count_switching_pixels", &pybind_count_switching_pixels,
|
||||
R"(
|
||||
Count the number of times each pixel switches to G1 or G2.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
raw_data : array_like
|
||||
3D array of shape (frames, rows, cols) to count the switching pixels from.
|
||||
n_threads : int
|
||||
The number of threads to use for the calculation.
|
||||
)",
|
||||
py::arg("raw_data").noconvert(), py::kw_only(),
|
||||
py::arg("pd").noconvert(), py::arg("cal").noconvert(),
|
||||
py::arg("n_threads") = 4);
|
||||
|
||||
m.def("calculate_pedestal", &pybind_calculate_pedestal<double>,
|
||||
R"(
|
||||
Calculate the pedestal for all three gains and return the result as a 3D array of doubles.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
raw_data : array_like
|
||||
3D array of shape (frames, rows, cols) to calculate the pedestal from.
|
||||
Needs to contain data for all three gains (G0, G1, G2).
|
||||
n_threads : int
|
||||
The number of threads to use for the calculation.
|
||||
)",
|
||||
py::arg("raw_data").noconvert(), py::arg("n_threads") = 4);
|
||||
|
||||
m.def("calculate_pedestal_float", &pybind_calculate_pedestal<float>,
|
||||
R"(
|
||||
Same as `calculate_pedestal` but returns a 3D array of floats.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
raw_data : array_like
|
||||
3D array of shape (frames, rows, cols) to calculate the pedestal from.
|
||||
Needs to contain data for all three gains (G0, G1, G2).
|
||||
n_threads : int
|
||||
The number of threads to use for the calculation.
|
||||
)",
|
||||
py::arg("raw_data").noconvert(), py::arg("n_threads") = 4);
|
||||
|
||||
m.def("calculate_pedestal_g0", &pybind_calculate_pedestal_g0<double>,
|
||||
R"(
|
||||
Calculate the pedestal for G0 and return the result as a 2D array of doubles.
|
||||
Pixels in G1 and G2 are ignored.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
raw_data : array_like
|
||||
3D array of shape (frames, rows, cols) to calculate the pedestal from.
|
||||
n_threads : int
|
||||
The number of threads to use for the calculation.
|
||||
)",
|
||||
py::arg("raw_data").noconvert(), py::arg("n_threads") = 4);
|
||||
|
||||
m.def("calculate_pedestal_g0_float", &pybind_calculate_pedestal_g0<float>,
|
||||
R"(
|
||||
Same as `calculate_pedestal_g0` but returns a 2D array of floats.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
raw_data : array_like
|
||||
3D array of shape (frames, rows, cols) to calculate the pedestal from.
|
||||
n_threads : int
|
||||
The number of threads to use for the calculation.
|
||||
)",
|
||||
py::arg("raw_data").noconvert(), py::arg("n_threads") = 4);
|
||||
}
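// Python-side usage sketch of the bindings above (mirroring the tests further
// down; `raw` is a numpy uint16 array of shape (frames, rows, cols) and `pd`
// and `cal` are the pedestal and calibration arrays):
//   pd    = aare.calculate_pedestal(raw, n_threads=4)
//   img   = aare.apply_calibration(raw, pd=pd, cal=cal, n_threads=4)
//   nswap = aare.count_switching_pixels(raw)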

@@ -47,7 +47,9 @@ double, 'f' for float)
    define_ClusterFileSink<T, N, M, U>(m, "Cluster" #N "x" #M #TYPE_CODE); \
    define_ClusterCollector<T, N, M, U>(m, "Cluster" #N "x" #M #TYPE_CODE); \
    define_Cluster<T, N, M, U>(m, #N "x" #M #TYPE_CODE); \
    register_calculate_eta<T, N, M, U>(m);
    register_calculate_eta<T, N, M, U>(m); \
    define_2x2_reduction<T, N, M, U>(m); \
    reduce_to_2x2<T, N, M, U>(m);

PYBIND11_MODULE(_aare, m) {
    define_file_io_bindings(m);
@@ -84,4 +86,30 @@ PYBIND11_MODULE(_aare, m) {
    DEFINE_CLUSTER_BINDINGS(int, 9, 9, uint16_t, i);
    DEFINE_CLUSTER_BINDINGS(double, 9, 9, uint16_t, d);
    DEFINE_CLUSTER_BINDINGS(float, 9, 9, uint16_t, f);

    define_3x3_reduction<int, 3, 3, uint16_t>(m);
    define_3x3_reduction<double, 3, 3, uint16_t>(m);
    define_3x3_reduction<float, 3, 3, uint16_t>(m);
    define_3x3_reduction<int, 5, 5, uint16_t>(m);
    define_3x3_reduction<double, 5, 5, uint16_t>(m);
    define_3x3_reduction<float, 5, 5, uint16_t>(m);
    define_3x3_reduction<int, 7, 7, uint16_t>(m);
    define_3x3_reduction<double, 7, 7, uint16_t>(m);
    define_3x3_reduction<float, 7, 7, uint16_t>(m);
    define_3x3_reduction<int, 9, 9, uint16_t>(m);
    define_3x3_reduction<double, 9, 9, uint16_t>(m);
    define_3x3_reduction<float, 9, 9, uint16_t>(m);

    reduce_to_3x3<int, 3, 3, uint16_t>(m);
    reduce_to_3x3<double, 3, 3, uint16_t>(m);
    reduce_to_3x3<float, 3, 3, uint16_t>(m);
    reduce_to_3x3<int, 5, 5, uint16_t>(m);
    reduce_to_3x3<double, 5, 5, uint16_t>(m);
    reduce_to_3x3<float, 5, 5, uint16_t>(m);
    reduce_to_3x3<int, 7, 7, uint16_t>(m);
    reduce_to_3x3<double, 7, 7, uint16_t>(m);
    reduce_to_3x3<float, 7, 7, uint16_t>(m);
    reduce_to_3x3<int, 9, 9, uint16_t>(m);
    reduce_to_3x3<double, 9, 9, uint16_t>(m);
    reduce_to_3x3<float, 9, 9, uint16_t>(m);
}

@@ -101,6 +101,27 @@ def test_cluster_finder():
    assert clusters.size == 0


def test_2x2_reduction():
    """Test 2x2 Reduction"""
    cluster = _aare.Cluster3x3i(5,5,np.array([1, 1, 1, 2, 3, 1, 2, 2, 1], dtype=np.int32))

    reduced_cluster = _aare.reduce_to_2x2(cluster)

    assert reduced_cluster.x == 4
    assert reduced_cluster.y == 5
    assert (reduced_cluster.data == np.array([[2, 3], [2, 2]], dtype=np.int32)).all()


def test_3x3_reduction():
    """Test 3x3 Reduction"""
    cluster = _aare.Cluster5x5d(5,5,np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 2.0, 2.0, 3.0,
        1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], dtype=np.double))

    reduced_cluster = _aare.reduce_to_3x3(cluster)

    assert reduced_cluster.x == 4
    assert reduced_cluster.y == 5
    assert (reduced_cluster.data == np.array([[1.0, 2.0, 1.0], [2.0, 2.0, 3.0], [1.0, 2.0, 1.0]], dtype=np.double)).all()


@@ -5,7 +5,7 @@ import time
from pathlib import Path
import pickle

from aare import ClusterFile
from aare import ClusterFile, ClusterVector
from aare import _aare
from conftest import test_data_path

@@ -51,4 +51,36 @@ def test_make_a_hitmap_from_cluster_vector():
    # print(img)
    # print(ref)
    assert (img == ref).all()


def test_2x2_reduction():
    cv = ClusterVector((3,3))

    cv.push_back(_aare.Cluster3x3i(5, 5, np.array([1, 1, 1, 2, 3, 1, 2, 2, 1], dtype=np.int32)))
    cv.push_back(_aare.Cluster3x3i(5, 5, np.array([2, 2, 1, 2, 3, 1, 1, 1, 1], dtype=np.int32)))

    reduced_cv = np.array(_aare.reduce_to_2x2(cv), copy=False)

    assert reduced_cv.size == 2
    assert reduced_cv[0]["x"] == 4
    assert reduced_cv[0]["y"] == 5
    assert (reduced_cv[0]["data"] == np.array([[2, 3], [2, 2]], dtype=np.int32)).all()
    assert reduced_cv[1]["x"] == 4
    assert reduced_cv[1]["y"] == 6
    assert (reduced_cv[1]["data"] == np.array([[2, 2], [2, 3]], dtype=np.int32)).all()


def test_3x3_reduction():
    cv = _aare.ClusterVector_Cluster5x5d()

    cv.push_back(_aare.Cluster5x5d(5,5,np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 2.0, 2.0, 3.0,
        1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], dtype=np.double)))
    cv.push_back(_aare.Cluster5x5d(5,5,np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 2.0, 2.0, 3.0,
        1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], dtype=np.double)))

    reduced_cv = np.array(_aare.reduce_to_3x3(cv), copy=False)

    assert reduced_cv.size == 2
    assert reduced_cv[0]["x"] == 4
    assert reduced_cv[0]["y"] == 5
    assert (reduced_cv[0]["data"] == np.array([[1.0, 2.0, 1.0], [2.0, 2.0, 3.0], [1.0, 2.0, 1.0]], dtype=np.double)).all()
@@ -1,6 +1,7 @@
import pytest
import numpy as np
from aare import apply_calibration

import aare

def test_apply_calibration_small_data():
    # The raw data consists of 10 4x5 images
@@ -27,7 +28,7 @@ def test_apply_calibration_small_data():


    data = apply_calibration(raw, pd = pedestal, cal = calibration)
    data = aare.apply_calibration(raw, pd = pedestal, cal = calibration)

    # The formula that is applied is:
@@ -41,3 +42,94 @@ def test_apply_calibration_small_data():
    assert data[2,2,2] == 0
    assert data[0,1,1] == 0
    assert data[1,3,0] == 0


@pytest.fixture
def raw_data_3x2x2():
    raw = np.zeros((3, 2, 2), dtype=np.uint16)
    raw[0, 0, 0] = 100
    raw[1,0, 0] = 200
    raw[2, 0, 0] = 300

    raw[0, 0, 1] = (1<<14) + 100
    raw[1, 0, 1] = (1<<14) + 200
    raw[2, 0, 1] = (1<<14) + 300

    raw[0, 1, 0] = (1<<14) + 37
    raw[1, 1, 0] = 38
    raw[2, 1, 0] = (3<<14) + 39

    raw[0, 1, 1] = (3<<14) + 100
    raw[1, 1, 1] = (3<<14) + 200
    raw[2, 1, 1] = (3<<14) + 300
    return raw
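
# Gain encoding used above: each uint16 word holds a 14-bit ADC value with the
# gain flag in the two top bits (0 -> G0, 1 -> G1, 3 -> G2), so (1<<14) + 100
# means "100 counts, switched to G1". A rough numpy equivalent of the decoding
# done on the C++ side (get_value_and_gain in src/calibration.cpp):
#   gain = raw >> 14
#   value = raw & 0x3FFF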

def test_calculate_pedestal(raw_data_3x2x2):
    # Calculate the pedestal
    pd = aare.calculate_pedestal(raw_data_3x2x2)
    assert pd.shape == (3, 2, 2)
    assert pd.dtype == np.float64
    assert pd[0, 0, 0] == 200
    assert pd[1, 0, 0] == 0
    assert pd[2, 0, 0] == 0

    assert pd[0, 0, 1] == 0
    assert pd[1, 0, 1] == 200
    assert pd[2, 0, 1] == 0

    assert pd[0, 1, 0] == 38
    assert pd[1, 1, 0] == 37
    assert pd[2, 1, 0] == 39

    assert pd[0, 1, 1] == 0
    assert pd[1, 1, 1] == 0
    assert pd[2, 1, 1] == 200

def test_calculate_pedestal_float(raw_data_3x2x2):
    # results should be the same for float
    pd2 = aare.calculate_pedestal_float(raw_data_3x2x2)
    assert pd2.shape == (3, 2, 2)
    assert pd2.dtype == np.float32
    assert pd2[0, 0, 0] == 200
    assert pd2[1, 0, 0] == 0
    assert pd2[2, 0, 0] == 0

    assert pd2[0, 0, 1] == 0
    assert pd2[1, 0, 1] == 200
    assert pd2[2, 0, 1] == 0

    assert pd2[0, 1, 0] == 38
    assert pd2[1, 1, 0] == 37
    assert pd2[2, 1, 0] == 39

    assert pd2[0, 1, 1] == 0
    assert pd2[1, 1, 1] == 0
    assert pd2[2, 1, 1] == 200

def test_calculate_pedestal_g0(raw_data_3x2x2):
    pd = aare.calculate_pedestal_g0(raw_data_3x2x2)
    assert pd.shape == (2, 2)
    assert pd.dtype == np.float64
    assert pd[0, 0] == 200
    assert pd[1, 0] == 38
    assert pd[0, 1] == 0
    assert pd[1, 1] == 0

def test_calculate_pedestal_g0_float(raw_data_3x2x2):
    pd = aare.calculate_pedestal_g0_float(raw_data_3x2x2)
    assert pd.shape == (2, 2)
    assert pd.dtype == np.float32
    assert pd[0, 0] == 200
    assert pd[1, 0] == 38
    assert pd[0, 1] == 0
    assert pd[1, 1] == 0

def test_count_switching_pixels(raw_data_3x2x2):
    # Count the number of pixels that switched gain
    count = aare.count_switching_pixels(raw_data_3x2x2)
    assert count.shape == (2, 2)
    assert count.sum() == 8
    assert count[0, 0] == 0
    assert count[1, 0] == 2
    assert count[0, 1] == 3
    assert count[1, 1] == 3
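
# The expected counts follow from the fixture above: pixel (0,0) stays in G0
# in every frame, (1,0) is switched in 2 of 3 frames, and (0,1) and (1,1) are
# switched in all 3 frames, giving 0 + 2 + 3 + 3 = 8.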
@@ -18,4 +18,86 @@ TEST_CASE("Test sum of Cluster", "[.cluster]") {
    Cluster<int, 2, 2> cluster{0, 0, {1, 2, 3, 4}};

    CHECK(cluster.sum() == 10);
}

using ClusterTypes = std::variant<Cluster<int, 2, 2>, Cluster<int, 3, 3>,
                                  Cluster<int, 5, 5>, Cluster<int, 2, 3>>;

using ClusterTypesLargerThan2x2 =
    std::variant<Cluster<int, 3, 3>, Cluster<int, 4, 4>, Cluster<int, 5, 5>>;

TEST_CASE("Test reduce to 2x2 Cluster", "[.cluster]") {
    auto [cluster, expected_reduced_cluster] = GENERATE(
        std::make_tuple(ClusterTypes{Cluster<int, 2, 2>{5, 5, {1, 2, 3, 4}}},
                        Cluster<int, 2, 2>{4, 6, {1, 2, 3, 4}}),
        std::make_tuple(
            ClusterTypes{Cluster<int, 3, 3>{5, 5, {1, 1, 1, 1, 3, 2, 1, 2, 2}}},
            Cluster<int, 2, 2>{5, 5, {3, 2, 2, 2}}),
        std::make_tuple(
            ClusterTypes{Cluster<int, 3, 3>{5, 5, {1, 1, 1, 2, 3, 1, 2, 2, 1}}},
            Cluster<int, 2, 2>{4, 5, {2, 3, 2, 2}}),
        std::make_tuple(
            ClusterTypes{Cluster<int, 3, 3>{5, 5, {2, 2, 1, 2, 3, 1, 1, 1, 1}}},
            Cluster<int, 2, 2>{4, 6, {2, 2, 2, 3}}),
        std::make_tuple(
            ClusterTypes{Cluster<int, 3, 3>{5, 5, {1, 2, 2, 1, 3, 2, 1, 1, 1}}},
            Cluster<int, 2, 2>{5, 6, {2, 2, 3, 2}}),
        std::make_tuple(ClusterTypes{Cluster<int, 5, 5>{
                            5, 5, {1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 3,
                                   2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}}},
                        Cluster<int, 2, 2>{5, 6, {2, 2, 3, 2}}),
        std::make_tuple(ClusterTypes{Cluster<int, 5, 5>{
                            5, 5, {1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 2, 3,
                                   1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}}},
                        Cluster<int, 2, 2>{4, 6, {2, 2, 2, 3}}),
        std::make_tuple(
            ClusterTypes{Cluster<int, 2, 3>{5, 5, {2, 2, 3, 2, 1, 1}}},
            Cluster<int, 2, 2>{4, 6, {2, 2, 3, 2}}));

    auto reduced_cluster = std::visit(
        [](const auto &clustertype) { return reduce_to_2x2(clustertype); },
        cluster);

    CHECK(reduced_cluster.x == expected_reduced_cluster.x);
    CHECK(reduced_cluster.y == expected_reduced_cluster.y);
    CHECK(std::equal(reduced_cluster.data.begin(),
                     reduced_cluster.data.begin() + 4,
                     expected_reduced_cluster.data.begin()));
}

TEST_CASE("Test reduce to 3x3 Cluster", "[.cluster]") {
    auto [cluster, expected_reduced_cluster] = GENERATE(
        std::make_tuple(ClusterTypesLargerThan2x2{Cluster<int, 3, 3>{
                            5, 5, {1, 1, 1, 1, 3, 1, 1, 1, 1}}},
                        Cluster<int, 3, 3>{5, 5, {1, 1, 1, 1, 3, 1, 1, 1, 1}}),
        std::make_tuple(
            ClusterTypesLargerThan2x2{Cluster<int, 4, 4>{
                5, 5, {2, 2, 1, 1, 2, 2, 1, 1, 1, 1, 3, 1, 1, 1, 1, 1}}},
            Cluster<int, 3, 3>{4, 6, {2, 2, 1, 2, 2, 1, 1, 1, 3}}),
        std::make_tuple(
            ClusterTypesLargerThan2x2{Cluster<int, 4, 4>{
                5, 5, {1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 3, 1, 1, 1, 1, 1}}},
            Cluster<int, 3, 3>{5, 6, {1, 2, 2, 1, 2, 2, 1, 3, 1}}),
        std::make_tuple(
            ClusterTypesLargerThan2x2{Cluster<int, 4, 4>{
                5, 5, {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 2, 1, 1, 2, 2}}},
            Cluster<int, 3, 3>{5, 5, {1, 1, 1, 1, 3, 2, 1, 2, 2}}),
        std::make_tuple(
            ClusterTypesLargerThan2x2{Cluster<int, 4, 4>{
                5, 5, {1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 1, 2, 2, 1, 1}}},
            Cluster<int, 3, 3>{4, 5, {1, 1, 1, 2, 2, 3, 2, 2, 1}}),
        std::make_tuple(ClusterTypesLargerThan2x2{Cluster<int, 5, 5>{
                            5, 5, {1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 2, 2, 3,
                                   1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1}}},
                        Cluster<int, 3, 3>{4, 5, {1, 2, 1, 2, 2, 3, 1, 2, 1}}));

    auto reduced_cluster = std::visit(
        [](const auto &clustertype) { return reduce_to_3x3(clustertype); },
        cluster);

    CHECK(reduced_cluster.x == expected_reduced_cluster.x);
    CHECK(reduced_cluster.y == expected_reduced_cluster.y);
    CHECK(std::equal(reduced_cluster.data.begin(),
                     reduced_cluster.data.begin() + 9,
                     expected_reduced_cluster.data.begin()));
}
@@ -57,6 +57,7 @@ class ClusterFinderMTWrapper
    size_t m_sink_size() const { return this->m_sink.sizeGuess(); }
};


TEST_CASE("multithreaded cluster finder", "[.with-data]") {
    auto fpath =
        test_data_path() / "raw/moench03/cu_half_speed_master_4.json";
@@ -81,7 +82,8 @@ TEST_CASE("multithreaded cluster finder", "[.with-data]") {
    CHECK(cf.m_input_queues_are_empty() == true);

    for (size_t i = 0; i < n_frames_pd; ++i) {
        cf.find_clusters(file.read_frame().view<uint16_t>());
        auto frame = file.read_frame();
        cf.find_clusters(frame.view<uint16_t>());
    }

    cf.stop();

@@ -25,13 +25,13 @@ TEST_CASE("Construct from an NDView") {
    REQUIRE(image.data() != view.data());

    for (uint32_t i = 0; i < image.size(); ++i) {
        REQUIRE(image(i) == view(i));
        REQUIRE(image[i] == view[i]);
    }

    // Changing the image doesn't change the view
    image = 43;
    for (uint32_t i = 0; i < image.size(); ++i) {
        REQUIRE(image(i) != view(i));
        REQUIRE(image[i] != view[i]);
    }
}

@@ -427,4 +427,30 @@ TEST_CASE("Construct an NDArray from an std::array") {
        for (uint32_t i = 0; i < a.size(); ++i) {
            REQUIRE(a(i) == b[i]);
        }
    }
}


TEST_CASE("Move construct from an array with Ndim + 1") {
    NDArray<int, 3> a({{1,2,2}}, 0);
    a(0, 0, 0) = 1;
    a(0, 0, 1) = 2;
    a(0, 1, 0) = 3;
    a(0, 1, 1) = 4;

    NDArray<int, 2> b(std::move(a));
    REQUIRE(b.shape() == Shape<2>{2,2});
    REQUIRE(b.size() == 4);
    REQUIRE(b(0, 0) == 1);
    REQUIRE(b(0, 1) == 2);
    REQUIRE(b(1, 0) == 3);
    REQUIRE(b(1, 1) == 4);
}

TEST_CASE("Move construct from an array with Ndim + 1 throws on size mismatch") {
    NDArray<int, 3> a({{2,2,2}}, 0);
    REQUIRE_THROWS(NDArray<int, 2>(std::move(a)));
}

@@ -99,7 +99,8 @@ TEST_CASE("Read data from a jungfrau 500k single port raw file",
}

TEST_CASE("Read frame numbers from a raw file", "[.with-data]") {
    auto fpath = test_data_path() / "raw/eiger" / "eiger_500k_16bit_master_0.json";
    auto fpath =
        test_data_path() / "raw/eiger" / "eiger_500k_16bit_master_0.json";
    REQUIRE(std::filesystem::exists(fpath));

    // we know this file has 3 frames with frame numbers 14, 15, 16
@@ -288,8 +289,7 @@ TEST_CASE("check find_geometry", "[.with-data]") {
    }
}

TEST_CASE("Open multi module file with ROI",
          "[.with-data]") {
TEST_CASE("Open multi module file with ROI", "[.with-data]") {

    auto fpath = test_data_path() / "raw/SingleChipROI/Data_master_0.json";
    REQUIRE(std::filesystem::exists(fpath));
@@ -319,4 +319,4 @@ TEST_CASE("Read file with unordered frames", "[.with-data]") {
    REQUIRE(std::filesystem::exists(fpath));
    File f(fpath);
    REQUIRE_THROWS((f.read_frame()));
}
}
@@ -64,6 +64,12 @@ const std::string &RawFileNameComponents::base_name() const {
const std::string &RawFileNameComponents::ext() const { return m_ext; }
int RawFileNameComponents::file_index() const { return m_file_index; }

ScanParameters::ScanParameters(const bool enabled, const DACIndex dac,
                               const int start, const int stop, const int step,
                               const int64_t settleTime)
    : m_enabled(enabled), m_dac(dac), m_start(start), m_stop(stop),
      m_step(step), m_settleTime(settleTime){};

// "[enabled\ndac dac 4\nstart 500\nstop 2200\nstep 5\nsettleTime 100us\n]"
ScanParameters::ScanParameters(const std::string &par) {
    std::istringstream iss(par.substr(1, par.size() - 2));
@@ -72,7 +78,7 @@ ScanParameters::ScanParameters(const std::string &par) {
        if (line == "enabled") {
            m_enabled = true;
        } else if (line.find("dac") != std::string::npos) {
            m_dac = line.substr(4);
            m_dac = StringTo<DACIndex>(line.substr(4));
        } else if (line.find("start") != std::string::npos) {
            m_start = std::stoi(line.substr(6));
        } else if (line.find("stop") != std::string::npos) {
@@ -87,8 +93,9 @@ int ScanParameters::start() const { return m_start; }
int ScanParameters::stop() const { return m_stop; }
void ScanParameters::increment_stop() { m_stop += 1; }
int ScanParameters::step() const { return m_step; }
const std::string &ScanParameters::dac() const { return m_dac; }
DACIndex ScanParameters::dac() const { return m_dac; }
bool ScanParameters::enabled() const { return m_enabled; }
int64_t ScanParameters::settleTime() const { return m_settleTime; }

RawMasterFile::RawMasterFile(const std::filesystem::path &fpath)
    : m_fnc(fpath) {
@@ -170,6 +177,7 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) {
    std::ifstream ifs(fpath);
    json j;
    ifs >> j;

    double v = j["Version"];
    m_version = fmt::format("{:.1f}", v);

@@ -181,7 +189,9 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) {
                  j["Geometry"]["x"]}; // TODO: isnt it only available for version > 7.1?
                                       // - try block default should be 1x1

    m_image_size_in_bytes = j["Image Size in bytes"];
    m_image_size_in_bytes =
        v < 8.0 ? j["Image Size in bytes"] : j["Image Size"];

    m_frames_in_file = j["Frames in File"];
    m_pixels_y = j["Pixels"]["y"];
    m_pixels_x = j["Pixels"]["x"];
@@ -206,7 +216,6 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) {
    } catch (const json::out_of_range &e) {
        // keep the optional empty
    }

    // ----------------------------------------------------------------
    // Special treatment of analog flag because of Moench03
    try {
@@ -227,7 +236,6 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) {
        m_analog_flag = 0;
    }
    //-----------------------------------------------------------------

    try {
        m_quad = j.at("Quad");
    } catch (const json::out_of_range &e) {
@@ -239,7 +247,6 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) {
    // }catch (const json::out_of_range &e) {
    //     m_adc_mask = 0;
    // }

    try {
        int digital_flag = j.at("Digital Flag");
        if (digital_flag) {
@@ -248,7 +255,6 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) {
    } catch (const json::out_of_range &e) {
        // keep the optional empty
    }

    try {
        m_transceiver_flag = j.at("Transceiver Flag");
        if (m_transceiver_flag) {
@@ -257,10 +263,20 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) {
    } catch (const json::out_of_range &e) {
        // keep the optional empty
    }

    try {
        std::string scan_parameters = j.at("Scan Parameters");
        m_scan_parameters = ScanParameters(scan_parameters);
        if (v < 8.0) {
            std::string scan_parameters = j.at("Scan Parameters");
            m_scan_parameters = ScanParameters(scan_parameters);
        } else {
            auto json_obj = j.at("Scan Parameters");
            m_scan_parameters = ScanParameters(
                json_obj.at("enable").get<int>(),
                static_cast<DACIndex>(json_obj.at("dacInd").get<int>()),
                json_obj.at("start offset").get<int>(),
                json_obj.at("stop offset").get<int>(),
                json_obj.at("step size").get<int>(),
                json_obj.at("dac settle time ns").get<int>());
        }
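        // Sketch of the >= 8.0 form this branch expects (key names as parsed
        // above, values as in the newmythen03 test file further down):
        //   "Scan Parameters": { "enable": 0, "dacInd": 0, "start offset": 0,
        //                        "stop offset": 0, "step size": 0,
        //                        "dac settle time ns": 0 }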
        if (v < 7.21) {
            m_scan_parameters
                .increment_stop(); // adjust for endpoint being included
@@ -268,6 +284,7 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) {
    } catch (const json::out_of_range &e) {
        // not a scan
    }

    try {
        m_udp_interfaces_per_module = {j.at("Number of UDP Interfaces"), 1};
    } catch (const json::out_of_range &e) {
@@ -277,14 +294,22 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) {
            m_udp_interfaces_per_module = {1, 2};
        }
    }

    try {
        ROI tmp_roi;
        auto obj = j.at("Receiver Roi");
        tmp_roi.xmin = obj.at("xmin");
        tmp_roi.xmax = obj.at("xmax");
        tmp_roi.ymin = obj.at("ymin");
        tmp_roi.ymax = obj.at("ymax");
        if (v < 8.0) {
            auto obj = j.at("Receiver Roi");
            tmp_roi.xmin = obj.at("xmin");
            tmp_roi.xmax = obj.at("xmax");
            tmp_roi.ymin = obj.at("ymin");
            tmp_roi.ymax = obj.at("ymax");
        } else {
            // TODO: for now only handle single ROI
            auto obj = j.at("Receiver Rois");
            tmp_roi.xmin = obj[0].at("xmin");
            tmp_roi.xmax = obj[0].at("xmax");
            tmp_roi.ymin = obj[0].at("ymin");
            tmp_roi.ymax = obj[0].at("ymax");
        }
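        // New-format sketch (only the first entry of "Receiver Rois" is read
        // for now), with the values checked in the Mythen3 test below:
        //   "Receiver Rois": [ { "xmin": 0, "xmax": 2559, "ymin": -1, "ymax": -1 } ]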

        // if any of the values are set update the roi
        if (tmp_roi.xmin != 4294967295 || tmp_roi.xmax != 4294967295 ||
@@ -298,14 +323,10 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) {
        }

    } catch (const json::out_of_range &e) {
        std::cout << e.what() << std::endl;
        LOG(TLogLevel::logERROR) << e.what() << std::endl;
        // leave the optional empty
    }

    // if we have an roi we need to update the geometry for the subfiles
    if (m_roi) {
    }

    // Update detector type for Moench
    // TODO! How does this work with old .raw master files?
#ifdef AARE_VERBOSE

@@ -51,7 +51,7 @@ TEST_CASE("Parse scan parameters") {
    ScanParameters s("[enabled\ndac dac 4\nstart 500\nstop 2200\nstep "
                     "5\nsettleTime 100us\n]");
    REQUIRE(s.enabled());
    REQUIRE(s.dac() == "dac 4");
    REQUIRE(s.dac() == DACIndex::DAC_4);
    REQUIRE(s.start() == 500);
    REQUIRE(s.stop() == 2200);
    REQUIRE(s.step() == 5);
@@ -60,7 +60,7 @@ TEST_CASE("A disabled scan") {
TEST_CASE("A disabled scan") {
    ScanParameters s("[disabled]");
    REQUIRE_FALSE(s.enabled());
    REQUIRE(s.dac() == "");
    REQUIRE(s.dac() == DACIndex::DAC_0);
    REQUIRE(s.start() == 0);
    REQUIRE(s.stop() == 0);
    REQUIRE(s.step() == 0);
@@ -68,7 +68,7 @@ TEST_CASE("A disabled scan") {

TEST_CASE("Parse a master file in .json format", "[.integration]") {
    auto fpath =
        test_data_path() / "jungfrau" / "jungfrau_single_master_0.json";
        test_data_path() / "raw" / "jungfrau" / "jungfrau_single_master_0.json";
    REQUIRE(std::filesystem::exists(fpath));
    RawMasterFile f(fpath);

@@ -224,6 +224,41 @@ TEST_CASE("Parse a master file in .raw format", "[.integration]") {
    // Packets Caught Mask : 64 bytes
}

TEST_CASE("Parse a master file in new .json format",
          "[.integration][.width-data]") {

    auto file_path =
        test_data_path() / "raw" / "newmythen03" / "run_87_master_0.json";
    REQUIRE(std::filesystem::exists(file_path));

    RawMasterFile f(file_path);

    // Version : 8.0
    REQUIRE(f.version() == "8.0");

    REQUIRE(f.detector_type() == DetectorType::Mythen3);
    // Timing Mode : auto
    REQUIRE(f.timing_mode() == TimingMode::Auto);
    // Geometry : [2, 1]
    REQUIRE(f.geometry().col == 2);
    REQUIRE(f.geometry().row == 1);
    // Image Size : 5120 bytes
    REQUIRE(f.image_size_in_bytes() == 5120);

    REQUIRE(f.scan_parameters().enabled() == false);
    REQUIRE(f.scan_parameters().dac() == DACIndex::DAC_0);
    REQUIRE(f.scan_parameters().start() == 0);
    REQUIRE(f.scan_parameters().stop() == 0);
    REQUIRE(f.scan_parameters().step() == 0);
    REQUIRE(f.scan_parameters().settleTime() == 0);

    auto roi = f.roi().value();
    REQUIRE(roi.xmin == 0);
    REQUIRE(roi.xmax == 2559);
    REQUIRE(roi.ymin == -1);
    REQUIRE(roi.ymax == -1);
}

TEST_CASE("Read eiger master file", "[.integration]") {
    auto fpath = test_data_path() / "eiger" / "eiger_500k_32bit_master_0.json";
    REQUIRE(std::filesystem::exists(fpath));
@@ -292,4 +327,4 @@ TEST_CASE("Read eiger master file", "[.integration]") {
    //     "Packets Caught Mask": "64 bytes"
    //   }
    // }
}
}

src/calibration.cpp (new file, 44 lines)
@@ -0,0 +1,44 @@
#include "aare/calibration.hpp"

namespace aare {

NDArray<int, 2> count_switching_pixels(NDView<uint16_t, 3> raw_data) {
    NDArray<int, 2> switched(
        std::array<ssize_t, 2>{raw_data.shape(1), raw_data.shape(2)}, 0);
    for (int frame_nr = 0; frame_nr != raw_data.shape(0); ++frame_nr) {
        for (int row = 0; row != raw_data.shape(1); ++row) {
            for (int col = 0; col != raw_data.shape(2); ++col) {
                auto [value, gain] =
                    get_value_and_gain(raw_data(frame_nr, row, col));
                if (gain != 0) {
                    switched(row, col) += 1;
                }
            }
        }
    }
    return switched;
}
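
// Threaded variant: the input is split into n_threads sub-views (one block of
// frames each), every block is counted independently with the single-view
// overload above, and the per-thread 2D counts are summed into the result.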
NDArray<int, 2> count_switching_pixels(NDView<uint16_t, 3> raw_data,
                                       ssize_t n_threads) {
    NDArray<int, 2> switched(
        std::array<ssize_t, 2>{raw_data.shape(1), raw_data.shape(2)}, 0);
    std::vector<std::future<NDArray<int, 2>>> futures;
    futures.reserve(n_threads);

    auto subviews = make_subviews(raw_data, n_threads);

    for (auto view : subviews) {
        futures.push_back(
            std::async(static_cast<NDArray<int, 2> (*)(NDView<uint16_t, 3>)>(
                           &count_switching_pixels),
                       view));
    }

    for (auto &f : futures) {
        switched += f.get();
    }
    return switched;
}

} // namespace aare

src/calibration.test.cpp (new file, 49 lines)
@@ -0,0 +1,49 @@
/************************************************
 * @file test-Cluster.cpp
 * @short test case for generic Cluster, ClusterVector, and calculate_eta2
 ***********************************************/

#include "aare/calibration.hpp"

// #include "catch.hpp"
#include <array>
#include <catch2/catch_all.hpp>
#include <catch2/catch_test_macros.hpp>

using namespace aare;

TEST_CASE("Test Pedestal Generation", "[.calibration]") {
    NDArray<uint16_t, 3> raw(std::array<ssize_t, 3>{3, 2, 2}, 0);

    // gain 0
    raw(0, 0, 0) = 100;
    raw(1, 0, 0) = 200;
    raw(2, 0, 0) = 300;

    // gain 1
    raw(0, 0, 1) = (1 << 14) + 100;
    raw(1, 0, 1) = (1 << 14) + 200;
    raw(2, 0, 1) = (1 << 14) + 300;

    raw(0, 1, 0) = (1 << 14) + 37;
    raw(1, 1, 0) = 38;
    raw(2, 1, 0) = (3 << 14) + 39;

    // gain 2
    raw(0, 1, 1) = (3 << 14) + 100;
    raw(1, 1, 1) = (3 << 14) + 200;
    raw(2, 1, 1) = (3 << 14) + 300;

    auto pedestal = calculate_pedestal<double>(raw.view(), 4);

    REQUIRE(pedestal.size() == raw.size());
    CHECK(pedestal(0, 0, 0) == 200);
    CHECK(pedestal(1, 0, 0) == 0);
    CHECK(pedestal(1, 0, 1) == 200);

    auto pedestal_gain0 = calculate_pedestal_g0<double>(raw.view(), 4);

    REQUIRE(pedestal_gain0.size() == 4);
    CHECK(pedestal_gain0(0, 0) == 200);
    CHECK(pedestal_gain0(1, 0) == 38);
}
src/defs.cpp (182 lines changed)
@@ -115,4 +115,186 @@ template <> FrameDiscardPolicy StringTo(const std::string &arg) {

// template <> TimingMode StringTo<TimingMode>(std::string mode);

template <> DACIndex StringTo(const std::string &arg) {
    if (arg == "dac 0") return DACIndex::DAC_0;
    else if (arg == "dac 1") return DACIndex::DAC_1;
    else if (arg == "dac 2") return DACIndex::DAC_2;
    else if (arg == "dac 3") return DACIndex::DAC_3;
    else if (arg == "dac 4") return DACIndex::DAC_4;
    else if (arg == "dac 5") return DACIndex::DAC_5;
    else if (arg == "dac 6") return DACIndex::DAC_6;
    else if (arg == "dac 7") return DACIndex::DAC_7;
    else if (arg == "dac 8") return DACIndex::DAC_8;
    else if (arg == "dac 9") return DACIndex::DAC_9;
    else if (arg == "dac 10") return DACIndex::DAC_10;
    else if (arg == "dac 11") return DACIndex::DAC_11;
    else if (arg == "dac 12") return DACIndex::DAC_12;
    else if (arg == "dac 13") return DACIndex::DAC_13;
    else if (arg == "dac 14") return DACIndex::DAC_14;
    else if (arg == "dac 15") return DACIndex::DAC_15;
    else if (arg == "dac 16") return DACIndex::DAC_16;
    else if (arg == "dac 17") return DACIndex::DAC_17;
    else if (arg == "vsvp") return DACIndex::VSVP;
    else if (arg == "vtrim") return DACIndex::VTRIM;
    else if (arg == "vrpreamp") return DACIndex::VRPREAMP;
    else if (arg == "vrshaper") return DACIndex::VRSHAPER;
    else if (arg == "vsvn") return DACIndex::VSVN;
    else if (arg == "vtgstv") return DACIndex::VTGSTV;
    else if (arg == "vcmp_ll") return DACIndex::VCMP_LL;
    else if (arg == "vcmp_lr") return DACIndex::VCMP_LR;
    else if (arg == "vcal") return DACIndex::VCAL;
    else if (arg == "vcmp_rl") return DACIndex::VCMP_RL;
    else if (arg == "rxb_rb") return DACIndex::RXB_RB;
    else if (arg == "rxb_lb") return DACIndex::RXB_LB;
    else if (arg == "vcmp_rr") return DACIndex::VCMP_RR;
    else if (arg == "vcp") return DACIndex::VCP;
    else if (arg == "vcn") return DACIndex::VCN;
    else if (arg == "vishaper") return DACIndex::VISHAPER;
    else if (arg == "vthreshold") return DACIndex::VTHRESHOLD;
    else if (arg == "vref_ds") return DACIndex::VREF_DS;
    else if (arg == "vout_cm") return DACIndex::VOUT_CM;
    else if (arg == "vin_cm") return DACIndex::VIN_CM;
    else if (arg == "vref_comp") return DACIndex::VREF_COMP;
    else if (arg == "vb_comp") return DACIndex::VB_COMP;
    else if (arg == "vdd_prot") return DACIndex::VDD_PROT;
    else if (arg == "vin_com") return DACIndex::VIN_COM;
    else if (arg == "vref_prech") return DACIndex::VREF_PRECH;
    else if (arg == "vb_pixbuf") return DACIndex::VB_PIXBUF;
    else if (arg == "vb_ds") return DACIndex::VB_DS;
    else if (arg == "vref_h_adc") return DACIndex::VREF_H_ADC;
    else if (arg == "vb_comp_fe") return DACIndex::VB_COMP_FE;
    else if (arg == "vb_comp_adc") return DACIndex::VB_COMP_ADC;
    else if (arg == "vcom_cds") return DACIndex::VCOM_CDS;
    else if (arg == "vref_rstore") return DACIndex::VREF_RSTORE;
    else if (arg == "vb_opa_1st") return DACIndex::VB_OPA_1ST;
    else if (arg == "vref_comp_fe") return DACIndex::VREF_COMP_FE;
    else if (arg == "vcom_adc1") return DACIndex::VCOM_ADC1;
    else if (arg == "vref_l_adc") return DACIndex::VREF_L_ADC;
    else if (arg == "vref_cds") return DACIndex::VREF_CDS;
    else if (arg == "vb_cs") return DACIndex::VB_CS;
    else if (arg == "vb_opa_fd") return DACIndex::VB_OPA_FD;
    else if (arg == "vcom_adc2") return DACIndex::VCOM_ADC2;
    else if (arg == "vcassh") return DACIndex::VCASSH;
    else if (arg == "vth2") return DACIndex::VTH2;
    else if (arg == "vrshaper_n") return DACIndex::VRSHAPER_N;
    else if (arg == "vipre_out") return DACIndex::VIPRE_OUT;
    else if (arg == "vth3") return DACIndex::VTH3;
    else if (arg == "vth1") return DACIndex::VTH1;
    else if (arg == "vicin") return DACIndex::VICIN;
    else if (arg == "vcas") return DACIndex::VCAS;
    else if (arg == "vcal_n") return DACIndex::VCAL_N;
    else if (arg == "vipre") return DACIndex::VIPRE;
    else if (arg == "vcal_p") return DACIndex::VCAL_P;
    else if (arg == "vdcsh") return DACIndex::VDCSH;
    else if (arg == "vbp_colbuf") return DACIndex::VBP_COLBUF;
    else if (arg == "vb_sda") return DACIndex::VB_SDA;
    else if (arg == "vcasc_sfp") return DACIndex::VCASC_SFP;
    else if (arg == "vipre_cds") return DACIndex::VIPRE_CDS;
    else if (arg == "ibias_sfp") return DACIndex::IBIAS_SFP;
    else if (arg == "trimbits") return DACIndex::TRIMBIT_SCAN;
    else if (arg == "highvoltage") return DACIndex::HIGH_VOLTAGE;
    else if (arg == "iodelay") return DACIndex::IO_DELAY;
    else if (arg == "temp_adc") return DACIndex::TEMPERATURE_ADC;
    else if (arg == "temp_fpga") return DACIndex::TEMPERATURE_FPGA;
    else if (arg == "temp_fpgaext") return DACIndex::TEMPERATURE_FPGAEXT;
    else if (arg == "temp_10ge") return DACIndex::TEMPERATURE_10GE;
    else if (arg == "temp_dcdc") return DACIndex::TEMPERATURE_DCDC;
    else if (arg == "temp_sodl") return DACIndex::TEMPERATURE_SODL;
    else if (arg == "temp_sodr") return DACIndex::TEMPERATURE_SODR;
    else if (arg == "temp_fpgafl") return DACIndex::TEMPERATURE_FPGA2;
    else if (arg == "temp_fpgafr") return DACIndex::TEMPERATURE_FPGA3;
    else if (arg == "temp_slowadc") return DACIndex::SLOW_ADC_TEMP;
    else
        throw std::invalid_argument("Could not decode DACIndex from: \"" + arg +
                                    "\"");
}

} // namespace aare
@@ -7,6 +7,7 @@ Script to update VERSION file with semantic versioning if provided as an argumen
import sys
import os
import re
from datetime import datetime

from packaging.version import Version, InvalidVersion

@@ -26,9 +27,9 @@ def get_version():

    # Check at least one argument is passed
    if len(sys.argv) < 2:
        return "0.0.0"

    version = sys.argv[1]
        version = datetime.today().strftime('%Y.%-m.%-d')
    else:
        version = sys.argv[1]

    try:
        v = Version(version) # normalize check if version follows PEP 440 specification
@@ -54,4 +55,4 @@ def write_version_to_file(version):
if __name__ == "__main__":

    version = get_version()
    write_version_to_file(version)
    write_version_to_file(version)