Mirror of https://github.com/slsdetectorgroup/aare.git
synced 2025-12-22 21:11:25 +01:00

Compare commits (102 commits)
SHA1:

5dfaba53c2  45f506b473  6f10afbcdc  e418986fd2  723c8dd013  351f4626b3
516ef88d10  5329be816e  72a2604ca5  c78a73ebaf  77a9788891  c0ee17275e
ad3ef88607  f814b3f4e7  1f46266183  d3d9f760b3  0891ffb1ee  0b74bc25d5
3ec40fa809  74280379ce  474c35cc6b  e2a97d3c45  bce8e9d5fc  4c1e276e2c
12114e7275  7926993bb2  ed7fb1f1f9  8ab98b356b  d908ad3636  8733a1d66f
437f7cec89  6c3524298f  b59277c4bf  cb163c79b4  a0fb4900f0  91d74110fa
f54e76e6bf  c6da36d10b  5107513ff5  f7aa66a2c9  3ac94641e3  89bb8776ea
1527a45cf3  3d6858ad33  d6222027d0  1195a5e100  1347158235  8c4d8b687e
b8e91d0282  46876bfa73  348fd0f937  0fea0f5b0e  cb439efb48  5de402f91b
9a7713e98a  b898e1c8d0  4073c0cbe0  9a3694b980  abae2674a9  1414d75320
85c3bf7bed  fa3b7a5afe  e95326faa1  8143524acf  8e2346abf8  8eb7fec435
d8952eccc6  83717571c8  97dae4ac60  5a9c3b717e  e3f4b34b72  6ec8fbee72
30822d9c5f  ff7312f45d  8e7c9eadff  d35b7762b4  df4dbb8fd0  c92be4bca2
664055de92  318e640639  c6990dabad  c9fe16b4c2  64438c8803  9f8eee5d08
35114cde9d  b13f864b2b  05828baa54  0f56846e3d  be67bbab6b  8354439605
11fa95b23c  bd7870e75a  75f63607fc  cfe7c31fe4  4976ec1651  a9a55fb27d
19ecc82fff  923f7d22b8  6438a4bef1  ad7525cd02  87d8682b1e  9c6e629298
.github/workflows/build_docs.yml (vendored, 2 changes)

@@ -43,7 +43,7 @@ jobs:
       run: |
         mkdir build
         cd build
-        cmake .. -DAARE_SYSTEM_LIBRARIES=ON -DAARE_DOCS=ON
+        cmake .. -DAARE_SYSTEM_LIBRARIES=ON -DAARE_PYTHON_BINDINGS=ON -DAARE_DOCS=ON
         make -j 2
         make docs
@@ -77,17 +77,6 @@ if(AARE_SYSTEM_LIBRARIES)
     # on conda-forge
 endif()
 
-if(AARE_VERBOSE)
-    add_compile_definitions(AARE_VERBOSE)
-    add_compile_definitions(AARE_LOG_LEVEL=aare::logDEBUG5)
-else()
-    add_compile_definitions(AARE_LOG_LEVEL=aare::logERROR)
-endif()
-
-if(AARE_CUSTOM_ASSERT)
-    add_compile_definitions(AARE_CUSTOM_ASSERT)
-endif()
-
 if(AARE_BENCHMARKS)
     add_subdirectory(benchmarks)
 endif()

@@ -335,13 +324,10 @@ if(AARE_ASAN)
     )
 endif()
 
-
-
-
 if(AARE_TESTS)
     enable_testing()
     add_subdirectory(tests)
+    target_compile_definitions(tests PRIVATE AARE_TESTS)
 endif()
 
 ###------------------------------------------------------------------------------MAIN LIBRARY

@@ -364,8 +350,9 @@ set(PUBLICHEADERS
     include/aare/FilePtr.hpp
     include/aare/Frame.hpp
     include/aare/GainMap.hpp
-    include/aare/geo_helpers.hpp
+    include/aare/DetectorGeometry.hpp
     include/aare/JungfrauDataFile.hpp
+    include/aare/logger.hpp
     include/aare/NDArray.hpp
     include/aare/NDView.hpp
     include/aare/NumpyFile.hpp

@@ -381,23 +368,24 @@ set(PUBLICHEADERS
 
 set(SourceFiles
+    ${CMAKE_CURRENT_SOURCE_DIR}/src/calibration.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/CtbRawFile.cpp
-    ${CMAKE_CURRENT_SOURCE_DIR}/src/defs.cpp
-    ${CMAKE_CURRENT_SOURCE_DIR}/src/Dtype.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/decode.cpp
-    ${CMAKE_CURRENT_SOURCE_DIR}/src/Frame.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/src/defs.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/src/DetectorGeometry.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/src/Dtype.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/File.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/FilePtr.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/Fit.cpp
-    ${CMAKE_CURRENT_SOURCE_DIR}/src/geo_helpers.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/src/Frame.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/src/Interpolator.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/JungfrauDataFile.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyHelpers.cpp
-    ${CMAKE_CURRENT_SOURCE_DIR}/src/Interpolator.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/PixelMap.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/RawFile.cpp
-    ${CMAKE_CURRENT_SOURCE_DIR}/src/RawSubFile.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/RawMasterFile.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/src/RawSubFile.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/utils/task.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/utils/ifstream_helpers.cpp
 )

@@ -424,23 +412,36 @@ target_link_libraries(
 )
 
+set_property(TARGET aare_core PROPERTY POSITION_INDEPENDENT_CODE ON)
+
+if(AARE_TESTS)
+    target_compile_definitions(aare_core PRIVATE AARE_TESTS)
+endif()
+if(AARE_VERBOSE)
+    target_compile_definitions(aare_core PUBLIC AARE_VERBOSE)
+    target_compile_definitions(aare_core PUBLIC AARE_LOG_LEVEL=aare::logDEBUG5)
+else()
+    target_compile_definitions(aare_core PUBLIC AARE_LOG_LEVEL=aare::logERROR)
+endif()
+
+if(AARE_CUSTOM_ASSERT)
+    target_compile_definitions(aare_core PUBLIC AARE_CUSTOM_ASSERT)
+endif()
+
 set_target_properties(aare_core PROPERTIES
     ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}
     PUBLIC_HEADER "${PUBLICHEADERS}"
 )
 
-if (AARE_PYTHON_BINDINGS)
-    set_property(TARGET aare_core PROPERTY POSITION_INDEPENDENT_CODE ON)
-endif()
-
 if(AARE_TESTS)
     set(TestSources
         ${CMAKE_CURRENT_SOURCE_DIR}/src/algorithm.test.cpp
+        ${CMAKE_CURRENT_SOURCE_DIR}/src/calibration.test.cpp
         ${CMAKE_CURRENT_SOURCE_DIR}/src/defs.test.cpp
         ${CMAKE_CURRENT_SOURCE_DIR}/src/decode.test.cpp
         ${CMAKE_CURRENT_SOURCE_DIR}/src/Dtype.test.cpp
         ${CMAKE_CURRENT_SOURCE_DIR}/src/Frame.test.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/src/geo_helpers.test.cpp
+        ${CMAKE_CURRENT_SOURCE_DIR}/src/DetectorGeometry.test.cpp
         ${CMAKE_CURRENT_SOURCE_DIR}/src/RawMasterFile.test.cpp
         ${CMAKE_CURRENT_SOURCE_DIR}/src/NDArray.test.cpp
         ${CMAKE_CURRENT_SOURCE_DIR}/src/NDView.test.cpp

@@ -463,11 +464,6 @@ if(AARE_TESTS)
 endif()
 
 
-###------------------------------------------------------------------------------------------
-###------------------------------------------------------------------------------------------
-
 if(AARE_MASTER_PROJECT)
     install(TARGETS aare_core aare_compiler_flags
         EXPORT "${TARGETS_EXPORT_NAME}"

@@ -477,7 +473,6 @@ if(AARE_MASTER_PROJECT)
     )
 endif()
 
-set(CMAKE_POSITION_INDEPENDENT_CODE ON)
 set(CMAKE_INSTALL_RPATH $ORIGIN)
 set(CMAKE_BUILD_WITH_INSTALL_RPATH FALSE)
 
RELEASE.md (35 changes)

@@ -1,14 +1,41 @@
 # Release notes
 
+### 2025.10.1
 
-### head
+Bugfixes:
+
+- File supports reading new master json file format (multiple ROI's not supported yet)
+
+### 2025.8.22
+
+Features:
+
+- Apply calibration works in G0 if passes a 2D calibration and pedestal
+- count pixels that switch
+- calculate pedestal (also g0 version)
+- NDArray::view() needs an lvalue to reduce issues with the view outliving the array
+
+
+Bugfixes:
+
+- Now using glibc 2.17 in conda builds (was using the host)
+- Fixed shifted pixels in clusters close to the edge of a frame
+
+### 2025.7.18
 
 Features:
 
 - Cluster finder now works with 5x5, 7x7 and 9x9 clusters
+- Added ClusterVector::empty() member
+- Added apply_calibration function for Jungfrau data
+
+Bugfixes:
+- Fixed reading RawFiles with ROI fully excluding some sub files.
+- Decoding of MH02 files placed the pixels in wrong position
+- Removed unused file: ClusterFile.cpp
 
 
-### 2025.05.22
+### 2025.5.22
 
 Features:

@@ -20,3 +47,7 @@ Bugfixes:
 (only blank lines changed in this hunk)
@@ -15,7 +15,7 @@ FetchContent_MakeAvailable(benchmark)
 
 add_executable(benchmarks)
 
-target_sources(benchmarks PRIVATE ndarray_benchmark.cpp calculateeta_benchmark.cpp)
+target_sources(benchmarks PRIVATE ndarray_benchmark.cpp calculateeta_benchmark.cpp reduce_benchmark.cpp)
 
 # Link Google Benchmark and other necessary libraries
 target_link_libraries(benchmarks PRIVATE benchmark::benchmark aare_core aare_compiler_flags)
benchmarks/reduce_benchmark.cpp (new file, 168 lines)

@@ -0,0 +1,168 @@
#include "aare/Cluster.hpp"
#include <benchmark/benchmark.h>

using namespace aare;

class ClustersForReduceFixture : public benchmark::Fixture {
  public:
    Cluster<int, 5, 5> cluster_5x5{};
    Cluster<int, 3, 3> cluster_3x3{};

  private:
    using benchmark::Fixture::SetUp;

    void SetUp([[maybe_unused]] const benchmark::State &state) override {
        int temp_data[25] = {1, 1, 1, 1, 1, 1, 1, 2, 1, 1,
                             1, 2, 3, 1, 2, 1, 1, 1, 1, 2};
        std::copy(std::begin(temp_data), std::end(temp_data),
                  std::begin(cluster_5x5.data));

        cluster_5x5.x = 5;
        cluster_5x5.y = 5;

        int temp_data2[9] = {1, 1, 1, 2, 3, 1, 2, 2, 1};
        std::copy(std::begin(temp_data2), std::end(temp_data2),
                  std::begin(cluster_3x3.data));

        cluster_3x3.x = 5;
        cluster_3x3.y = 5;
    }

    // void TearDown(::benchmark::State& state) {
    // }
};

template <typename T>
Cluster<T, 3, 3, int16_t> reduce_to_3x3(const Cluster<T, 5, 5, int16_t> &c) {
    Cluster<T, 3, 3, int16_t> result;

    // Write out the sums in the hope that the compiler can optimize this
    std::array<T, 9> sum_3x3_subclusters;

    // Write out the sums in the hope that the compiler can optimize this
    sum_3x3_subclusters[0] = c.data[0] + c.data[1] + c.data[2] + c.data[5] +
                             c.data[6] + c.data[7] + c.data[10] + c.data[11] + c.data[12];
    sum_3x3_subclusters[1] = c.data[1] + c.data[2] + c.data[3] + c.data[6] +
                             c.data[7] + c.data[8] + c.data[11] + c.data[12] + c.data[13];
    sum_3x3_subclusters[2] = c.data[2] + c.data[3] + c.data[4] + c.data[7] +
                             c.data[8] + c.data[9] + c.data[12] + c.data[13] + c.data[14];
    sum_3x3_subclusters[3] = c.data[5] + c.data[6] + c.data[7] + c.data[10] +
                             c.data[11] + c.data[12] + c.data[15] + c.data[16] + c.data[17];
    sum_3x3_subclusters[4] = c.data[6] + c.data[7] + c.data[8] + c.data[11] +
                             c.data[12] + c.data[13] + c.data[16] + c.data[17] + c.data[18];
    sum_3x3_subclusters[5] = c.data[7] + c.data[8] + c.data[9] + c.data[12] +
                             c.data[13] + c.data[14] + c.data[17] + c.data[18] + c.data[19];
    sum_3x3_subclusters[6] = c.data[10] + c.data[11] + c.data[12] + c.data[15] +
                             c.data[16] + c.data[17] + c.data[20] + c.data[21] + c.data[22];
    sum_3x3_subclusters[7] = c.data[11] + c.data[12] + c.data[13] + c.data[16] +
                             c.data[17] + c.data[18] + c.data[21] + c.data[22] + c.data[23];
    sum_3x3_subclusters[8] = c.data[12] + c.data[13] + c.data[14] + c.data[17] +
                             c.data[18] + c.data[19] + c.data[22] + c.data[23] + c.data[24];

    auto index = std::max_element(sum_3x3_subclusters.begin(),
                                  sum_3x3_subclusters.end()) -
                 sum_3x3_subclusters.begin();

    switch (index) {
    case 0:
        result.x = c.x - 1;
        result.y = c.y + 1;
        result.data = {c.data[0], c.data[1], c.data[2], c.data[5], c.data[6],
                       c.data[7], c.data[10], c.data[11], c.data[12]};
        break;
    case 1:
        result.x = c.x;
        result.y = c.y + 1;
        result.data = {c.data[1], c.data[2], c.data[3], c.data[6], c.data[7],
                       c.data[8], c.data[11], c.data[12], c.data[13]};
        break;
    case 2:
        result.x = c.x + 1;
        result.y = c.y + 1;
        result.data = {c.data[2], c.data[3], c.data[4], c.data[7], c.data[8],
                       c.data[9], c.data[12], c.data[13], c.data[14]};
        break;
    case 3:
        result.x = c.x - 1;
        result.y = c.y;
        result.data = {c.data[5], c.data[6], c.data[7], c.data[10], c.data[11],
                       c.data[12], c.data[15], c.data[16], c.data[17]};
        break;
    case 4:
        result.x = c.x + 1;
        result.y = c.y;
        result.data = {c.data[6], c.data[7], c.data[8], c.data[11], c.data[12],
                       c.data[13], c.data[16], c.data[17], c.data[18]};
        break;
    case 5:
        result.x = c.x + 1;
        result.y = c.y;
        result.data = {c.data[7], c.data[8], c.data[9], c.data[12], c.data[13],
                       c.data[14], c.data[17], c.data[18], c.data[19]};
        break;
    case 6:
        result.x = c.x + 1;
        result.y = c.y - 1;
        result.data = {c.data[10], c.data[11], c.data[12], c.data[15], c.data[16],
                       c.data[17], c.data[20], c.data[21], c.data[22]};
        break;
    case 7:
        result.x = c.x + 1;
        result.y = c.y - 1;
        result.data = {c.data[11], c.data[12], c.data[13], c.data[16], c.data[17],
                       c.data[18], c.data[21], c.data[22], c.data[23]};
        break;
    case 8:
        result.x = c.x + 1;
        result.y = c.y - 1;
        result.data = {c.data[12], c.data[13], c.data[14], c.data[17], c.data[18],
                       c.data[19], c.data[22], c.data[23], c.data[24]};
        break;
    }
    return result;
}

BENCHMARK_F(ClustersForReduceFixture, Reduce2x2)(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        benchmark::DoNotOptimize(reduce_to_2x2<int, 3, 3, int16_t>(
            cluster_3x3)); // make sure compiler evaluates the expression
    }
}

BENCHMARK_F(ClustersForReduceFixture, SpecificReduce2x2)(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        benchmark::DoNotOptimize(reduce_to_2x2<int>(cluster_3x3));
    }
}

BENCHMARK_F(ClustersForReduceFixture, Reduce3x3)(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        benchmark::DoNotOptimize(reduce_to_3x3<int, 5, 5, int16_t>(cluster_5x5));
    }
}

BENCHMARK_F(ClustersForReduceFixture, SpecificReduce3x3)(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        benchmark::DoNotOptimize(reduce_to_3x3<int>(cluster_5x5));
    }
}
@@ -1,5 +1,17 @@
 python:
-  - 3.11
-  - 3.12
-  - 3.13
+  # - 3.11
+  # - 3.12
+  # - 3.13
+  - 3.14
 
+c_compiler:
+  - gcc            # [linux]
+
+c_stdlib:
+  - sysroot        # [linux]
+
+cxx_compiler:
+  - gxx            # [linux]
+
+c_stdlib_version:  # [linux]
+  - 2.17           # [linux]

@@ -16,6 +16,8 @@ build:
 
 requirements:
   build:
+    - {{ compiler('c') }}
+    - {{ stdlib("c") }}
    - {{ compiler('cxx') }}
    - cmake
    - ninja

@@ -23,7 +25,7 @@ requirements:
   host:
    - python
    - pip
-    - numpy=2.1
+    - numpy=2.3
    - scikit-build-core
    - pybind11 >=2.13.0
    - matplotlib # needed in host to solve the environment for run

@@ -40,11 +42,11 @@ test:
    - aare
   requires:
    - pytest
-    - boost-histogram
+    # - boost-histogram
   source_files:
-    - python/tests
+    # - python/tests
   commands:
-    - python -m pytest python/tests
+    # - python -m pytest python/tests
 
 about:
   summary: Data analysis library for hybrid pixel detectors from PSI
@@ -45,12 +45,3 @@ add_custom_target(
     COMMENT "Generating documentation with Sphinx"
 )
 
-add_custom_target(
-    rst
-    COMMAND ${SPHINX_EXECUTABLE} -a -b html
-    -Dbreathe_projects.aare=${CMAKE_CURRENT_BINARY_DIR}/xml
-    -c "${SPHINX_BUILD}"
-    ${SPHINX_BUILD}/src
-    ${SPHINX_BUILD}/html
-    COMMENT "Generating documentation with Sphinx"
-)
@@ -886,7 +886,7 @@ EXCLUDE_SYMLINKS = NO
 # Note that the wildcards are matched against the file with absolute path, so to
 # exclude all test directories for example use the pattern */test/*
 
-EXCLUDE_PATTERNS = */docs/* */tests/* */python/* */manual */slsDetectorServers/* */libs/* */integrationTests *README* */slsDetectorGui/* */ctbGui/* */slsDetectorCalibration/* *TobiSchluter*
+EXCLUDE_PATTERNS = *build* */docs/* */tests/* *.test.cpp* */python/* */manual */slsDetectorServers/* */libs/* */integrationTests *README* *_deps* *TobiSchluter*
 
 # The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
 # (namespaces, classes, functions, etc.) that should be excluded from the
@@ -5,3 +5,4 @@ ClusterFile
    :members:
    :undoc-members:
    :private-members:
+
@@ -4,3 +4,19 @@ ClusterVector
 .. doxygenclass:: aare::ClusterVector
    :members:
    :undoc-members:
+   :private-members:
+
+
+.. doxygenclass:: aare::ClusterVector< Cluster< T, ClusterSizeX, ClusterSizeY, CoordType > >
+   :members:
+   :undoc-members:
+   :private-members:
+
+
+**Free Functions:**
+
+.. doxygenfunction:: aare::reduce_to_3x3(const ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>>&)
+
+.. doxygenfunction:: aare::reduce_to_2x2(const ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>>&)
+
@@ -2,7 +2,7 @@
 Tests
 ****************
 
-We test the code both from the C++ and Python API. By default only tests that does not require image data is run.
+We test the code both from C++ and Python. By default only tests that does not require additional data are run.
 
 C++
 ~~~~~~~~~~~~~~~~~~

@@ -15,7 +15,7 @@ C++
     make -j 4
 
     export AARE_TEST_DATA=/path/to/test/data
-    ./run_test [.files] #or using ctest, [.files] is the option to include tests needing data
+    ./run_test [.with-data] #or using ctest, [.with-data] is the option to include tests needing data
 
 

@@ -25,7 +25,7 @@ Python
 .. code-block:: bash
 
     #From the root dir of the library
-    python -m pytest python/tests --files # passing --files will run the tests needing data
+    python -m pytest python/tests --with-data # passing --with-data will run the tests needing data
 
 

@@ -35,7 +35,7 @@ Getting the test data
 .. attention ::
 
     The tests needing the test data are not run by default. To make the data available, you need to set the environment variable
-    AARE_TEST_DATA to the path of the test data directory. Then pass either [.files] for the C++ tests or --files for Python
+    AARE_TEST_DATA to the path of the test data directory. Then pass either [.with-data] for the C++ tests or --files for Python
 
 The image files needed for the test are large and are not included in the repository. They are stored
 using GIT LFS in a separate repository. To get the test data, you need to clone the repository.
@@ -37,7 +37,7 @@ unfamiliar steps.
 
 Checklists for deployment
-~~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 **Feature:**
 
@@ -25,6 +25,7 @@ AARE
    :maxdepth: 1
 
    pyFile
+   pycalibration
    pyCtbRawFile
    pyClusterFile
    pyClusterVector
@@ -2,9 +2,24 @@
 ClusterFile
 ============
 
+The :class:`ClusterFile` class is the main interface to read and write clusters in aare. Unfortunately the
+old file format does not include metadata like the cluster size and the data type. This means that the
+user has to know this information from other sources. Specifying the wrong cluster size or data type
+will lead to garbage data being read.
+
 .. py:currentmodule:: aare
 
 .. autoclass:: ClusterFile
+   :members:
+   :undoc-members:
+   :inherited-members:
+
+
+Below is the API of the ClusterFile_Cluster3x3i but all variants share the same API.
+
+.. autoclass:: aare._aare.ClusterFile_Cluster3x3i
+   :special-members: __init__
    :members:
    :undoc-members:
    :show-inheritance:
@@ -2,8 +2,10 @@ ClusterVector
 ================
 
 The ClusterVector, holds clusters from the ClusterFinder. Since it is templated
-in C++ we use a suffix indicating the data type in python. The suffix is
-``_i`` for integer, ``_f`` for float, and ``_d`` for double.
+in C++ we use a suffix indicating the type of cluster it holds. The suffix follows
+the same pattern as for ClusterFile i.e. ``ClusterVector_Cluster3x3i``
+for a vector holding 3x3 integer clusters.
+
 
 At the moment the functionality from python is limited and it is not supported
 to push_back clusters to the vector. The intended use case is to pass it to

@@ -26,8 +28,22 @@ C++ functions that support the ClusterVector or to view it as a numpy array.
 
 .. py:currentmodule:: aare
 
-.. autoclass:: ClusterVector_i
+.. autoclass:: aare._aare.ClusterVector_Cluster3x3i
+   :special-members: __init__
    :members:
    :undoc-members:
    :show-inheritance:
    :inherited-members:
+
+
+**Free Functions:**
+
+.. autofunction:: reduce_to_3x3
+   :noindex:
+
+   Reduce a single Cluster to 3x3 by taking the 3x3 subcluster with highest photon energy.
+
+.. autofunction:: reduce_to_2x2
+   :noindex:
+
+   Reduce a single Cluster to 2x2 by taking the 2x2 subcluster with highest photon energy.
docs/src/pycalibration.rst (new file, 40 lines)

@@ -0,0 +1,40 @@
Calibration
==============

Functions for applying calibration to data.

.. code-block:: python

    import aare

    # Load calibration data for a single JF module (512x1024 pixels)
    calibration = aare.load_calibration('path/to/calibration/file.bin')

    raw_data = ... # Load your raw data here
    pedestal = ... # Load your pedestal data here

    # Apply calibration to raw data to convert from raw ADC values to keV
    data = aare.apply_calibration(raw_data, pd=pedestal, cal=calibration)

    # If you pass a 2D pedestal and calibration only G0 will be used for the conversion
    # Pixels that switched to G1 or G2 will be set to 0
    data = aare.apply_calibration(raw_data, pd=pedestal[0], cal=calibration[0])


.. py:currentmodule:: aare

.. autofunction:: apply_calibration

.. autofunction:: load_calibration

.. autofunction:: calculate_pedestal

.. autofunction:: calculate_pedestal_float

.. autofunction:: calculate_pedestal_g0

.. autofunction:: calculate_pedestal_g0_float

.. autofunction:: count_switching_pixels
@@ -5,9 +5,13 @@ dependencies:
   - anaconda-client
   - conda-build
   - doxygen
-  - sphinx=7.1.2
+  - sphinx
   - breathe
   - sphinx_rtd_theme
   - furo
   - zeromq
+  - pybind11
+  - numpy
+  - matplotlib
+  - nlohmann_json
 
@@ -7,10 +7,10 @@
 namespace aare {
 
 enum class corner : int {
-    cBottomLeft = 0,
-    cBottomRight = 1,
-    cTopLeft = 2,
-    cTopRight = 3
+    cTopLeft = 0,
+    cTopRight = 1,
+    cBottomLeft = 2,
+    cBottomRight = 3
 };
 
 enum class pixel : int {

@@ -28,7 +28,7 @@ enum class pixel : int {
 template <typename T> struct Eta2 {
     double x;
     double y;
-    int c;
+    int c{0};
     T sum;
 };
 

@@ -58,85 +58,126 @@ template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
           typename CoordType>
 Eta2<T>
 calculate_eta2(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cl) {
-    Eta2<T> eta{};
-
-    auto max_sum = cl.max_sum_2x2();
-    eta.sum = max_sum.first;
-    auto c = max_sum.second;
-
-    size_t cluster_center_index =
-        (ClusterSizeX / 2) + (ClusterSizeY / 2) * ClusterSizeX;
-
-    size_t index_bottom_left_max_2x2_subcluster =
-        (int(c / (ClusterSizeX - 1))) * ClusterSizeX + c % (ClusterSizeX - 1);
-
-    // check that cluster center is in max subcluster
-    if (cluster_center_index != index_bottom_left_max_2x2_subcluster &&
-        cluster_center_index != index_bottom_left_max_2x2_subcluster + 1 &&
-        cluster_center_index != index_bottom_left_max_2x2_subcluster + ClusterSizeX &&
-        cluster_center_index != index_bottom_left_max_2x2_subcluster + ClusterSizeX + 1)
-        throw std::runtime_error("Photon center is not in max 2x2_subcluster");
-
-    if ((cluster_center_index - index_bottom_left_max_2x2_subcluster) % ClusterSizeX == 0) {
-        if ((cl.data[cluster_center_index + 1] + cl.data[cluster_center_index]) != 0)
-            eta.x = static_cast<double>(cl.data[cluster_center_index + 1]) /
-                    static_cast<double>((cl.data[cluster_center_index + 1] + cl.data[cluster_center_index]));
-    } else {
-        if ((cl.data[cluster_center_index] + cl.data[cluster_center_index - 1]) != 0)
-            eta.x = static_cast<double>(cl.data[cluster_center_index]) /
-                    static_cast<double>((cl.data[cluster_center_index - 1] + cl.data[cluster_center_index]));
-    }
-    if ((cluster_center_index - index_bottom_left_max_2x2_subcluster) / ClusterSizeX < 1) {
-        assert(cluster_center_index + ClusterSizeX < ClusterSizeX * ClusterSizeY); // suppress warning
-        if ((cl.data[cluster_center_index] + cl.data[cluster_center_index + ClusterSizeX]) != 0)
-            eta.y = static_cast<double>(cl.data[cluster_center_index + ClusterSizeX]) /
-                    static_cast<double>((cl.data[cluster_center_index] + cl.data[cluster_center_index + ClusterSizeX]));
-    } else {
-        if ((cl.data[cluster_center_index] + cl.data[cluster_center_index - ClusterSizeX]) != 0)
-            eta.y = static_cast<double>(cl.data[cluster_center_index]) /
-                    static_cast<double>((cl.data[cluster_center_index] + cl.data[cluster_center_index - ClusterSizeX]));
-    }
-
-    eta.c = c; // TODO only supported for 2x2 and 3x3 clusters -> at least no
-               // underyling enum class
+    static_assert(ClusterSizeX > 1 && ClusterSizeY > 1);
+    Eta2<T> eta{};
+
+    size_t cluster_center_index =
+        (ClusterSizeX / 2) + (ClusterSizeY / 2) * ClusterSizeX;
+
+    auto max_sum = cl.max_sum_2x2();
+    eta.sum = max_sum.first;
+    int c = max_sum.second;
+
+    // subcluster top right from center
+    switch (static_cast<corner>(c)) {
+    case (corner::cTopLeft):
+        if ((cl.data[cluster_center_index - 1] + cl.data[cluster_center_index]) != 0)
+            eta.x = static_cast<double>(cl.data[cluster_center_index - 1]) /
+                    static_cast<double>(cl.data[cluster_center_index - 1] + cl.data[cluster_center_index]);
+        if ((cl.data[cluster_center_index - ClusterSizeX] + cl.data[cluster_center_index]) != 0)
+            eta.y = static_cast<double>(cl.data[cluster_center_index - ClusterSizeX]) /
+                    static_cast<double>(cl.data[cluster_center_index - ClusterSizeX] + cl.data[cluster_center_index]);
+        // dx = 0
+        // dy = 0
+        break;
+    case (corner::cTopRight):
+        if (cl.data[cluster_center_index] + cl.data[cluster_center_index + 1] != 0)
+            eta.x = static_cast<double>(cl.data[cluster_center_index]) /
+                    static_cast<double>(cl.data[cluster_center_index] + cl.data[cluster_center_index + 1]);
+        if ((cl.data[cluster_center_index - ClusterSizeX] + cl.data[cluster_center_index]) != 0)
+            eta.y = static_cast<double>(cl.data[cluster_center_index - ClusterSizeX]) /
+                    static_cast<double>(cl.data[cluster_center_index - ClusterSizeX] + cl.data[cluster_center_index]);
+        // dx = 1
+        // dy = 0
+        break;
+    case (corner::cBottomLeft):
+        if ((cl.data[cluster_center_index - 1] + cl.data[cluster_center_index]) != 0)
+            eta.x = static_cast<double>(cl.data[cluster_center_index - 1]) /
+                    static_cast<double>(cl.data[cluster_center_index - 1] + cl.data[cluster_center_index]);
+        if ((cl.data[cluster_center_index] + cl.data[cluster_center_index + ClusterSizeX]) != 0)
+            eta.y = static_cast<double>(cl.data[cluster_center_index]) /
+                    static_cast<double>(cl.data[cluster_center_index] + cl.data[cluster_center_index + ClusterSizeX]);
+        // dx = 0
+        // dy = 1
+        break;
+    case (corner::cBottomRight):
+        if (cl.data[cluster_center_index] + cl.data[cluster_center_index + 1] != 0)
+            eta.x = static_cast<double>(cl.data[cluster_center_index]) /
+                    static_cast<double>(cl.data[cluster_center_index] + cl.data[cluster_center_index + 1]);
+        if ((cl.data[cluster_center_index] + cl.data[cluster_center_index + ClusterSizeX]) != 0)
+            eta.y = static_cast<double>(cl.data[cluster_center_index]) /
+                    static_cast<double>(cl.data[cluster_center_index] + cl.data[cluster_center_index + ClusterSizeX]);
+        // dx = 1
+        // dy = 1
+        break;
+    }
+
+    eta.c = c;
     return eta;
 }
 
-// TODO! Look up eta2 calculation - photon center should be top right corner
+// TODO! Look up eta2 calculation - photon center should be bottom right corner
 template <typename T>
 Eta2<T> calculate_eta2(const Cluster<T, 2, 2, int16_t> &cl) {
     Eta2<T> eta{};
 
     if ((cl.data[0] + cl.data[1]) != 0)
-        eta.x = static_cast<double>(cl.data[1]) / (cl.data[0] + cl.data[1]);
+        eta.x = static_cast<double>(cl.data[2]) /
+                (cl.data[2] + cl.data[3]); // between (0,1) the closer to zero
+                                           // left value probably larger
     if ((cl.data[0] + cl.data[2]) != 0)
-        eta.y = static_cast<double>(cl.data[2]) / (cl.data[0] + cl.data[2]);
+        eta.y = static_cast<double>(cl.data[1]) /
+                (cl.data[1] + cl.data[3]); // between (0,1) the closer to zero
+                                           // bottom value probably larger
     eta.sum = cl.sum();
-    eta.c = static_cast<int>(corner::cBottomLeft); // TODO! This is not correct,
-                                                   // but need to put something
     return eta;
 }
 
+// TODO generalize
+template <typename T>
+Eta2<T> calculate_eta2(const Cluster<T, 1, 2, int16_t> &cl) {
+    Eta2<T> eta{};
+
+    eta.x = 0;
+    eta.y = static_cast<double>(cl.data[0]) / cl.data[1];
+    eta.sum = cl.sum();
+}
+
+template <typename T>
+Eta2<T> calculate_eta2(const Cluster<T, 2, 1, int16_t> &cl) {
+    Eta2<T> eta{};
+
+    eta.x = static_cast<double>(cl.data[0]) / cl.data[1];
+    eta.y = 0;
+    eta.sum = cl.sum();
+}
+
 // calculates Eta3 for 3x3 cluster based on code from analyze_cluster
 // TODO only supported for 3x3 Clusters
 template <typename T> Eta2<T> calculate_eta3(const Cluster<T, 3, 3> &cl) {

@@ -150,13 +191,11 @@ template <typename T> Eta2<T> calculate_eta3(const Cluster<T, 3, 3> &cl) {
 
     eta.sum = sum;
 
-    eta.c = corner::cBottomLeft;
-
     if ((cl.data[3] + cl.data[4] + cl.data[5]) != 0)
 
         eta.x = static_cast<double>(-cl.data[3] + cl.data[3 + 2]) /
-                (cl.data[3] + cl.data[4] + cl.data[5]);
+                (cl.data[3] + cl.data[4] + cl.data[5]); // (-1,1)
 
     if ((cl.data[1] + cl.data[4] + cl.data[7]) != 0)
 
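Illustration (not part of the diff): a minimal sketch of calling the rewritten calculate_eta2 on a 3x3 integer cluster. The member names (x, y, data, c, sum) come from the hunks above; the header path "aare/CalculateEta.hpp" and the charge values are assumptions for the sketch only.

    #include "aare/CalculateEta.hpp" // header path assumed
    #include <cstdio>

    int main() {
        aare::Cluster<int, 3, 3> cl{};
        cl.x = 100;                             // cluster seed column
        cl.y = 200;                             // cluster seed row
        cl.data = {1, 2, 1, 3, 10, 4, 1, 2, 1}; // row-major 3x3 charges (made up)

        // Eta2<int> with members x, y, c (corner of the max 2x2 subcluster), sum
        auto eta = aare::calculate_eta2(cl);
        std::printf("eta.x=%.3f eta.y=%.3f corner=%d sum=%d\n",
                    eta.x, eta.y, eta.c, eta.sum);
        return 0;
    }
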
include/aare/Cluster.hpp (207 changes; mode changed from Normal file to Executable file)

@@ -18,7 +18,7 @@ namespace aare {
 
 // requires clause c++20 maybe update
 template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
-          typename CoordType = int16_t>
+          typename CoordType = uint16_t>
 struct Cluster {
 
     static_assert(std::is_arithmetic_v<T>, "T needs to be an arithmetic type");

@@ -38,6 +38,13 @@ struct Cluster {
 
     T sum() const { return std::accumulate(data.begin(), data.end(), T{}); }
 
+    // TODO: handle 1 dimensional clusters
+    // TODO: change int to corner
+    /**
+     * @brief sum of 2x2 subcluster with highest energy
+     * @return photon energy of subcluster, 2x2 subcluster index relative to
+     * cluster center
+     */
     std::pair<T, int> max_sum_2x2() const {
 
         if constexpr (cluster_size_x == 3 && cluster_size_y == 3) {

@@ -53,17 +60,38 @@ struct Cluster {
         } else if constexpr (cluster_size_x == 2 && cluster_size_y == 2) {
             return std::make_pair(data[0] + data[1] + data[2] + data[3], 0);
         } else {
-            constexpr size_t num_2x2_subclusters =
-                (ClusterSizeX - 1) * (ClusterSizeY - 1);
-            std::array<T, num_2x2_subclusters> sum_2x2_subcluster;
-            for (size_t i = 0; i < ClusterSizeY - 1; ++i) {
-                for (size_t j = 0; j < ClusterSizeX - 1; ++j)
-                    sum_2x2_subcluster[i * (ClusterSizeX - 1) + j] =
-                        data[i * ClusterSizeX + j] +
-                        data[i * ClusterSizeX + j + 1] +
-                        data[(i + 1) * ClusterSizeX + j] +
-                        data[(i + 1) * ClusterSizeX + j + 1];
-            }
+            constexpr size_t cluster_center_index =
+                (ClusterSizeX / 2) + (ClusterSizeY / 2) * ClusterSizeX;
+
+            std::array<T, 4> sum_2x2_subcluster{0};
+            // subcluster top left from center
+            sum_2x2_subcluster[0] =
+                data[cluster_center_index] + data[cluster_center_index - 1] +
+                data[cluster_center_index - ClusterSizeX] +
+                data[cluster_center_index - 1 - ClusterSizeX];
+            // subcluster top right from center
+            if (ClusterSizeX > 2) {
+                sum_2x2_subcluster[1] =
+                    data[cluster_center_index] + data[cluster_center_index + 1] +
+                    data[cluster_center_index - ClusterSizeX] +
+                    data[cluster_center_index - ClusterSizeX + 1];
+            }
+            // subcluster bottom left from center
+            if (ClusterSizeY > 2) {
+                sum_2x2_subcluster[2] =
+                    data[cluster_center_index] + data[cluster_center_index - 1] +
+                    data[cluster_center_index + ClusterSizeX] +
+                    data[cluster_center_index + ClusterSizeX - 1];
+            }
+            // subcluster bottom right from center
+            if (ClusterSizeX > 2 && ClusterSizeY > 2) {
+                sum_2x2_subcluster[3] =
+                    data[cluster_center_index] + data[cluster_center_index + 1] +
+                    data[cluster_center_index + ClusterSizeX] +
+                    data[cluster_center_index + ClusterSizeX + 1];
+            }
 
             int index = std::max_element(sum_2x2_subcluster.begin(),

@@ -74,6 +102,163 @@ struct Cluster {
     }
 };
 
+/**
+ * @brief Reduce a cluster to a 2x2 cluster by selecting the 2x2 block with the
+ * highest sum.
+ * @param c Cluster to reduce
+ * @return reduced cluster
+ */
+template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
+          typename CoordType = int16_t>
+Cluster<T, 2, 2, CoordType>
+reduce_to_2x2(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &c) {
+
+    static_assert(ClusterSizeX >= 2 && ClusterSizeY >= 2,
+                  "Cluster sizes must be at least 2x2 for reduction to 2x2");
+
+    // TODO maybe add sanity check and check that center is in max subcluster
+    Cluster<T, 2, 2, CoordType> result;
+
+    auto [sum, index] = c.max_sum_2x2();
+
+    int16_t cluster_center_index =
+        (ClusterSizeX / 2) + (ClusterSizeY / 2) * ClusterSizeX;
+
+    int16_t index_bottom_left_max_2x2_subcluster =
+        (int(index / (ClusterSizeX - 1))) * ClusterSizeX +
+        index % (ClusterSizeX - 1);
+
+    result.x =
+        c.x + (index_bottom_left_max_2x2_subcluster - cluster_center_index) %
+                  ClusterSizeX;
+    result.y =
+        c.y - (index_bottom_left_max_2x2_subcluster - cluster_center_index) /
+                  ClusterSizeX;
+    result.data = {
+        c.data[index_bottom_left_max_2x2_subcluster],
+        c.data[index_bottom_left_max_2x2_subcluster + 1],
+        c.data[index_bottom_left_max_2x2_subcluster + ClusterSizeX],
+        c.data[index_bottom_left_max_2x2_subcluster + ClusterSizeX + 1]};
+    return result;
+}
+
+template <typename T>
+Cluster<T, 2, 2, int16_t> reduce_to_2x2(const Cluster<T, 3, 3, int16_t> &c) {
+    Cluster<T, 2, 2, int16_t> result;
+
+    auto [s, i] = c.max_sum_2x2();
+    switch (i) {
+    case 0:
+        result.x = c.x - 1;
+        result.y = c.y + 1;
+        result.data = {c.data[0], c.data[1], c.data[3], c.data[4]};
+        break;
+    case 1:
+        result.x = c.x;
+        result.y = c.y + 1;
+        result.data = {c.data[1], c.data[2], c.data[4], c.data[5]};
+        break;
+    case 2:
+        result.x = c.x - 1;
+        result.y = c.y;
+        result.data = {c.data[3], c.data[4], c.data[6], c.data[7]};
+        break;
+    case 3:
+        result.x = c.x;
+        result.y = c.y;
+        result.data = {c.data[4], c.data[5], c.data[7], c.data[8]};
+        break;
+    }
+
+    return result;
+}
+
+template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
+          typename CoordType = int16_t>
+inline std::pair<T, uint16_t>
+max_3x3_sum(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cluster) {
+
+    if constexpr (ClusterSizeX == 3 && ClusterSizeY == 3) {
+        return std::make_pair(cluster.sum(), 0);
+    } else {
+
+        size_t index = 0;
+        T max_3x3_subcluster_sum = 0;
+        for (size_t i = 0; i < ClusterSizeY - 2; ++i) {
+            for (size_t j = 0; j < ClusterSizeX - 2; ++j) {
+
+                T sum = cluster.data[i * ClusterSizeX + j] +
+                        cluster.data[i * ClusterSizeX + j + 1] +
+                        cluster.data[i * ClusterSizeX + j + 2] +
+                        cluster.data[(i + 1) * ClusterSizeX + j] +
+                        cluster.data[(i + 1) * ClusterSizeX + j + 1] +
+                        cluster.data[(i + 1) * ClusterSizeX + j + 2] +
+                        cluster.data[(i + 2) * ClusterSizeX + j] +
+                        cluster.data[(i + 2) * ClusterSizeX + j + 1] +
+                        cluster.data[(i + 2) * ClusterSizeX + j + 2];
+                if (sum > max_3x3_subcluster_sum) {
+                    max_3x3_subcluster_sum = sum;
+                    index = i * (ClusterSizeX - 2) + j;
+                }
+            }
+        }
+
+        return std::make_pair(max_3x3_subcluster_sum, index);
+    }
+}
+
+/**
+ * @brief Reduce a cluster to a 3x3 cluster by selecting the 3x3 block with the
+ * highest sum.
+ * @param c Cluster to reduce
+ * @return reduced cluster
+ */
+template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
+          typename CoordType = int16_t>
+Cluster<T, 3, 3, CoordType>
+reduce_to_3x3(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &c) {
+
+    static_assert(ClusterSizeX >= 3 && ClusterSizeY >= 3,
+                  "Cluster sizes must be at least 3x3 for reduction to 3x3");
+
+    Cluster<T, 3, 3, CoordType> result;
+
+    // TODO maybe add sanity check and check that center is in max subcluster
+
+    auto [sum, index] = max_3x3_sum(c);
+
+    int16_t cluster_center_index =
+        (ClusterSizeX / 2) + (ClusterSizeY / 2) * ClusterSizeX;
+
+    int16_t index_center_max_3x3_subcluster =
+        (int(index / (ClusterSizeX - 2))) * ClusterSizeX + ClusterSizeX +
+        index % (ClusterSizeX - 2) + 1;
+
+    int16_t index_3x3_subcluster_cluster_center =
+        int((cluster_center_index - 1 - ClusterSizeX) / ClusterSizeX) *
+            (ClusterSizeX - 2) +
+        (cluster_center_index - 1 - ClusterSizeX) % ClusterSizeX;
+
+    result.x =
+        c.x + (index % (ClusterSizeX - 2) -
+               (index_3x3_subcluster_cluster_center % (ClusterSizeX - 2)));
+    result.y =
+        c.y - (index / (ClusterSizeX - 2) -
+               (index_3x3_subcluster_cluster_center / (ClusterSizeX - 2)));
+
+    result.data = {c.data[index_center_max_3x3_subcluster - ClusterSizeX - 1],
+                   c.data[index_center_max_3x3_subcluster - ClusterSizeX],
+                   c.data[index_center_max_3x3_subcluster - ClusterSizeX + 1],
+                   c.data[index_center_max_3x3_subcluster - 1],
+                   c.data[index_center_max_3x3_subcluster],
+                   c.data[index_center_max_3x3_subcluster + 1],
+                   c.data[index_center_max_3x3_subcluster + ClusterSizeX - 1],
+                   c.data[index_center_max_3x3_subcluster + ClusterSizeX],
+                   c.data[index_center_max_3x3_subcluster + ClusterSizeX + 1]};
+    return result;
+}
+
 // Type Traits for is_cluster_type
 template <typename T>
 struct is_cluster : std::false_type {}; // Default case: Not a Cluster
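Illustration (not part of the diff): a minimal sketch of the new single-cluster reduce functions added above; the cluster coordinates and charge values are made up for illustration.

    #include "aare/Cluster.hpp"
    #include <cstdint>

    int main() {
        using aare::Cluster;

        // 5x5 integer cluster centred at (x, y) = (10, 20); values are made up.
        Cluster<int, 5, 5, int16_t> big{};
        big.x = 10;
        big.y = 20;
        big.data[12] = 9; // most of the charge at the centre pixel
        big.data[13] = 4;
        big.data[17] = 3;

        // Keep only the 3x3 (or 2x2) block with the highest summed charge.
        // The returned cluster's x/y are shifted to point at the chosen block.
        auto small3 = aare::reduce_to_3x3(big); // Cluster<int, 3, 3, int16_t>
        auto small2 = aare::reduce_to_2x2(big); // Cluster<int, 2, 2, int16_t>
        return small3.sum() + small2.sum();
    }
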
@@ -14,7 +14,7 @@
 namespace aare {
 
 /*
-Binary cluster file. Expects data to be layed out as:
+Binary cluster file. Expects data to be laid out as:
 int32_t frame_number
 uint32_t number_of_clusters
 int16_t x, int16_t y, int32_t data[9] x number_of_clusters
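For orientation (not part of the diff): the record layout described in the comment above corresponds to the sketch below. The struct names are made up for illustration; aare reads this layout through ClusterFile and does not expose such structs.

    #include <cstdint>

    // Per-frame header of the old binary cluster file format, followed by
    // `number_of_clusters` cluster records.
    struct OldFormatFrameHeader {
        int32_t frame_number;
        uint32_t number_of_clusters;
    };

    struct OldFormatCluster {
        int16_t x;
        int16_t y;
        int32_t data[9]; // 3x3 cluster; the size and pixel type are NOT stored
                         // in the file, so the reader must know them in advance
    };
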
@@ -136,7 +136,7 @@ class ClusterFinder {
                     // don't have a photon
                     int i = 0;
                     for (int ir = -dy; ir < dy + has_center_pixel_y; ir++) {
-                        for (int ic = -dx; ic < dx + has_center_pixel_y; ic++) {
+                        for (int ic = -dx; ic < dx + has_center_pixel_x; ic++) {
                             if (ix + ic >= 0 && ix + ic < frame.shape(1) &&
                                 iy + ir >= 0 && iy + ir < frame.shape(0)) {
                                 CT tmp =

@@ -145,8 +145,8 @@ class ClusterFinder {
                                     m_pedestal.mean(iy + ir, ix + ic));
                                 cluster.data[i] =
                                     tmp; // Watch for out of bounds access
-                                i++;
                             }
+                            i++;
                         }
                     }
 
@@ -122,6 +122,11 @@ class ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> {
      */
     size_t size() const { return m_data.size(); }
 
+    /**
+     * @brief Check if the vector is empty
+     */
+    bool empty() const { return m_data.empty(); }
+
     uint8_t cluster_size_x() const { return ClusterSizeX; }
 
     uint8_t cluster_size_y() const { return ClusterSizeY; }

@@ -167,4 +172,40 @@ class ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> {
     }
 };
 
+/**
+ * @brief Reduce a cluster to a 2x2 cluster by selecting the 2x2 block with the
+ * highest sum.
+ * @param cv Clustervector containing clusters to reduce
+ * @return Clustervector with reduced clusters
+ */
+template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
+          typename CoordType = uint16_t>
+ClusterVector<Cluster<T, 2, 2, CoordType>> reduce_to_2x2(
+    const ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>>
+        &cv) {
+    ClusterVector<Cluster<T, 2, 2, CoordType>> result;
+    for (const auto &c : cv) {
+        result.push_back(reduce_to_2x2(c));
+    }
+    return result;
+}
+
+/**
+ * @brief Reduce a cluster to a 3x3 cluster by selecting the 3x3 block with the
+ * highest sum.
+ * @param cv Clustervector containing clusters to reduce
+ * @return Clustervector with reduced clusters
+ */
+template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
+          typename CoordType = uint16_t>
+ClusterVector<Cluster<T, 3, 3, CoordType>> reduce_to_3x3(
+    const ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>>
+        &cv) {
+    ClusterVector<Cluster<T, 3, 3, CoordType>> result;
+    for (const auto &c : cv) {
+        result.push_back(reduce_to_3x3(c));
+    }
+    return result;
+}
+
 } // namespace aare
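Illustration (not part of the diff): a minimal sketch of the vector-level overloads added above, together with the new empty() member. The header path "aare/ClusterVector.hpp" is assumed, and cv is assumed to already hold 5x5 clusters (e.g. filled by a ClusterFinder).

    #include "aare/Cluster.hpp"
    #include "aare/ClusterVector.hpp" // header path assumed
    #include <cstddef>

    std::size_t shrink(const aare::ClusterVector<
                       aare::Cluster<int, 5, 5, uint16_t>> &cv) {
        if (cv.empty())                    // new empty() member
            return 0;
        auto cv3 = aare::reduce_to_3x3(cv); // ClusterVector of 3x3 clusters
        auto cv2 = aare::reduce_to_2x2(cv); // ClusterVector of 2x2 clusters
        return cv3.size() + cv2.size();
    }
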
include/aare/DetectorGeometry.hpp (new file, 81 lines)
@@ -0,0 +1,81 @@
+#pragma once
+#include "aare/RawMasterFile.hpp" //ROI refactor away
+#include "aare/defs.hpp"
+namespace aare {
+
+struct ModuleConfig {
+    int module_gap_row{};
+    int module_gap_col{};
+
+    bool operator==(const ModuleConfig &other) const {
+        if (module_gap_col != other.module_gap_col)
+            return false;
+        if (module_gap_row != other.module_gap_row)
+            return false;
+        return true;
+    }
+};
+
+/**
+ * @brief Class to hold the geometry of a module. Where pixel 0 is located and
+ * the size of the module
+ */
+struct ModuleGeometry {
+    int origin_x{};
+    int origin_y{};
+    int height{};
+    int width{};
+    int row_index{};
+    int col_index{};
+};
+
+/**
+ * @brief Class to hold the geometry of a detector. Number of modules, their
+ * size and where pixel 0 for each module is located
+ */
+class DetectorGeometry {
+  public:
+    DetectorGeometry(const xy &geometry, const ssize_t module_pixels_x,
+                     const ssize_t module_pixels_y,
+                     const xy udp_interfaces_per_module = xy{1, 1},
+                     const bool quad = false);
+
+    ~DetectorGeometry() = default;
+
+    /**
+     * @brief Update the detector geometry given a region of interest
+     *
+     * @param roi
+     * @return DetectorGeometry
+     */
+    void update_geometry_with_roi(ROI roi);
+
+    size_t n_modules() const;
+
+    size_t n_modules_in_roi() const;
+
+    size_t pixels_x() const;
+    size_t pixels_y() const;
+
+    size_t modules_x() const;
+    size_t modules_y() const;
+
+    const std::vector<ssize_t> &get_modules_in_roi() const;
+
+    ssize_t get_modules_in_roi(const size_t index) const;
+
+    const std::vector<ModuleGeometry> &get_module_geometries() const;
+
+    const ModuleGeometry &get_module_geometries(const size_t index) const;
+
+  private:
+    size_t m_modules_x{};
+    size_t m_modules_y{};
+    size_t m_pixels_x{};
+    size_t m_pixels_y{};
+    static constexpr ModuleConfig cfg{0, 0};
+    std::vector<ModuleGeometry> module_geometries{};
+    std::vector<ssize_t> modules_in_roi{};
+};
+
+} // namespace aare
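DetectorGeometry replaces the plain struct of the same name in defs.hpp (removed further down in this diff) and computes where pixel 0 of each module sits. A small sketch of the intended use, assuming a 2x2-module layout of 1024x512-pixel modules (all values illustrative):

    #include "aare/DetectorGeometry.hpp"

    void geometry_example() {
        // Default arguments: one UDP interface per module, no quad
        aare::DetectorGeometry geo(aare::xy{2, 2}, 1024, 512);

        aare::ROI roi;   // restrict to a sub-region; fields not set keep their defaults
        roi.xmin = 0;
        roi.xmax = 255;
        geo.update_geometry_with_roi(roi);

        auto n = geo.n_modules_in_roi(); // modules that intersect the ROI
    }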
@@ -105,7 +105,7 @@ class Frame {
      * @tparam T type of the pixels
      * @return NDView<T, 2>
      */
-    template <typename T> NDView<T, 2> view() {
+    template <typename T> NDView<T, 2> view() & {
         std::array<ssize_t, 2> shape = {static_cast<ssize_t>(m_rows),
                                         static_cast<ssize_t>(m_cols)};
         T *data = reinterpret_cast<T *>(m_data);
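Qualifying view() with & restricts it to lvalue Frames, so a non-owning NDView can no longer be taken from a temporary that is destroyed at the end of the expression. A sketch of what the change allows and rejects (make_frame is a hypothetical factory, not part of the library):

    #include "aare/Frame.hpp"

    void frame_view_example(aare::Frame &frame) {
        auto pixels = frame.view<uint16_t>();       // fine: frame outlives the view
        // auto bad = make_frame().view<uint16_t>(); // no longer compiles: view() & needs an lvalue
    }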
@@ -69,26 +69,27 @@ Interpolator::interpolate(const ClusterVector<ClusterType> &clusters) {
             // cBottomRight = 1,
             // cTopLeft = 2,
             // cTopRight = 3
+            // TODO: could also chaneg the sign of the eta calculation
             switch (static_cast<corner>(eta.c)) {
             case corner::cTopLeft:
-                dX = -1.;
-                dY = 0;
+                dX = 0.0;
+                dY = 0.0;
                 break;
             case corner::cTopRight:;
-                dX = 0;
-                dY = 0;
+                dX = 1.0;
+                dY = 0.0;
                 break;
             case corner::cBottomLeft:
-                dX = -1.;
-                dY = -1.;
+                dX = 0.0;
+                dY = 1.0;
                 break;
             case corner::cBottomRight:
-                dX = 0.;
-                dY = -1.;
+                dX = 1.0;
+                dY = 1.0;
                 break;
             }
-            photon.x += m_ietax(ix, iy, ie) * 2 + dX;
-            photon.y += m_ietay(ix, iy, ie) * 2 + dY;
+            photon.x -= m_ietax(ix, iy, ie) - dX;
+            photon.y -= m_ietay(ix, iy, ie) - dY;
             photons.push_back(photon);
         }
     } else if (clusters.cluster_size_x() == 2 ||
@@ -112,10 +113,11 @@ Interpolator::interpolate(const ClusterVector<ClusterType> &clusters) {
             auto ix = last_smaller(m_etabinsx, eta.x);
             auto iy = last_smaller(m_etabinsy, eta.y);

-            photon.x += m_ietax(ix, iy, ie) *
-                        2; // eta goes between 0 and 1 but we could move the hit
-                           // anywhere in the 2x2
-            photon.y += m_ietay(ix, iy, ie) * 2;
+            // TODO: why 2?
+            photon.x -=
+                m_ietax(ix, iy, ie); // eta goes between 0 and 1 but we could
+                                     // move the hit anywhere in the 2x2
+            photon.y -= m_ietay(ix, iy, ie);
             photons.push_back(photon);
         }

@@ -25,7 +25,7 @@ template <typename T, ssize_t Ndim = 2>
 class NDArray : public ArrayExpr<NDArray<T, Ndim>, Ndim> {
     std::array<ssize_t, Ndim> shape_;
     std::array<ssize_t, Ndim> strides_;
-    size_t size_{};
+    size_t size_{}; //TODO! do we need to store size when we have shape?
     T *data_;

   public:
@@ -43,8 +43,7 @@ class NDArray : public ArrayExpr<NDArray<T, Ndim>, Ndim> {
     */
    explicit NDArray(std::array<ssize_t, Ndim> shape)
        : shape_(shape), strides_(c_strides<Ndim>(shape_)),
-          size_(std::accumulate(shape_.begin(), shape_.end(), 1,
-                                std::multiplies<>())),
+          size_(num_elements(shape_)),
          data_(new T[size_]) {}

    /**
@@ -79,6 +78,24 @@ class NDArray : public ArrayExpr<NDArray<T, Ndim>, Ndim> {
        other.reset(); // TODO! is this necessary?
    }

+
+    //Move constructor from an an array with Ndim + 1
+    template <ssize_t M, typename = std::enable_if_t<(M == Ndim + 1)>>
+    NDArray(NDArray<T, M> &&other)
+        : shape_(drop_first_dim(other.shape())),
+          strides_(c_strides<Ndim>(shape_)), size_(num_elements(shape_)),
+          data_(other.data()) {
+
+        // For now only allow move if the size matches, to avoid unreachable data
+        // if the use case arises we can remove this check
+        if(size() != other.size()) {
+            data_ = nullptr; // avoid double free, other will clean up the memory in it's destructor
+            throw std::runtime_error(LOCATION +
+                "Size mismatch in move constructor of NDArray<T, Ndim-1>");
+        }
+        other.reset();
+    }
+
    // Copy constructor
    NDArray(const NDArray &other)
        : shape_(other.shape_), strides_(c_strides<Ndim>(shape_)),
@@ -380,12 +397,6 @@ NDArray<T, Ndim> NDArray<T, Ndim>::operator*(const T &value) {
    result *= value;
    return result;
}
-// template <typename T, ssize_t Ndim> void NDArray<T, Ndim>::Print() {
-//     if (shape_[0] < 20 && shape_[1] < 20)
-//         Print_all();
-//     else
-//         Print_some();
-// }

template <typename T, ssize_t Ndim>
std::ostream &operator<<(std::ostream &os, const NDArray<T, Ndim> &arr) {
@@ -437,4 +448,23 @@ NDArray<T, Ndim> load(const std::string &pathname,
    return img;
}

+template <typename RT, typename NT, typename DT, ssize_t Ndim>
+NDArray<RT, Ndim> safe_divide(const NDArray<NT, Ndim> &numerator,
+                              const NDArray<DT, Ndim> &denominator) {
+    if (numerator.shape() != denominator.shape()) {
+        throw std::runtime_error(
+            "Shapes of numerator and denominator must match");
+    }
+    NDArray<RT, Ndim> result(numerator.shape());
+    for (ssize_t i = 0; i < numerator.size(); ++i) {
+        if (denominator[i] != 0) {
+            result[i] =
+                static_cast<RT>(numerator[i]) / static_cast<RT>(denominator[i]);
+        } else {
+            result[i] = RT{0}; // or handle division by zero as needed
+        }
+    }
+    return result;
+}
+
} // namespace aare
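safe_divide performs an element-wise division of two equally shaped arrays and writes 0 where the denominator is 0; the pedestal code later in this changeset relies on that for gain bins that received no frames. A small sketch, assuming NDArray as declared in this header:

    #include "aare/NDArray.hpp"

    void safe_divide_example() {
        aare::NDArray<size_t, 2> sum(std::array<ssize_t, 2>{2, 2}, 10);
        aare::NDArray<size_t, 2> cnt(std::array<ssize_t, 2>{2, 2}, 0);
        cnt(0, 0) = 5; // only one pixel has statistics

        // The caller picks the result type; bins with zero counts come out as 0.0
        auto mean = aare::safe_divide<double>(sum, cnt);
    }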
@@ -26,6 +26,33 @@ Shape<Ndim> make_shape(const std::vector<size_t> &shape) {
     return arr;
 }

+
+/**
+ * @brief Helper function to drop the first dimension of a shape.
+ * This is useful when you want to create a 2D view from a 3D array.
+ * @param shape The shape to drop the first dimension from.
+ * @return A new shape with the first dimension dropped.
+ */
+template<size_t Ndim>
+Shape<Ndim-1> drop_first_dim(const Shape<Ndim> &shape) {
+    static_assert(Ndim > 1, "Cannot drop first dimension from a 1D shape");
+    Shape<Ndim - 1> new_shape;
+    std::copy(shape.begin() + 1, shape.end(), new_shape.begin());
+    return new_shape;
+}
+
+/**
+ * @brief Helper function when constructing NDArray/NDView. Calculates the number
+ * of elements in the resulting array from a shape.
+ * @param shape The shape to calculate the number of elements for.
+ * @return The number of elements in and NDArray/NDView of that shape.
+ */
+template <size_t Ndim>
+size_t num_elements(const Shape<Ndim> &shape) {
+    return std::accumulate(shape.begin(), shape.end(), 1,
+                           std::multiplies<size_t>());
+}
+
 template <ssize_t Dim = 0, typename Strides>
 ssize_t element_offset(const Strides & /*unused*/) {
     return 0;
@@ -67,23 +94,27 @@ class NDView : public ArrayExpr<NDView<T, Ndim>, Ndim> {
           size_(std::accumulate(std::begin(shape), std::end(shape), 1,
                                 std::multiplies<>())) {}

-    // NDView(T *buffer, const std::vector<ssize_t> &shape)
-    //     : buffer_(buffer),
-    //       strides_(c_strides<Ndim>(make_array<Ndim>(shape))),
-    //       shape_(make_array<Ndim>(shape)),
-    //       size_(std::accumulate(std::begin(shape), std::end(shape), 1,
-    //                             std::multiplies<>())) {}

     template <typename... Ix>
     std::enable_if_t<sizeof...(Ix) == Ndim, T &> operator()(Ix... index) {
         return buffer_[element_offset(strides_, index...)];
     }

     template <typename... Ix>
-    std::enable_if_t<sizeof...(Ix) == Ndim, T &> operator()(Ix... index) const {
+    std::enable_if_t<sizeof...(Ix) == 1 && (Ndim > 1), NDView<T, Ndim - 1>> operator()(Ix... index) {
+        // return a view of the next dimension
+        std::array<ssize_t, Ndim - 1> new_shape{};
+        std::copy_n(shape_.begin() + 1, Ndim - 1, new_shape.begin());
+        return NDView<T, Ndim - 1>(&buffer_[element_offset(strides_, index...)],
+                                   new_shape);
+
+    }
+
+    template <typename... Ix>
+    std::enable_if_t<sizeof...(Ix) == Ndim, const T &> operator()(Ix... index) const {
         return buffer_[element_offset(strides_, index...)];
     }

     ssize_t size() const { return static_cast<ssize_t>(size_); }
     size_t total_bytes() const { return size_ * sizeof(T); }
     std::array<ssize_t, Ndim> strides() const noexcept { return strides_; }
@@ -92,13 +123,27 @@ class NDView : public ArrayExpr<NDView<T, Ndim>, Ndim> {
     T *end() { return buffer_ + size_; }
     T const *begin() const { return buffer_; }
     T const *end() const { return buffer_ + size_; }
-    T &operator()(ssize_t i) const { return buffer_[i]; }
-    T &operator[](ssize_t i) const { return buffer_[i]; }
+
+    /**
+     * @brief Access element at index i.
+     */
+    T &operator[](ssize_t i) { return buffer_[i]; }
+
+    /**
+     * @brief Access element at index i.
+     */
+    const T &operator[](ssize_t i) const { return buffer_[i]; }
+
     bool operator==(const NDView &other) const {
         if (size_ != other.size_)
             return false;
-        for (uint64_t i = 0; i != size_; ++i) {
+        if (shape_ != other.shape_)
+            return false;
+        for (size_t i = 0; i != size_; ++i) {
             if (buffer_[i] != other.buffer_[i])
                 return false;
         }
@@ -157,8 +202,25 @@ class NDView : public ArrayExpr<NDView<T, Ndim>, Ndim> {
     auto shape(ssize_t i) const { return shape_[i]; }

     T *data() { return buffer_; }
+    const T *data() const { return buffer_; }
     void print_all() const;
+
+    /**
+     * @brief Create a subview of a range of the first dimension.
+     * This is useful for splitting a batches of frames in parallel processing.
+     * @param first The first index of the subview (inclusive).
+     * @param last The last index of the subview (exclusive).
+     * @return A new NDView that is a subview of the current view.
+     * @throws std::runtime_error if the range is invalid.
+     */
+    NDView sub_view(ssize_t first, ssize_t last) const {
+        if (first < 0 || last > shape_[0] || first >= last)
+            throw std::runtime_error(LOCATION + "Invalid sub_view range");
+        auto new_shape = shape_;
+        new_shape[0] = last - first;
+        return NDView(buffer_ + first * strides_[0], new_shape);
+    }
+
   private:
     T *buffer_{nullptr};
     std::array<ssize_t, Ndim> strides_{};
@@ -180,6 +242,7 @@ class NDView : public ArrayExpr<NDView<T, Ndim>, Ndim> {
         return *this;
     }
 };

 template <typename T, ssize_t Ndim> void NDView<T, Ndim>::print_all() const {
     for (auto row = 0; row < shape_[0]; ++row) {
         for (auto col = 0; col < shape_[1]; ++col) {
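With the new single-index operator() and sub_view, a 3D stack of frames can be sliced without copying. A sketch, assuming the caller owns the underlying buffer:

    #include "aare/NDView.hpp"

    void slice_example(uint16_t *buffer) {
        // Non-owning view of 10 frames of 512x1024 pixels
        aare::NDView<uint16_t, 3> stack(buffer, std::array<ssize_t, 3>{10, 512, 1024});

        aare::NDView<uint16_t, 2> frame5 = stack(5); // 2D view of a single frame
        auto first_half = stack.sub_view(0, 5);      // frames [0, 5), same buffer
    }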
@@ -1,27 +1,19 @@
 #pragma once
+#include "aare/DetectorGeometry.hpp"
 #include "aare/FileInterface.hpp"
 #include "aare/Frame.hpp"
 #include "aare/NDArray.hpp" //for pixel map
 #include "aare/RawMasterFile.hpp"
 #include "aare/RawSubFile.hpp"

+#ifdef AARE_TESTS
+#include "../tests/friend_test.hpp"
+#endif
+
 #include <optional>

 namespace aare {

-struct ModuleConfig {
-    int module_gap_row{};
-    int module_gap_col{};
-
-    bool operator==(const ModuleConfig &other) const {
-        if (module_gap_col != other.module_gap_col)
-            return false;
-        if (module_gap_row != other.module_gap_row)
-            return false;
-        return true;
-    }
-};
-
 /**
  * @brief Class to read .raw files. The class will parse the master file
  * to find the correct geometry for the frames.
@@ -29,11 +21,12 @@ struct ModuleConfig {
  * Consider using that unless you need raw file specific functionality.
  */
 class RawFile : public FileInterface {
+
     std::vector<std::unique_ptr<RawSubFile>> m_subfiles;
-    ModuleConfig cfg{0, 0};
     RawMasterFile m_master;
     size_t m_current_frame{};
-    size_t m_current_subfile{};
     DetectorGeometry m_geometry;

   public:
@@ -67,13 +60,21 @@ class RawFile : public FileInterface {
     size_t rows() const override;
     size_t cols() const override;
     size_t bitdepth() const override;
-    xy geometry();
     size_t n_modules() const;
+    size_t n_modules_in_roi() const;
+    xy geometry() const;
+
     RawMasterFile master() const;

     DetectorType detector_type() const override;

+    /**
+     * @brief read the header of the file
+     * @param fname path to the data subfile
+     * @return DetectorHeader
+     */
+    static DetectorHeader read_header(const std::filesystem::path &fname);
+
   private:
     /**
     * @brief read the frame at the given frame index into the image buffer
@@ -91,15 +92,7 @@ class RawFile : public FileInterface {
     */
     Frame get_frame(size_t frame_index);

-    /**
-     * @brief read the header of the file
-     * @param fname path to the data subfile
-     * @return DetectorHeader
-     */
-    static DetectorHeader read_header(const std::filesystem::path &fname);
-
     void open_subfiles();
-    void find_geometry();
 };

 } // namespace aare
@@ -1,5 +1,6 @@
 #pragma once
 #include "aare/defs.hpp"
+#include <algorithm>
 #include <filesystem>
 #include <fmt/format.h>
 #include <fstream>
@@ -41,14 +42,16 @@ class RawFileNameComponents {

 class ScanParameters {
     bool m_enabled = false;
-    std::string m_dac;
+    DACIndex m_dac{};
     int m_start = 0;
     int m_stop = 0;
     int m_step = 0;
-    // TODO! add settleTime, requires string to time conversion
+    int64_t m_settleTime = 0; // [ns]

   public:
     ScanParameters(const std::string &par);
+    ScanParameters(const bool enabled, const DACIndex dac, const int start,
+                   const int stop, const int step, const int64_t settleTime);
     ScanParameters() = default;
     ScanParameters(const ScanParameters &) = default;
     ScanParameters &operator=(const ScanParameters &) = default;
@@ -56,8 +59,9 @@ class ScanParameters {
     int start() const;
     int stop() const;
     int step() const;
-    const std::string &dac() const;
+    DACIndex dac() const;
     bool enabled() const;
+    int64_t settleTime() const;
     void increment_stop();
 };

@@ -77,8 +81,10 @@ class RawMasterFile {
     size_t m_pixels_y{};
     size_t m_pixels_x{};
     size_t m_bitdepth{};
+    uint8_t m_quad = 0;

     xy m_geometry{};
+    xy m_udp_interfaces_per_module{1, 1};

     size_t m_max_frames_per_file{};
     // uint32_t m_adc_mask{}; // TODO! implement reading
@@ -96,7 +102,6 @@ class RawMasterFile {
     std::optional<size_t> m_digital_samples;
     std::optional<size_t> m_transceiver_samples;
     std::optional<size_t> m_number_of_rows;
-    std::optional<uint8_t> m_quad;

     std::optional<ROI> m_roi;

@@ -115,17 +120,18 @@ class RawMasterFile {
     size_t max_frames_per_file() const;
     size_t bitdepth() const;
     size_t frame_padding() const;
+    xy udp_interfaces_per_module() const;
     const FrameDiscardPolicy &frame_discard_policy() const;

     size_t total_frames_expected() const;
     xy geometry() const;
     size_t n_modules() const;
+    uint8_t quad() const;

     std::optional<size_t> analog_samples() const;
     std::optional<size_t> digital_samples() const;
     std::optional<size_t> transceiver_samples() const;
     std::optional<size_t> number_of_rows() const;
-    std::optional<uint8_t> quad() const;

     std::optional<ROI> roi() const;

@@ -134,6 +140,7 @@ class RawMasterFile {
   private:
     void parse_json(const std::filesystem::path &fpath);
     void parse_raw(const std::filesystem::path &fpath);
+    void retrieve_geometry();
 };

 } // namespace aare
@@ -240,14 +240,14 @@ template <typename T> void VarClusterFinder<T>::first_pass() {

     for (ssize_t i = 0; i < original_.size(); ++i) {
         if (use_noise_map)
-            threshold_ = 5 * noiseMap(i);
-        binary_(i) = (original_(i) > threshold_);
+            threshold_ = 5 * noiseMap[i];
+        binary_[i] = (original_[i] > threshold_);
     }

     for (int i = 0; i < shape_[0]; ++i) {
         for (int j = 0; j < shape_[1]; ++j) {

-            // do we have someting to process?
+            // do we have something to process?
             if (binary_(i, j)) {
                 auto tmp = check_neighbours(i, j);
                 if (tmp != 0) {
include/aare/calibration.hpp (new file, 209 lines)
@@ -0,0 +1,209 @@
+#pragma once
+
+#include "aare/NDArray.hpp"
+#include "aare/NDView.hpp"
+#include "aare/defs.hpp"
+#include "aare/utils/par.hpp"
+#include "aare/utils/task.hpp"
+#include <cstdint>
+#include <future>
+
+namespace aare {
+
+// Really try to convince the compile to inline this function
+// TODO! Clang?
+#if (defined(_MSC_VER) || defined(__INTEL_COMPILER))
+#define STRONG_INLINE __forceinline
+#else
+#define STRONG_INLINE inline
+#endif
+
+#if defined(__GNUC__)
+#define ALWAYS_INLINE __attribute__((always_inline)) inline
+#else
+#define ALWAYS_INLINE STRONG_INLINE
+#endif
+
+/**
+ * @brief Get the gain from the raw ADC value. In Jungfrau the gain is
+ * encoded in the left most 2 bits of the raw value.
+ * 00 -> gain 0
+ * 01 -> gain 1
+ * 11 -> gain 2
+ * @param raw the raw ADC value
+ * @return the gain as an integer
+ */
+ALWAYS_INLINE int get_gain(uint16_t raw) {
+    switch (raw >> 14) {
+    case 0:
+        return 0;
+    case 1:
+        return 1;
+    case 3:
+        return 2;
+    default:
+        return 0;
+    }
+}
+
+ALWAYS_INLINE uint16_t get_value(uint16_t raw) { return raw & ADC_MASK; }
+
+ALWAYS_INLINE std::pair<uint16_t, int16_t> get_value_and_gain(uint16_t raw) {
+    static_assert(
+        sizeof(std::pair<uint16_t, int16_t>) ==
+            sizeof(uint16_t) + sizeof(int16_t),
+        "Size of pair<uint16_t, int16_t> does not match expected size");
+    return {get_value(raw), get_gain(raw)};
+}
+
+template <class T>
+void apply_calibration_impl(NDView<T, 3> res, NDView<uint16_t, 3> raw_data,
+                            NDView<T, 3> ped, NDView<T, 3> cal, int start,
+                            int stop) {
+
+    for (int frame_nr = start; frame_nr != stop; ++frame_nr) {
+        for (int row = 0; row != raw_data.shape(1); ++row) {
+            for (int col = 0; col != raw_data.shape(2); ++col) {
+                auto [value, gain] =
+                    get_value_and_gain(raw_data(frame_nr, row, col));
+
+                // Using multiplication does not seem to speed up the code here
+                // ADU/keV is the standard unit for the calibration which
+                // means rewriting the formula is not worth it.
+                res(frame_nr, row, col) =
+                    (value - ped(gain, row, col)) / cal(gain, row, col);
+            }
+        }
+    }
+}
+
+template <class T>
+void apply_calibration_impl(NDView<T, 3> res, NDView<uint16_t, 3> raw_data,
+                            NDView<T, 2> ped, NDView<T, 2> cal, int start,
+                            int stop) {
+
+    for (int frame_nr = start; frame_nr != stop; ++frame_nr) {
+        for (int row = 0; row != raw_data.shape(1); ++row) {
+            for (int col = 0; col != raw_data.shape(2); ++col) {
+                auto [value, gain] =
+                    get_value_and_gain(raw_data(frame_nr, row, col));
+
+                // Using multiplication does not seem to speed up the code here
+                // ADU/keV is the standard unit for the calibration which
+                // means rewriting the formula is not worth it.
+
+                // Set the value to 0 if the gain is not 0
+                if (gain == 0)
+                    res(frame_nr, row, col) =
+                        (value - ped(row, col)) / cal(row, col);
+                else
+                    res(frame_nr, row, col) = 0;
+            }
+        }
+    }
+}
+
+template <class T, ssize_t Ndim = 3>
+void apply_calibration(NDView<T, 3> res, NDView<uint16_t, 3> raw_data,
+                       NDView<T, Ndim> ped, NDView<T, Ndim> cal,
+                       ssize_t n_threads = 4) {
+    std::vector<std::future<void>> futures;
+    futures.reserve(n_threads);
+    auto limits = split_task(0, raw_data.shape(0), n_threads);
+    for (const auto &lim : limits)
+        futures.push_back(std::async(
+            static_cast<void (*)(NDView<T, 3>, NDView<uint16_t, 3>,
+                                 NDView<T, Ndim>, NDView<T, Ndim>, int, int)>(
+                apply_calibration_impl),
+            res, raw_data, ped, cal, lim.first, lim.second));
+    for (auto &f : futures)
+        f.get();
+}
+
+template <bool only_gain0>
+std::pair<NDArray<size_t, 3>, NDArray<size_t, 3>>
+sum_and_count_per_gain(NDView<uint16_t, 3> raw_data) {
+    constexpr ssize_t num_gains = only_gain0 ? 1 : 3;
+    NDArray<size_t, 3> accumulator(
+        std::array<ssize_t, 3>{num_gains, raw_data.shape(1), raw_data.shape(2)},
+        0);
+    NDArray<size_t, 3> count(
+        std::array<ssize_t, 3>{num_gains, raw_data.shape(1), raw_data.shape(2)},
+        0);
+    for (int frame_nr = 0; frame_nr != raw_data.shape(0); ++frame_nr) {
+        for (int row = 0; row != raw_data.shape(1); ++row) {
+            for (int col = 0; col != raw_data.shape(2); ++col) {
+                auto [value, gain] =
+                    get_value_and_gain(raw_data(frame_nr, row, col));
+                if (gain != 0 && only_gain0)
+                    continue;
+                accumulator(gain, row, col) += value;
+                count(gain, row, col) += 1;
+            }
+        }
+    }
+
+    return {std::move(accumulator), std::move(count)};
+}
+
+template <typename T, bool only_gain0 = false>
+NDArray<T, 3 - static_cast<ssize_t>(only_gain0)>
+calculate_pedestal(NDView<uint16_t, 3> raw_data, ssize_t n_threads) {
+
+    constexpr ssize_t num_gains = only_gain0 ? 1 : 3;
+    std::vector<std::future<std::pair<NDArray<size_t, 3>, NDArray<size_t, 3>>>>
+        futures;
+    futures.reserve(n_threads);
+
+    auto subviews = make_subviews(raw_data, n_threads);
+
+    for (auto view : subviews) {
+        futures.push_back(std::async(
+            static_cast<std::pair<NDArray<size_t, 3>, NDArray<size_t, 3>> (*)(
+                NDView<uint16_t, 3>)>(&sum_and_count_per_gain<only_gain0>),
+            view));
+    }
+    Shape<3> shape{num_gains, raw_data.shape(1), raw_data.shape(2)};
+    NDArray<size_t, 3> accumulator(shape, 0);
+    NDArray<size_t, 3> count(shape, 0);
+
+    // Combine the results from the futures
+    for (auto &f : futures) {
+        auto [acc, cnt] = f.get();
+        accumulator += acc;
+        count += cnt;
+    }
+
+
+    // Will move to a NDArray<T, 3 - static_cast<ssize_t>(only_gain0)>
+    // if only_gain0 is true
+    return safe_divide<T>(accumulator, count);
+
+}
+
+/**
+ * @brief Count the number of switching pixels in the raw data.
+ * This function counts the number of pixels that switch between G1 and G2 gain.
+ * It returns an NDArray with the number of switching pixels per pixel.
+ * @param raw_data The NDView containing the raw data
+ * @return An NDArray with the number of switching pixels per pixel
+ */
+NDArray<int, 2> count_switching_pixels(NDView<uint16_t, 3> raw_data);
+
+/**
+ * @brief Count the number of switching pixels in the raw data.
+ * This function counts the number of pixels that switch between G1 and G2 gain.
+ * It returns an NDArray with the number of switching pixels per pixel.
+ * @param raw_data The NDView containing the raw data
+ * @param n_threads The number of threads to use for parallel processing
+ * @return An NDArray with the number of switching pixels per pixel
+ */
+NDArray<int, 2> count_switching_pixels(NDView<uint16_t, 3> raw_data,
+                                       ssize_t n_threads);
+
+template <typename T>
+auto calculate_pedestal_g0(NDView<uint16_t, 3> raw_data, ssize_t n_threads) {
+    return calculate_pedestal<T, true>(raw_data, n_threads);
+}
+
+} // namespace aare
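The header above implements the usual Jungfrau conversion energy = (value - pedestal[gain]) / calibration[gain], with the gain decoded from the two most significant bits and the frame range split across threads. A minimal end-to-end sketch, assuming the raw stack, pedestal and calibration have already been loaded into NDArrays:

    #include "aare/NDArray.hpp"
    #include "aare/NDView.hpp"
    #include "aare/calibration.hpp"

    void calibrate_stack(aare::NDArray<uint16_t, 3> &raw,      // frames x rows x cols
                         aare::NDArray<double, 3> &pedestal,   // 3 gains x rows x cols
                         aare::NDArray<double, 3> &cal) {      // ADU/keV, same shape as pedestal
        aare::NDArray<double, 3> energy(raw.shape());

        aare::NDView<double, 3> res(energy.data(), energy.shape());
        aare::NDView<uint16_t, 3> raw_view(raw.data(), raw.shape());
        aare::NDView<double, 3> ped(pedestal.data(), pedestal.shape());
        aare::NDView<double, 3> calv(cal.data(), cal.shape());

        // Four worker threads, each handling a contiguous range of frames
        aare::apply_calibration<double, 3>(res, raw_view, ped, calv, 4);
    }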
@@ -174,35 +174,6 @@ template <typename T> struct t_xy {
 };
 using xy = t_xy<uint32_t>;

-/**
- * @brief Class to hold the geometry of a module. Where pixel 0 is located and
- * the size of the module
- */
-struct ModuleGeometry {
-    int origin_x{};
-    int origin_y{};
-    int height{};
-    int width{};
-    int row_index{};
-    int col_index{};
-};
-
-/**
- * @brief Class to hold the geometry of a detector. Number of modules, their
- * size and where pixel 0 for each module is located
- */
-struct DetectorGeometry {
-    int modules_x{};
-    int modules_y{};
-    int pixels_x{};
-    int pixels_y{};
-    int module_gap_row{};
-    int module_gap_col{};
-    std::vector<ModuleGeometry> module_pixel_0;
-
-    auto size() const { return module_pixel_0.size(); }
-};
-
 struct ROI {
     ssize_t xmin{};
     ssize_t xmax{};
@@ -244,6 +215,122 @@ enum class DetectorType {
     Unknown
 };

+/**
+ * @brief Enum class to define the Digital to Analog converter
+ * The values are the same as in slsDetectorPackage
+ */
+enum DACIndex {
+    DAC_0,
+    DAC_1,
+    DAC_2,
+    DAC_3,
+    DAC_4,
+    DAC_5,
+    DAC_6,
+    DAC_7,
+    DAC_8,
+    DAC_9,
+    DAC_10,
+    DAC_11,
+    DAC_12,
+    DAC_13,
+    DAC_14,
+    DAC_15,
+    DAC_16,
+    DAC_17,
+    VSVP,
+    VTRIM,
+    VRPREAMP,
+    VRSHAPER,
+    VSVN,
+    VTGSTV,
+    VCMP_LL,
+    VCMP_LR,
+    VCAL,
+    VCMP_RL,
+    RXB_RB,
+    RXB_LB,
+    VCMP_RR,
+    VCP,
+    VCN,
+    VISHAPER,
+    VTHRESHOLD,
+    IO_DELAY,
+    VREF_DS,
+    VOUT_CM,
+    VIN_CM,
+    VREF_COMP,
+    VB_COMP,
+    VDD_PROT,
+    VIN_COM,
+    VREF_PRECH,
+    VB_PIXBUF,
+    VB_DS,
+    VREF_H_ADC,
+    VB_COMP_FE,
+    VB_COMP_ADC,
+    VCOM_CDS,
+    VREF_RSTORE,
+    VB_OPA_1ST,
+    VREF_COMP_FE,
+    VCOM_ADC1,
+    VREF_L_ADC,
+    VREF_CDS,
+    VB_CS,
+    VB_OPA_FD,
+    VCOM_ADC2,
+    VCASSH,
+    VTH2,
+    VRSHAPER_N,
+    VIPRE_OUT,
+    VTH3,
+    VTH1,
+    VICIN,
+    VCAS,
+    VCAL_N,
+    VIPRE,
+    VCAL_P,
+    VDCSH,
+    VBP_COLBUF,
+    VB_SDA,
+    VCASC_SFP,
+    VIPRE_CDS,
+    IBIAS_SFP,
+    ADC_VPP,
+    HIGH_VOLTAGE,
+    TEMPERATURE_ADC,
+    TEMPERATURE_FPGA,
+    TEMPERATURE_FPGAEXT,
+    TEMPERATURE_10GE,
+    TEMPERATURE_DCDC,
+    TEMPERATURE_SODL,
+    TEMPERATURE_SODR,
+    TEMPERATURE_FPGA2,
+    TEMPERATURE_FPGA3,
+    TRIMBIT_SCAN,
+    V_POWER_A = 100,
+    V_POWER_B = 101,
+    V_POWER_C = 102,
+    V_POWER_D = 103,
+    V_POWER_IO = 104,
+    V_POWER_CHIP = 105,
+    I_POWER_A = 106,
+    I_POWER_B = 107,
+    I_POWER_C = 108,
+    I_POWER_D = 109,
+    I_POWER_IO = 110,
+    V_LIMIT = 111,
+    SLOW_ADC0 = 1000,
+    SLOW_ADC1,
+    SLOW_ADC2,
+    SLOW_ADC3,
+    SLOW_ADC4,
+    SLOW_ADC5,
+    SLOW_ADC6,
+    SLOW_ADC7,
+    SLOW_ADC_TEMP
+};
+
 enum class TimingMode { Auto, Trigger };
 enum class FrameDiscardPolicy { NoDiscard, Discard, DiscardPartial };

@@ -260,4 +347,15 @@ template <> FrameDiscardPolicy StringTo(const std::string & /*mode*/);

 using DataTypeVariants = std::variant<uint16_t, uint32_t>;

+constexpr uint16_t ADC_MASK =
+    0x3FFF; // used to mask out the gain bits in Jungfrau
+
+/**
+ * @brief Convert a string to a DACIndex
+ * @param arg string representation of the dacIndex
+ * @return DACIndex
+ * @throw invalid argument error if the string does not match any DACIndex
+ */
+template <> DACIndex StringTo(const std::string &arg);
+
 } // namespace aare
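DACIndex mirrors the dacIndex values of slsDetectorPackage so that ScanParameters can carry the scanned DAC as a typed value, with StringTo<DACIndex> parsing the name found in the master file. A small sketch; the exact spelling accepted by StringTo is an assumption here:

    #include "aare/defs.hpp"

    void dac_example() {
        // Throws std::invalid_argument for names that match no DACIndex
        auto dac = aare::StringTo<aare::DACIndex>("vthreshold"); // assumed key spelling
        if (dac == aare::VTHRESHOLD) {
            // configure a threshold scan ...
        }
    }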
@@ -1,15 +0,0 @@
-#pragma once
-#include "aare/RawMasterFile.hpp" //ROI refactor away
-#include "aare/defs.hpp"
-namespace aare {
-
-/**
- * @brief Update the detector geometry given a region of interest
- *
- * @param geo
- * @param roi
- * @return DetectorGeometry
- */
-DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, ROI roi);
-
-} // namespace aare
@@ -1,7 +1,10 @@
+#pragma once
 #include <thread>
 #include <utility>
 #include <vector>

+#include "aare/utils/task.hpp"
+
 namespace aare {

 template <typename F>
@@ -15,4 +18,17 @@ void RunInParallel(F func, const std::vector<std::pair<int, int>> &tasks) {
         thread.join();
     }
 }

+
+template <typename T>
+std::vector<NDView<T,3>> make_subviews(NDView<T, 3> &data, ssize_t n_threads) {
+    std::vector<NDView<T, 3>> subviews;
+    subviews.reserve(n_threads);
+    auto limits = split_task(0, data.shape(0), n_threads);
+    for (const auto &lim : limits) {
+        subviews.push_back(data.sub_view(lim.first, lim.second));
+    }
+    return subviews;
+}
+
 } // namespace aare
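make_subviews splits the first (frame) dimension of a 3D view into n_threads contiguous, non-overlapping sub-views on the same buffer; calculate_pedestal above uses it to hand one chunk of frames to each worker. A sketch, assuming an existing view:

    #include "aare/NDView.hpp"
    #include "aare/utils/par.hpp"

    void split_example(aare::NDView<uint16_t, 3> stack) {
        auto parts = aare::make_subviews(stack, 4);
        for (auto &part : parts) {
            // part.shape(0) frames, to be processed by one worker; no data is copied
            (void)part;
        }
    }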
@@ -1,4 +1,4 @@
+#pragma once
 #include <utility>
 #include <vector>

@@ -32,6 +32,7 @@ set( PYTHON_FILES
     aare/ClusterFinder.py
     aare/ClusterVector.py

+    aare/calibration.py
     aare/func.py
     aare/RawFile.py
     aare/transform.py
@@ -1,16 +1,8 @@
-
-# from ._aare import ClusterFinder_Cluster3x3i, ClusterFinder_Cluster2x2i, ClusterFinderMT_Cluster3x3i, ClusterFinderMT_Cluster2x2i, ClusterCollector_Cluster3x3i, ClusterCollector_Cluster2x2i
-
-
-# from ._aare import ClusterFileSink_Cluster3x3i, ClusterFileSink_Cluster2x2i
-
 from . import _aare
 import numpy as np

 _supported_cluster_sizes = [(2,2), (3,3), (5,5), (7,7), (9,9),]

-# def _get_class()
-
 def _type_to_char(dtype):
     if dtype == np.int32:
         return 'i'
@@ -54,14 +46,13 @@ def ClusterFinderMT(image_size, cluster_size = (3,3), dtype=np.int32, n_sigma=5,
     return cls(image_size, n_sigma=n_sigma, capacity=capacity, n_threads=n_threads)


-def ClusterCollector(clusterfindermt, cluster_size = (3,3), dtype=np.int32):
+def ClusterCollector(clusterfindermt, dtype=np.int32):
     """
     Factory function to create a ClusterCollector object. Provides a cleaner syntax for
     the templated ClusterCollector in C++.
     """

-    cls = _get_class("ClusterCollector", cluster_size, dtype)
+    cls = _get_class("ClusterCollector", clusterfindermt.cluster_size, dtype)
     return cls(clusterfindermt)

 def ClusterFileSink(clusterfindermt, cluster_file, dtype=np.int32):
@@ -74,11 +65,22 @@ def ClusterFileSink(clusterfindermt, cluster_file, dtype=np.int32):
     return cls(clusterfindermt, cluster_file)


-def ClusterFile(fname, cluster_size=(3,3), dtype=np.int32):
+def ClusterFile(fname, cluster_size=(3,3), dtype=np.int32, chunk_size = 1000):
     """
     Factory function to create a ClusterFile object. Provides a cleaner syntax for
     the templated ClusterFile in C++.

+    .. code-block:: python
+
+        from aare import ClusterFile
+
+        with ClusterFile("clusters.clust", cluster_size=(3,3), dtype=np.int32) as cf:
+            # cf is now a ClusterFile_Cluster3x3i object but you don't need to know that.
+            for clusters in cf:
+                # Loop over clusters in chunks of 1000
+                # The type of clusters will be a ClusterVector_Cluster3x3i in this case
+
     """

     cls = _get_class("ClusterFile", cluster_size, dtype)
-    return cls(fname)
+    return cls(fname, chunk_size=chunk_size)
||||||
|
|||||||
@@ -17,7 +17,7 @@ from .ClusterVector import ClusterVector
|
|||||||
from ._aare import fit_gaus, fit_pol1, fit_scurve, fit_scurve2
|
from ._aare import fit_gaus, fit_pol1, fit_scurve, fit_scurve2
|
||||||
from ._aare import Interpolator
|
from ._aare import Interpolator
|
||||||
from ._aare import calculate_eta2
|
from ._aare import calculate_eta2
|
||||||
|
from ._aare import reduce_to_2x2, reduce_to_3x3
|
||||||
|
|
||||||
from ._aare import apply_custom_weights
|
from ._aare import apply_custom_weights
|
||||||
|
|
||||||
@@ -30,3 +30,9 @@ from .utils import random_pixels, random_pixel, flat_list, add_colorbar
|
|||||||
|
|
||||||
#make functions available in the top level API
|
#make functions available in the top level API
|
||||||
from .func import *
|
from .func import *
|
||||||
|
|
||||||
|
from .calibration import *
|
||||||
|
from ._aare import apply_calibration, count_switching_pixels
|
||||||
|
from ._aare import calculate_pedestal, calculate_pedestal_float, calculate_pedestal_g0, calculate_pedestal_g0_float
|
||||||
|
|
||||||
|
from ._aare import VarClusterFinder
|
||||||
|
|||||||
python/aare/calibration.py (new file, 21 lines)
@@ -0,0 +1,21 @@
+#Calibration related functions
+import numpy as np
+def load_calibration(fname, hg0=False):
+    """
+    Load calibration data from a file.
+
+    Parameters:
+    fname (str): Path to the calibration file.
+    hg0 (bool): If True, load HG0 calibration data instead of G0.
+
+    """
+    gains = 3
+    rows = 512
+    cols = 1024
+    with open(fname, 'rb') as f:
+        cal = np.fromfile(f, count=gains * rows * cols, dtype=np.double).reshape(
+            gains, rows, cols
+        )
+        if hg0:
+            cal[0] = np.fromfile(f, count=rows * cols, dtype=np.double).reshape(rows, cols)
+    return cal
@@ -1,89 +1,8 @@
-import sys
-sys.path.append('/home/l_msdetect/erik/aare/build')

+from aare import apply_calibration
+import numpy as np
+raw = np.zeros((5,10,10), dtype=np.uint16)
+pedestal = np.zeros((3,10,10), dtype=np.float32)
+calibration = np.ones((3,10,10), dtype=np.float32)
+calibrated = apply_calibration(raw, pedestal, calibration,)
+
-from aare import RawSubFile, DetectorType, RawFile
-
-from pathlib import Path
-path = Path("/home/l_msdetect/erik/data/aare-test-data/raw/jungfrau/")
-f = RawSubFile(path/"jungfrau_single_d0_f0_0.raw", DetectorType.Jungfrau, 512, 1024, 16)
-
-# f = RawFile(path/"jungfrau_single_master_0.json")
-
-
-# from aare._aare import ClusterVector_i, Interpolator
-
-# import pickle
-# import numpy as np
-# import matplotlib.pyplot as plt
-# import boost_histogram as bh
-# import torch
-# import math
-# import time
-
-
-
-# def gaussian_2d(mx, my, sigma = 1, res=100, grid_size = 2):
-#     """
-#     Generate a 2D gaussian as position mx, my, with sigma=sigma.
-#     The gaussian is placed on a 2x2 pixel matrix with resolution
-#     res in one dimesion.
-#     """
-#     x = torch.linspace(0, pixel_size*grid_size, res)
-#     x,y = torch.meshgrid(x,x, indexing="ij")
-#     return 1 / (2*math.pi*sigma**2) * \
-#         torch.exp(-((x - my)**2 / (2*sigma**2) + (y - mx)**2 / (2*sigma**2)))
-
-# scale = 1000 #Scale factor when converting to integer
-# pixel_size = 25 #um
-# grid = 2
-# resolution = 100
-# sigma_um = 10
-# xa = np.linspace(0,grid*pixel_size,resolution)
-# ticks = [0, 25, 50]
-
-# hit = np.array((20,20))
-# etahist_fname = "/home/l_msdetect/erik/tmp/test_hist.pkl"
-
-# local_resolution = 99
-# grid_size = 3
-# xaxis = np.linspace(0,grid_size*pixel_size, local_resolution)
-# t = gaussian_2d(hit[0],hit[1], grid_size = grid_size, sigma = 10, res = local_resolution)
-# pixels = t.reshape(grid_size, t.shape[0] // grid_size, grid_size, t.shape[1] // grid_size).sum(axis = 3).sum(axis = 1)
-# pixels = pixels.numpy()
-# pixels = (pixels*scale).astype(np.int32)
-# v = ClusterVector_i(3,3)
-# v.push_back(1,1, pixels)
-
-# with open(etahist_fname, "rb") as f:
-#     hist = pickle.load(f)
-# eta = hist.view().copy()
-# etabinsx = np.array(hist.axes.edges.T[0].flat)
-# etabinsy = np.array(hist.axes.edges.T[1].flat)
-# ebins = np.array(hist.axes.edges.T[2].flat)
-# p = Interpolator(eta, etabinsx[0:-1], etabinsy[0:-1], ebins[0:-1])
-
-
-
-
-# #Generate the hit
-
-
-
-
-# tmp = p.interpolate(v)
-# print(f'tmp:{tmp}')
-# pos = np.array((tmp['x'], tmp['y']))*25
-
-
-# print(pixels)
-# fig, ax = plt.subplots(figsize = (7,7))
-# ax.pcolormesh(xaxis, xaxis, t)
-# ax.plot(*pos, 'o')
-# ax.set_xticks([0,25,50,75])
-# ax.set_yticks([0,25,50,75])
-# ax.set_xlim(0,75)
-# ax.set_ylim(0,75)
-# ax.grid()
-# print(f'{hit=}')
-# print(f'{pos=}')
@@ -24,7 +24,8 @@ void define_Cluster(py::module &m, const std::string &typestr) {
     py::class_<Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>>(
         m, class_name.c_str(), py::buffer_protocol())

-        .def(py::init([](uint8_t x, uint8_t y, py::array_t<Type> data) {
+        .def(py::init([](CoordType x, CoordType y,
+                         py::array_t<Type, py::array::forcecast> data) {
             py::buffer_info buf_info = data.request();
             Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType> cluster;
             cluster.x = x;
@@ -34,31 +35,58 @@ void define_Cluster(py::module &m, const std::string &typestr) {
                 cluster.data[i] = r(i);
             }
             return cluster;
-        }));
+        }))

-    /*
         // TODO! Review if to keep or not
-    .def_property(
-        "data",
-        [](ClusterType &c) -> py::array {
-            return py::array(py::buffer_info(
-                c.data, sizeof(Type),
-                py::format_descriptor<Type>::format(), // Type
-                                                       // format
-                1, // Number of dimensions
-                {static_cast<ssize_t>(ClusterSizeX *
-                                      ClusterSizeY)}, // Shape (flattened)
-                {sizeof(Type)} // Stride (step size between elements)
-            ));
-        },
-        [](ClusterType &c, py::array_t<Type> arr) {
-            py::buffer_info buf_info = arr.request();
-            Type *ptr = static_cast<Type *>(buf_info.ptr);
-            std::copy(ptr, ptr + ClusterSizeX * ClusterSizeY,
-                      c.data); // TODO dont iterate over centers!!!
-
-        });
-    */
+        .def_property_readonly(
+            "data",
+            [](Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType> &c)
+                -> py::array {
+                return py::array(py::buffer_info(
+                    c.data.data(), sizeof(Type),
+                    py::format_descriptor<Type>::format(), // Type
+                                                           // format
+                    2, // Number of dimensions
+                    {static_cast<ssize_t>(ClusterSizeX),
+                     static_cast<ssize_t>(ClusterSizeY)}, // Shape (flattened)
+                    {sizeof(Type) * ClusterSizeY, sizeof(Type)}
+                    // Stride (step size between elements)
+                ));
+            })
+        .def_readonly("x",
+                      &Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>::x)
+        .def_readonly("y",
+                      &Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>::y);
+}
+
+template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
+          typename CoordType = int16_t>
+void reduce_to_3x3(py::module &m) {
+
+    m.def(
+        "reduce_to_3x3",
+        [](const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cl) {
+            return reduce_to_3x3(cl);
+        },
+        py::return_value_policy::move,
+        "Reduce cluster to 3x3 subcluster by taking the 3x3 subcluster with "
+        "the highest photon energy.");
+}
+
+template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
+          typename CoordType = int16_t>
+void reduce_to_2x2(py::module &m) {
+
+    m.def(
+        "reduce_to_2x2",
+        [](const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cl) {
+            return reduce_to_2x2(cl);
+        },
+        py::return_value_policy::move,
+        "Reduce cluster to 2x2 subcluster by taking the 2x2 subcluster with "
+        "the highest photon energy.");
 }

 #pragma GCC diagnostic pop
@@ -38,19 +38,20 @@ void define_ClusterFile(py::module &m, const std::string &typestr) {
                     self.read_clusters(n_clusters));
                 return v;
             },
-            py::return_value_policy::take_ownership)
+            py::return_value_policy::take_ownership, py::arg("n_clusters"))
        .def("read_frame",
             [](ClusterFile<ClusterType> &self) {
                 auto v = new ClusterVector<ClusterType>(self.read_frame());
                 return v;
             })
-        .def("set_roi", &ClusterFile<ClusterType>::set_roi)
+        .def("set_roi", &ClusterFile<ClusterType>::set_roi, py::arg("roi"))
        .def(
            "set_noise_map",
            [](ClusterFile<ClusterType> &self, py::array_t<int32_t> noise_map) {
                auto view = make_view_2d(noise_map);
                self.set_noise_map(view);
-            })
+            },
+            py::arg("noise_map"))

        .def("set_gain_map",
             [](ClusterFile<ClusterType> &self, py::array_t<double> gain_map) {
@@ -83,11 +84,19 @@ template <typename Type, uint8_t CoordSizeX, uint8_t CoordSizeY,
          typename CoordType = uint16_t>
void register_calculate_eta(py::module &m) {
    using ClusterType = Cluster<Type, CoordSizeX, CoordSizeY, CoordType>;

    m.def("calculate_eta2",
          [](const aare::ClusterVector<ClusterType> &clusters) {
              auto eta2 = new NDArray<double, 2>(calculate_eta2(clusters));
              return return_image_data(eta2);
          });
+
+    m.def("calculate_eta2", [](const aare::Cluster<Type, CoordSizeX, CoordSizeY,
+                                              CoordType> &cluster) {
+        auto eta2 = calculate_eta2(cluster);
+        // TODO return proper eta class
+        return py::make_tuple(eta2.x, eta2.y, eta2.sum);
+    });
}

#pragma GCC diagnostic pop
@@ -104,4 +104,47 @@ void define_ClusterVector(py::module &m, const std::string &typestr) {
         });
 }

+template <typename Type, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
+          typename CoordType = uint16_t>
+void define_2x2_reduction(py::module &m) {
+    m.def(
+        "reduce_to_2x2",
+        [](const ClusterVector<
+            Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>> &cv) {
+            return new ClusterVector<Cluster<Type, 2, 2, CoordType>>(
+                reduce_to_2x2(cv));
+        },
+        R"(
+
+        Reduce cluster to 2x2 subcluster by taking the 2x2 subcluster with
+        the highest photon energy."
+        Parameters
+        ----------
+        cv : ClusterVector
+        )",
+        py::arg("clustervector"));
+}
+
+template <typename Type, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
+          typename CoordType = uint16_t>
+void define_3x3_reduction(py::module &m) {
+
+    m.def(
+        "reduce_to_3x3",
+        [](const ClusterVector<
+            Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>> &cv) {
+            return new ClusterVector<Cluster<Type, 3, 3, CoordType>>(
+                reduce_to_3x3(cv));
+        },
+        R"(
+
+        Reduce cluster to 3x3 subcluster by taking the 3x3 subcluster with
+        the highest photon energy."
+        Parameters
+        ----------
+        cv : ClusterVector
+        )",
+        py::arg("clustervector"));
+}
+
 #pragma GCC diagnostic pop
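
Note: reduce_to_2x2/reduce_to_3x3 return a new ClusterVector holding, for each input cluster, the subcluster with the highest summed photon energy. A short sketch taken from the Python tests further down (ClusterVector((3, 3)) and the _aare module are used exactly as in those tests):

    import numpy as np
    from aare import ClusterVector, _aare

    cv = ClusterVector((3, 3))
    cv.push_back(_aare.Cluster3x3i(5, 5, np.array([1, 1, 1, 2, 3, 1, 2, 2, 1], dtype=np.int32)))

    reduced = np.array(_aare.reduce_to_2x2(cv), copy=False)
    # reduced[0]["x"], reduced[0]["y"] -> 4, 5 and reduced[0]["data"] -> [[2, 3], [2, 2]]
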
python/src/bind_calibration.hpp (new file, 153 lines)
@@ -0,0 +1,153 @@
+#include "aare/calibration.hpp"
+
+#include <cstdint>
+#include <filesystem>
+#include <pybind11/numpy.h>
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#include <pybind11/stl_bind.h>
+
+namespace py = pybind11;
+
+template <typename DataType>
+py::array_t<DataType> pybind_apply_calibration(
+    py::array_t<uint16_t, py::array::c_style | py::array::forcecast> data,
+    py::array_t<DataType, py::array::c_style | py::array::forcecast> pedestal,
+    py::array_t<DataType, py::array::c_style | py::array::forcecast>
+        calibration,
+    int n_threads = 4) {
+
+    auto data_span = make_view_3d(data); // data is always 3D
+    /* No pointer is passed, so NumPy will allocate the buffer */
+    auto result = py::array_t<DataType>(data_span.shape());
+    auto res = make_view_3d(result);
+    if (data.ndim() == 3 && pedestal.ndim() == 3 && calibration.ndim() == 3) {
+        auto ped = make_view_3d(pedestal);
+        auto cal = make_view_3d(calibration);
+        aare::apply_calibration<DataType, 3>(res, data_span, ped, cal,
+                                             n_threads);
+    } else if (data.ndim() == 3 && pedestal.ndim() == 2 &&
+               calibration.ndim() == 2) {
+        auto ped = make_view_2d(pedestal);
+        auto cal = make_view_2d(calibration);
+        aare::apply_calibration<DataType, 2>(res, data_span, ped, cal,
+                                             n_threads);
+    } else {
+        throw std::runtime_error(
+            "Invalid number of dimensions for data, pedestal or calibration");
+    }
+    return result;
+}
+
+py::array_t<int> pybind_count_switching_pixels(
+    py::array_t<uint16_t, py::array::c_style | py::array::forcecast> data,
+    ssize_t n_threads = 4) {
+
+    auto data_span = make_view_3d(data);
+    auto arr = new NDArray<int, 2>{};
+    *arr = aare::count_switching_pixels(data_span, n_threads);
+    return return_image_data(arr);
+}
+
+template <typename T>
+py::array_t<T> pybind_calculate_pedestal(
+    py::array_t<uint16_t, py::array::c_style | py::array::forcecast> data,
+    ssize_t n_threads) {
+
+    auto data_span = make_view_3d(data);
+    auto arr = new NDArray<T, 3>{};
+    *arr = aare::calculate_pedestal<T, false>(data_span, n_threads);
+    return return_image_data(arr);
+}
+
+template <typename T>
+py::array_t<T> pybind_calculate_pedestal_g0(
+    py::array_t<uint16_t, py::array::c_style | py::array::forcecast> data,
+    ssize_t n_threads) {
+
+    auto data_span = make_view_3d(data);
+    auto arr = new NDArray<T, 2>{};
+    *arr = aare::calculate_pedestal<T, true>(data_span, n_threads);
+    return return_image_data(arr);
+}
+
+void bind_calibration(py::module &m) {
+    m.def("apply_calibration", &pybind_apply_calibration<double>,
+          py::arg("raw_data").noconvert(), py::kw_only(),
+          py::arg("pd").noconvert(), py::arg("cal").noconvert(),
+          py::arg("n_threads") = 4);
+
+    m.def("apply_calibration", &pybind_apply_calibration<float>,
+          py::arg("raw_data").noconvert(), py::kw_only(),
+          py::arg("pd").noconvert(), py::arg("cal").noconvert(),
+          py::arg("n_threads") = 4);
+
+    m.def("count_switching_pixels", &pybind_count_switching_pixels,
+          R"(
+          Count the number of time each pixel switches to G1 or G2.
+
+          Parameters
+          ----------
+          raw_data : array_like
+              3D array of shape (frames, rows, cols) to count the switching pixels from.
+          n_threads : int
+              The number of threads to use for the calculation.
+          )",
+          py::arg("raw_data").noconvert(), py::kw_only(),
+          py::arg("n_threads") = 4);
+
+    m.def("calculate_pedestal", &pybind_calculate_pedestal<double>,
+          R"(
+          Calculate the pedestal for all three gains and return the result as a 3D array of doubles.
+
+          Parameters
+          ----------
+          raw_data : array_like
+              3D array of shape (frames, rows, cols) to calculate the pedestal from.
+              Needs to contain data for all three gains (G0, G1, G2).
+          n_threads : int
+              The number of threads to use for the calculation.
+          )",
+          py::arg("raw_data").noconvert(), py::arg("n_threads") = 4);
+
+    m.def("calculate_pedestal_float", &pybind_calculate_pedestal<float>,
+          R"(
+          Same as `calculate_pedestal` but returns a 3D array of floats.
+
+          Parameters
+          ----------
+          raw_data : array_like
+              3D array of shape (frames, rows, cols) to calculate the pedestal from.
+              Needs to contain data for all three gains (G0, G1, G2).
+          n_threads : int
+              The number of threads to use for the calculation.
+          )",
+          py::arg("raw_data").noconvert(), py::arg("n_threads") = 4);
+
+    m.def("calculate_pedestal_g0", &pybind_calculate_pedestal_g0<double>,
+          R"(
+          Calculate the pedestal for G0 and return the result as a 2D array of doubles.
+          Pixels in G1 and G2 are ignored.
+
+          Parameters
+          ----------
+          raw_data : array_like
+              3D array of shape (frames, rows, cols) to calculate the pedestal from.
+          n_threads : int
+              The number of threads to use for the calculation.
+          )",
+          py::arg("raw_data").noconvert(), py::arg("n_threads") = 4);
+
+    m.def("calculate_pedestal_g0_float", &pybind_calculate_pedestal_g0<float>,
+          R"(
+          Same as `calculate_pedestal_g0` but returns a 2D array of floats.
+
+          Parameters
+          ----------
+          raw_data : array_like
+              3D array of shape (frames, rows, cols) to calculate the pedestal from.
+          n_threads : int
+              The number of threads to use for the calculation.
+          )",
+          py::arg("raw_data").noconvert(), py::arg("n_threads") = 4);
+}
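
Note: a minimal Python sketch of the calibration API bound above, following the shapes used in python/tests/test_calibration.py (the gain of each pixel is taken from the raw data itself; pedestal and calibration carry one plane per gain):

    import numpy as np
    import aare

    raw = np.zeros((10, 4, 5), dtype=np.uint16)        # (frames, rows, cols)
    pedestal = np.zeros((3, 4, 5), dtype=np.float32)    # one plane per gain (G0, G1, G2)
    calibration = np.ones((3, 4, 5), dtype=np.float32)

    # calibrated = (raw - pedestal) / calibration, per pixel and per gain
    data = aare.apply_calibration(raw, pd=pedestal, cal=calibration)

    pd = aare.calculate_pedestal(raw)          # (3, rows, cols), float64
    pd_g0 = aare.calculate_pedestal_g0(raw)    # (rows, cols), G0 only
    switches = aare.count_switching_pixels(raw)
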
@@ -8,6 +8,7 @@
 #include "bind_ClusterFinder.hpp"
 #include "bind_ClusterFinderMT.hpp"
 #include "bind_ClusterVector.hpp"
+#include "bind_calibration.hpp"

 // TODO! migrate the other names
 #include "ctb_raw_file.hpp"
@@ -46,7 +47,9 @@ double, 'f' for float)
     define_ClusterFileSink<T, N, M, U>(m, "Cluster" #N "x" #M #TYPE_CODE);    \
     define_ClusterCollector<T, N, M, U>(m, "Cluster" #N "x" #M #TYPE_CODE);   \
     define_Cluster<T, N, M, U>(m, #N "x" #M #TYPE_CODE);                      \
-    register_calculate_eta<T, N, M, U>(m);
+    register_calculate_eta<T, N, M, U>(m);                                    \
+    define_2x2_reduction<T, N, M, U>(m);                                      \
+    reduce_to_2x2<T, N, M, U>(m);

 PYBIND11_MODULE(_aare, m) {
     define_file_io_bindings(m);
@@ -62,6 +65,8 @@ PYBIND11_MODULE(_aare, m) {
     define_interpolation_bindings(m);
     define_jungfrau_data_file_io_bindings(m);

+    bind_calibration(m);
+
     DEFINE_CLUSTER_BINDINGS(int, 3, 3, uint16_t, i);
     DEFINE_CLUSTER_BINDINGS(double, 3, 3, uint16_t, d);
     DEFINE_CLUSTER_BINDINGS(float, 3, 3, uint16_t, f);
@@ -81,4 +86,32 @@ PYBIND11_MODULE(_aare, m) {
     DEFINE_CLUSTER_BINDINGS(int, 9, 9, uint16_t, i);
     DEFINE_CLUSTER_BINDINGS(double, 9, 9, uint16_t, d);
     DEFINE_CLUSTER_BINDINGS(float, 9, 9, uint16_t, f);
+
+    // DEFINE_CLUSTER_BINDINGS(double, 2, 1, uint16_t, d);
+
+    define_3x3_reduction<int, 3, 3, uint16_t>(m);
+    define_3x3_reduction<double, 3, 3, uint16_t>(m);
+    define_3x3_reduction<float, 3, 3, uint16_t>(m);
+    define_3x3_reduction<int, 5, 5, uint16_t>(m);
+    define_3x3_reduction<double, 5, 5, uint16_t>(m);
+    define_3x3_reduction<float, 5, 5, uint16_t>(m);
+    define_3x3_reduction<int, 7, 7, uint16_t>(m);
+    define_3x3_reduction<double, 7, 7, uint16_t>(m);
+    define_3x3_reduction<float, 7, 7, uint16_t>(m);
+    define_3x3_reduction<int, 9, 9, uint16_t>(m);
+    define_3x3_reduction<double, 9, 9, uint16_t>(m);
+    define_3x3_reduction<float, 9, 9, uint16_t>(m);
+
+    reduce_to_3x3<int, 3, 3, uint16_t>(m);
+    reduce_to_3x3<double, 3, 3, uint16_t>(m);
+    reduce_to_3x3<float, 3, 3, uint16_t>(m);
+    reduce_to_3x3<int, 5, 5, uint16_t>(m);
+    reduce_to_3x3<double, 5, 5, uint16_t>(m);
+    reduce_to_3x3<float, 5, 5, uint16_t>(m);
+    reduce_to_3x3<int, 7, 7, uint16_t>(m);
+    reduce_to_3x3<double, 7, 7, uint16_t>(m);
+    reduce_to_3x3<float, 7, 7, uint16_t>(m);
+    reduce_to_3x3<int, 9, 9, uint16_t>(m);
+    reduce_to_3x3<double, 9, 9, uint16_t>(m);
+    reduce_to_3x3<float, 9, 9, uint16_t>(m);
 }
@@ -65,7 +65,7 @@ void define_raw_file_io_bindings(py::module &m) {
                     header = py::array_t<DetectorHeader>(n_frames);
                 } else {
                     header = py::array_t<DetectorHeader>(
-                        {self.n_modules(), n_frames});
+                        {self.n_modules_in_roi(), n_frames});
                 }
                 // py::array_t<DetectorHeader> header({self.n_mod(), n_frames});

@@ -101,7 +101,8 @@ void define_raw_file_io_bindings(py::module &m) {
         .def_property_readonly("cols", &RawFile::cols)
         .def_property_readonly("bitdepth", &RawFile::bitdepth)
         .def_property_readonly("geometry", &RawFile::geometry)
-        .def_property_readonly("n_modules", &RawFile::n_modules)
         .def_property_readonly("detector_type", &RawFile::detector_type)
-        .def_property_readonly("master", &RawFile::master);
+        .def_property_readonly("master", &RawFile::master)
+        .def_property_readonly("n_modules", &RawFile::n_modules)
+        .def_property_readonly("n_modules_in_roi", &RawFile::n_modules_in_roi);
 }
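
Note: with this change the header array returned by RawFile.read() is shaped by n_modules_in_roi rather than n_modules, and both counts are exposed as read-only properties. A short Python sketch, with a placeholder master-file path:

    from aare import RawFile

    with RawFile("path/to/Data_master_0.json") as f:   # placeholder path
        print(f.n_modules, f.n_modules_in_roi)
        headers, frames = f.read()
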
@@ -57,6 +57,8 @@ void define_raw_master_file_bindings(py::module &m) {
         .def_property_readonly("total_frames_expected",
                                &RawMasterFile::total_frames_expected)
         .def_property_readonly("geometry", &RawMasterFile::geometry)
+        .def_property_readonly("udp_interfaces_per_module",
+                               &RawMasterFile::udp_interfaces_per_module)
         .def_property_readonly("analog_samples", &RawMasterFile::analog_samples,
                                R"(
             Number of analog samples
@@ -6,20 +6,20 @@ import pytest

 def pytest_addoption(parser):
     parser.addoption(
-        "--files", action="store_true", default=False, help="run slow tests"
+        "--with-data", action="store_true", default=False, help="Run tests that require additional data"
     )


 def pytest_configure(config):
-    config.addinivalue_line("markers", "files: mark test as needing image files to run")
+    config.addinivalue_line("markers", "withdata: mark test as needing image files to run")


 def pytest_collection_modifyitems(config, items):
-    if config.getoption("--files"):
+    if config.getoption("--with-data"):
        return
-    skip = pytest.mark.skip(reason="need --files option to run")
+    skip = pytest.mark.skip(reason="need --with-data option to run")
     for item in items:
-        if "files" in item.keywords:
+        if "withdata" in item.keywords:
            item.add_marker(skip)

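
Note: tests that need the external test data are now marked with the withdata marker and skipped unless pytest is invoked with --with-data. A sketch (the test name is made up for illustration):

    import pytest

    @pytest.mark.withdata
    def test_reads_external_files(test_data_path):
        ...  # runs only when invoked as: pytest --with-data
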
@@ -53,8 +53,8 @@ def test_Interpolator():

    assert interpolated_photons.size == 1

-   assert interpolated_photons[0]["x"] == -1
-   assert interpolated_photons[0]["y"] == -1
+   assert interpolated_photons[0]["x"] == 0
+   assert interpolated_photons[0]["y"] == 0
    assert interpolated_photons[0]["energy"] == 4 #eta_sum = 4, dx, dy = -1,-1 m_ietax = 0, m_ietay = 0

    clustervector = _aare.ClusterVector_Cluster2x2i()
@@ -84,7 +84,7 @@ def test_calculate_eta():
    assert eta2[0,0] == 0.5
    assert eta2[0,1] == 0.5
    assert eta2[1,0] == 0.5
-   assert eta2[1,1] == 0.6 #1/5
+   assert eta2[1,1] == 0.4 #2/5

 def test_cluster_finder():
    """Test ClusterFinder"""
@@ -101,6 +101,27 @@ def test_cluster_finder():
    assert clusters.size == 0


+def test_2x2_reduction():
+    """Test 2x2 Reduction"""
+    cluster = _aare.Cluster3x3i(5,5,np.array([1, 1, 1, 2, 3, 1, 2, 2, 1], dtype=np.int32))
+
+    reduced_cluster = _aare.reduce_to_2x2(cluster)
+
+    assert reduced_cluster.x == 4
+    assert reduced_cluster.y == 5
+    assert (reduced_cluster.data == np.array([[2, 3], [2, 2]], dtype=np.int32)).all()
+
+
+def test_3x3_reduction():
+    """Test 3x3 Reduction"""
+    cluster = _aare.Cluster5x5d(5,5,np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 2.0, 2.0, 3.0,
+                                              1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], dtype=np.double))
+
+    reduced_cluster = _aare.reduce_to_3x3(cluster)
+
+    assert reduced_cluster.x == 4
+    assert reduced_cluster.y == 5
+    assert (reduced_cluster.data == np.array([[1.0, 2.0, 1.0], [2.0, 2.0, 3.0], [1.0, 2.0, 1.0]], dtype=np.double)).all()


@@ -9,7 +9,7 @@ import pickle
 from aare import ClusterFile
 from conftest import test_data_path

-@pytest.mark.files
+@pytest.mark.withdata
 def test_cluster_file(test_data_path):
    """Test ClusterFile"""
    f = ClusterFile(test_data_path / "clust/single_frame_97_clustrers.clust")
@@ -39,7 +39,7 @@ def test_cluster_file(test_data_path):
    for i in range(10):
        assert arr[i]['x'] == i+1

-@pytest.mark.files
+@pytest.mark.withdata
 def test_read_clusters_and_fill_histogram(test_data_path):
    # Create the histogram
    n_bins = 100
@@ -5,7 +5,7 @@ import time
 from pathlib import Path
 import pickle

-from aare import ClusterFile
+from aare import ClusterFile, ClusterVector
 from aare import _aare
 from conftest import test_data_path

@@ -52,3 +52,35 @@ def test_make_a_hitmap_from_cluster_vector():
    # print(ref)
    assert (img == ref).all()
+
+
+def test_2x2_reduction():
+    cv = ClusterVector((3,3))
+
+    cv.push_back(_aare.Cluster3x3i(5, 5, np.array([1, 1, 1, 2, 3, 1, 2, 2, 1], dtype=np.int32)))
+    cv.push_back(_aare.Cluster3x3i(5, 5, np.array([2, 2, 1, 2, 3, 1, 1, 1, 1], dtype=np.int32)))
+
+    reduced_cv = np.array(_aare.reduce_to_2x2(cv), copy=False)
+
+    assert reduced_cv.size == 2
+    assert reduced_cv[0]["x"] == 4
+    assert reduced_cv[0]["y"] == 5
+    assert (reduced_cv[0]["data"] == np.array([[2, 3], [2, 2]], dtype=np.int32)).all()
+    assert reduced_cv[1]["x"] == 4
+    assert reduced_cv[1]["y"] == 6
+    assert (reduced_cv[1]["data"] == np.array([[2, 2], [2, 3]], dtype=np.int32)).all()
+
+
+def test_3x3_reduction():
+    cv = _aare.ClusterVector_Cluster5x5d()
+
+    cv.push_back(_aare.Cluster5x5d(5,5,np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 2.0, 2.0, 3.0,
+                                                 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], dtype=np.double)))
+    cv.push_back(_aare.Cluster5x5d(5,5,np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 2.0, 2.0, 3.0,
+                                                 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], dtype=np.double)))
+
+    reduced_cv = np.array(_aare.reduce_to_3x3(cv), copy=False)
+
+    assert reduced_cv.size == 2
+    assert reduced_cv[0]["x"] == 4
+    assert reduced_cv[0]["y"] == 5
+    assert (reduced_cv[0]["data"] == np.array([[1.0, 2.0, 1.0], [2.0, 2.0, 3.0], [1.0, 2.0, 1.0]], dtype=np.double)).all()
python/tests/test_Interpolation.py (new file, 148 lines)
@@ -0,0 +1,148 @@
+import pytest
+import numpy as np
+import boost_histogram as bh
+import pickle
+from scipy.stats import multivariate_normal
+
+from aare import Interpolator, calculate_eta2
+from aare._aare import ClusterVector_Cluster2x2d, Cluster2x2d, Cluster3x3d, ClusterVector_Cluster3x3d
+
+from conftest import test_data_path
+
+pixel_width = 1e-4
+values = np.arange(0.5*pixel_width, 0.1, pixel_width)
+num_pixels = values.size
+X, Y = np.meshgrid(values, values)
+data_points = np.stack([X.ravel(), Y.ravel()], axis=1)
+variance = 10*pixel_width
+covariance_matrix = np.array([[variance, 0],[0, variance]])
+
+
+def create_photon_hit_with_gaussian_distribution(mean, covariance_matrix, data_points):
+    gaussian = multivariate_normal(mean=mean, cov=covariance_matrix)
+    probability_values = gaussian.pdf(data_points)
+    return (probability_values.reshape(X.shape)).round() #python bindings only support frame types of uint16_t
+
+def photon_hit_in_euclidean_space(cluster_center, pixels_per_superpixel, photon_hit):
+    scaled_photon_hit_x = cluster_center - (1 - photon_hit[0][0])*pixels_per_superpixel*pixel_width
+    scaled_photon_hit_y = cluster_center - (1 - photon_hit[0][1])*pixels_per_superpixel*pixel_width
+    return (scaled_photon_hit_x, scaled_photon_hit_y)
+
+def create_2x2cluster_from_frame(frame, pixels_per_superpixel):
+    return Cluster2x2d(1, 1, np.array([frame[0:pixels_per_superpixel, 0:pixels_per_superpixel].sum(),
+        frame[0:pixels_per_superpixel, pixels_per_superpixel:2*pixels_per_superpixel].sum(),
+        frame[pixels_per_superpixel:2*pixels_per_superpixel, 0:pixels_per_superpixel].sum(),
+        frame[pixels_per_superpixel:2*pixels_per_superpixel, pixels_per_superpixel:2*pixels_per_superpixel].sum()], dtype=np.float64))
+
+
+def create_3x3cluster_from_frame(frame, pixels_per_superpixel):
+    return Cluster3x3d(1, 1, np.array([frame[0:pixels_per_superpixel, 0:pixels_per_superpixel].sum(),
+        frame[0:pixels_per_superpixel, pixels_per_superpixel:2*pixels_per_superpixel].sum(),
+        frame[0:pixels_per_superpixel, 2*pixels_per_superpixel:3*pixels_per_superpixel].sum(),
+        frame[pixels_per_superpixel:2*pixels_per_superpixel, 0:pixels_per_superpixel].sum(),
+        frame[pixels_per_superpixel:2*pixels_per_superpixel, pixels_per_superpixel:2*pixels_per_superpixel].sum(),
+        frame[pixels_per_superpixel:2*pixels_per_superpixel, 2*pixels_per_superpixel:3*pixels_per_superpixel].sum(),
+        frame[2*pixels_per_superpixel:3*pixels_per_superpixel, 0:pixels_per_superpixel].sum(),
+        frame[2*pixels_per_superpixel:3*pixels_per_superpixel, pixels_per_superpixel:2*pixels_per_superpixel].sum(),
+        frame[2*pixels_per_superpixel:3*pixels_per_superpixel, 2*pixels_per_superpixel:3*pixels_per_superpixel].sum()], dtype=np.float64))
+
+
+def calculate_eta_distribution(num_frames, pixels_per_superpixel, random_number_generator, cluster_2x2 = True):
+    hist = bh.Histogram(
+        bh.axis.Regular(100, -0.2, 1.2),
+        bh.axis.Regular(100, -0.2, 1.2), bh.axis.Regular(1, 0, num_pixels*num_pixels*1/(variance*2*np.pi)))
+
+    for _ in range(0, num_frames):
+        mean_x = random_number_generator.uniform(pixels_per_superpixel*pixel_width, 2*pixels_per_superpixel*pixel_width)
+        mean_y = random_number_generator.uniform(pixels_per_superpixel*pixel_width, 2*pixels_per_superpixel*pixel_width)
+        frame = create_photon_hit_with_gaussian_distribution(np.array([mean_x, mean_y]), variance, data_points)
+
+        cluster = None
+
+        if cluster_2x2:
+            cluster = create_2x2cluster_from_frame(frame, pixels_per_superpixel)
+        else:
+            cluster = create_3x3cluster_from_frame(frame, pixels_per_superpixel)
+
+        eta2 = calculate_eta2(cluster)
+        hist.fill(eta2[0], eta2[1], eta2[2])
+
+    return hist
+
+@pytest.mark.withdata
+def test_interpolation_of_2x2_cluster(test_data_path):
+    """Test Interpolation of 2x2 cluster from Photon hit with Gaussian Distribution"""
+
+    #TODO maybe better to compute in test instead of loading - depends on eta
+    """
+    filename = test_data_path/"eta_distributions"/"eta_distribution_2x2cluster_gaussian.pkl"
+    with open(filename, "rb") as f:
+        eta_distribution = pickle.load(f)
+    """
+
+    num_frames = 1000
+    pixels_per_superpixel = int(num_pixels*0.5)
+    random_number_generator = np.random.default_rng(42)
+
+    eta_distribution = calculate_eta_distribution(num_frames, pixels_per_superpixel, random_number_generator)
+
+    interpolation = Interpolator(eta_distribution, eta_distribution.axes[0].edges[:-1], eta_distribution.axes[1].edges[:-1], eta_distribution.axes[2].edges[:-1])
+
+    #actual photon hit
+    mean = 1.2*pixels_per_superpixel*pixel_width
+    mean = np.array([mean, mean])
+    frame = create_photon_hit_with_gaussian_distribution(mean, covariance_matrix, data_points)
+    cluster = create_2x2cluster_from_frame(frame, pixels_per_superpixel)
+
+    clustervec = ClusterVector_Cluster2x2d()
+    clustervec.push_back(cluster)
+
+    interpolated_photon = interpolation.interpolate(clustervec)
+
+    assert interpolated_photon.size == 1
+
+    cluster_center = 1.5*pixels_per_superpixel*pixel_width
+
+    scaled_photon_hit = photon_hit_in_euclidean_space(cluster_center, pixels_per_superpixel, interpolated_photon)
+
+    assert (np.linalg.norm(scaled_photon_hit - mean) < np.linalg.norm(np.array([cluster_center, cluster_center] - mean)))
+
+
+@pytest.mark.withdata
+def test_interpolation_of_3x3_cluster(test_data_path):
+    """Test Interpolation of 3x3 Cluster from Photon hit with Gaussian Distribution"""
+
+    #TODO maybe better to compute in test instead of loading - depends on eta
+    """
+    filename = test_data_path/"eta_distributions"/"eta_distribution_3x3cluster_gaussian.pkl"
+    with open(filename, "rb") as f:
+        eta_distribution = pickle.load(f)
+    """
+
+    num_frames = 1000
+    pixels_per_superpixel = int(num_pixels/3)
+    random_number_generator = np.random.default_rng(42)
+    eta_distribution = calculate_eta_distribution(num_frames, pixels_per_superpixel, random_number_generator, False)
+
+    interpolation = Interpolator(eta_distribution, eta_distribution.axes[0].edges[:-1], eta_distribution.axes[1].edges[:-1], eta_distribution.axes[2].edges[:-1])
+
+    #actual photon hit
+    mean = 1.2*pixels_per_superpixel*pixel_width
+    mean = np.array([mean, mean])
+    frame = create_photon_hit_with_gaussian_distribution(mean, covariance_matrix, data_points)
+    cluster = create_3x3cluster_from_frame(frame, pixels_per_superpixel)
+
+    clustervec = ClusterVector_Cluster3x3d()
+    clustervec.push_back(cluster)
+
+    interpolated_photon = interpolation.interpolate(clustervec)
+
+    assert interpolated_photon.size == 1
+
+    cluster_center = 1.5*pixels_per_superpixel*pixel_width
+
+    scaled_photon_hit = photon_hit_in_euclidean_space(cluster_center, pixels_per_superpixel, interpolated_photon)
+
+    assert (np.linalg.norm(scaled_photon_hit - mean) < np.linalg.norm(np.array([cluster_center, cluster_center] - mean)))
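
Note: the workflow exercised by this test can be summarised as: build a 3D eta2 histogram (eta_x, eta_y, energy), hand it to Interpolator together with the lower bin edges, then interpolate a ClusterVector. A condensed sketch of the calls used above (variable names are illustrative):

    eta_hist = calculate_eta_distribution(1000, pixels_per_superpixel, np.random.default_rng(42))
    interp = Interpolator(eta_hist,
                          eta_hist.axes[0].edges[:-1],
                          eta_hist.axes[1].edges[:-1],
                          eta_hist.axes[2].edges[:-1])
    photons = interp.interpolate(clustervec)   # structured array with "x", "y", "energy"
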
python/tests/test_RawFile.py (new file, 73 lines)
@@ -0,0 +1,73 @@
+import pytest
+from aare import RawFile
+import numpy as np
+
+@pytest.mark.withdata
+def test_read_rawfile_with_roi(test_data_path):
+
+    with RawFile(test_data_path / "raw/SingleChipROI/Data_master_0.json") as f:
+        headers, frames = f.read()
+
+    assert headers.size == 10100
+    assert frames.shape == (10100, 256, 256)
+
+@pytest.mark.withdata
+def test_read_rawfile_quad_eiger_and_compare_to_numpy(test_data_path):
+
+    d0 = test_data_path/'raw/eiger_quad_data/W13_vrpreampscan_m21C_300V_800eV_vthre2000_d0_f0_0.raw'
+    d1 = test_data_path/'raw/eiger_quad_data/W13_vrpreampscan_m21C_300V_800eV_vthre2000_d1_f0_0.raw'
+
+    image = np.zeros((512,512), dtype=np.uint32)
+
+    with open(d0) as f:
+        raw = np.fromfile(f, dtype=np.uint32, count = 256*512, offset = 20*256*512*4 + 112*21).reshape(256,512)
+
+    image[256:,:] = raw
+
+    with open(d1) as f:
+        raw = np.fromfile(f, dtype=np.uint32, count = 256*512, offset = 20*256*512*4 + 112*21).reshape(256,512)
+
+    image[0:256,:] = raw[::-1,:]
+
+    with RawFile(test_data_path/'raw/eiger_quad_data/W13_vrpreampscan_m21C_300V_800eV_vthre2000_master_0.json') as f:
+        f.seek(20)
+        header, image1 = f.read_frame()
+
+    assert (image == image1).all()
+
+
+@pytest.mark.withdata
+def test_read_rawfile_eiger_and_compare_to_numpy(test_data_path):
+    d0 = test_data_path/'raw/eiger/Lab6_20500eV_2deg_20240629_d0_f0_7.raw'
+    d1 = test_data_path/'raw/eiger/Lab6_20500eV_2deg_20240629_d1_f0_7.raw'
+    d2 = test_data_path/'raw/eiger/Lab6_20500eV_2deg_20240629_d2_f0_7.raw'
+    d3 = test_data_path/'raw/eiger/Lab6_20500eV_2deg_20240629_d3_f0_7.raw'
+
+    image = np.zeros((512,1024), dtype=np.uint32)
+
+    #TODO why is there no header offset?
+    with open(d0) as f:
+        raw = np.fromfile(f, dtype=np.uint32, count = 256*512, offset=112).reshape(256,512)
+
+    image[0:256,0:512] = raw[::-1]
+
+    with open(d1) as f:
+        raw = np.fromfile(f, dtype=np.uint32, count = 256*512, offset=112).reshape(256,512)
+
+    image[0:256,512:] = raw[::-1]
+
+    with open(d2) as f:
+        raw = np.fromfile(f, dtype=np.uint32, count = 256*512, offset=112).reshape(256,512)
+
+    image[256:,0:512] = raw
+
+    with open(d3) as f:
+        raw = np.fromfile(f, dtype=np.uint32, count = 256*512, offset=112).reshape(256,512)
+
+    image[256:,512:] = raw
+
+
+    with RawFile(test_data_path/'raw/eiger/Lab6_20500eV_2deg_20240629_master_7.json') as f:
+        header, image1 = f.read_frame()
+
+    assert (image == image1).all()
@@ -3,7 +3,7 @@ import numpy as np
 from aare import RawSubFile, DetectorType


-@pytest.mark.files
+@pytest.mark.withdata
 def test_read_a_jungfrau_RawSubFile(test_data_path):

    # Starting with f1 there is now 7 frames left in the series of files
@@ -23,7 +23,7 @@ def test_read_a_jungfrau_RawSubFile(test_data_path):
    data = np.load(test_data_path / "raw/jungfrau/jungfrau_single_0.npy")
    assert np.all(data[3:] == frames)

-@pytest.mark.files
+@pytest.mark.withdata
 def test_iterate_over_a_jungfrau_RawSubFile(test_data_path):

    data = np.load(test_data_path / "raw/jungfrau/jungfrau_single_0.npy")
python/tests/test_calibration.py (new file, 135 lines)
@@ -0,0 +1,135 @@
+import pytest
+import numpy as np
+
+import aare
+
+def test_apply_calibration_small_data():
+    # The raw data consists of 10 4x5 images
+    raw = np.zeros((10, 4, 5), dtype=np.uint16)
+
+    # We need a pedestal for each gain, so 3
+    pedestal = np.zeros((3, 4, 5), dtype=np.float32)
+
+    # And the same for calibration
+    calibration = np.ones((3, 4, 5), dtype=np.float32)
+
+    # Set the known values, probing one pixel in each gain
+    raw[0, 0, 0] = 100 #ADC value of 100, gain 0
+    pedestal[0, 0, 0] = 10
+    calibration[0, 0, 0] = 43.7
+
+    raw[2, 3, 3] = (1<<14) + 1000 #ADC value of 1000, gain 1
+    pedestal[1, 3, 3] = 500
+    calibration[1, 3, 3] = 2.0
+
+    raw[1,1,4] = (3<<14) + 857 #ADC value of 857, gain 2
+    pedestal[2,1,4] = 100
+    calibration[2,1,4] = 3.0
+
+
+    data = aare.apply_calibration(raw, pd = pedestal, cal = calibration)
+
+    # The formula that is applied is:
+    # calibrated = (raw - pedestal) / calibration
+    assert data.shape == (10, 4, 5)
+    assert data[0, 0, 0] == (100 - 10) / 43.7
+    assert data[2, 3, 3] == (1000 - 500) / 2.0
+    assert data[1, 1, 4] == (857 - 100) / 3.0
+
+    # Other pixels should be zero
+    assert data[2,2,2] == 0
+    assert data[0,1,1] == 0
+    assert data[1,3,0] == 0
+
+
+@pytest.fixture
+def raw_data_3x2x2():
+    raw = np.zeros((3, 2, 2), dtype=np.uint16)
+    raw[0, 0, 0] = 100
+    raw[1,0, 0] = 200
+    raw[2, 0, 0] = 300
+
+    raw[0, 0, 1] = (1<<14) + 100
+    raw[1, 0, 1] = (1<<14) + 200
+    raw[2, 0, 1] = (1<<14) + 300
+
+    raw[0, 1, 0] = (1<<14) + 37
+    raw[1, 1, 0] = 38
+    raw[2, 1, 0] = (3<<14) + 39
+
+    raw[0, 1, 1] = (3<<14) + 100
+    raw[1, 1, 1] = (3<<14) + 200
+    raw[2, 1, 1] = (3<<14) + 300
+    return raw
+
+def test_calculate_pedestal(raw_data_3x2x2):
+    # Calculate the pedestal
+    pd = aare.calculate_pedestal(raw_data_3x2x2)
+    assert pd.shape == (3, 2, 2)
+    assert pd.dtype == np.float64
+    assert pd[0, 0, 0] == 200
+    assert pd[1, 0, 0] == 0
+    assert pd[2, 0, 0] == 0
+
+    assert pd[0, 0, 1] == 0
+    assert pd[1, 0, 1] == 200
+    assert pd[2, 0, 1] == 0
+
+    assert pd[0, 1, 0] == 38
+    assert pd[1, 1, 0] == 37
+    assert pd[2, 1, 0] == 39
+
+    assert pd[0, 1, 1] == 0
+    assert pd[1, 1, 1] == 0
+    assert pd[2, 1, 1] == 200
+
+def test_calculate_pedestal_float(raw_data_3x2x2):
+    #results should be the same for float
+    pd2 = aare.calculate_pedestal_float(raw_data_3x2x2)
+    assert pd2.shape == (3, 2, 2)
+    assert pd2.dtype == np.float32
+    assert pd2[0, 0, 0] == 200
+    assert pd2[1, 0, 0] == 0
+    assert pd2[2, 0, 0] == 0
+
+    assert pd2[0, 0, 1] == 0
+    assert pd2[1, 0, 1] == 200
+    assert pd2[2, 0, 1] == 0
+
+    assert pd2[0, 1, 0] == 38
+    assert pd2[1, 1, 0] == 37
+    assert pd2[2, 1, 0] == 39
+
+    assert pd2[0, 1, 1] == 0
+    assert pd2[1, 1, 1] == 0
+    assert pd2[2, 1, 1] == 200
+
+def test_calculate_pedestal_g0(raw_data_3x2x2):
+    pd = aare.calculate_pedestal_g0(raw_data_3x2x2)
+    assert pd.shape == (2, 2)
+    assert pd.dtype == np.float64
+    assert pd[0, 0] == 200
+    assert pd[1, 0] == 38
+    assert pd[0, 1] == 0
+    assert pd[1, 1] == 0
+
+def test_calculate_pedestal_g0_float(raw_data_3x2x2):
+    pd = aare.calculate_pedestal_g0_float(raw_data_3x2x2)
+    assert pd.shape == (2, 2)
+    assert pd.dtype == np.float32
+    assert pd[0, 0] == 200
+    assert pd[1, 0] == 38
+    assert pd[0, 1] == 0
+    assert pd[1, 1] == 0
+
+def test_count_switching_pixels(raw_data_3x2x2):
+    # Count the number of pixels that switched gain
+    count = aare.count_switching_pixels(raw_data_3x2x2)
+    assert count.shape == (2, 2)
+    assert count.sum() == 8
+    assert count[0, 0] == 0
+    assert count[1, 0] == 2
+    assert count[0, 1] == 3
+    assert count[1, 1] == 3
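
Note: the fixtures above encode the gain in the two most significant bits of the 16-bit raw word ((1<<14) marks G1 and (3<<14) marks G2), with the ADC value in the lower 14 bits. A small sketch of that bit layout, inferred from the test values:

    import numpy as np

    raw = np.array([100, (1 << 14) + 1000, (3 << 14) + 857], dtype=np.uint16)
    gain = raw >> 14        # 0 -> G0, 1 -> G1, 3 -> G2
    adc = raw & 0x3FFF      # lower 14 bits hold the ADC value
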
python/tests/test_interpolation.ipynb (new file, 663 lines)
File diff suppressed because one or more lines are too long
@@ -2,7 +2,7 @@ import pytest
 import numpy as np
 from aare import JungfrauDataFile

-@pytest.mark.files
+@pytest.mark.withdata
 def test_jfungfrau_dat_read_number_of_frames(test_data_path):
    with JungfrauDataFile(test_data_path / "dat/AldoJF500k_000000.dat") as dat_file:
        assert dat_file.total_frames == 24
@@ -14,7 +14,7 @@ def test_jfungfrau_dat_read_number_of_frames(test_data_path):
        assert dat_file.total_frames == 113


-@pytest.mark.files
+@pytest.mark.withdata
 def test_jfungfrau_dat_read_number_of_file(test_data_path):
    with JungfrauDataFile(test_data_path / "dat/AldoJF500k_000000.dat") as dat_file:
        assert dat_file.n_files == 4
@@ -26,7 +26,7 @@ def test_jfungfrau_dat_read_number_of_file(test_data_path):
        assert dat_file.n_files == 7


-@pytest.mark.files
+@pytest.mark.withdata
 def test_read_module(test_data_path):
    """
    Read all frames from the series of .dat files. Compare to canned data in npz format.
@@ -50,7 +50,7 @@ def test_read_module(test_data_path):
    assert np.all(ref_header == header)
    assert np.all(ref_data == data)

-@pytest.mark.files
+@pytest.mark.withdata
 def test_read_half_module(test_data_path):

    # Read all frames from the .dat file
@@ -71,7 +71,7 @@ def test_read_half_module(test_data_path):
    assert np.all(ref_data == data)


-@pytest.mark.files
+@pytest.mark.withdata
 def test_read_single_chip(test_data_path):

    # Read all frames from the .dat file
@@ -19,3 +19,85 @@ TEST_CASE("Test sum of Cluster", "[.cluster]") {

     CHECK(cluster.sum() == 10);
 }
+
+using ClusterTypes = std::variant<Cluster<int, 2, 2>, Cluster<int, 3, 3>,
+                                  Cluster<int, 5, 5>, Cluster<int, 2, 3>>;
+
+using ClusterTypesLargerThan2x2 =
+    std::variant<Cluster<int, 3, 3>, Cluster<int, 4, 4>, Cluster<int, 5, 5>>;
+
+TEST_CASE("Test reduce to 2x2 Cluster", "[.cluster]") {
+    auto [cluster, expected_reduced_cluster] = GENERATE(
+        std::make_tuple(ClusterTypes{Cluster<int, 2, 2>{5, 5, {1, 2, 3, 4}}},
+                        Cluster<int, 2, 2>{4, 6, {1, 2, 3, 4}}),
+        std::make_tuple(
+            ClusterTypes{Cluster<int, 3, 3>{5, 5, {1, 1, 1, 1, 3, 2, 1, 2, 2}}},
+            Cluster<int, 2, 2>{5, 5, {3, 2, 2, 2}}),
+        std::make_tuple(
+            ClusterTypes{Cluster<int, 3, 3>{5, 5, {1, 1, 1, 2, 3, 1, 2, 2, 1}}},
+            Cluster<int, 2, 2>{4, 5, {2, 3, 2, 2}}),
+        std::make_tuple(
+            ClusterTypes{Cluster<int, 3, 3>{5, 5, {2, 2, 1, 2, 3, 1, 1, 1, 1}}},
+            Cluster<int, 2, 2>{4, 6, {2, 2, 2, 3}}),
+        std::make_tuple(
+            ClusterTypes{Cluster<int, 3, 3>{5, 5, {1, 2, 2, 1, 3, 2, 1, 1, 1}}},
+            Cluster<int, 2, 2>{5, 6, {2, 2, 3, 2}}),
+        std::make_tuple(ClusterTypes{Cluster<int, 5, 5>{
+                            5, 5, {1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 3,
+                                   2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}}},
+                        Cluster<int, 2, 2>{5, 6, {2, 2, 3, 2}}),
+        std::make_tuple(ClusterTypes{Cluster<int, 5, 5>{
+                            5, 5, {1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 2, 3,
+                                   1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}}},
+                        Cluster<int, 2, 2>{4, 6, {2, 2, 2, 3}}),
+        std::make_tuple(
+            ClusterTypes{Cluster<int, 2, 3>{5, 5, {2, 2, 3, 2, 1, 1}}},
+            Cluster<int, 2, 2>{4, 6, {2, 2, 3, 2}}));
+
+    auto reduced_cluster = std::visit(
+        [](const auto &clustertype) { return reduce_to_2x2(clustertype); },
+        cluster);
+
+    CHECK(reduced_cluster.x == expected_reduced_cluster.x);
+    CHECK(reduced_cluster.y == expected_reduced_cluster.y);
+    CHECK(std::equal(reduced_cluster.data.begin(),
+                     reduced_cluster.data.begin() + 4,
+                     expected_reduced_cluster.data.begin()));
+}
+
+TEST_CASE("Test reduce to 3x3 Cluster", "[.cluster]") {
+    auto [cluster, expected_reduced_cluster] = GENERATE(
+        std::make_tuple(ClusterTypesLargerThan2x2{Cluster<int, 3, 3>{
+                            5, 5, {1, 1, 1, 1, 3, 1, 1, 1, 1}}},
+                        Cluster<int, 3, 3>{5, 5, {1, 1, 1, 1, 3, 1, 1, 1, 1}}),
+        std::make_tuple(
+            ClusterTypesLargerThan2x2{Cluster<int, 4, 4>{
+                5, 5, {2, 2, 1, 1, 2, 2, 1, 1, 1, 1, 3, 1, 1, 1, 1, 1}}},
+            Cluster<int, 3, 3>{4, 6, {2, 2, 1, 2, 2, 1, 1, 1, 3}}),
+        std::make_tuple(
+            ClusterTypesLargerThan2x2{Cluster<int, 4, 4>{
+                5, 5, {1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 3, 1, 1, 1, 1, 1}}},
+            Cluster<int, 3, 3>{5, 6, {1, 2, 2, 1, 2, 2, 1, 3, 1}}),
+        std::make_tuple(
+            ClusterTypesLargerThan2x2{Cluster<int, 4, 4>{
+                5, 5, {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 2, 1, 1, 2, 2}}},
+            Cluster<int, 3, 3>{5, 5, {1, 1, 1, 1, 3, 2, 1, 2, 2}}),
+        std::make_tuple(
+            ClusterTypesLargerThan2x2{Cluster<int, 4, 4>{
+                5, 5, {1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 1, 2, 2, 1, 1}}},
+            Cluster<int, 3, 3>{4, 5, {1, 1, 1, 2, 2, 3, 2, 2, 1}}),
+        std::make_tuple(ClusterTypesLargerThan2x2{Cluster<int, 5, 5>{
+                            5, 5, {1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 2, 2, 3,
+                                   1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1}}},
+                        Cluster<int, 3, 3>{4, 5, {1, 2, 1, 2, 2, 3, 1, 2, 1}}));
+
+    auto reduced_cluster = std::visit(
+        [](const auto &clustertype) { return reduce_to_3x3(clustertype); },
+        cluster);
+
+    CHECK(reduced_cluster.x == expected_reduced_cluster.x);
+    CHECK(reduced_cluster.y == expected_reduced_cluster.y);
+    CHECK(std::equal(reduced_cluster.data.begin(),
+                     reduced_cluster.data.begin() + 9,
+                     expected_reduced_cluster.data.begin()));
+}
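
Note: the expected values above all follow one rule: among the overlapping 2x2 (or 3x3) windows of the cluster, keep the one with the largest sum. A NumPy sketch of that selection for one of the 3x3 test clusters (the real implementation lives in the C++ Cluster code):

    import numpy as np

    data = np.array([[1, 1, 1],
                     [2, 3, 1],
                     [2, 2, 1]])

    sums = [data[r:r + 2, c:c + 2].sum() for r in range(2) for c in range(2)]
    r, c = divmod(int(np.argmax(sums)), 2)
    print(data[r:r + 2, c:c + 2])   # [[2 3] [2 2]], matching the expected reduced cluster
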
@@ -1,395 +0,0 @@
|
|||||||
#include "aare/ClusterFile.hpp"
|
|
||||||
|
|
||||||
#include <algorithm>
|
|
||||||
|
|
||||||
namespace aare {
|
|
||||||
|
|
||||||
ClusterFile::ClusterFile(const std::filesystem::path &fname, size_t chunk_size,
|
|
||||||
const std::string &mode)
|
|
||||||
: m_chunk_size(chunk_size), m_mode(mode) {
|
|
||||||
|
|
||||||
if (mode == "r") {
|
|
||||||
fp = fopen(fname.c_str(), "rb");
|
|
||||||
if (!fp) {
|
|
||||||
throw std::runtime_error("Could not open file for reading: " +
|
|
||||||
fname.string());
|
|
||||||
}
|
|
||||||
} else if (mode == "w") {
|
|
||||||
fp = fopen(fname.c_str(), "wb");
|
|
||||||
if (!fp) {
|
|
||||||
throw std::runtime_error("Could not open file for writing: " +
|
|
||||||
fname.string());
|
|
||||||
}
|
|
||||||
} else if (mode == "a") {
|
|
||||||
fp = fopen(fname.c_str(), "ab");
|
|
||||||
if (!fp) {
|
|
||||||
throw std::runtime_error("Could not open file for appending: " +
|
|
||||||
fname.string());
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
throw std::runtime_error("Unsupported mode: " + mode);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void ClusterFile::set_roi(ROI roi) { m_roi = roi; }
|
|
||||||
|
|
||||||
void ClusterFile::set_noise_map(const NDView<int32_t, 2> noise_map) {
|
|
||||||
m_noise_map = NDArray<int32_t, 2>(noise_map);
|
|
||||||
}
|
|
||||||
|
|
||||||
void ClusterFile::set_gain_map(const NDView<double, 2> gain_map) {
|
|
||||||
m_gain_map = NDArray<double, 2>(gain_map);
|
|
||||||
|
|
||||||
// Gain map is passed as ADU/keV to avoid dividing in when applying the gain
|
|
||||||
// map we invert it here
|
|
||||||
for (auto &item : m_gain_map->view()) {
|
|
||||||
item = 1.0 / item;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
ClusterFile::~ClusterFile() { close(); }
|
|
||||||
|
|
||||||
void ClusterFile::close() {
|
|
||||||
if (fp) {
|
|
||||||
fclose(fp);
|
|
||||||
fp = nullptr;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void ClusterFile::write_frame(const ClusterVector<int32_t> &clusters) {
|
|
||||||
if (m_mode != "w" && m_mode != "a") {
|
|
||||||
throw std::runtime_error("File not opened for writing");
|
|
||||||
}
|
|
||||||
if (!(clusters.cluster_size_x() == 3) &&
|
|
||||||
!(clusters.cluster_size_y() == 3)) {
|
|
||||||
throw std::runtime_error("Only 3x3 clusters are supported");
|
|
||||||
}
|
|
||||||
// First write the frame number - 4 bytes
|
|
||||||
int32_t frame_number = clusters.frame_number();
|
|
||||||
if (fwrite(&frame_number, sizeof(frame_number), 1, fp) != 1) {
|
|
||||||
throw std::runtime_error(LOCATION + "Could not write frame number");
|
|
||||||
}
|
|
||||||
|
|
||||||
// Then write the number of clusters - 4 bytes
|
|
||||||
uint32_t n_clusters = clusters.size();
|
|
||||||
if (fwrite(&n_clusters, sizeof(n_clusters), 1, fp) != 1) {
|
|
||||||
throw std::runtime_error(LOCATION +
|
|
||||||
"Could not write number of clusters");
|
|
||||||
}
|
|
||||||
|
|
||||||
// Now write the clusters in the frame
|
|
||||||
if (fwrite(clusters.data(), clusters.item_size(), clusters.size(), fp) !=
|
|
||||||
clusters.size()) {
|
|
||||||
throw std::runtime_error(LOCATION + "Could not write clusters");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
ClusterVector<int32_t> ClusterFile::read_clusters(size_t n_clusters) {
|
|
||||||
if (m_mode != "r") {
|
|
||||||
throw std::runtime_error("File not opened for reading");
|
|
||||||
}
|
|
||||||
if (m_noise_map || m_roi) {
|
|
||||||
return read_clusters_with_cut(n_clusters);
|
|
||||||
} else {
|
|
||||||
return read_clusters_without_cut(n_clusters);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
ClusterVector<int32_t>
|
|
||||||
ClusterFile::read_clusters_without_cut(size_t n_clusters) {
|
|
||||||
if (m_mode != "r") {
|
|
||||||
throw std::runtime_error("File not opened for reading");
|
|
||||||
}
|
|
||||||
|
|
||||||
ClusterVector<int32_t> clusters(3, 3, n_clusters);
|
|
||||||
|
|
||||||
int32_t iframe = 0; // frame number needs to be 4 bytes!
|
|
||||||
size_t nph_read = 0;
|
|
||||||
uint32_t nn = m_num_left;
|
|
||||||
uint32_t nph = m_num_left; // number of clusters in frame needs to be 4
|
|
||||||
|
|
||||||
// auto buf = reinterpret_cast<Cluster3x3 *>(clusters.data());
|
|
||||||
auto buf = clusters.data();
|
|
||||||
// if there are photons left from previous frame read them first
|
|
||||||
if (nph) {
|
|
||||||
if (nph > n_clusters) {
|
|
||||||
// if we have more photons left in the frame then photons to read we
|
|
||||||
// read directly the requested number
|
|
||||||
nn = n_clusters;
|
|
||||||
} else {
|
|
||||||
nn = nph;
|
|
||||||
}
|
|
||||||
nph_read += fread((buf + nph_read * clusters.item_size()),
|
|
||||||
clusters.item_size(), nn, fp);
|
|
||||||
m_num_left = nph - nn; // write back the number of photons left
|
|
||||||
}
|
|
||||||
|
|
||||||
if (nph_read < n_clusters) {
|
|
||||||
// keep on reading frames and photons until reaching n_clusters
|
|
||||||
while (fread(&iframe, sizeof(iframe), 1, fp)) {
|
|
||||||
clusters.set_frame_number(iframe);
|
|
||||||
// read number of clusters in frame
|
|
||||||
if (fread(&nph, sizeof(nph), 1, fp)) {
|
|
||||||
if (nph > (n_clusters - nph_read))
|
|
||||||
nn = n_clusters - nph_read;
|
|
||||||
else
|
|
||||||
nn = nph;
|
|
||||||
|
|
||||||
nph_read += fread((buf + nph_read * clusters.item_size()),
|
|
||||||
clusters.item_size(), nn, fp);
|
|
||||||
m_num_left = nph - nn;
|
|
||||||
}
|
|
||||||
if (nph_read >= n_clusters)
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Resize the vector to the number of clusters.
|
|
||||||
// No new allocation, only change bounds.
|
|
||||||
clusters.resize(nph_read);
|
|
||||||
if (m_gain_map)
|
|
||||||
clusters.apply_gain_map(m_gain_map->view());
|
|
||||||
return clusters;
|
|
||||||
}
|
|
||||||
|
|
||||||
ClusterVector<int32_t> ClusterFile::read_clusters_with_cut(size_t n_clusters) {
|
|
||||||
ClusterVector<int32_t> clusters(3, 3);
|
|
||||||
clusters.reserve(n_clusters);
|
|
||||||
|
|
||||||
// if there are photons left from previous frame read them first
|
|
||||||
if (m_num_left) {
|
|
||||||
while (m_num_left && clusters.size() < n_clusters) {
|
|
||||||
Cluster3x3 c = read_one_cluster();
|
|
||||||
if (is_selected(c)) {
|
|
||||||
clusters.push_back(c.x, c.y,
|
|
||||||
reinterpret_cast<std::byte *>(c.data));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// we did not have enough clusters left in the previous frame
|
|
||||||
// keep on reading frames until reaching n_clusters
|
|
||||||
if (clusters.size() < n_clusters) {
|
|
||||||
// sanity check
|
|
||||||
if (m_num_left) {
|
|
||||||
throw std::runtime_error(
|
|
||||||
LOCATION + "Entered second loop with clusters left\n");
|
|
||||||
}
|
|
||||||
|
|
||||||
int32_t frame_number = 0; // frame number needs to be 4 bytes!
|
|
||||||
while (fread(&frame_number, sizeof(frame_number), 1, fp)) {
|
|
||||||
if (fread(&m_num_left, sizeof(m_num_left), 1, fp)) {
|
|
||||||
clusters.set_frame_number(
|
|
||||||
frame_number); // cluster vector will hold the last frame
|
|
||||||
// number
|
|
||||||
while (m_num_left && clusters.size() < n_clusters) {
|
|
||||||
Cluster3x3 c = read_one_cluster();
|
|
||||||
if (is_selected(c)) {
|
|
||||||
clusters.push_back(
|
|
||||||
c.x, c.y, reinterpret_cast<std::byte *>(c.data));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// we have enough clusters, break out of the outer while loop
|
|
||||||
if (clusters.size() >= n_clusters)
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if (m_gain_map)
|
|
||||||
clusters.apply_gain_map(m_gain_map->view());
|
|
||||||
|
|
||||||
return clusters;
|
|
||||||
}
|
|
||||||
|
|
||||||
Cluster3x3 ClusterFile::read_one_cluster() {
|
|
||||||
Cluster3x3 c;
|
|
||||||
auto rc = fread(&c, sizeof(c), 1, fp);
|
|
||||||
if (rc != 1) {
|
|
||||||
throw std::runtime_error(LOCATION + "Could not read cluster");
|
|
||||||
}
|
|
||||||
--m_num_left;
|
|
||||||
return c;
|
|
||||||
}
|
|
||||||
|
|
||||||
ClusterVector<int32_t> ClusterFile::read_frame() {
|
|
||||||
if (m_mode != "r") {
|
|
||||||
throw std::runtime_error(LOCATION + "File not opened for reading");
|
|
||||||
}
|
|
||||||
if (m_noise_map || m_roi) {
|
|
||||||
return read_frame_with_cut();
|
|
||||||
} else {
|
|
||||||
return read_frame_without_cut();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
ClusterVector<int32_t> ClusterFile::read_frame_without_cut() {
|
|
||||||
if (m_mode != "r") {
|
|
||||||
throw std::runtime_error("File not opened for reading");
|
|
||||||
}
|
|
||||||
if (m_num_left) {
|
|
||||||
throw std::runtime_error(
|
|
||||||
"There are still photons left in the last frame");
|
|
||||||
}
|
|
||||||
int32_t frame_number;
|
|
||||||
if (fread(&frame_number, sizeof(frame_number), 1, fp) != 1) {
|
|
||||||
throw std::runtime_error(LOCATION + "Could not read frame number");
|
|
||||||
}
|
|
||||||
|
|
||||||
int32_t n_clusters; // Saved as 32bit integer in the cluster file
|
|
||||||
if (fread(&n_clusters, sizeof(n_clusters), 1, fp) != 1) {
|
|
||||||
throw std::runtime_error(LOCATION +
|
|
||||||
"Could not read number of clusters");
    }

    ClusterVector<int32_t> clusters(3, 3, n_clusters);
    clusters.set_frame_number(frame_number);

    if (fread(clusters.data(), clusters.item_size(), n_clusters, fp) !=
        static_cast<size_t>(n_clusters)) {
        throw std::runtime_error(LOCATION + "Could not read clusters");
    }
    clusters.resize(n_clusters);
    if (m_gain_map)
        clusters.apply_gain_map(m_gain_map->view());
    return clusters;
}

ClusterVector<int32_t> ClusterFile::read_frame_with_cut() {
    if (m_mode != "r") {
        throw std::runtime_error("File not opened for reading");
    }
    if (m_num_left) {
        throw std::runtime_error(
            "There are still photons left in the last frame");
    }
    int32_t frame_number;
    if (fread(&frame_number, sizeof(frame_number), 1, fp) != 1) {
        throw std::runtime_error("Could not read frame number");
    }

    if (fread(&m_num_left, sizeof(m_num_left), 1, fp) != 1) {
        throw std::runtime_error("Could not read number of clusters");
    }

    ClusterVector<int32_t> clusters(3, 3);
    clusters.reserve(m_num_left);
    clusters.set_frame_number(frame_number);
    while (m_num_left) {
        Cluster3x3 c = read_one_cluster();
        if (is_selected(c)) {
            clusters.push_back(c.x, c.y, reinterpret_cast<std::byte *>(c.data));
        }
    }
    if (m_gain_map)
        clusters.apply_gain_map(m_gain_map->view());
    return clusters;
}

bool ClusterFile::is_selected(Cluster3x3 &cl) {
    // Should fail fast
    if (m_roi) {
        if (!(m_roi->contains(cl.x, cl.y))) {
            return false;
        }
    }
    if (m_noise_map) {
        int32_t sum_1x1 = cl.data[4];   // central pixel
        int32_t sum_2x2 = cl.sum_2x2(); // highest sum of 2x2 subclusters
        int32_t sum_3x3 = cl.sum();     // sum of all pixels

        auto noise =
            (*m_noise_map)(cl.y, cl.x); // TODO! check if this is correct
        if (sum_1x1 <= noise || sum_2x2 <= 2 * noise || sum_3x3 <= 3 * noise) {
            return false;
        }
    }
    // we passed all checks
    return true;
}

NDArray<double, 2> calculate_eta2(ClusterVector<int> &clusters) {
    // TOTO! make work with 2x2 clusters
    NDArray<double, 2> eta2({static_cast<int64_t>(clusters.size()), 2});

    if (clusters.cluster_size_x() == 3 || clusters.cluster_size_y() == 3) {
        for (size_t i = 0; i < clusters.size(); i++) {
            auto e = calculate_eta2(clusters.at<Cluster3x3>(i));
            eta2(i, 0) = e.x;
            eta2(i, 1) = e.y;
        }
    } else if (clusters.cluster_size_x() == 2 ||
               clusters.cluster_size_y() == 2) {
        for (size_t i = 0; i < clusters.size(); i++) {
            auto e = calculate_eta2(clusters.at<Cluster2x2>(i));
            eta2(i, 0) = e.x;
            eta2(i, 1) = e.y;
        }
    } else {
        throw std::runtime_error("Only 3x3 and 2x2 clusters are supported");
    }

    return eta2;
}

/**
 * @brief Calculate the eta2 values for a 3x3 cluster and return them in a Eta2
 * struct containing etay, etax and the corner of the cluster.
 */
Eta2 calculate_eta2(Cluster3x3 &cl) {
    Eta2 eta{};

    std::array<int32_t, 4> tot2;
    tot2[0] = cl.data[0] + cl.data[1] + cl.data[3] + cl.data[4];
    tot2[1] = cl.data[1] + cl.data[2] + cl.data[4] + cl.data[5];
    tot2[2] = cl.data[3] + cl.data[4] + cl.data[6] + cl.data[7];
    tot2[3] = cl.data[4] + cl.data[5] + cl.data[7] + cl.data[8];

    auto c = std::max_element(tot2.begin(), tot2.end()) - tot2.begin();
    eta.sum = tot2[c];
    switch (c) {
    case cBottomLeft:
        if ((cl.data[3] + cl.data[4]) != 0)
            eta.x = static_cast<double>(cl.data[4]) / (cl.data[3] + cl.data[4]);
        if ((cl.data[1] + cl.data[4]) != 0)
            eta.y = static_cast<double>(cl.data[4]) / (cl.data[1] + cl.data[4]);
        eta.c = cBottomLeft;
        break;
    case cBottomRight:
        if ((cl.data[2] + cl.data[5]) != 0)
            eta.x = static_cast<double>(cl.data[5]) / (cl.data[4] + cl.data[5]);
        if ((cl.data[1] + cl.data[4]) != 0)
            eta.y = static_cast<double>(cl.data[4]) / (cl.data[1] + cl.data[4]);
        eta.c = cBottomRight;
        break;
    case cTopLeft:
        if ((cl.data[7] + cl.data[4]) != 0)
            eta.x = static_cast<double>(cl.data[4]) / (cl.data[3] + cl.data[4]);
        if ((cl.data[7] + cl.data[4]) != 0)
            eta.y = static_cast<double>(cl.data[7]) / (cl.data[7] + cl.data[4]);
        eta.c = cTopLeft;
        break;
    case cTopRight:
        if ((cl.data[5] + cl.data[4]) != 0)
            eta.x = static_cast<double>(cl.data[5]) / (cl.data[5] + cl.data[4]);
        if ((cl.data[7] + cl.data[4]) != 0)
            eta.y = static_cast<double>(cl.data[7]) / (cl.data[7] + cl.data[4]);
        eta.c = cTopRight;
        break;
        // no default to allow compiler to warn about missing cases
    }
    return eta;
}

Eta2 calculate_eta2(Cluster2x2 &cl) {
    Eta2 eta{};
    if ((cl.data[0] + cl.data[1]) != 0)
        eta.x = static_cast<double>(cl.data[1]) / (cl.data[0] + cl.data[1]);
    if ((cl.data[0] + cl.data[2]) != 0)
        eta.y = static_cast<double>(cl.data[2]) / (cl.data[0] + cl.data[2]);
    eta.sum = cl.data[0] + cl.data[1] + cl.data[2] + cl.data[3];
    eta.c = cBottomLeft; // TODO! This is not correct, but need to put something
    return eta;
}

} // namespace aare
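To make the eta2 arithmetic above easier to follow, here is a small self-contained sketch of the same 2x2-subcluster calculation on invented pixel values. It deliberately works on a plain std::array instead of the aare Cluster3x3/Eta2 types, so nothing beyond the formulas shown above is assumed.

#include <algorithm>
#include <array>
#include <cstdint>
#include <cstdio>

int main() {
    // Row-major 3x3 cluster, index 4 is the central pixel (invented values).
    std::array<int32_t, 9> d{1, 2, 1, 3, 10, 4, 1, 2, 1};

    // The four overlapping 2x2 sums, in the same order as tot2[] above:
    // bottom-left, bottom-right, top-left, top-right.
    std::array<int32_t, 4> tot2{d[0] + d[1] + d[3] + d[4],
                                d[1] + d[2] + d[4] + d[5],
                                d[3] + d[4] + d[6] + d[7],
                                d[4] + d[5] + d[7] + d[8]};
    auto c = std::max_element(tot2.begin(), tot2.end()) - tot2.begin();

    // Here tot2 = {16, 17, 16, 17}, so the first maximum is index 1, the
    // bottom-right corner, and the function above would compute
    // eta.x = d[5] / (d[4] + d[5]) = 4 / 14 and
    // eta.y = d[4] / (d[1] + d[4]) = 10 / 12.
    std::printf("corner index %ld, 2x2 sum %d\n", static_cast<long>(c),
                static_cast<int>(tot2[c]));
}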
@@ -10,7 +10,7 @@ using aare::Cluster;
using aare::ClusterFile;
using aare::ClusterVector;

-TEST_CASE("Read one frame from a cluster file", "[.files]") {
+TEST_CASE("Read one frame from a cluster file", "[.with-data]") {
    // We know that the frame has 97 clusters
    auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust";
    REQUIRE(std::filesystem::exists(fpath));
@@ -26,7 +26,7 @@ TEST_CASE("Read one frame from a cluster file", "[.files]") {
        std::begin(expected_cluster_data)));
}

-TEST_CASE("Read one frame using ROI", "[.files]") {
+TEST_CASE("Read one frame using ROI", "[.with-data]") {
    // We know that the frame has 97 clusters
    auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust";
    REQUIRE(std::filesystem::exists(fpath));
@@ -58,7 +58,7 @@ TEST_CASE("Read one frame using ROI", "[.files]") {
        std::begin(expected_cluster_data)));
}

-TEST_CASE("Read clusters from single frame file", "[.files]") {
+TEST_CASE("Read clusters from single frame file", "[.with-data]") {

    // frame_number, num_clusters [135] 97
    // [ 1 200] [0 1 2 3 4 5 6 7 8]
@@ -202,7 +202,7 @@ TEST_CASE("Read clusters from single frame file", "[.files]") {
    }
}

-TEST_CASE("Read clusters from single frame file with ROI", "[.files]") {
+TEST_CASE("Read clusters from single frame file with ROI", "[.with-data]") {
    auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust";
    REQUIRE(std::filesystem::exists(fpath));

@@ -226,7 +226,7 @@ TEST_CASE("Read clusters from single frame file with ROI", "[.files]") {
        std::begin(expected_cluster_data)));
}

-TEST_CASE("Read cluster from multiple frame file", "[.files]") {
+TEST_CASE("Read cluster from multiple frame file", "[.with-data]") {

    using ClusterType = Cluster<double, 2, 2>;

@@ -279,7 +279,8 @@ TEST_CASE("Read cluster from multiple frame file", "[.files]") {
    }
}

-TEST_CASE("Write cluster with potential padding", "[.files][.ClusterFile]") {
+TEST_CASE("Write cluster with potential padding",
+          "[.with-data][.ClusterFile]") {

    using ClusterType = Cluster<double, 3, 3>;

@@ -290,7 +291,7 @@ TEST_CASE("Write cluster with potential padding", "[.files][.ClusterFile]") {
    ClusterFile<ClusterType> file(fpath, 1000, "w");

    ClusterVector<ClusterType> clustervec(2);
-    int16_t coordinate = 5;
+    uint16_t coordinate = 5;
    clustervec.push_back(ClusterType{
        coordinate, coordinate, {0., 0., 0., 0., 0., 0., 0., 0., 0.}});
    clustervec.push_back(ClusterType{
@@ -324,7 +325,7 @@ TEST_CASE("Write cluster with potential padding", "[.files][.ClusterFile]") {
    }));
}

-TEST_CASE("Read frame and modify cluster data", "[.files][.ClusterFile]") {
+TEST_CASE("Read frame and modify cluster data", "[.with-data][.ClusterFile]") {
    auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust";
    REQUIRE(std::filesystem::exists(fpath));

@@ -57,9 +57,12 @@ class ClusterFinderMTWrapper
    size_t m_sink_size() const { return this->m_sink.sizeGuess(); }
};

-TEST_CASE("multithreaded cluster finder", "[.files][.ClusterFinder]") {
-    auto fpath = "/mnt/sls_det_storage/matterhorn_data/aare_test_data/"
-                 "Moench03new/cu_half_speed_master_4.json";
+TEST_CASE("multithreaded cluster finder", "[.with-data]") {
+    auto fpath =
+        test_data_path() / "raw/moench03/cu_half_speed_master_4.json";
+
+    REQUIRE(std::filesystem::exists(fpath));

    File file(fpath);

@@ -79,7 +82,8 @@ TEST_CASE("multithreaded cluster finder", "[.files][.ClusterFinder]") {
    CHECK(cf.m_input_queues_are_empty() == true);

    for (size_t i = 0; i < n_frames_pd; ++i) {
-        cf.find_clusters(file.read_frame().view<uint16_t>());
+        auto frame = file.read_frame();
+        cf.find_clusters(frame.view<uint16_t>());
    }

    cf.stop();
@@ -7,9 +7,21 @@

using aare::Cluster;
using aare::ClusterVector;
+using C1 = Cluster<int32_t, 2, 2>;
+
+
+TEST_CASE("A newly created ClusterVector is empty") {
+    ClusterVector<C1> cv(4);
+    REQUIRE(cv.empty());
+}
+
+TEST_CASE("After pushing back one element the ClusterVector is not empty") {
+    ClusterVector<C1> cv(4);
+    cv.push_back(C1{1, 2, {3, 4}});
+    REQUIRE(!cv.empty());
+}

TEST_CASE("item_size return the size of the cluster stored") {
-    using C1 = Cluster<int32_t, 2, 2>;
    ClusterVector<C1> cv(4);
    CHECK(cv.item_size() == sizeof(C1));

@@ -43,8 +55,7 @@ TEST_CASE("item_size return the size of the cluster stored") {
    CHECK(cv7.item_size() == sizeof(C7));
}

-TEST_CASE("ClusterVector 2x2 int32_t capacity 4, push back then read",
-          "[.ClusterVector]") {
+TEST_CASE("ClusterVector 2x2 int32_t capacity 4, push back then read") {

    ClusterVector<Cluster<int32_t, 2, 2>> cv(4);
    REQUIRE(cv.capacity() == 4);
@@ -70,7 +81,7 @@ TEST_CASE("ClusterVector 2x2 int32_t capacity 4, push back then read",
    }
}

-TEST_CASE("Summing 3x1 clusters of int64", "[.ClusterVector]") {
+TEST_CASE("Summing 3x1 clusters of int64") {
    ClusterVector<Cluster<int32_t, 3, 1>> cv(2);
    REQUIRE(cv.capacity() == 2);
    REQUIRE(cv.size() == 0);
@@ -102,7 +113,7 @@ TEST_CASE("Summing 3x1 clusters of int64", "[.ClusterVector]") {
    */
}

-TEST_CASE("Storing floats", "[.ClusterVector]") {
+TEST_CASE("Storing floats") {
    ClusterVector<Cluster<float, 2, 4>> cv(10);
    REQUIRE(cv.capacity() == 10);
    REQUIRE(cv.size() == 0);
@@ -129,7 +140,7 @@ TEST_CASE("Storing floats", "[.ClusterVector]") {
    */
}

-TEST_CASE("Push back more than initial capacity", "[.ClusterVector]") {
+TEST_CASE("Push back more than initial capacity") {

    ClusterVector<Cluster<int32_t, 2, 2>> cv(2);
    auto initial_data = cv.data();
@@ -162,8 +173,7 @@ TEST_CASE("Push back more than initial capacity", "[.ClusterVector]") {
    REQUIRE(initial_data != cv.data());
}

-TEST_CASE("Concatenate two cluster vectors where the first has enough capacity",
-          "[.ClusterVector]") {
+TEST_CASE("Concatenate two cluster vectors where the first has enough capacity") {
    ClusterVector<Cluster<int32_t, 2, 2>> cv1(12);
    Cluster<int32_t, 2, 2> c1 = {1, 2, {3, 4, 5, 6}};
    cv1.push_back(c1);
@@ -192,8 +202,7 @@ TEST_CASE("Concatenate two cluster vectors where the first has enough capacity",
    REQUIRE(ptr[3].y == 17);
}

-TEST_CASE("Concatenate two cluster vectors where we need to allocate",
-          "[.ClusterVector]") {
+TEST_CASE("Concatenate two cluster vectors where we need to allocate") {
    ClusterVector<Cluster<int32_t, 2, 2>> cv1(2);
    Cluster<int32_t, 2, 2> c1 = {1, 2, {3, 4, 5, 6}};
    cv1.push_back(c1);
@@ -229,7 +238,7 @@ struct ClusterTestData {
    std::vector<int64_t> index_map_y;
};

-TEST_CASE("Gain Map Calculation Index Map", "[.ClusterVector][.gain_map]") {
+TEST_CASE("Gain Map Calculation Index Map") {

    auto clustertestdata = GENERATE(
        ClusterTestData{3,
151	src/DetectorGeometry.cpp	Normal file
@@ -0,0 +1,151 @@

#include "aare/DetectorGeometry.hpp"
#include "fmt/core.h"
#include <iostream>
#include <vector>

namespace aare {

DetectorGeometry::DetectorGeometry(const xy &geometry,
                                   const ssize_t module_pixels_x,
                                   const ssize_t module_pixels_y,
                                   const xy udp_interfaces_per_module,
                                   const bool quad) {

    size_t num_modules = geometry.col * geometry.row;
    module_geometries.reserve(num_modules);
    for (size_t col = 0; col < geometry.col;
         col += udp_interfaces_per_module.col)
        for (size_t row = 0; row < geometry.row;
             row += udp_interfaces_per_module.row) {
            for (size_t port_row = 0; port_row < udp_interfaces_per_module.row;
                 ++port_row)
                for (size_t port_col = 0;
                     port_col < udp_interfaces_per_module.col; ++port_col) {
                    ModuleGeometry g;
                    g.row_index =
                        quad ? (row + port_row + 1) % 2 : (row + port_row);
                    g.col_index = col + port_col;
                    g.origin_x = g.col_index * module_pixels_x;
                    g.origin_y = g.row_index * module_pixels_y;

                    g.width = module_pixels_x;
                    g.height = module_pixels_y;
                    module_geometries.push_back(g);
                }
        }

    m_pixels_y = (geometry.row * module_pixels_y);
    m_pixels_x = (geometry.col * module_pixels_x);
    m_modules_x = geometry.col;
    m_modules_y = geometry.row;
    m_pixels_y += static_cast<size_t>((geometry.row - 1) * cfg.module_gap_row);

    modules_in_roi.resize(num_modules);
    std::iota(modules_in_roi.begin(), modules_in_roi.end(), 0);
}

size_t DetectorGeometry::n_modules() const { return m_modules_x * m_modules_y; }

size_t DetectorGeometry::n_modules_in_roi() const {
    return modules_in_roi.size();
};

size_t DetectorGeometry::pixels_x() const { return m_pixels_x; }
size_t DetectorGeometry::pixels_y() const { return m_pixels_y; }

size_t DetectorGeometry::modules_x() const { return m_modules_x; };
size_t DetectorGeometry::modules_y() const { return m_modules_y; };

const std::vector<ssize_t> &DetectorGeometry::get_modules_in_roi() const {
    return modules_in_roi;
}

ssize_t DetectorGeometry::get_modules_in_roi(const size_t index) const {
    return modules_in_roi[index];
}

const std::vector<ModuleGeometry> &
DetectorGeometry::get_module_geometries() const {
    return module_geometries;
}

const ModuleGeometry &
DetectorGeometry::get_module_geometries(const size_t index) const {
    return module_geometries[index];
}

void DetectorGeometry::update_geometry_with_roi(ROI roi) {
#ifdef AARE_VERBOSE
    fmt::println("update_geometry_with_roi() called with ROI: {} {} {} {}",
                 roi.xmin, roi.xmax, roi.ymin, roi.ymax);
    fmt::println("Geometry: {} {} {} {} {} {}", m_modules_x, m_modules_y,
                 m_pixels_x, m_pixels_y, cfg.module_gap_row,
                 cfg.module_gap_col);

#endif

    modules_in_roi.clear();
    modules_in_roi.reserve(m_modules_x * m_modules_y);
    int pos_y = 0;
    int pos_y_increment = 0;
    for (size_t row = 0; row < m_modules_y; row++) {
        int pos_x = 0;
        for (size_t col = 0; col < m_modules_x; col++) {
            auto &m = module_geometries[row * m_modules_x + col];

            auto original_height = m.height;
            auto original_width = m.width;

            // module is to the left of the roi
            if (m.origin_x + m.width < roi.xmin) {
                m.width = 0;

                // roi is in module
            } else {
                // here we only arrive when the roi is in or to the left of
                // the module
                if (roi.xmin > m.origin_x) {
                    m.width -= roi.xmin - m.origin_x;
                }
                if (roi.xmax < m.origin_x + original_width) {
                    m.width -= m.origin_x + original_width - roi.xmax;
                }
                m.origin_x = pos_x;
                pos_x += m.width;
            }

            if (m.origin_y + m.height < roi.ymin) {
                m.height = 0;
            } else {
                if ((roi.ymin > m.origin_y) &&
                    (roi.ymin < m.origin_y + m.height)) {
                    m.height -= roi.ymin - m.origin_y;
                }
                if (roi.ymax < m.origin_y + original_height) {
                    m.height -= m.origin_y + original_height - roi.ymax;
                }
                m.origin_y = pos_y;
                pos_y_increment = m.height;
            }

            if (m.height != 0 && m.width != 0) {
                modules_in_roi.push_back(row * m_modules_x + col);
            }

#ifdef AARE_VERBOSE
            fmt::println("Module {} {} {} {}", m.origin_x, m.origin_y, m.width,
                         m.height);
#endif
        }
        // increment pos_y
        pos_y += pos_y_increment;
    }

    // m_rows = roi.height();
    // m_cols = roi.width();
    m_pixels_x = roi.width();
    m_pixels_y = roi.height();
}

} // namespace aare
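A minimal usage sketch of the new DetectorGeometry, mirroring the tests in src/DetectorGeometry.test.cpp below. The include of aare/RawMasterFile.hpp for ROI follows that test file, and relying on defaulted udp_interfaces_per_module/quad arguments follows its three-argument constructor calls.

#include "aare/DetectorGeometry.hpp"
#include "aare/RawMasterFile.hpp" //needed for ROI
#include <fmt/core.h>

int main() {
    // One 1024x512 module.
    aare::DetectorGeometry geo(aare::xy{1, 1}, 1024, 512);

    // Restrict it to a 100x50 region of interest.
    aare::ROI roi;
    roi.xmin = 100;
    roi.xmax = 200;
    roi.ymin = 150;
    roi.ymax = 200;
    geo.update_geometry_with_roi(roi);

    // The geometry now reports the ROI size and the modules that still
    // intersect it (here the single module, cropped to 100x50).
    fmt::print("{} x {} pixels, {} module(s) in roi\n", geo.pixels_x(),
               geo.pixels_y(), geo.n_modules_in_roi());
}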
194	src/DetectorGeometry.test.cpp	Normal file
@@ -0,0 +1,194 @@
#include "aare/File.hpp"
#include "aare/RawFile.hpp"
#include "aare/RawMasterFile.hpp" //needed for ROI

#include <catch2/catch_test_macros.hpp>
#include <filesystem>

#include "aare/DetectorGeometry.hpp"
#include "test_config.hpp"

TEST_CASE("Simple ROIs on one module") {
    // DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI
    // roi)
    aare::DetectorGeometry geo(aare::xy{1, 1}, 1024, 512);

    REQUIRE(geo.get_module_geometries(0).origin_x == 0);
    REQUIRE(geo.get_module_geometries(0).origin_y == 0);
    REQUIRE(geo.get_module_geometries(0).width == 1024);
    REQUIRE(geo.get_module_geometries(0).height == 512);

    SECTION("ROI is the whole module") {
        aare::ROI roi;
        roi.xmin = 0;
        roi.xmax = 1024;
        roi.ymin = 0;
        roi.ymax = 512;
        geo.update_geometry_with_roi(roi);

        REQUIRE(geo.pixels_x() == 1024);
        REQUIRE(geo.pixels_y() == 512);
        REQUIRE(geo.modules_x() == 1);
        REQUIRE(geo.modules_y() == 1);
        REQUIRE(geo.get_module_geometries(0).height == 512);
        REQUIRE(geo.get_module_geometries(0).width == 1024);
    }
    SECTION("ROI is the top left corner of the module") {
        aare::ROI roi;
        roi.xmin = 100;
        roi.xmax = 200;
        roi.ymin = 150;
        roi.ymax = 200;
        geo.update_geometry_with_roi(roi);

        REQUIRE(geo.pixels_x() == 100);
        REQUIRE(geo.pixels_y() == 50);
        REQUIRE(geo.modules_x() == 1);
        REQUIRE(geo.modules_y() == 1);
        REQUIRE(geo.get_module_geometries(0).height == 50);
        REQUIRE(geo.get_module_geometries(0).width == 100);
    }

    SECTION("ROI is a small square") {
        aare::ROI roi;
        roi.xmin = 1000;
        roi.xmax = 1010;
        roi.ymin = 500;
        roi.ymax = 510;
        geo.update_geometry_with_roi(roi);

        REQUIRE(geo.pixels_x() == 10);
        REQUIRE(geo.pixels_y() == 10);
        REQUIRE(geo.modules_x() == 1);
        REQUIRE(geo.modules_y() == 1);
        REQUIRE(geo.get_module_geometries(0).height == 10);
        REQUIRE(geo.get_module_geometries(0).width == 10);
    }
    SECTION("ROI is a few columns") {
        aare::ROI roi;
        roi.xmin = 750;
        roi.xmax = 800;
        roi.ymin = 0;
        roi.ymax = 512;
        geo.update_geometry_with_roi(roi);

        REQUIRE(geo.pixels_x() == 50);
        REQUIRE(geo.pixels_y() == 512);
        REQUIRE(geo.modules_x() == 1);
        REQUIRE(geo.modules_y() == 1);
        REQUIRE(geo.get_module_geometries(0).height == 512);
        REQUIRE(geo.get_module_geometries(0).width == 50);
    }
}

TEST_CASE("Two modules side by side") {
    // DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI
    // roi)
    aare::DetectorGeometry geo(aare::xy{1, 2}, 1024, 512);

    REQUIRE(geo.get_module_geometries(0).origin_x == 0);
    REQUIRE(geo.get_module_geometries(0).origin_y == 0);
    REQUIRE(geo.get_module_geometries(0).width == 1024);
    REQUIRE(geo.get_module_geometries(0).height == 512);
    REQUIRE(geo.get_module_geometries(1).origin_x == 1024);
    REQUIRE(geo.get_module_geometries(1).origin_y == 0);

    SECTION("ROI is the whole image") {
        aare::ROI roi;
        roi.xmin = 0;
        roi.xmax = 2048;
        roi.ymin = 0;
        roi.ymax = 512;
        geo.update_geometry_with_roi(roi);

        REQUIRE(geo.pixels_x() == 2048);
        REQUIRE(geo.pixels_y() == 512);
        REQUIRE(geo.modules_x() == 2);
        REQUIRE(geo.modules_y() == 1);
    }
    SECTION("rectangle on both modules") {
        aare::ROI roi;
        roi.xmin = 800;
        roi.xmax = 1300;
        roi.ymin = 200;
        roi.ymax = 499;
        geo.update_geometry_with_roi(roi);

        REQUIRE(geo.pixels_x() == 500);
        REQUIRE(geo.pixels_y() == 299);
        REQUIRE(geo.modules_x() == 2);
        REQUIRE(geo.modules_y() == 1);
        REQUIRE(geo.get_module_geometries(0).height == 299);
        REQUIRE(geo.get_module_geometries(0).width == 224);
        REQUIRE(geo.get_module_geometries(1).height == 299);
        REQUIRE(geo.get_module_geometries(1).width == 276);
    }
}

TEST_CASE("Three modules side by side") {
    // DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI
    // roi)
    aare::DetectorGeometry geo(aare::xy{1, 3}, 1024, 512);
    aare::ROI roi;
    roi.xmin = 700;
    roi.xmax = 2500;
    roi.ymin = 0;
    roi.ymax = 123;

    REQUIRE(geo.get_module_geometries(0).origin_x == 0);
    REQUIRE(geo.get_module_geometries(0).origin_y == 0);
    REQUIRE(geo.get_module_geometries(0).width == 1024);
    REQUIRE(geo.get_module_geometries(0).height == 512);
    REQUIRE(geo.get_module_geometries(1).origin_x == 1024);
    REQUIRE(geo.get_module_geometries(2).origin_x == 2048);

    geo.update_geometry_with_roi(roi);

    REQUIRE(geo.pixels_x() == 1800);
    REQUIRE(geo.pixels_y() == 123);
    REQUIRE(geo.modules_x() == 3);
    REQUIRE(geo.modules_y() == 1);
    REQUIRE(geo.get_module_geometries(0).height == 123);
    REQUIRE(geo.get_module_geometries(0).width == 324);
    REQUIRE(geo.get_module_geometries(1).height == 123);
    REQUIRE(geo.get_module_geometries(1).width == 1024);
    REQUIRE(geo.get_module_geometries(2).height == 123);
    REQUIRE(geo.get_module_geometries(2).width == 452);
}

TEST_CASE("Four modules as a square") {
    // DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI
    // roi)
    aare::DetectorGeometry geo(aare::xy{2, 2}, 1024, 512, aare::xy{1, 2});
    aare::ROI roi;
    roi.xmin = 500;
    roi.xmax = 2000;
    roi.ymin = 500;
    roi.ymax = 600;

    REQUIRE(geo.get_module_geometries(0).origin_x == 0);
    REQUIRE(geo.get_module_geometries(0).origin_y == 0);
    REQUIRE(geo.get_module_geometries(0).width == 1024);
    REQUIRE(geo.get_module_geometries(0).height == 512);
    REQUIRE(geo.get_module_geometries(1).origin_x == 1024);
    REQUIRE(geo.get_module_geometries(1).origin_y == 0);
    REQUIRE(geo.get_module_geometries(2).origin_x == 0);
    REQUIRE(geo.get_module_geometries(2).origin_y == 512);
    REQUIRE(geo.get_module_geometries(3).origin_x == 1024);
    REQUIRE(geo.get_module_geometries(3).origin_y == 512);

    geo.update_geometry_with_roi(roi);

    REQUIRE(geo.pixels_x() == 1500);
    REQUIRE(geo.pixels_y() == 100);
    REQUIRE(geo.modules_x() == 2);
    REQUIRE(geo.modules_y() == 2);
    REQUIRE(geo.get_module_geometries(0).height == 12);
    REQUIRE(geo.get_module_geometries(0).width == 524);
    REQUIRE(geo.get_module_geometries(1).height == 12);
    REQUIRE(geo.get_module_geometries(1).width == 976);
    REQUIRE(geo.get_module_geometries(2).height == 88);
    REQUIRE(geo.get_module_geometries(2).width == 524);
    REQUIRE(geo.get_module_geometries(3).height == 88);
    REQUIRE(geo.get_module_geometries(3).width == 976);
}
@@ -5,7 +5,7 @@

using aare::JungfrauDataFile;
using aare::JungfrauDataHeader;
-TEST_CASE("Open a Jungfrau data file", "[.files]") {
+TEST_CASE("Open a Jungfrau data file", "[.with-data]") {
    // we know we have 4 files with 7, 7, 7, and 3 frames
    // firs frame number if 1 and the bunch id is frame_number**2
    // so we can check the header
@@ -37,7 +37,7 @@ TEST_CASE("Open a Jungfrau data file", "[.files]") {
    }
}

-TEST_CASE("Seek in a JungfrauDataFile", "[.files]") {
+TEST_CASE("Seek in a JungfrauDataFile", "[.with-data]") {
    auto fpath = test_data_path() / "dat" / "AldoJF65k_000000.dat";
    REQUIRE(std::filesystem::exists(fpath));

@@ -70,7 +70,7 @@ TEST_CASE("Seek in a JungfrauDataFile", "[.files]") {
    REQUIRE_THROWS(f.seek(86356)); // out of range
}

-TEST_CASE("Open a Jungfrau data file with non zero file index", "[.files]") {
+TEST_CASE("Open a Jungfrau data file with non zero file index", "[.with-data]") {

    auto fpath = test_data_path() / "dat" / "AldoJF65k_000003.dat";
    REQUIRE(std::filesystem::exists(fpath));
@@ -94,7 +94,7 @@ TEST_CASE("Open a Jungfrau data file with non zero file index", "[.files]") {
    REQUIRE(f.current_file().stem() == "AldoJF65k_000003");
}

-TEST_CASE("Read into throws if size doesn't match", "[.files]") {
+TEST_CASE("Read into throws if size doesn't match", "[.with-data]") {
    auto fpath = test_data_path() / "dat" / "AldoJF65k_000000.dat";
    REQUIRE(std::filesystem::exists(fpath));

@@ -25,13 +25,13 @@ TEST_CASE("Construct from an NDView") {
    REQUIRE(image.data() != view.data());

    for (uint32_t i = 0; i < image.size(); ++i) {
-        REQUIRE(image(i) == view(i));
+        REQUIRE(image[i] == view[i]);
    }

    // Changing the image doesn't change the view
    image = 43;
    for (uint32_t i = 0; i < image.size(); ++i) {
-        REQUIRE(image(i) != view(i));
+        REQUIRE(image[i] != view[i]);
    }
}

@@ -428,3 +428,29 @@ TEST_CASE("Construct an NDArray from an std::array") {
        REQUIRE(a(i) == b[i]);
    }
}
+
+
+
+TEST_CASE("Move construct from an array with Ndim + 1") {
+    NDArray<int, 3> a({{1,2,2}}, 0);
+    a(0, 0, 0) = 1;
+    a(0, 0, 1) = 2;
+    a(0, 1, 0) = 3;
+    a(0, 1, 1) = 4;
+
+
+    NDArray<int, 2> b(std::move(a));
+    REQUIRE(b.shape() == Shape<2>{2,2});
+    REQUIRE(b.size() == 4);
+    REQUIRE(b(0, 0) == 1);
+    REQUIRE(b(0, 1) == 2);
+    REQUIRE(b(1, 0) == 3);
+    REQUIRE(b(1, 1) == 4);
+
+}
+
+TEST_CASE("Move construct from an array with Ndim + 1 throws on size mismatch") {
+    NDArray<int, 3> a({{2,2,2}}, 0);
+    REQUIRE_THROWS(NDArray<int, 2>(std::move(a)));
+}
+
@@ -21,6 +21,57 @@ TEST_CASE("Element reference 1D") {
    }
}

+
+TEST_CASE("Assign elements through () and []") {
+    std::vector<int> vec;
+    for (int i = 0; i != 10; ++i) {
+        vec.push_back(i);
+    }
+    NDView<int, 1> data(vec.data(), Shape<1>{10});
+    REQUIRE(vec.size() == static_cast<size_t>(data.size()));
+
+    data[3] = 187;
+    data(4) = 512;
+
+
+    REQUIRE(data(0) == 0);
+    REQUIRE(data[0] == 0);
+    REQUIRE(data(1) == 1);
+    REQUIRE(data[1] == 1);
+    REQUIRE(data(2) == 2);
+    REQUIRE(data[2] == 2);
+    REQUIRE(data(3) == 187);
+    REQUIRE(data[3] == 187);
+    REQUIRE(data(4) == 512);
+    REQUIRE(data[4] == 512);
+    REQUIRE(data(5) == 5);
+    REQUIRE(data[5] == 5);
+    REQUIRE(data(6) == 6);
+    REQUIRE(data[6] == 6);
+    REQUIRE(data(7) == 7);
+    REQUIRE(data[7] == 7);
+    REQUIRE(data(8) == 8);
+    REQUIRE(data[8] == 8);
+    REQUIRE(data(9) == 9);
+    REQUIRE(data[9] == 9);
+
+
+}
+
+TEST_CASE("Element reference 1D with a const NDView") {
+    std::vector<int> vec;
+    for (int i = 0; i != 10; ++i) {
+        vec.push_back(i);
+    }
+    const NDView<int, 1> data(vec.data(), Shape<1>{10});
+    REQUIRE(vec.size() == static_cast<size_t>(data.size()));
+    for (int i = 0; i != 10; ++i) {
+        REQUIRE(data(i) == vec[i]);
+        REQUIRE(data[i] == vec[i]);
+    }
+}
+
+
TEST_CASE("Element reference 2D") {
    std::vector<int> vec(12);
    std::iota(vec.begin(), vec.end(), 0);
@@ -56,7 +107,7 @@ TEST_CASE("Element reference 3D") {
    }
}

-TEST_CASE("Plus and miuns with single value") {
+TEST_CASE("Plus and minus with single value") {
    std::vector<int> vec(12);
    std::iota(vec.begin(), vec.end(), 0);
    NDView<int, 2> data(vec.data(), Shape<2>{3, 4});
@@ -137,16 +188,9 @@ TEST_CASE("iterators") {
    }
}

-// TEST_CASE("shape from vector") {
-//     std::vector<int> vec;
-//     for (int i = 0; i != 12; ++i) {
-//         vec.push_back(i);
-//     }
-//     std::vector<ssize_t> shape{3, 4};
-//     NDView<int, 2> data(vec.data(), shape);
-// }

-TEST_CASE("divide with another span") {
+
+TEST_CASE("divide with another NDView") {
    std::vector<int> vec0{9, 12, 3};
    std::vector<int> vec1{3, 2, 1};
    std::vector<int> result{3, 6, 3};
@@ -183,6 +227,30 @@ TEST_CASE("compare two views") {
    REQUIRE((view1 == view2));
}

+TEST_CASE("Compare two views with different size"){
+    std::vector<int> vec1(12);
+    std::iota(vec1.begin(), vec1.end(), 0);
+    NDView<int, 2> view1(vec1.data(), Shape<2>{3, 4});
+
+    std::vector<int> vec2(8);
+    std::iota(vec2.begin(), vec2.end(), 0);
+    NDView<int, 2> view2(vec2.data(), Shape<2>{2, 4});
+
+    REQUIRE_FALSE(view1 == view2);
+}
+
+TEST_CASE("Compare two views with same size but different shape"){
+    std::vector<int> vec1(12);
+    std::iota(vec1.begin(), vec1.end(), 0);
+    NDView<int, 2> view1(vec1.data(), Shape<2>{3, 4});
+
+    std::vector<int> vec2(12);
+    std::iota(vec2.begin(), vec2.end(), 0);
+    NDView<int, 2> view2(vec2.data(), Shape<2>{2, 6});
+
+    REQUIRE_FALSE(view1 == view2);
+}
+
TEST_CASE("Create a view over a vector") {
    std::vector<int> vec(12);
    std::iota(vec.begin(), vec.end(), 0);
@@ -6,7 +6,7 @@

using aare::Dtype;
using aare::NumpyFile;
-TEST_CASE("Read a 1D numpy file with int32 data type", "[.integration]") {
+TEST_CASE("Read a 1D numpy file with int32 data type", "[.with-data]") {

    auto fpath = test_data_path() / "numpy" / "test_1d_int32.npy";
    REQUIRE(std::filesystem::exists(fpath));
@@ -24,7 +24,7 @@ TEST_CASE("Read a 1D numpy file with int32 data type", "[.integration]") {
    }
}

-TEST_CASE("Read a 3D numpy file with np.double data type", "[.integration]") {
+TEST_CASE("Read a 3D numpy file with np.double data type", "[.with-data]") {

    auto fpath = test_data_path() / "numpy" / "test_3d_double.npy";
    REQUIRE(std::filesystem::exists(fpath));
@@ -104,22 +104,34 @@ NDArray<ssize_t, 2> GenerateEigerFlipRowsPixelMap() {
}

NDArray<ssize_t, 2> GenerateMH02SingleCounterPixelMap() {
+    // This is the pixel map for a single counter Matterhorn02, i.e. 48x48 pixels.
+    // Data is read from two transceivers in blocks of 4 pixels.
    NDArray<ssize_t, 2> order_map({48, 48});
+    size_t offset = 0;
+    size_t nSamples = 4;
    for (int row = 0; row < 48; row++) {
-        for (int col = 0; col < 48; col++) {
-            order_map(row, col) = row * 48 + col;
+        for (int col = 0; col < 24; col++) {
+            for (int iTrans = 0; iTrans < 2; iTrans++) {
+                order_map(row, iTrans * 24 + col) = offset + nSamples * iTrans;
+            }
+            offset += 1;
+            if ((col + 1) % nSamples == 0) {
+                offset += nSamples;
+            }
        }
    }
    return order_map;
}

NDArray<ssize_t, 3> GenerateMH02FourCounterPixelMap() {
+    auto single_counter_map = GenerateMH02SingleCounterPixelMap();
    NDArray<ssize_t, 3> order_map({4, 48, 48});
    for (int counter = 0; counter < 4; counter++) {
        for (int row = 0; row < 48; row++) {
            for (int col = 0; col < 48; col++) {
                order_map(counter, row, col) =
-                    counter * 48 * 48 + row * 48 + col;
+                    single_counter_map(row, col) +
+                    counter * 48 * 48;
            }
        }
    }
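A standalone sketch of the single-counter ordering described in the comment added above: the raw stream alternates between the two transceivers in blocks of 4 samples, so columns 0-3 of a row take raw indices 0-3 while columns 24-27 take 4-7, and so on across the row. It simply re-runs the loop for one 48-pixel row, so only the code shown above is assumed.

#include <cstdio>
#include <vector>

int main() {
    std::vector<long> row(48, 0);
    long offset = 0;
    const int nSamples = 4;
    for (int col = 0; col < 24; col++) {
        for (int iTrans = 0; iTrans < 2; iTrans++) {
            row[iTrans * 24 + col] = offset + nSamples * iTrans;
        }
        offset += 1;
        if ((col + 1) % nSamples == 0) {
            offset += nSamples;
        }
    }
    // Prints col 0->0 ... col 3->3, col 24->4 ... col 27->7, col 4->8 ...
    for (int col = 0; col < 48; col++) {
        std::printf("col %2d -> raw index %2ld\n", col, row[col]);
    }
}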
130	src/RawFile.cpp
@@ -1,8 +1,8 @@
#include "aare/RawFile.hpp"
+#include "aare/DetectorGeometry.hpp"
#include "aare/PixelMap.hpp"
#include "aare/algorithm.hpp"
#include "aare/defs.hpp"
-#include "aare/geo_helpers.hpp"
#include "aare/logger.hpp"

#include <fmt/format.h>
@@ -13,13 +13,14 @@ using json = nlohmann::json;
namespace aare {

RawFile::RawFile(const std::filesystem::path &fname, const std::string &mode)
-    : m_master(fname) {
+    : m_master(fname),
+      m_geometry(m_master.geometry(), m_master.pixels_x(), m_master.pixels_y(),
+                 m_master.udp_interfaces_per_module(), m_master.quad()) {
    m_mode = mode;

    if (mode == "r") {
-        find_geometry();
        if (m_master.roi()) {
-            m_geometry =
-                update_geometry_with_roi(m_geometry, m_master.roi().value());
+            m_geometry.update_geometry_with_roi(m_master.roi().value());
        }
        open_subfiles();
    } else {
@@ -61,26 +62,25 @@ void RawFile::read_into(std::byte *image_buf, size_t n_frames,
        this->get_frame_into(m_current_frame++, image_buf, header);
        image_buf += bytes_per_frame();
        if (header)
-            header += n_modules();
+            header += m_geometry.n_modules_in_roi();
    }
}

-size_t RawFile::n_modules() const { return m_master.n_modules(); }
-
size_t RawFile::bytes_per_frame() {
-    return m_geometry.pixels_x * m_geometry.pixels_y * m_master.bitdepth() /
+    return m_geometry.pixels_x() * m_geometry.pixels_y() * m_master.bitdepth() /
           bits_per_byte;
}
size_t RawFile::pixels_per_frame() {
    // return m_rows * m_cols;
-    return m_geometry.pixels_x * m_geometry.pixels_y;
+    return m_geometry.pixels_x() * m_geometry.pixels_y();
}

DetectorType RawFile::detector_type() const { return m_master.detector_type(); }

void RawFile::seek(size_t frame_index) {
    // check if the frame number is greater than the total frames
-    // if frame_number == total_frames, then the next read will throw an error
+    // if frame_number == total_frames, then the next read will throw an
+    // error
    if (frame_index > total_frames()) {
        throw std::runtime_error(
            fmt::format("frame number {} is greater than total frames {}",
@@ -92,15 +92,23 @@ void RawFile::seek(size_t frame_index) {
size_t RawFile::tell() { return m_current_frame; }

size_t RawFile::total_frames() const { return m_master.frames_in_file(); }
-size_t RawFile::rows() const { return m_geometry.pixels_y; }
-size_t RawFile::cols() const { return m_geometry.pixels_x; }
+size_t RawFile::rows() const { return m_geometry.pixels_y(); }
+size_t RawFile::cols() const { return m_geometry.pixels_x(); }
size_t RawFile::bitdepth() const { return m_master.bitdepth(); }
-xy RawFile::geometry() { return m_master.geometry(); }
+xy RawFile::geometry() const {
+    return xy{static_cast<uint32_t>(m_geometry.modules_y()),
+              static_cast<uint32_t>(m_geometry.modules_x())};
+}
+
+size_t RawFile::n_modules() const { return m_geometry.n_modules(); };
+size_t RawFile::n_modules_in_roi() const {
+    return m_geometry.n_modules_in_roi();
+};

void RawFile::open_subfiles() {
    if (m_mode == "r")
-        for (size_t i = 0; i != n_modules(); ++i) {
-            auto pos = m_geometry.module_pixel_0[i];
+        for (size_t i : m_geometry.get_modules_in_roi()) {
+            auto pos = m_geometry.get_module_geometries(i);
            m_subfiles.emplace_back(std::make_unique<RawSubFile>(
                m_master.data_fname(i, 0), m_master.detector_type(), pos.height,
                pos.width, m_master.bitdepth(), pos.row_index, pos.col_index));
@@ -130,45 +138,8 @@ DetectorHeader RawFile::read_header(const std::filesystem::path &fname) {

RawMasterFile RawFile::master() const { return m_master; }

-/**
- * @brief Find the geometry of the detector by opening all the subfiles and
- * reading the headers.
- */
-void RawFile::find_geometry() {
-
-    // Hold the maximal row and column number found
-    // Later used for calculating the total number of rows and columns
-    uint16_t r{};
-    uint16_t c{};
-
-    for (size_t i = 0; i < n_modules(); i++) {
-        auto h = read_header(m_master.data_fname(i, 0));
-        r = std::max(r, h.row);
-        c = std::max(c, h.column);
-        // positions.push_back({h.row, h.column});
-
-        ModuleGeometry g;
-        g.origin_x = h.column * m_master.pixels_x();
-        g.origin_y = h.row * m_master.pixels_y();
-        g.row_index = h.row;
-        g.col_index = h.column;
-        g.width = m_master.pixels_x();
-        g.height = m_master.pixels_y();
-        m_geometry.module_pixel_0.push_back(g);
-    }
-
-    r++;
-    c++;
-
-    m_geometry.pixels_y = (r * m_master.pixels_y());
-    m_geometry.pixels_x = (c * m_master.pixels_x());
-    m_geometry.modules_x = c;
-    m_geometry.modules_y = r;
-    m_geometry.pixels_y += static_cast<size_t>((r - 1) * cfg.module_gap_row);
-}
-
Frame RawFile::get_frame(size_t frame_index) {
-    auto f = Frame(m_geometry.pixels_y, m_geometry.pixels_x,
+    auto f = Frame(m_geometry.pixels_y(), m_geometry.pixels_x(),
                   Dtype::from_bitdepth(m_master.bitdepth()));
    std::byte *frame_buffer = f.data();
    get_frame_into(frame_index, frame_buffer);
@@ -183,13 +154,15 @@ void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer,
    if (frame_index >= total_frames()) {
        throw std::runtime_error(LOCATION + "Frame number out of range");
    }
-    std::vector<size_t> frame_numbers(n_modules());
-    std::vector<size_t> frame_indices(n_modules(), frame_index);
+    std::vector<size_t> frame_numbers(m_geometry.n_modules_in_roi());
+    std::vector<size_t> frame_indices(m_geometry.n_modules_in_roi(),
+                                      frame_index);

    // sync the frame numbers

-    if (n_modules() != 1) { // if we have more than one module
-        for (size_t part_idx = 0; part_idx != n_modules(); ++part_idx) {
+    if (m_geometry.n_modules() != 1) { // if we have more than one module
+        for (size_t part_idx = 0; part_idx != m_geometry.n_modules_in_roi();
+             ++part_idx) {
            frame_numbers[part_idx] =
                m_subfiles[part_idx]->frame_number(frame_index);
        }
@@ -219,19 +192,32 @@ void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer,

    if (m_master.geometry().col == 1) {
        // get the part from each subfile and copy it to the frame
-        for (size_t part_idx = 0; part_idx != n_modules(); ++part_idx) {
+        for (size_t part_idx = 0; part_idx != m_geometry.n_modules_in_roi();
+             ++part_idx) {
            auto corrected_idx = frame_indices[part_idx];

            // This is where we start writing
-            auto offset = (m_geometry.module_pixel_0[part_idx].origin_y *
-                               m_geometry.pixels_x +
-                           m_geometry.module_pixel_0[part_idx].origin_x) *
+            auto offset = (m_geometry
+                                   .get_module_geometries(
+                                       m_geometry.get_modules_in_roi(part_idx))
+                                   .origin_y *
+                               m_geometry.pixels_x() +
+                           m_geometry
+                               .get_module_geometries(
+                                   m_geometry.get_modules_in_roi(part_idx))
+                               .origin_x) *
                          m_master.bitdepth() / 8;

-            if (m_geometry.module_pixel_0[part_idx].origin_x != 0)
-                throw std::runtime_error(LOCATION +
-                                         " Implementation error. x pos not 0.");
+            if (m_geometry
+                    .get_module_geometries(
+                        m_geometry.get_modules_in_roi(part_idx))
+                    .origin_x != 0)
+                throw std::runtime_error(
+                    LOCATION +
+                    " Implementation error. x pos not 0."); // TODO: origin
+                                                            // can still
+                                                            // change if roi
+                                                            // changes
            // TODO! What if the files don't match?
            m_subfiles[part_idx]->seek(corrected_idx);
            m_subfiles[part_idx]->read_into(frame_buffer + offset, header);
@@ -249,11 +235,13 @@ void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer,

        auto *part_buffer = new std::byte[bytes_per_part];

-        // TODO! if we have many submodules we should reorder them on the module
-        // level
+        // TODO! if we have many submodules we should reorder them on the
+        // module level

-        for (size_t part_idx = 0; part_idx != n_modules(); ++part_idx) {
-            auto pos = m_geometry.module_pixel_0[part_idx];
+        for (size_t part_idx = 0; part_idx != m_geometry.n_modules_in_roi();
+             ++part_idx) {
+            auto pos = m_geometry.get_module_geometries(
+                m_geometry.get_modules_in_roi(part_idx));
            auto corrected_idx = frame_indices[part_idx];

            m_subfiles[part_idx]->seek(corrected_idx);
@@ -266,7 +254,7 @@ void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer,

                auto irow = (pos.origin_y + cur_row);
                auto icol = pos.origin_x;
-                auto dest = (irow * this->m_geometry.pixels_x + icol);
+                auto dest = (irow * this->m_geometry.pixels_x() + icol);
                dest = dest * m_master.bitdepth() / 8;
                memcpy(frame_buffer + dest,
                       part_buffer +
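A sketch of what the refactor above enables: RawFile now builds a DetectorGeometry from the master file and, when the master file carries an ROI, shrinks it with update_geometry_with_roi(), so rows()/cols() and get_frame() describe the ROI rather than the full detector. The path below is a placeholder; the calls used are the ones visible in this diff.

#include "aare/RawFile.hpp"
#include <fmt/core.h>

int main() {
    // Placeholder path: any master file whose header defines an ROI.
    aare::RawFile f("/path/to/some_roi_master_0.json", "r");

    // With an ROI these come from the updated DetectorGeometry, i.e. the
    // cropped size and the subset of modules that intersect the ROI.
    fmt::print("{} x {} pixels, {} of {} modules in roi\n", f.cols(), f.rows(),
               f.n_modules_in_roi(), f.n_modules());

    auto frame = f.get_frame(0); // frame buffer sized rows() x cols()
    (void)frame;
}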
@@ -3,25 +3,29 @@
#include "aare/RawMasterFile.hpp" //needed for ROI

#include <catch2/catch_test_macros.hpp>
+#include <catch2/generators/catch_generators.hpp>
#include <filesystem>

#include "test_config.hpp"
+#include "test_macros.hpp"

using aare::File;
+using aare::RawFile;
+using namespace aare;

-TEST_CASE("Read number of frames from a jungfrau raw file", "[.integration]") {
+TEST_CASE("Read number of frames from a jungfrau raw file", "[.with-data]") {

    auto fpath =
-        test_data_path() / "jungfrau" / "jungfrau_single_master_0.json";
+        test_data_path() / "raw/jungfrau/jungfrau_single_master_0.json";
    REQUIRE(std::filesystem::exists(fpath));

    File f(fpath, "r");
    REQUIRE(f.total_frames() == 10);
}

-TEST_CASE("Read frame numbers from a jungfrau raw file", "[.integration]") {
+TEST_CASE("Read frame numbers from a jungfrau raw file", "[.with-data]") {
    auto fpath =
-        test_data_path() / "jungfrau" / "jungfrau_single_master_0.json";
+        test_data_path() / "raw/jungfrau/jungfrau_single_master_0.json";
    REQUIRE(std::filesystem::exists(fpath));

    File f(fpath, "r");
@@ -36,9 +40,9 @@ TEST_CASE("Read frame numbers from a jungfrau raw file", "[.integration]") {
    }
}

-TEST_CASE("Read a frame number too high throws", "[.integration]") {
+TEST_CASE("Read a frame number too high throws", "[.with-data]") {
    auto fpath =
-        test_data_path() / "jungfrau" / "jungfrau_single_master_0.json";
+        test_data_path() / "raw/jungfrau/jungfrau_single_master_0.json";
    REQUIRE(std::filesystem::exists(fpath));

    File f(fpath, "r");
@@ -52,9 +56,10 @@ TEST_CASE("Read a frame number too high throws", "[.integration]") {
}

TEST_CASE("Read a frame numbers where the subfile is missing throws",
-          "[.integration]") {
-    auto fpath = test_data_path() / "jungfrau" /
+          "[.with-data]") {
+    auto fpath = test_data_path() / "raw/jungfrau" /
                 "jungfrau_missing_subfile_master_0.json";
+
    REQUIRE(std::filesystem::exists(fpath));

    File f(fpath, "r");
@@ -74,9 +79,9 @@ TEST_CASE("Read a frame numbers where the subfile is missing throws",
}

TEST_CASE("Read data from a jungfrau 500k single port raw file",
-          "[.integration]") {
+          "[.with-data]") {
    auto fpath =
-        test_data_path() / "jungfrau" / "jungfrau_single_master_0.json";
+        test_data_path() / "raw/jungfrau/jungfrau_single_master_0.json";
    REQUIRE(std::filesystem::exists(fpath));

    File f(fpath, "r");
@@ -93,8 +98,9 @@ TEST_CASE("Read data from a jungfrau 500k single port raw file",
    }
}

-TEST_CASE("Read frame numbers from a raw file", "[.integration]") {
-    auto fpath = test_data_path() / "eiger" / "eiger_500k_16bit_master_0.json";
+TEST_CASE("Read frame numbers from a raw file", "[.with-data]") {
+    auto fpath =
+        test_data_path() / "raw/eiger" / "eiger_500k_16bit_master_0.json";
    REQUIRE(std::filesystem::exists(fpath));

    // we know this file has 3 frames with frame numbers 14, 15, 16
@@ -106,7 +112,9 @@ TEST_CASE("Read frame numbers from a raw file", "[.integration]") {
    }
}

-TEST_CASE("Compare reading from a numpy file with a raw file", "[.files]") {
+TEST_CASE("Compare reading from a numpy file with a raw file", "[.with-data]") {
+
+    SECTION("jungfrau data") {
    auto fpath_raw =
        test_data_path() / "raw/jungfrau" / "jungfrau_single_master_0.json";
    REQUIRE(std::filesystem::exists(fpath_raw));
@@ -129,9 +137,47 @@ TEST_CASE("Compare reading from a numpy file with a raw file", "[.files]") {
    }
}

-TEST_CASE("Read multipart files", "[.integration]") {
+    SECTION("eiger quad data") {
+        auto fpath_raw =
+            test_data_path() / "raw/eiger_quad_data" /
+            "W13_vrpreampscan_m21C_300V_800eV_vthre2000_master_0.json";
+        REQUIRE(std::filesystem::exists(fpath_raw));
+
+        auto fpath_npy = test_data_path() / "raw/eiger_quad_data" /
+                         "W13_vrpreampscan_m21C_300V_800eV_vthre2000.npy";
+        REQUIRE(std::filesystem::exists(fpath_npy));
+
+        File raw(fpath_raw, "r");
+        File npy(fpath_npy, "r");
+
+        raw.seek(20);
+        auto raw_frame = raw.read_frame();
+
+        auto npy_frame = npy.read_frame();
+        CHECK((raw_frame.view<uint32_t>() == npy_frame.view<uint32_t>()));
+    }
+    SECTION("eiger data") {
+        auto fpath_raw = test_data_path() / "raw/eiger" /
+                         "Lab6_20500eV_2deg_20240629_master_7.json";
+        REQUIRE(std::filesystem::exists(fpath_raw));
+
+        auto fpath_npy =
+            test_data_path() / "raw/eiger" / "Lab6_20500eV_2deg_20240629_7.npy";
+        REQUIRE(std::filesystem::exists(fpath_npy));
+
+        File raw(fpath_raw, "r");
+        File npy(fpath_npy, "r");
+
+        auto raw_frame = raw.read_frame();
+
+        auto npy_frame = npy.read_frame();
+        CHECK((raw_frame.view<uint32_t>() == npy_frame.view<uint32_t>()));
+    }
+}
+
+TEST_CASE("Read multipart files", "[.with-data]") {
    auto fpath =
-        test_data_path() / "jungfrau" / "jungfrau_double_master_0.json";
+        test_data_path() / "raw/jungfrau" / "jungfrau_double_master_0.json";
    REQUIRE(std::filesystem::exists(fpath));

    File f(fpath, "r");
@@ -160,9 +206,116 @@ TEST_CASE("Read multipart files", "[.integration]") {
    }
}

-TEST_CASE("Read file with unordered frames", "[.integration]") {
+struct TestParameters {
+    const std::string master_filename{};
+    const uint8_t num_ports{};
+    const size_t modules_x{};
+    const size_t modules_y{};
+    const size_t pixels_x{};
+    const size_t pixels_y{};
+    std::vector<ModuleGeometry> module_geometries{};
+};
+
+TEST_CASE("check find_geometry", "[.with-data]") {
+
+    auto test_parameters = GENERATE(
+        TestParameters{"raw/jungfrau_2modules_version6.1.2/run_master_0.raw", 2,
+                       1, 2, 1024, 1024,
+                       std::vector<ModuleGeometry>{
+                           ModuleGeometry{0, 0, 512, 1024, 0, 0},
+                           ModuleGeometry{0, 512, 512, 1024, 0, 1}}},
+        TestParameters{
+            "raw/eiger_1_module_version7.0.0/eiger_1mod_master_7.json", 4, 2, 2,
+            1024, 512,
+            std::vector<ModuleGeometry>{
+                ModuleGeometry{0, 0, 256, 512, 0, 0},
+                ModuleGeometry{512, 0, 256, 512, 0, 1},
+                ModuleGeometry{0, 256, 256, 512, 1, 0},
+                ModuleGeometry{512, 256, 256, 512, 1, 1}}},
+
+        TestParameters{"raw/jungfrau_2modules_2interfaces/run_master_0.json", 4,
+                       1, 4, 1024, 1024,
+                       std::vector<ModuleGeometry>{
+                           ModuleGeometry{0, 0, 256, 1024, 0, 0},
+                           ModuleGeometry{0, 256, 256, 1024, 1, 0},
+                           ModuleGeometry{0, 512, 256, 1024, 2, 0},
+                           ModuleGeometry{0, 768, 256, 1024, 3, 0}}},
+        TestParameters{
+            "raw/eiger_quad_data/"
+            "W13_vthreshscan_m21C_300V_800eV_vrpre3400_master_0.json",
+            2, 1, 2, 512, 512,
+            std::vector<ModuleGeometry>{ModuleGeometry{0, 256, 256, 512, 1, 0},
+                                        ModuleGeometry{0, 0, 256, 512, 0, 0}}});

+    auto fpath = test_data_path() / test_parameters.master_filename;
+
+    REQUIRE(std::filesystem::exists(fpath));
+
+    RawMasterFile master_file(fpath);
+
+    auto geometry = DetectorGeometry(
+        master_file.geometry(), master_file.pixels_x(), master_file.pixels_y(),
|
||||||
|
master_file.udp_interfaces_per_module(), master_file.quad());
|
||||||
|
|
||||||
|
CHECK(geometry.modules_x() == test_parameters.modules_x);
|
||||||
|
CHECK(geometry.modules_y() == test_parameters.modules_y);
|
||||||
|
CHECK(geometry.pixels_x() == test_parameters.pixels_x);
|
||||||
|
CHECK(geometry.pixels_y() == test_parameters.pixels_y);
|
||||||
|
|
||||||
|
REQUIRE(geometry.get_module_geometries().size() ==
|
||||||
|
test_parameters.num_ports);
|
||||||
|
|
||||||
|
// compare to data stored in header
|
||||||
|
RawFile f(fpath, "r");
|
||||||
|
for (size_t i = 0; i < test_parameters.num_ports; ++i) {
|
||||||
|
|
||||||
|
auto subfile1_path = f.master().data_fname(i, 0);
|
||||||
|
REQUIRE(std::filesystem::exists(subfile1_path));
|
||||||
|
|
||||||
|
auto header = RawFile::read_header(subfile1_path);
|
||||||
|
|
||||||
|
CHECK(header.column == geometry.get_module_geometries(i).col_index);
|
||||||
|
CHECK(header.row == geometry.get_module_geometries(i).row_index);
|
||||||
|
|
||||||
|
CHECK(geometry.get_module_geometries(i).height ==
|
||||||
|
test_parameters.module_geometries[i].height);
|
||||||
|
CHECK(geometry.get_module_geometries(i).width ==
|
||||||
|
test_parameters.module_geometries[i].width);
|
||||||
|
|
||||||
|
CHECK(geometry.get_module_geometries(i).origin_x ==
|
||||||
|
test_parameters.module_geometries[i].origin_x);
|
||||||
|
CHECK(geometry.get_module_geometries(i).origin_y ==
|
||||||
|
test_parameters.module_geometries[i].origin_y);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST_CASE("Open multi module file with ROI", "[.with-data]") {
|
||||||
|
|
||||||
|
auto fpath = test_data_path() / "raw/SingleChipROI/Data_master_0.json";
|
||||||
|
REQUIRE(std::filesystem::exists(fpath));
|
||||||
|
|
||||||
|
RawFile f(fpath, "r");
|
||||||
|
|
||||||
|
SECTION("read 2 frames") {
|
||||||
|
REQUIRE(f.master().roi().value().width() == 256);
|
||||||
|
REQUIRE(f.master().roi().value().height() == 256);
|
||||||
|
|
||||||
|
CHECK(f.n_modules() == 2);
|
||||||
|
|
||||||
|
CHECK(f.n_modules_in_roi() == 1);
|
||||||
|
|
||||||
|
auto frames = f.read_n(2);
|
||||||
|
|
||||||
|
CHECK(frames.size() == 2);
|
||||||
|
|
||||||
|
CHECK(frames[0].rows() == 256);
|
||||||
|
CHECK(frames[1].cols() == 256);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST_CASE("Read file with unordered frames", "[.with-data]") {
|
||||||
// TODO! Better explanation and error message
|
// TODO! Better explanation and error message
|
||||||
auto fpath = test_data_path() / "mythen" / "scan242_master_3.raw";
|
auto fpath = test_data_path() / "raw/mythen/scan242_master_3.raw";
|
||||||
REQUIRE(std::filesystem::exists(fpath));
|
REQUIRE(std::filesystem::exists(fpath));
|
||||||
File f(fpath);
|
File f(fpath);
|
||||||
REQUIRE_THROWS((f.read_frame()));
|
REQUIRE_THROWS((f.read_frame()));
|
||||||
|
|||||||
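A minimal sketch, not part of the diff above, of how the geometry API exercised by the new "check find_geometry" test could be used from application code. The constructor arguments and accessors (DetectorGeometry, get_module_geometries, origin_x/origin_y/width/height) are taken from the test; the include set and the helper function name are assumptions.

// Sketch: print the per-port module layout of a multi-module raw file.
#include "aare/RawFile.hpp"        // assumed include set; DetectorGeometry may
#include "aare/RawMasterFile.hpp"  // live in a different header
#include <fmt/core.h>
#include <filesystem>

using namespace aare;

void print_module_layout(const std::filesystem::path &master) {
    RawMasterFile mf(master);
    // Same construction as in the test above.
    DetectorGeometry geom(mf.geometry(), mf.pixels_x(), mf.pixels_y(),
                          mf.udp_interfaces_per_module(), mf.quad());
    // One entry per UDP port, in the same order as the data files d0, d1, ...
    for (size_t i = 0; i < geom.get_module_geometries().size(); ++i) {
        const auto &m = geom.get_module_geometries(i);
        fmt::print("port {}: origin=({}, {}), {}x{} pixels\n", i, m.origin_x,
                   m.origin_y, m.width, m.height);
    }
}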
@@ -1,5 +1,8 @@
#include "aare/RawMasterFile.hpp"
#include "aare/RawFile.hpp"
#include "aare/logger.hpp"
#include <sstream>

namespace aare {

RawFileNameComponents::RawFileNameComponents(
@@ -61,6 +64,12 @@ const std::string &RawFileNameComponents::base_name() const {
const std::string &RawFileNameComponents::ext() const { return m_ext; }
int RawFileNameComponents::file_index() const { return m_file_index; }

ScanParameters::ScanParameters(const bool enabled, const DACIndex dac,
const int start, const int stop, const int step,
const int64_t settleTime)
: m_enabled(enabled), m_dac(dac), m_start(start), m_stop(stop),
m_step(step), m_settleTime(settleTime){};

// "[enabled\ndac dac 4\nstart 500\nstop 2200\nstep 5\nsettleTime 100us\n]"
ScanParameters::ScanParameters(const std::string &par) {
std::istringstream iss(par.substr(1, par.size() - 2));
@@ -69,7 +78,7 @@ ScanParameters::ScanParameters(const std::string &par) {
if (line == "enabled") {
m_enabled = true;
} else if (line.find("dac") != std::string::npos) {
m_dac = line.substr(4);
m_dac = StringTo<DACIndex>(line.substr(4));
} else if (line.find("start") != std::string::npos) {
m_start = std::stoi(line.substr(6));
} else if (line.find("stop") != std::string::npos) {
@@ -84,8 +93,9 @@ int ScanParameters::start() const { return m_start; }
int ScanParameters::stop() const { return m_stop; }
void ScanParameters::increment_stop() { m_stop += 1; }
int ScanParameters::step() const { return m_step; }
const std::string &ScanParameters::dac() const { return m_dac; }
DACIndex ScanParameters::dac() const { return m_dac; }
bool ScanParameters::enabled() const { return m_enabled; }
int64_t ScanParameters::settleTime() const { return m_settleTime; }

RawMasterFile::RawMasterFile(const std::filesystem::path &fpath)
: m_fnc(fpath) {
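A short usage sketch, not part of the diff, showing that the old bracketed-string constructor and the new explicit ScanParameters constructor describe the same scan. The constructor signature and the accessors are taken from the hunks above; the assumption is only that ScanParameters is declared in aare/RawMasterFile.hpp.

#include "aare/RawMasterFile.hpp" // assumed location of ScanParameters
#include <cassert>

using namespace aare;

void scan_parameters_example() {
    // Old master files store the scan as a bracketed text block.
    ScanParameters from_text(
        "[enabled\ndac dac 4\nstart 500\nstop 2200\nstep 5\nsettleTime 100us\n]");
    // Version >= 8.0 master files expose the fields directly, which maps to
    // the new explicit constructor (settle time in ns, here 0 for brevity).
    ScanParameters from_fields(true, DACIndex::DAC_4, 500, 2200, 5, 0);

    assert(from_text.dac() == from_fields.dac());
    assert(from_text.start() == from_fields.start());
    assert(from_text.stop() == from_fields.stop());
    assert(from_text.step() == from_fields.step());
}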
@@ -138,7 +148,11 @@ size_t RawMasterFile::n_modules() const {
return m_geometry.row * m_geometry.col;
}

std::optional<uint8_t> RawMasterFile::quad() const { return m_quad; }
xy RawMasterFile::udp_interfaces_per_module() const {
return m_udp_interfaces_per_module;
}

uint8_t RawMasterFile::quad() const { return m_quad; }

// optional values, these may or may not be present in the master file
// and are therefore modeled as std::optional
@@ -163,15 +177,21 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) {
std::ifstream ifs(fpath);
json j;
ifs >> j;

double v = j["Version"];
m_version = fmt::format("{:.1f}", v);

m_type = StringTo<DetectorType>(j["Detector Type"].get<std::string>());
m_timing_mode = StringTo<TimingMode>(j["Timing Mode"].get<std::string>());

m_geometry = {j["Geometry"]["y"], j["Geometry"]["x"]};
m_geometry = {
j["Geometry"]["y"],
j["Geometry"]["x"]}; // TODO: isnt it only available for version > 7.1?
// - try block default should be 1x1

m_image_size_in_bytes =
v < 8.0 ? j["Image Size in bytes"] : j["Image Size"];

m_image_size_in_bytes = j["Image Size in bytes"];
m_frames_in_file = j["Frames in File"];
m_pixels_y = j["Pixels"]["y"];
m_pixels_x = j["Pixels"]["x"];
@@ -196,7 +216,6 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) {
} catch (const json::out_of_range &e) {
// keep the optional empty
}

// ----------------------------------------------------------------
// Special treatment of analog flag because of Moench03
try {
@@ -217,7 +236,6 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) {
m_analog_flag = 0;
}
//-----------------------------------------------------------------

try {
m_quad = j.at("Quad");
} catch (const json::out_of_range &e) {
@@ -229,7 +247,6 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) {
// }catch (const json::out_of_range &e) {
//     m_adc_mask = 0;
// }

try {
int digital_flag = j.at("Digital Flag");
if (digital_flag) {
@@ -238,7 +255,6 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) {
} catch (const json::out_of_range &e) {
// keep the optional empty
}

try {
m_transceiver_flag = j.at("Transceiver Flag");
if (m_transceiver_flag) {
@@ -247,10 +263,20 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) {
} catch (const json::out_of_range &e) {
// keep the optional empty
}

try {
if (v < 8.0) {
std::string scan_parameters = j.at("Scan Parameters");
m_scan_parameters = ScanParameters(scan_parameters);
} else {
auto json_obj = j.at("Scan Parameters");
m_scan_parameters = ScanParameters(
json_obj.at("enable").get<int>(),
static_cast<DACIndex>(json_obj.at("dacInd").get<int>()),
json_obj.at("start offset").get<int>(),
json_obj.at("stop offset").get<int>(),
json_obj.at("step size").get<int>(),
json_obj.at("dac settle time ns").get<int>());
}
if (v < 7.21) {
m_scan_parameters
.increment_stop(); // adjust for endpoint being included
@@ -259,34 +285,45 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) {
// not a scan
}

try {
m_udp_interfaces_per_module = {j.at("Number of UDP Interfaces"), 1};
} catch (const json::out_of_range &e) {
if (m_type == DetectorType::Eiger && m_quad == 1)
m_udp_interfaces_per_module = {2, 1};
else if (m_type == DetectorType::Eiger) {
m_udp_interfaces_per_module = {1, 2};
}
}
try {
ROI tmp_roi;
if (v < 8.0) {
auto obj = j.at("Receiver Roi");
tmp_roi.xmin = obj.at("xmin");
tmp_roi.xmax = obj.at("xmax");
tmp_roi.ymin = obj.at("ymin");
tmp_roi.ymax = obj.at("ymax");
} else {
// TODO: for now only handle single ROI
auto obj = j.at("Receiver Rois");
tmp_roi.xmin = obj[0].at("xmin");
tmp_roi.xmax = obj[0].at("xmax");
tmp_roi.ymin = obj[0].at("ymin");
tmp_roi.ymax = obj[0].at("ymax");
}

// if any of the values are set update the roi
if (tmp_roi.xmin != 4294967295 || tmp_roi.xmax != 4294967295 ||
tmp_roi.ymin != 4294967295 || tmp_roi.ymax != 4294967295) {

if (v < 7.21) {
tmp_roi.xmax++;
tmp_roi.ymax++;
}

m_roi = tmp_roi;
}

} catch (const json::out_of_range &e) {
LOG(TLogLevel::logERROR) << e.what() << std::endl;
// leave the optional empty
}

// if we have an roi we need to update the geometry for the subfiles
if (m_roi) {
}

// Update detector type for Moench
// TODO! How does this work with old .raw master files?
#ifdef AARE_VERBOSE
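A minimal sketch, not part of the diff, of the version >= 8.0 master-file fields that the parsing above now handles. The key names ("Image Size", the "Scan Parameters" object, the "Receiver Rois" array) and the example values (5120, 2559, -1) come from the diff and the new Mythen3 test further down; building the JSON with nlohmann::json is an assumption about the json alias used in the parser.

#include <nlohmann/json.hpp>

void v8_master_fields_example() {
    nlohmann::json j;
    j["Version"] = 8.0;
    j["Image Size"] = 5120; // was "Image Size in bytes" before 8.0
    j["Scan Parameters"] = {{"enable", 0},
                            {"dacInd", 0},
                            {"start offset", 0},
                            {"stop offset", 0},
                            {"step size", 0},
                            {"dac settle time ns", 0}};
    nlohmann::json roi = {{"xmin", 0}, {"xmax", 2559}, {"ymin", -1}, {"ymax", -1}};
    j["Receiver Rois"] = nlohmann::json::array({roi});

    // The parser reads the nested fields with at()/get<int>() and, for now,
    // only the first entry of "Receiver Rois" is used.
    int dac_index = j.at("Scan Parameters").at("dacInd").get<int>();
    int xmax = j.at("Receiver Rois")[0].at("xmax").get<int>();
    (void)dac_index;
    (void)xmax;
}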
@@ -389,21 +426,54 @@ void RawMasterFile::parse_raw(const std::filesystem::path &fpath) {
m_geometry = {
static_cast<uint32_t>(std::stoi(value.substr(1, pos))),
static_cast<uint32_t>(std::stoi(value.substr(pos + 1)))};
} else if (key == "Number of UDP Interfaces") {
m_udp_interfaces_per_module = {
static_cast<uint32_t>(std::stoi(value)), 1};
}
}
}

if (m_type == DetectorType::Eiger && m_quad == 1) {
m_udp_interfaces_per_module = {2, 1};
} else if (m_type == DetectorType::Eiger) {
m_udp_interfaces_per_module = {1, 2};
}

if (m_pixels_x == 400 && m_pixels_y == 400) {
m_type = DetectorType::Moench03_old;
}

// TODO! Look for d0, d1...dn and update geometry
if (m_geometry.col == 0 && m_geometry.row == 0) {
m_geometry = {1, 1};
retrieve_geometry();
fmt::print("Warning: No geometry found in master file. Assuming 1x1\n");
LOG(TLogLevel::logWARNING)
<< "No geometry found in master file. Retrieved geometry of "
<< m_geometry.row << " x " << m_geometry.col << "\n ";
}

// TODO! Read files and find actual frames
if (m_frames_in_file == 0)
m_frames_in_file = m_total_frames_expected;
}

void RawMasterFile::retrieve_geometry() {
uint32_t module_index = 0;
uint16_t rows = 0;
uint16_t cols = 0;
// TODO use case for Eiger

while (std::filesystem::exists(data_fname(module_index, 0))) {

auto header = RawFile::read_header(data_fname(module_index, 0));

rows = std::max(rows, header.row);
cols = std::max(cols, header.column);

++module_index;
}
++rows;
++cols;

m_geometry = {rows, cols};
}

} // namespace aare
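A worked example, not part of the diff, of the retrieve_geometry() fallback above. It probes the d0, d1, ... data files, reads each subfile header, and uses max(row) + 1 and max(column) + 1 as the geometry. The struct and the header values below are hypothetical, chosen to match the "Parse a master file in old .raw format" test further down, which presumably has two vertically stacked Jungfrau modules reporting rows 0 and 1 in column 0.

#include <algorithm>
#include <cstdint>

struct ProbedHeader { // hypothetical stand-in for the subfile header fields used above
    uint16_t row;
    uint16_t column;
};

void retrieve_geometry_example() {
    ProbedHeader headers[] = {{0, 0}, {1, 0}}; // d0 and d1
    uint16_t rows = 0, cols = 0;
    for (const auto &h : headers) {
        rows = std::max(rows, h.row);
        cols = std::max(cols, h.column);
    }
    ++rows; // indices are zero based, so the count is max index + 1
    ++cols;
    // rows == 2, cols == 1, matching CHECK(f.geometry().row == 2) and
    // CHECK(f.geometry().col == 1) in the test below.
}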
@@ -2,6 +2,7 @@

#include "test_config.hpp"
#include <catch2/catch_test_macros.hpp>
#include <iostream>

using namespace aare;

@@ -50,7 +51,7 @@ TEST_CASE("Parse scan parameters") {
ScanParameters s("[enabled\ndac dac 4\nstart 500\nstop 2200\nstep "
"5\nsettleTime 100us\n]");
REQUIRE(s.enabled());
REQUIRE(s.dac() == "dac 4");
REQUIRE(s.dac() == DACIndex::DAC_4);
REQUIRE(s.start() == 500);
REQUIRE(s.stop() == 2200);
REQUIRE(s.step() == 5);
@@ -59,7 +60,7 @@ TEST_CASE("Parse scan parameters") {
TEST_CASE("A disabled scan") {
ScanParameters s("[disabled]");
REQUIRE_FALSE(s.enabled());
REQUIRE(s.dac() == "");
REQUIRE(s.dac() == DACIndex::DAC_0);
REQUIRE(s.start() == 0);
REQUIRE(s.stop() == 0);
REQUIRE(s.step() == 0);
@@ -67,7 +68,7 @@ TEST_CASE("A disabled scan") {

TEST_CASE("Parse a master file in .json format", "[.integration]") {
auto fpath =
test_data_path() / "jungfrau" / "jungfrau_single_master_0.json";
test_data_path() / "raw" / "jungfrau" / "jungfrau_single_master_0.json";
REQUIRE(std::filesystem::exists(fpath));
RawMasterFile f(fpath);

@@ -145,6 +146,19 @@ TEST_CASE("Parse a master file in .json format", "[.integration]") {
REQUIRE_FALSE(f.digital_samples());
}

TEST_CASE("Parse a master file in old .raw format",
"[.integration][.with-data][.rawmasterfile]") {
auto fpath = test_data_path() /
"raw/jungfrau_2modules_version6.1.2/run_master_0.raw";
REQUIRE(std::filesystem::exists(fpath));
RawMasterFile f(fpath);

CHECK(f.udp_interfaces_per_module() == xy{1, 1});
CHECK(f.n_modules() == 2);
CHECK(f.geometry().row == 2);
CHECK(f.geometry().col == 1);
}

TEST_CASE("Parse a master file in .raw format", "[.integration]") {

auto fpath =
@@ -210,6 +224,41 @@ TEST_CASE("Parse a master file in .raw format", "[.integration]") {
// Packets Caught Mask : 64 bytes
}

TEST_CASE("Parse a master file in new .json format",
"[.integration][.width-data]") {

auto file_path =
test_data_path() / "raw" / "newmythen03" / "run_87_master_0.json";
REQUIRE(std::filesystem::exists(file_path));

RawMasterFile f(file_path);

// Version : 8.0
REQUIRE(f.version() == "8.0");

REQUIRE(f.detector_type() == DetectorType::Mythen3);
// Timing Mode : auto
REQUIRE(f.timing_mode() == TimingMode::Auto);
// Geometry : [2, 1]
REQUIRE(f.geometry().col == 2);
REQUIRE(f.geometry().row == 1);
// Image Size : 5120 bytes
REQUIRE(f.image_size_in_bytes() == 5120);

REQUIRE(f.scan_parameters().enabled() == false);
REQUIRE(f.scan_parameters().dac() == DACIndex::DAC_0);
REQUIRE(f.scan_parameters().start() == 0);
REQUIRE(f.scan_parameters().stop() == 0);
REQUIRE(f.scan_parameters().step() == 0);
REQUIRE(f.scan_parameters().settleTime() == 0);

auto roi = f.roi().value();
REQUIRE(roi.xmin == 0);
REQUIRE(roi.xmax == 2559);
REQUIRE(roi.ymin == -1);
REQUIRE(roi.ymax == -1);
}

TEST_CASE("Read eiger master file", "[.integration]") {
auto fpath = test_data_path() / "eiger" / "eiger_500k_32bit_master_0.json";
REQUIRE(std::filesystem::exists(fpath));
@@ -6,7 +6,7 @@

using namespace aare;

TEST_CASE("Read frames directly from a RawSubFile", "[.files]") {
TEST_CASE("Read frames directly from a RawSubFile", "[.with-data]") {
auto fpath_raw =
test_data_path() / "raw/jungfrau" / "jungfrau_single_d0_f0_0.raw";
REQUIRE(std::filesystem::exists(fpath_raw));
@@ -40,7 +40,7 @@ TEST_CASE("Read frames directly from a RawSubFile", "[.files]") {
}

TEST_CASE("Read frames directly from a RawSubFile starting at the second file",
"[.files]") {
"[.with-data]") {
// we know this file has 10 frames with frame numbers 1 to 10
// f0 1,2,3
// f1 4,5,6 <-- starting here
44
src/calibration.cpp
Normal file
@@ -0,0 +1,44 @@
#include "aare/calibration.hpp"

namespace aare {

NDArray<int, 2> count_switching_pixels(NDView<uint16_t, 3> raw_data) {
NDArray<int, 2> switched(
std::array<ssize_t, 2>{raw_data.shape(1), raw_data.shape(2)}, 0);
for (int frame_nr = 0; frame_nr != raw_data.shape(0); ++frame_nr) {
for (int row = 0; row != raw_data.shape(1); ++row) {
for (int col = 0; col != raw_data.shape(2); ++col) {
auto [value, gain] =
get_value_and_gain(raw_data(frame_nr, row, col));
if (gain != 0) {
switched(row, col) += 1;
}
}
}
}
return switched;
}

NDArray<int, 2> count_switching_pixels(NDView<uint16_t, 3> raw_data,
ssize_t n_threads) {
NDArray<int, 2> switched(
std::array<ssize_t, 2>{raw_data.shape(1), raw_data.shape(2)}, 0);
std::vector<std::future<NDArray<int, 2>>> futures;
futures.reserve(n_threads);

auto subviews = make_subviews(raw_data, n_threads);

for (auto view : subviews) {
futures.push_back(
std::async(static_cast<NDArray<int, 2> (*)(NDView<uint16_t, 3>)>(
&count_switching_pixels),
view));
}

for (auto &f : futures) {
switched += f.get();
}
return switched;
}

} // namespace aare
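A short usage sketch, not part of the new file above, for the two count_switching_pixels overloads. The signatures and container types are taken from the file; the shape of the input and the thread count are only examples.

#include "aare/calibration.hpp"

using namespace aare;

void count_switching_example(NDView<uint16_t, 3> raw_frames) {
    // Single threaded: one pass over (frame, row, col), counting pixels whose
    // gain bits are non zero.
    NDArray<int, 2> switched = count_switching_pixels(raw_frames);

    // Multi threaded: the data is split into sub views and the partial
    // per-pixel counts are summed.
    NDArray<int, 2> switched_mt = count_switching_pixels(raw_frames, 4);

    (void)switched;
    (void)switched_mt;
}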
49
src/calibration.test.cpp
Normal file
@@ -0,0 +1,49 @@
/************************************************
* @file test-Cluster.cpp
* @short test case for generic Cluster, ClusterVector, and calculate_eta2
***********************************************/

#include "aare/calibration.hpp"

// #include "catch.hpp"
#include <array>
#include <catch2/catch_all.hpp>
#include <catch2/catch_test_macros.hpp>

using namespace aare;

TEST_CASE("Test Pedestal Generation", "[.calibration]") {
NDArray<uint16_t, 3> raw(std::array<ssize_t, 3>{3, 2, 2}, 0);

// gain 0
raw(0, 0, 0) = 100;
raw(1, 0, 0) = 200;
raw(2, 0, 0) = 300;

// gain 1
raw(0, 0, 1) = (1 << 14) + 100;
raw(1, 0, 1) = (1 << 14) + 200;
raw(2, 0, 1) = (1 << 14) + 300;

raw(0, 1, 0) = (1 << 14) + 37;
raw(1, 1, 0) = 38;
raw(2, 1, 0) = (3 << 14) + 39;

// gain 2
raw(0, 1, 1) = (3 << 14) + 100;
raw(1, 1, 1) = (3 << 14) + 200;
raw(2, 1, 1) = (3 << 14) + 300;

auto pedestal = calculate_pedestal<double>(raw.view(), 4);

REQUIRE(pedestal.size() == raw.size());
CHECK(pedestal(0, 0, 0) == 200);
CHECK(pedestal(1, 0, 0) == 0);
CHECK(pedestal(1, 0, 1) == 200);

auto pedestal_gain0 = calculate_pedestal_g0<double>(raw.view(), 4);

REQUIRE(pedestal_gain0.size() == 4);
CHECK(pedestal_gain0(0, 0) == 200);
CHECK(pedestal_gain0(1, 0) == 38);
}
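A reading of the test data above, and an assumption about get_value_and_gain (whose implementation is not part of this diff): the two most significant bits of the 16-bit raw word carry the gain code and the lower 14 bits the ADC value, with the code (3 << 14) labelled "gain 2" by the test.

#include <cstdint>

// Assumed split of a raw Jungfrau-style sample word, consistent with how the
// test constructs its inputs.
constexpr uint16_t gain_bits(uint16_t raw) { return raw >> 14; }
constexpr uint16_t adc_value(uint16_t raw) { return raw & 0x3FFF; }

static_assert(gain_bits((1 << 14) + 100) == 1);
static_assert(adc_value((1 << 14) + 100) == 100);
static_assert(gain_bits((3 << 14) + 300) == 3); // labelled "gain 2" in the test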
182
src/defs.cpp
@@ -115,4 +115,186 @@ template <> FrameDiscardPolicy StringTo(const std::string &arg) {

// template <> TimingMode StringTo<TimingMode>(std::string mode);

template <> DACIndex StringTo(const std::string &arg) {
if (arg == "dac 0")
return DACIndex::DAC_0;
else if (arg == "dac 1")
return DACIndex::DAC_1;
else if (arg == "dac 2")
return DACIndex::DAC_2;
else if (arg == "dac 3")
return DACIndex::DAC_3;
else if (arg == "dac 4")
return DACIndex::DAC_4;
else if (arg == "dac 5")
return DACIndex::DAC_5;
else if (arg == "dac 6")
return DACIndex::DAC_6;
else if (arg == "dac 7")
return DACIndex::DAC_7;
else if (arg == "dac 8")
return DACIndex::DAC_8;
else if (arg == "dac 9")
return DACIndex::DAC_9;
else if (arg == "dac 10")
return DACIndex::DAC_10;
else if (arg == "dac 11")
return DACIndex::DAC_11;
else if (arg == "dac 12")
return DACIndex::DAC_12;
else if (arg == "dac 13")
return DACIndex::DAC_13;
else if (arg == "dac 14")
return DACIndex::DAC_14;
else if (arg == "dac 15")
return DACIndex::DAC_15;
else if (arg == "dac 16")
return DACIndex::DAC_16;
else if (arg == "dac 17")
return DACIndex::DAC_17;
else if (arg == "vsvp")
return DACIndex::VSVP;
else if (arg == "vtrim")
return DACIndex::VTRIM;
else if (arg == "vrpreamp")
return DACIndex::VRPREAMP;
else if (arg == "vrshaper")
return DACIndex::VRSHAPER;
else if (arg == "vsvn")
return DACIndex::VSVN;
else if (arg == "vtgstv")
return DACIndex::VTGSTV;
else if (arg == "vcmp_ll")
return DACIndex::VCMP_LL;
else if (arg == "vcmp_lr")
return DACIndex::VCMP_LR;
else if (arg == "vcal")
return DACIndex::VCAL;
else if (arg == "vcmp_rl")
return DACIndex::VCMP_RL;
else if (arg == "rxb_rb")
return DACIndex::RXB_RB;
else if (arg == "rxb_lb")
return DACIndex::RXB_LB;
else if (arg == "vcmp_rr")
return DACIndex::VCMP_RR;
else if (arg == "vcp")
return DACIndex::VCP;
else if (arg == "vcn")
return DACIndex::VCN;
else if (arg == "vishaper")
return DACIndex::VISHAPER;
else if (arg == "vthreshold")
return DACIndex::VTHRESHOLD;
else if (arg == "vref_ds")
return DACIndex::VREF_DS;
else if (arg == "vout_cm")
return DACIndex::VOUT_CM;
else if (arg == "vin_cm")
return DACIndex::VIN_CM;
else if (arg == "vref_comp")
return DACIndex::VREF_COMP;
else if (arg == "vb_comp")
return DACIndex::VB_COMP;
else if (arg == "vdd_prot")
return DACIndex::VDD_PROT;
else if (arg == "vin_com")
return DACIndex::VIN_COM;
else if (arg == "vref_prech")
return DACIndex::VREF_PRECH;
else if (arg == "vb_pixbuf")
return DACIndex::VB_PIXBUF;
else if (arg == "vb_ds")
return DACIndex::VB_DS;
else if (arg == "vref_h_adc")
return DACIndex::VREF_H_ADC;
else if (arg == "vb_comp_fe")
return DACIndex::VB_COMP_FE;
else if (arg == "vb_comp_adc")
return DACIndex::VB_COMP_ADC;
else if (arg == "vcom_cds")
return DACIndex::VCOM_CDS;
else if (arg == "vref_rstore")
return DACIndex::VREF_RSTORE;
else if (arg == "vb_opa_1st")
return DACIndex::VB_OPA_1ST;
else if (arg == "vref_comp_fe")
return DACIndex::VREF_COMP_FE;
else if (arg == "vcom_adc1")
return DACIndex::VCOM_ADC1;
else if (arg == "vref_l_adc")
return DACIndex::VREF_L_ADC;
else if (arg == "vref_cds")
return DACIndex::VREF_CDS;
else if (arg == "vb_cs")
return DACIndex::VB_CS;
else if (arg == "vb_opa_fd")
return DACIndex::VB_OPA_FD;
else if (arg == "vcom_adc2")
return DACIndex::VCOM_ADC2;
else if (arg == "vcassh")
return DACIndex::VCASSH;
else if (arg == "vth2")
return DACIndex::VTH2;
else if (arg == "vrshaper_n")
return DACIndex::VRSHAPER_N;
else if (arg == "vipre_out")
return DACIndex::VIPRE_OUT;
else if (arg == "vth3")
return DACIndex::VTH3;
else if (arg == "vth1")
return DACIndex::VTH1;
else if (arg == "vicin")
return DACIndex::VICIN;
else if (arg == "vcas")
return DACIndex::VCAS;
else if (arg == "vcal_n")
return DACIndex::VCAL_N;
else if (arg == "vipre")
return DACIndex::VIPRE;
else if (arg == "vcal_p")
return DACIndex::VCAL_P;
else if (arg == "vdcsh")
return DACIndex::VDCSH;
else if (arg == "vbp_colbuf")
return DACIndex::VBP_COLBUF;
else if (arg == "vb_sda")
return DACIndex::VB_SDA;
else if (arg == "vcasc_sfp")
return DACIndex::VCASC_SFP;
else if (arg == "vipre_cds")
return DACIndex::VIPRE_CDS;
else if (arg == "ibias_sfp")
return DACIndex::IBIAS_SFP;
else if (arg == "trimbits")
return DACIndex::TRIMBIT_SCAN;
else if (arg == "highvoltage")
return DACIndex::HIGH_VOLTAGE;
else if (arg == "iodelay")
return DACIndex::IO_DELAY;
else if (arg == "temp_adc")
return DACIndex::TEMPERATURE_ADC;
else if (arg == "temp_fpga")
return DACIndex::TEMPERATURE_FPGA;
else if (arg == "temp_fpgaext")
return DACIndex::TEMPERATURE_FPGAEXT;
else if (arg == "temp_10ge")
return DACIndex::TEMPERATURE_10GE;
else if (arg == "temp_dcdc")
return DACIndex::TEMPERATURE_DCDC;
else if (arg == "temp_sodl")
return DACIndex::TEMPERATURE_SODL;
else if (arg == "temp_sodr")
return DACIndex::TEMPERATURE_SODR;
else if (arg == "temp_fpgafl")
return DACIndex::TEMPERATURE_FPGA2;
else if (arg == "temp_fpgafr")
return DACIndex::TEMPERATURE_FPGA3;
else if (arg == "temp_slowadc")
return DACIndex::SLOW_ADC_TEMP;
else
throw std::invalid_argument("Could not decode DACIndex from: \"" + arg +
"\"");
}

} // namespace aare
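A small usage sketch, not part of the diff, for the new StringTo<DACIndex> specialisation above. The mapping and the thrown exception come from the code; the header name aare/defs.hpp is an assumption about where StringTo and DACIndex are declared.

#include "aare/defs.hpp" // assumed header for StringTo and DACIndex
#include <stdexcept>

using namespace aare;

void dac_index_example() {
    auto dac = StringTo<DACIndex>("vrpreamp"); // DACIndex::VRPREAMP
    (void)dac;
    try {
        StringTo<DACIndex>("not a dac");
    } catch (const std::invalid_argument &e) {
        // "Could not decode DACIndex from: ..." as thrown above
    }
}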
@@ -1,72 +0,0 @@

#include "aare/geo_helpers.hpp"
#include "fmt/core.h"

namespace aare {

DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI roi) {
#ifdef AARE_VERBOSE
fmt::println("update_geometry_with_roi() called with ROI: {} {} {} {}",
roi.xmin, roi.xmax, roi.ymin, roi.ymax);
fmt::println("Geometry: {} {} {} {} {} {}", geo.modules_x, geo.modules_y,
geo.pixels_x, geo.pixels_y, geo.module_gap_row,
geo.module_gap_col);
#endif
int pos_y = 0;
int pos_y_increment = 0;
for (int row = 0; row < geo.modules_y; row++) {
int pos_x = 0;
for (int col = 0; col < geo.modules_x; col++) {
auto &m = geo.module_pixel_0[row * geo.modules_x + col];
auto original_height = m.height;
auto original_width = m.width;

// module is to the left of the roi
if (m.origin_x + m.width < roi.xmin) {
m.width = 0;

// roi is in module
} else {
// here we only arrive when the roi is in or to the left of
// the module
if (roi.xmin > m.origin_x) {
m.width -= roi.xmin - m.origin_x;
}
if (roi.xmax < m.origin_x + original_width) {
m.width -= m.origin_x + original_width - roi.xmax;
}
m.origin_x = pos_x;
pos_x += m.width;
}

if (m.origin_y + m.height < roi.ymin) {
m.height = 0;
} else {
if ((roi.ymin > m.origin_y) &&
(roi.ymin < m.origin_y + m.height)) {
m.height -= roi.ymin - m.origin_y;
}
if (roi.ymax < m.origin_y + original_height) {
m.height -= m.origin_y + original_height - roi.ymax;
}
m.origin_y = pos_y;
pos_y_increment = m.height;
}
#ifdef AARE_VERBOSE
fmt::println("Module {} {} {} {}", m.origin_x, m.origin_y, m.width,
m.height);
#endif
}
// increment pos_y
pos_y += pos_y_increment;
}

// m_rows = roi.height();
// m_cols = roi.width();
geo.pixels_x = roi.width();
geo.pixels_y = roi.height();

return geo;
}

} // namespace aare
@@ -1,228 +0,0 @@
#include "aare/File.hpp"
#include "aare/RawFile.hpp"
#include "aare/RawMasterFile.hpp" //needed for ROI

#include <catch2/catch_test_macros.hpp>
#include <filesystem>

#include "aare/geo_helpers.hpp"
#include "test_config.hpp"

TEST_CASE("Simple ROIs on one module") {
// DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI
// roi)
aare::DetectorGeometry geo;

aare::ModuleGeometry mod;
mod.origin_x = 0;
mod.origin_y = 0;
mod.width = 1024;
mod.height = 512;

geo.pixels_x = 1024;
geo.pixels_y = 512;
geo.modules_x = 1;
geo.modules_y = 1;
geo.module_pixel_0.push_back(mod);

SECTION("ROI is the whole module") {
aare::ROI roi;
roi.xmin = 0;
roi.xmax = 1024;
roi.ymin = 0;
roi.ymax = 512;
auto updated_geo = aare::update_geometry_with_roi(geo, roi);

REQUIRE(updated_geo.pixels_x == 1024);
REQUIRE(updated_geo.pixels_y == 512);
REQUIRE(updated_geo.modules_x == 1);
REQUIRE(updated_geo.modules_y == 1);
REQUIRE(updated_geo.module_pixel_0[0].height == 512);
REQUIRE(updated_geo.module_pixel_0[0].width == 1024);
}
SECTION("ROI is the top left corner of the module") {
aare::ROI roi;
roi.xmin = 100;
roi.xmax = 200;
roi.ymin = 150;
roi.ymax = 200;
auto updated_geo = aare::update_geometry_with_roi(geo, roi);

REQUIRE(updated_geo.pixels_x == 100);
REQUIRE(updated_geo.pixels_y == 50);
REQUIRE(updated_geo.modules_x == 1);
REQUIRE(updated_geo.modules_y == 1);
REQUIRE(updated_geo.module_pixel_0[0].height == 50);
REQUIRE(updated_geo.module_pixel_0[0].width == 100);
}

SECTION("ROI is a small square") {
aare::ROI roi;
roi.xmin = 1000;
roi.xmax = 1010;
roi.ymin = 500;
roi.ymax = 510;
auto updated_geo = aare::update_geometry_with_roi(geo, roi);

REQUIRE(updated_geo.pixels_x == 10);
REQUIRE(updated_geo.pixels_y == 10);
REQUIRE(updated_geo.modules_x == 1);
REQUIRE(updated_geo.modules_y == 1);
REQUIRE(updated_geo.module_pixel_0[0].height == 10);
REQUIRE(updated_geo.module_pixel_0[0].width == 10);
}
SECTION("ROI is a few columns") {
aare::ROI roi;
roi.xmin = 750;
roi.xmax = 800;
roi.ymin = 0;
roi.ymax = 512;
auto updated_geo = aare::update_geometry_with_roi(geo, roi);

REQUIRE(updated_geo.pixels_x == 50);
REQUIRE(updated_geo.pixels_y == 512);
REQUIRE(updated_geo.modules_x == 1);
REQUIRE(updated_geo.modules_y == 1);
REQUIRE(updated_geo.module_pixel_0[0].height == 512);
REQUIRE(updated_geo.module_pixel_0[0].width == 50);
}
}

TEST_CASE("Two modules side by side") {
// DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI
// roi)
aare::DetectorGeometry geo;

aare::ModuleGeometry mod;
mod.origin_x = 0;
mod.origin_y = 0;
mod.width = 1024;
mod.height = 512;

geo.pixels_x = 2048;
geo.pixels_y = 512;
geo.modules_x = 2;
geo.modules_y = 1;

geo.module_pixel_0.push_back(mod);
mod.origin_x = 1024;
geo.module_pixel_0.push_back(mod);

SECTION("ROI is the whole image") {
aare::ROI roi;
roi.xmin = 0;
roi.xmax = 2048;
roi.ymin = 0;
roi.ymax = 512;
auto updated_geo = aare::update_geometry_with_roi(geo, roi);

REQUIRE(updated_geo.pixels_x == 2048);
REQUIRE(updated_geo.pixels_y == 512);
REQUIRE(updated_geo.modules_x == 2);
REQUIRE(updated_geo.modules_y == 1);
}
SECTION("rectangle on both modules") {
aare::ROI roi;
roi.xmin = 800;
roi.xmax = 1300;
roi.ymin = 200;
roi.ymax = 499;
auto updated_geo = aare::update_geometry_with_roi(geo, roi);

REQUIRE(updated_geo.pixels_x == 500);
REQUIRE(updated_geo.pixels_y == 299);
REQUIRE(updated_geo.modules_x == 2);
REQUIRE(updated_geo.modules_y == 1);
REQUIRE(updated_geo.module_pixel_0[0].height == 299);
REQUIRE(updated_geo.module_pixel_0[0].width == 224);
REQUIRE(updated_geo.module_pixel_0[1].height == 299);
REQUIRE(updated_geo.module_pixel_0[1].width == 276);
}
}

TEST_CASE("Three modules side by side") {
// DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI
// roi)
aare::DetectorGeometry geo;
aare::ROI roi;
roi.xmin = 700;
roi.xmax = 2500;
roi.ymin = 0;
roi.ymax = 123;

aare::ModuleGeometry mod;
mod.origin_x = 0;
mod.origin_y = 0;
mod.width = 1024;
mod.height = 512;

geo.pixels_x = 3072;
geo.pixels_y = 512;
geo.modules_x = 3;
geo.modules_y = 1;

geo.module_pixel_0.push_back(mod);
mod.origin_x = 1024;
geo.module_pixel_0.push_back(mod);
mod.origin_x = 2048;
geo.module_pixel_0.push_back(mod);

auto updated_geo = aare::update_geometry_with_roi(geo, roi);

REQUIRE(updated_geo.pixels_x == 1800);
REQUIRE(updated_geo.pixels_y == 123);
REQUIRE(updated_geo.modules_x == 3);
REQUIRE(updated_geo.modules_y == 1);
REQUIRE(updated_geo.module_pixel_0[0].height == 123);
REQUIRE(updated_geo.module_pixel_0[0].width == 324);
REQUIRE(updated_geo.module_pixel_0[1].height == 123);
REQUIRE(updated_geo.module_pixel_0[1].width == 1024);
REQUIRE(updated_geo.module_pixel_0[2].height == 123);
REQUIRE(updated_geo.module_pixel_0[2].width == 452);
}

TEST_CASE("Four modules as a square") {
// DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI
// roi)
aare::DetectorGeometry geo;
aare::ROI roi;
roi.xmin = 500;
roi.xmax = 2000;
roi.ymin = 500;
roi.ymax = 600;

aare::ModuleGeometry mod;
mod.origin_x = 0;
mod.origin_y = 0;
mod.width = 1024;
mod.height = 512;

geo.pixels_x = 2048;
geo.pixels_y = 1024;
geo.modules_x = 2;
geo.modules_y = 2;

geo.module_pixel_0.push_back(mod);
mod.origin_x = 1024;
geo.module_pixel_0.push_back(mod);
mod.origin_x = 0;
mod.origin_y = 512;
geo.module_pixel_0.push_back(mod);
mod.origin_x = 1024;
geo.module_pixel_0.push_back(mod);

auto updated_geo = aare::update_geometry_with_roi(geo, roi);

REQUIRE(updated_geo.pixels_x == 1500);
REQUIRE(updated_geo.pixels_y == 100);
REQUIRE(updated_geo.modules_x == 2);
REQUIRE(updated_geo.modules_y == 2);
REQUIRE(updated_geo.module_pixel_0[0].height == 12);
REQUIRE(updated_geo.module_pixel_0[0].width == 524);
REQUIRE(updated_geo.module_pixel_0[1].height == 12);
REQUIRE(updated_geo.module_pixel_0[1].width == 976);
REQUIRE(updated_geo.module_pixel_0[2].height == 88);
REQUIRE(updated_geo.module_pixel_0[2].width == 524);
REQUIRE(updated_geo.module_pixel_0[3].height == 88);
REQUIRE(updated_geo.module_pixel_0[3].width == 976);
}
@@ -40,5 +40,8 @@ target_sources(tests PRIVATE ${TestSources} )
#configure a header to pass test file paths
get_filename_component(TEST_FILE_PATH ${PROJECT_SOURCE_DIR}/data ABSOLUTE)
configure_file(test_config.hpp.in test_config.hpp)
target_include_directories(tests PRIVATE ${CMAKE_CURRENT_BINARY_DIR})
target_include_directories(tests PRIVATE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})

4
tests/friend_test.hpp
Normal file
@@ -0,0 +1,4 @@
#define FRIEND_TEST(test_name) friend void test_name##_impl();

#define TEST_CASE_PRIVATE_FWD(test_name) \
void test_name##_impl(); // foward declaration
20
tests/test_macros.hpp
Normal file
@@ -0,0 +1,20 @@
#include <catch2/catch_test_macros.hpp>

#include <catch2/interfaces/catch_interfaces_capture.hpp>
#include <catch2/internal/catch_test_registry.hpp>
#include <catch2/internal/catch_unique_ptr.hpp>

#define TEST_CASE_PRIVATE(namespace_name, test_name, test_name_str, \
test_tags_str) \
namespace namespace_name { \
void test_name##_impl(); \
\
struct test_name##_Invoker : Catch::ITestInvoker { \
void invoke() const override { test_name##_impl(); } \
}; \
Catch::AutoReg \
autoReg_##test_name(Catch::Detail::make_unique<test_name##_Invoker>(), \
Catch::SourceLineInfo(__FILE__, __LINE__), "", \
Catch::NameAndTags{test_name_str, test_tags_str}); \
\
void test_name##_impl()
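A sketch, not part of the diff, of how the two new headers could fit together, inferred from the macro definitions above: FRIEND_TEST goes into the class under test, TEST_CASE_PRIVATE_FWD forward declares the test function next to it, and TEST_CASE_PRIVATE registers the test with Catch2 while re-opening the class's namespace, so the test body can touch private members. The class, namespace, and test names are made up; the headers are found by name thanks to the CMAKE_CURRENT_SOURCE_DIR include directory added above.

#include "friend_test.hpp"
#include "test_macros.hpp"

namespace demo {
TEST_CASE_PRIVATE_FWD(test_widget_internals)

class Widget {
    int m_secret{42};
    FRIEND_TEST(test_widget_internals)
};
} // namespace demo

// TEST_CASE_PRIVATE opens "namespace demo {", so an extra closing brace is
// needed after the test body.
TEST_CASE_PRIVATE(demo, test_widget_internals, "Access private members",
                  "[.unit]") {
    Widget w;
    CHECK(w.m_secret == 42); // allowed through the FRIEND_TEST declaration
}
} // namespace demo (opened by TEST_CASE_PRIVATE)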
@@ -7,6 +7,7 @@ Script to update VERSION file with semantic versioning if provided as an argumen
import sys
import os
import re
from datetime import datetime

from packaging.version import Version, InvalidVersion

@@ -26,8 +27,8 @@ def get_version():

# Check at least one argument is passed
if len(sys.argv) < 2:
return "0.0.0"
version = datetime.today().strftime('%Y.%-m.%-d')
else:
version = sys.argv[1]

try: