Mirror of https://github.com/slsdetectorgroup/aare.git (synced 2025-12-14 17:11:25 +01:00)

Compare commits: api_cluste... to feature_id (112 commits)
Commits (SHA1), 112 in total:

e290c8f820, c0ee17275e, ad3ef88607, f814b3f4e7, 1f46266183, d3d9f760b3, 0891ffb1ee, 0b74bc25d5,
3ec40fa809, 74280379ce, 474c35cc6b, e2a97d3c45, bce8e9d5fc, 4c1e276e2c, 12114e7275, 7926993bb2,
ed7fb1f1f9, 8ab98b356b, d908ad3636, 8733a1d66f, 437f7cec89, 6c3524298f, b59277c4bf, cb163c79b4,
a0fb4900f0, 91d74110fa, f54e76e6bf, c6da36d10b, 5107513ff5, f7aa66a2c9, 3ac94641e3, 89bb8776ea,
1527a45cf3, 3d6858ad33, d6222027d0, 1195a5e100, 1347158235, 8c4d8b687e, b8e91d0282, 46876bfa73,
348fd0f937, 0fea0f5b0e, cb439efb48, 5de402f91b, 9a7713e98a, b898e1c8d0, 4073c0cbe0, 9a3694b980,
abae2674a9, 1414d75320, 85c3bf7bed, fa3b7a5afe, e95326faa1, 8143524acf, 8e2346abf8, 8eb7fec435,
d8952eccc6, 83717571c8, 97dae4ac60, 5a9c3b717e, e3f4b34b72, 6ec8fbee72, 30822d9c5f, ff7312f45d,
8e7c9eadff, d35b7762b4, df4dbb8fd0, c92be4bca2, 664055de92, 318e640639, c6990dabad, c9fe16b4c2,
64438c8803, 9f8eee5d08, 35114cde9d, b13f864b2b, 05828baa54, 0f56846e3d, be67bbab6b, 8354439605,
11fa95b23c, bd7870e75a, 75f63607fc, cfe7c31fe4, 4976ec1651, a9a55fb27d, 3cc44f780f, 19ecc82fff,
2a069f3b6e, f9751902a2, 923f7d22b8, 6438a4bef1, ad7525cd02, 87d8682b1e, efd2338f54, b97f1e24f9,
1bc2fd770a, 9c6e629298, 69964e08d5, 94ac58b09e, 9ecf4f4b44, f2a024644b, 9e1b8731b0, a6eebbe9bd,
81588fba3b, 276283ff14, cf158e2dcd, 12ae1424fb, 6db201f397, d5226909fe, 2e0424254c, fd0196f2fd
14  .github/workflows/build_and_deploy_conda.yml (vendored)

@@ -1,9 +1,9 @@
name: Build pkgs and deploy if on main

on:
  push:
    branches:
      - main
  release:
    types:
      - published

jobs:
  build:
@@ -24,13 +24,13 @@ jobs:
      - uses: actions/checkout@v4

      - name: Get conda
-        uses: conda-incubator/setup-miniconda@v3.0.4
+        uses: conda-incubator/setup-miniconda@v3
        with:
          python-version: ${{ matrix.python-version }}
          environment-file: etc/dev-env.yml
          miniforge-version: latest
          channels: conda-forge

      - name: Prepare
        run: conda install conda-build=24.9 conda-verify pytest anaconda-client
        conda-remove-defaults: "true"

      - name: Enable upload
        run: conda config --set anaconda_upload yes
9  .github/workflows/build_conda.yml (vendored)

@@ -24,14 +24,15 @@ jobs:
      - uses: actions/checkout@v4

      - name: Get conda
-        uses: conda-incubator/setup-miniconda@v3.0.4
+        uses: conda-incubator/setup-miniconda@v3
        with:
          python-version: ${{ matrix.python-version }}
          environment-file: etc/dev-env.yml
          miniforge-version: latest
          channels: conda-forge
          conda-remove-defaults: "true"

      - name: Prepare
        run: conda install conda-build=24.9 conda-verify pytest anaconda-client

      - name: Disable upload
        run: conda config --set anaconda_upload no
9  .github/workflows/build_docs.yml (vendored)

@@ -2,7 +2,10 @@ name: Build the package using cmake then documentation

on:
  workflow_dispatch:
  push:
  pull_request:
  release:
    types:
      - published

permissions:
@@ -40,7 +43,7 @@
      run: |
        mkdir build
        cd build
-        cmake .. -DAARE_SYSTEM_LIBRARIES=ON -DAARE_DOCS=ON
+        cmake .. -DAARE_SYSTEM_LIBRARIES=ON -DAARE_PYTHON_BINDINGS=ON -DAARE_DOCS=ON
        make -j 2
        make docs

@@ -55,7 +58,7 @@
    url: ${{ steps.deployment.outputs.page_url }}
    runs-on: ubuntu-latest
    needs: build
-    if: github.ref == 'refs/heads/main'
+    if: (github.event_name == 'release' && github.event.action == 'published') || (github.event_name == 'workflow_dispatch' )
    steps:
      - name: Deploy to GitHub Pages
        id: deployment
@@ -1,12 +1,17 @@
cmake_minimum_required(VERSION 3.15)

project(aare
    VERSION 1.0.0
    DESCRIPTION "Data processing library for PSI detectors"
    HOMEPAGE_URL "https://github.com/slsdetectorgroup/aare"
    LANGUAGES C CXX
)

# Read VERSION file into project version
set(VERSION_FILE "${CMAKE_CURRENT_SOURCE_DIR}/VERSION")
file(READ "${VERSION_FILE}" VERSION_CONTENT)
string(STRIP "${VERSION_CONTENT}" PROJECT_VERSION_STRING)
set(PROJECT_VERSION ${PROJECT_VERSION_STRING})

set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF)
@@ -72,19 +77,12 @@ if(AARE_SYSTEM_LIBRARIES)
    # on conda-forge
endif()

if(AARE_VERBOSE)
    add_compile_definitions(AARE_VERBOSE)
endif()

if(AARE_CUSTOM_ASSERT)
    add_compile_definitions(AARE_CUSTOM_ASSERT)
endif()

if(AARE_BENCHMARKS)
    add_subdirectory(benchmarks)
endif()

set(CMAKE_EXPORT_COMPILE_COMMANDS ON)

if(AARE_FETCH_LMFIT)
@@ -326,13 +324,10 @@ if(AARE_ASAN)
    )
endif()

if(AARE_TESTS)
    enable_testing()
    add_subdirectory(tests)
    target_compile_definitions(tests PRIVATE AARE_TESTS)
endif()

###------------------------------------------------------------------------------MAIN LIBRARY
@@ -355,8 +350,9 @@ set(PUBLICHEADERS
    include/aare/FilePtr.hpp
    include/aare/Frame.hpp
    include/aare/GainMap.hpp
    include/aare/geo_helpers.hpp
    include/aare/DetectorGeometry.hpp
    include/aare/JungfrauDataFile.hpp
    include/aare/logger.hpp
    include/aare/NDArray.hpp
    include/aare/NDView.hpp
    include/aare/NumpyFile.hpp
@@ -372,27 +368,27 @@ set(PUBLICHEADERS

set(SourceFiles
    ${CMAKE_CURRENT_SOURCE_DIR}/src/calibration.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/CtbRawFile.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/defs.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/Dtype.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/decode.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/Frame.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/defs.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/DetectorGeometry.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/Dtype.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/File.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/FilePtr.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/Fit.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/geo_helpers.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/Frame.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/Interpolator.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/JungfrauDataFile.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyHelpers.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/Interpolator.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/PixelMap.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/RawFile.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/RawSubFile.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/RawMasterFile.cpp

    ${CMAKE_CURRENT_SOURCE_DIR}/src/RawSubFile.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/utils/task.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/utils/ifstream_helpers.cpp
)

add_library(aare_core STATIC ${SourceFiles})
target_include_directories(aare_core PUBLIC
@@ -400,6 +396,9 @@ target_include_directories(aare_core PUBLIC
    "$<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>"
)

set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads REQUIRED)

target_link_libraries(
    aare_core
    PUBLIC
@@ -408,27 +407,41 @@ target_link_libraries(
    ${STD_FS_LIB} # from helpers.cmake
    PRIVATE
    aare_compiler_flags
    Threads::Threads
    $<BUILD_INTERFACE:lmfit>
)

set_property(TARGET aare_core PROPERTY POSITION_INDEPENDENT_CODE ON)

if(AARE_TESTS)
    target_compile_definitions(aare_core PRIVATE AARE_TESTS)
endif()
if(AARE_VERBOSE)
    target_compile_definitions(aare_core PUBLIC AARE_VERBOSE)
    target_compile_definitions(aare_core PUBLIC AARE_LOG_LEVEL=aare::logDEBUG5)
else()
    target_compile_definitions(aare_core PUBLIC AARE_LOG_LEVEL=aare::logERROR)
endif()

if(AARE_CUSTOM_ASSERT)
    target_compile_definitions(aare_core PUBLIC AARE_CUSTOM_ASSERT)
endif()

set_target_properties(aare_core PROPERTIES
    ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}
    PUBLIC_HEADER "${PUBLICHEADERS}"
)

if (AARE_PYTHON_BINDINGS)
    set_property(TARGET aare_core PROPERTY POSITION_INDEPENDENT_CODE ON)
endif()

if(AARE_TESTS)
    set(TestSources
        ${CMAKE_CURRENT_SOURCE_DIR}/src/algorithm.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/calibration.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/defs.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/decode.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/Dtype.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/Frame.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/geo_helpers.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/DetectorGeometry.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/RawMasterFile.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/NDArray.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/NDView.test.cpp
@@ -443,6 +456,7 @@ if(AARE_TESTS)
        ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyHelpers.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/RawFile.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/RawSubFile.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/utils/task.test.cpp
    )
@@ -450,11 +464,6 @@ if(AARE_TESTS)
endif()

###------------------------------------------------------------------------------------------
###------------------------------------------------------------------------------------------

if(AARE_MASTER_PROJECT)
    install(TARGETS aare_core aare_compiler_flags
        EXPORT "${TARGETS_EXPORT_NAME}"
@@ -464,7 +473,6 @@ if(AARE_MASTER_PROJECT)
    )
endif()

set(CMAKE_POSITION_INDEPENDENT_CODE ON)
set(CMAKE_INSTALL_RPATH $ORIGIN)
set(CMAKE_BUILD_WITH_INSTALL_RPATH FALSE)
53  RELEASE.md (new file)

@@ -0,0 +1,53 @@
# Release notes

### 2025.10.1

Bugfixes:

- File supports reading the new master json file format (multiple ROIs not supported yet)

### 2025.8.22

Features:

- Apply calibration works in G0 if passed a 2D calibration and pedestal
- Count pixels that switch
- Calculate pedestal (also a G0-only version)
- NDArray::view() needs an lvalue to reduce issues with the view outliving the array

Bugfixes:

- Now using glibc 2.17 in conda builds (was using the host)
- Fixed shifted pixels in clusters close to the edge of a frame

### 2025.7.18

Features:

- Cluster finder now works with 5x5, 7x7 and 9x9 clusters
- Added ClusterVector::empty() member
- Added apply_calibration function for Jungfrau data

Bugfixes:

- Fixed reading RawFiles with an ROI fully excluding some sub files
- Decoding of MH02 files placed the pixels in the wrong position
- Removed unused file: ClusterFile.cpp

### 2025.5.22

Features:

- Added scurve fitting

Bugfixes:

- Fixed crash when opening raw files with a large number of data files
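The 2025.10.1 entry above is the kind of change that is exercised directly from Python. A minimal sketch is shown below; it assumes that aare.File accepts the path of a new-style master .json file and exposes a read_frame() method, and the file name is a placeholder. None of this is taken from the diff itself.

.. code-block:: python

    import aare

    # Hypothetical master file written in the new json format (single ROI).
    f = aare.File("run_master_0.json")   # placeholder path
    frame = f.read_frame()               # assumed reader method, returns a numpy array
    print(frame.shape)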
@@ -15,7 +15,7 @@ FetchContent_MakeAvailable(benchmark)

add_executable(benchmarks)

-target_sources(benchmarks PRIVATE ndarray_benchmark.cpp calculateeta_benchmark.cpp)
+target_sources(benchmarks PRIVATE ndarray_benchmark.cpp calculateeta_benchmark.cpp reduce_benchmark.cpp)

# Link Google Benchmark and other necessary libraries
target_link_libraries(benchmarks PRIVATE benchmark::benchmark aare_core aare_compiler_flags)

@@ -41,8 +41,8 @@ BENCHMARK_F(ClusterFixture, Calculate2x2Eta)(benchmark::State &st) {
}

// almost takes double the time
-BENCHMARK_F(ClusterFixture,
-            CalculateGeneralEtaFor2x2Cluster)(benchmark::State &st) {
+BENCHMARK_F(ClusterFixture, CalculateGeneralEtaFor2x2Cluster)
+(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        Eta2 eta = calculate_eta2<int, 2, 2>(cluster_2x2);
@@ -59,8 +59,8 @@ BENCHMARK_F(ClusterFixture, Calculate3x3Eta)(benchmark::State &st) {
}

// almost takes double the time
-BENCHMARK_F(ClusterFixture,
-            CalculateGeneralEtaFor3x3Cluster)(benchmark::State &st) {
+BENCHMARK_F(ClusterFixture, CalculateGeneralEtaFor3x3Cluster)
+(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        Eta2 eta = calculate_eta2<int, 3, 3>(cluster_3x3);
@@ -1,136 +1,132 @@ (whole-file clang-format cleanup; the reformatted version is shown)
#include "aare/NDArray.hpp"

#include <benchmark/benchmark.h>

using aare::NDArray;

constexpr ssize_t size = 1024;
class TwoArrays : public benchmark::Fixture {
  public:
    NDArray<int, 2> a{{size, size}, 0};
    NDArray<int, 2> b{{size, size}, 0};
    void SetUp(::benchmark::State &state) {
        for (uint32_t i = 0; i < size; i++) {
            for (uint32_t j = 0; j < size; j++) {
                a(i, j) = i * j + 1;
                b(i, j) = i * j + 1;
            }
        }
    }

    // void TearDown(::benchmark::State& state) {
    // }
};

BENCHMARK_F(TwoArrays, AddWithOperator)(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        NDArray<int, 2> res = a + b;
        benchmark::DoNotOptimize(res);
    }
}
BENCHMARK_F(TwoArrays, AddWithIndex)(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        NDArray<int, 2> res(a.shape());
        for (uint32_t i = 0; i < a.size(); i++) {
            res(i) = a(i) + b(i);
        }
        benchmark::DoNotOptimize(res);
    }
}

BENCHMARK_F(TwoArrays, SubtractWithOperator)(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        NDArray<int, 2> res = a - b;
        benchmark::DoNotOptimize(res);
    }
}
BENCHMARK_F(TwoArrays, SubtractWithIndex)(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        NDArray<int, 2> res(a.shape());
        for (uint32_t i = 0; i < a.size(); i++) {
            res(i) = a(i) - b(i);
        }
        benchmark::DoNotOptimize(res);
    }
}

BENCHMARK_F(TwoArrays, MultiplyWithOperator)(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        NDArray<int, 2> res = a * b;
        benchmark::DoNotOptimize(res);
    }
}
BENCHMARK_F(TwoArrays, MultiplyWithIndex)(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        NDArray<int, 2> res(a.shape());
        for (uint32_t i = 0; i < a.size(); i++) {
            res(i) = a(i) * b(i);
        }
        benchmark::DoNotOptimize(res);
    }
}

BENCHMARK_F(TwoArrays, DivideWithOperator)(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        NDArray<int, 2> res = a / b;
        benchmark::DoNotOptimize(res);
    }
}
BENCHMARK_F(TwoArrays, DivideWithIndex)(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        NDArray<int, 2> res(a.shape());
        for (uint32_t i = 0; i < a.size(); i++) {
            res(i) = a(i) / b(i);
        }
        benchmark::DoNotOptimize(res);
    }
}

BENCHMARK_F(TwoArrays, FourAddWithOperator)(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        NDArray<int, 2> res = a + b + a + b;
        benchmark::DoNotOptimize(res);
    }
}
BENCHMARK_F(TwoArrays, FourAddWithIndex)(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        NDArray<int, 2> res(a.shape());
        for (uint32_t i = 0; i < a.size(); i++) {
            res(i) = a(i) + b(i) + a(i) + b(i);
        }
        benchmark::DoNotOptimize(res);
    }
}

BENCHMARK_F(TwoArrays, MultiplyAddDivideWithOperator)(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        NDArray<int, 2> res = a * a + b / a;
        benchmark::DoNotOptimize(res);
    }
}
BENCHMARK_F(TwoArrays, MultiplyAddDivideWithIndex)(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        NDArray<int, 2> res(a.shape());
        for (uint32_t i = 0; i < a.size(); i++) {
            res(i) = a(i) * a(i) + b(i) / a(i);
        }
        benchmark::DoNotOptimize(res);
    }
}

BENCHMARK_MAIN();
168  benchmarks/reduce_benchmark.cpp (new file)

@@ -0,0 +1,168 @@
#include "aare/Cluster.hpp"
#include <benchmark/benchmark.h>

using namespace aare;

class ClustersForReduceFixture : public benchmark::Fixture {
  public:
    Cluster<int, 5, 5> cluster_5x5{};
    Cluster<int, 3, 3> cluster_3x3{};

  private:
    using benchmark::Fixture::SetUp;

    void SetUp([[maybe_unused]] const benchmark::State &state) override {
        int temp_data[25] = {1, 1, 1, 1, 1, 1, 1, 2, 1, 1,
                             1, 2, 3, 1, 2, 1, 1, 1, 1, 2};
        std::copy(std::begin(temp_data), std::end(temp_data),
                  std::begin(cluster_5x5.data));

        cluster_5x5.x = 5;
        cluster_5x5.y = 5;

        int temp_data2[9] = {1, 1, 1, 2, 3, 1, 2, 2, 1};
        std::copy(std::begin(temp_data2), std::end(temp_data2),
                  std::begin(cluster_3x3.data));

        cluster_3x3.x = 5;
        cluster_3x3.y = 5;
    }

    // void TearDown(::benchmark::State& state) {
    // }
};

template <typename T>
Cluster<T, 3, 3, int16_t> reduce_to_3x3(const Cluster<T, 5, 5, int16_t> &c) {
    Cluster<T, 3, 3, int16_t> result;

    // Write out the sums in the hope that the compiler can optimize this
    std::array<T, 9> sum_3x3_subclusters;

    // Write out the sums in the hope that the compiler can optimize this
    sum_3x3_subclusters[0] = c.data[0] + c.data[1] + c.data[2] + c.data[5] +
                             c.data[6] + c.data[7] + c.data[10] + c.data[11] +
                             c.data[12];
    sum_3x3_subclusters[1] = c.data[1] + c.data[2] + c.data[3] + c.data[6] +
                             c.data[7] + c.data[8] + c.data[11] + c.data[12] +
                             c.data[13];
    sum_3x3_subclusters[2] = c.data[2] + c.data[3] + c.data[4] + c.data[7] +
                             c.data[8] + c.data[9] + c.data[12] + c.data[13] +
                             c.data[14];
    sum_3x3_subclusters[3] = c.data[5] + c.data[6] + c.data[7] + c.data[10] +
                             c.data[11] + c.data[12] + c.data[15] + c.data[16] +
                             c.data[17];
    sum_3x3_subclusters[4] = c.data[6] + c.data[7] + c.data[8] + c.data[11] +
                             c.data[12] + c.data[13] + c.data[16] + c.data[17] +
                             c.data[18];
    sum_3x3_subclusters[5] = c.data[7] + c.data[8] + c.data[9] + c.data[12] +
                             c.data[13] + c.data[14] + c.data[17] + c.data[18] +
                             c.data[19];
    sum_3x3_subclusters[6] = c.data[10] + c.data[11] + c.data[12] + c.data[15] +
                             c.data[16] + c.data[17] + c.data[20] + c.data[21] +
                             c.data[22];
    sum_3x3_subclusters[7] = c.data[11] + c.data[12] + c.data[13] + c.data[16] +
                             c.data[17] + c.data[18] + c.data[21] + c.data[22] +
                             c.data[23];
    sum_3x3_subclusters[8] = c.data[12] + c.data[13] + c.data[14] + c.data[17] +
                             c.data[18] + c.data[19] + c.data[22] + c.data[23] +
                             c.data[24];

    auto index = std::max_element(sum_3x3_subclusters.begin(),
                                  sum_3x3_subclusters.end()) -
                 sum_3x3_subclusters.begin();

    switch (index) {
    case 0:
        result.x = c.x - 1;
        result.y = c.y + 1;
        result.data = {c.data[0], c.data[1], c.data[2], c.data[5], c.data[6],
                       c.data[7], c.data[10], c.data[11], c.data[12]};
        break;
    case 1:
        result.x = c.x;
        result.y = c.y + 1;
        result.data = {c.data[1], c.data[2], c.data[3], c.data[6], c.data[7],
                       c.data[8], c.data[11], c.data[12], c.data[13]};
        break;
    case 2:
        result.x = c.x + 1;
        result.y = c.y + 1;
        result.data = {c.data[2], c.data[3], c.data[4], c.data[7], c.data[8],
                       c.data[9], c.data[12], c.data[13], c.data[14]};
        break;
    case 3:
        result.x = c.x - 1;
        result.y = c.y;
        result.data = {c.data[5], c.data[6], c.data[7],
                       c.data[10], c.data[11], c.data[12],
                       c.data[15], c.data[16], c.data[17]};
        break;
    case 4:
        result.x = c.x + 1;
        result.y = c.y;
        result.data = {c.data[6], c.data[7], c.data[8],
                       c.data[11], c.data[12], c.data[13],
                       c.data[16], c.data[17], c.data[18]};
        break;
    case 5:
        result.x = c.x + 1;
        result.y = c.y;
        result.data = {c.data[7], c.data[8], c.data[9],
                       c.data[12], c.data[13], c.data[14],
                       c.data[17], c.data[18], c.data[19]};
        break;
    case 6:
        result.x = c.x + 1;
        result.y = c.y - 1;
        result.data = {c.data[10], c.data[11], c.data[12],
                       c.data[15], c.data[16], c.data[17],
                       c.data[20], c.data[21], c.data[22]};
        break;
    case 7:
        result.x = c.x + 1;
        result.y = c.y - 1;
        result.data = {c.data[11], c.data[12], c.data[13],
                       c.data[16], c.data[17], c.data[18],
                       c.data[21], c.data[22], c.data[23]};
        break;
    case 8:
        result.x = c.x + 1;
        result.y = c.y - 1;
        result.data = {c.data[12], c.data[13], c.data[14],
                       c.data[17], c.data[18], c.data[19],
                       c.data[22], c.data[23], c.data[24]};
        break;
    }
    return result;
}

BENCHMARK_F(ClustersForReduceFixture, Reduce2x2)(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        benchmark::DoNotOptimize(reduce_to_2x2<int, 3, 3, int16_t>(
            cluster_3x3)); // make sure compiler evaluates the expression
    }
}

BENCHMARK_F(ClustersForReduceFixture, SpecificReduce2x2)(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        benchmark::DoNotOptimize(reduce_to_2x2<int>(cluster_3x3));
    }
}

BENCHMARK_F(ClustersForReduceFixture, Reduce3x3)(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        benchmark::DoNotOptimize(
            reduce_to_3x3<int, 5, 5, int16_t>(cluster_5x5));
    }
}

BENCHMARK_F(ClustersForReduceFixture, SpecificReduce3x3)(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        benchmark::DoNotOptimize(reduce_to_3x3<int>(cluster_5x5));
    }
}
@@ -1,28 +1,16 @@
python:
- 3.11
- 3.11
- 3.11
- 3.12
- 3.12
- 3.12
- 3.13

c_compiler:
- gcc # [linux]

numpy:
- 1.26
- 2.0
- 2.1
- 1.26
- 2.0
- 2.1
- 2.1
c_stdlib:
- sysroot # [linux]

cxx_compiler:
- gxx # [linux]

zip_keys:
- python
- numpy

pin_run_as_build:
  numpy: x.x
  python: x.x
c_stdlib_version: # [linux]
- 2.17 # [linux]
@@ -1,11 +1,10 @@
-source:
-  path: ../

+{% set version = load_file_regex(load_file = 'VERSION', regex_pattern = '(\d+(?:\.\d+)*(?:[\+\w\.]+))').group(1) %}
package:
  name: aare
-  version: 2025.4.22 #TODO! how to not duplicate this?
+  version: {{version}}

+source:
+  path: ..
@@ -13,45 +12,41 @@ source:
build:
  number: 0
  script:
-    - unset CMAKE_GENERATOR && {{ PYTHON }} -m pip install . -vv # [not win]
-    - {{ PYTHON }} -m pip install . -vv # [win]
+    - unset CMAKE_GENERATOR && {{ PYTHON }} -m pip install . -vv

requirements:
  build:
    - python {{python}}
    - numpy {{ numpy }}
    - {{ compiler('c') }}
    - {{ stdlib("c") }}
    - {{ compiler('cxx') }}

  host:
    - cmake
    - ninja
    - python {{python}}
    - numpy {{ numpy }}

  host:
    - python
    - pip
    - numpy=2.1
    - scikit-build-core
    - pybind11 >=2.13.0
    - fmt
    - zeromq
    - nlohmann_json
    - catch2
    - matplotlib # needed in host to solve the environment for run

  run:
    - python {{python}}
    - numpy {{ numpy }}
    - python
    - {{ pin_compatible('numpy') }}
    - matplotlib

test:
  imports:
    - aare
  # requires:
  #   - pytest
  # source_files:
  #   - tests
  # commands:
  #   - pytest tests
  requires:
    - pytest
    - boost-histogram
  source_files:
    - python/tests
  commands:
    - python -m pytest python/tests

about:
-  summary: An example project built with pybind11 and scikit-build.
  # license_file: LICENSE
+  summary: Data analysis library for hybrid pixel detectors from PSI
@@ -45,12 +45,3 @@ add_custom_target(
    COMMENT "Generating documentation with Sphinx"
)

-add_custom_target(
-    rst
-    COMMAND ${SPHINX_EXECUTABLE} -a -b html
-    -Dbreathe_projects.aare=${CMAKE_CURRENT_BINARY_DIR}/xml
-    -c "${SPHINX_BUILD}"
-    ${SPHINX_BUILD}/src
-    ${SPHINX_BUILD}/html
-    COMMENT "Generating documentation with Sphinx"
-)
@@ -886,7 +886,7 @@ EXCLUDE_SYMLINKS = NO
# Note that the wildcards are matched against the file with absolute path, so to
# exclude all test directories for example use the pattern */test/*

-EXCLUDE_PATTERNS = */docs/* */tests/* */python/* */manual */slsDetectorServers/* */libs/* */integrationTests *README* */slsDetectorGui/* */ctbGui/* */slsDetectorCalibration/* *TobiSchluter*
+EXCLUDE_PATTERNS = *build* */docs/* */tests/* *.test.cpp* */python/* */manual */slsDetectorServers/* */libs/* */integrationTests *README* *_deps* *TobiSchluter*

# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
# (namespaces, classes, functions, etc.) that should be excluded from the
@@ -4,4 +4,5 @@ ClusterFile
.. doxygenclass:: aare::ClusterFile
   :members:
   :undoc-members:
   :private-members:
@@ -3,4 +3,20 @@ ClusterVector

.. doxygenclass:: aare::ClusterVector
   :members:
   :undoc-members:
   :private-members:


.. doxygenclass:: aare::ClusterVector< Cluster< T, ClusterSizeX, ClusterSizeY, CoordType > >
   :members:
   :undoc-members:
   :private-members:


**Free Functions:**

.. doxygenfunction:: aare::reduce_to_3x3(const ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>>&)

.. doxygenfunction:: aare::reduce_to_2x2(const ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>>&)
47  docs/src/Philosophy.rst (new file)

@@ -0,0 +1,47 @@
****************
Philosophy
****************


Fast code with a simple interface
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Aare should be fast and efficient, but also easy to use. We strive to keep a simple interface that feels intuitive.
Internally we use C++ for performance and the ability to integrate the library in other programs, but we see most
users using the Python interface.

Live at head
~~~~~~~~~~~~~~~~~~

As a user of the library you should be able to, and are expected to, use the latest version. Bug fixes will rarely be backported
to older releases. By upgrading frequently you will benefit from the latest features and minimize the effort of maintaining your
scripts/code, since several small upgrades are easier than one big upgrade.

API
~~~~~~~~~~~~~~~~~~

We aim to keep the API stable and only break it for good reasons. But especially now, in the early stages of development,
the API will change. On those occasions it will be clearly stated in the release notes. However, the norm should be a
backward compatible API.

Documentation
~~~~~~~~~~~~~~~~~~

Being a library, it is important to have a well documented API. We use Doxygen to generate the C++ documentation
and Sphinx for the Python part. Breathe is used to integrate the two into one Sphinx html site. The documentation is built
automatically on release by the CI and published to GitHub pages. In addition to the generated API documentation,
certain classes might need a longer description of their usage. This is then placed in the .rst files in the docs/src directory.

.. attention::

    The code should be well documented, but using descriptive names is more important. In the same spirit,
    if a function is called `getNumberOfFrames()` you don't need to write a comment saying that it gets the
    number of frames.


Dependencies
~~~~~~~~~~~~~~~~~~

Deployment in the scientific community is often tricky, either due to old OS versions or the lack of package managers.
We strive to keep the dependencies to a minimum and will vendor some libraries to simplify deployment, even though this comes
at a cost of build time.
@@ -2,18 +2,21 @@ Requirements
==============================================

- C++17 compiler (gcc 8/clang 7)
-- CMake 3.14+
+- CMake 3.15+

**Internally used libraries**

.. note ::

-    These can also be picked up from the system/conda environment by specifying:
+    To save compile time some of the dependencies can also be picked up from the system/conda environment by specifying:
    -DAARE_SYSTEM_LIBRARIES=ON during the cmake configuration.

- pybind11
To simplify deployment we build and statically link a few libraries.

- fmt
- lmfit - https://jugit.fz-juelich.de/mlz/lmfit
- nlohmann_json
- pybind11
- ZeroMQ

**Extra dependencies for building documentation**
@@ -2,7 +2,7 @@
Tests
****************

-We test the code both from the C++ and Python API. By default only tests that does not require image data is run.
+We test the code both from C++ and Python. By default only tests that do not require additional data are run.

C++
~~~~~~~~~~~~~~~~~~
@@ -15,7 +15,7 @@ C++
    make -j 4

    export AARE_TEST_DATA=/path/to/test/data
-    ./run_test [.files] #or using ctest, [.files] is the option to include tests needing data
+    ./run_test [.with-data] #or using ctest, [.with-data] is the option to include tests needing data


@@ -25,7 +25,7 @@ Python
.. code-block:: bash

    #From the root dir of the library
-    python -m pytest python/tests --files # passing --files will run the tests needing data
+    python -m pytest python/tests --with-data # passing --with-data will run the tests needing data


@@ -35,7 +35,7 @@ Getting the test data
.. attention ::

    The tests needing the test data are not run by default. To make the data available, you need to set the environment variable
-    AARE_TEST_DATA to the path of the test data directory. Then pass either [.files] for the C++ tests or --files for Python
+    AARE_TEST_DATA to the path of the test data directory. Then pass either [.with-data] for the C++ tests or --files for Python

The image files needed for the test are large and are not included in the repository. They are stored
using GIT LFS in a separate repository. To get the test data, you need to clone the repository.
86  docs/src/Workflow.rst (new file)

@@ -0,0 +1,86 @@
****************
Workflow
****************

This page describes how we develop aare.

GitHub centric
~~~~~~~~~~~~~~~~~~

We use GitHub for all development. Issues and pull requests provide a platform for collaboration as well
as a record of the development process. Even if we discuss things in person, we record the outcome in an issue.
If a particular implementation is chosen over another, the reason should be recorded in the pull request.


Branches
~~~~~~~~~~~~~~~~~~

We aim for as lightweight a branching strategy as possible. Short-lived feature branches are merged back into main.
The main branch is expected to always be in a releasable state. A release is simply a tag on main which provides a
reference and triggers the CI to build the release artifacts (conda, pypi etc.). For large features consider merging
smaller chunks into main as they are completed, rather than waiting for the entire feature to be finished. Worst case,
make sure your feature branch merges with main regularly to avoid large merge conflicts later on.

.. note::

    The main branch is expected to always work. Feel free to pull from main instead of sticking to a
    release.


Releases
~~~~~~~~~~~~~~~~~~

Release early, release often. As soon as "enough" new features have been implemented, a release is created.
A release should not be a big thing, rather a routine part of development that does not require any special person or
unfamiliar steps.


Checklists for deployment
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

**Feature:**

#. Create a new issue for the feature (label feature)
#. Create a new branch from main.
#. Implement the feature including test and documentation
#. Add the feature to RELEASE.md under head
#. Create a pull request linked to the issue
#. Code is reviewed by at least one other person
#. Once approved, the branch is merged into main


**BugFix:**

Essentially the same as for a feature; if possible, start with
a failing test that demonstrates the bug.

#. Create a new issue for the bug (label bug)
#. Create a new branch from main.
#. **Write a test that fails for the bug**
#. Implement the fix
#. **Run the test to ensure it passes**
#. Add the bugfix to RELEASE.md under head
#. Create a pull request linked to the issue.
#. Code is reviewed by at least one other person
#. Once approved, the branch is merged into main

**Release:**

#. Once "enough" new features have been implemented, a release is created
#. Update RELEASE.md with the tag of the release and verify that it is complete
#. Create the release in GitHub describing the new features and bug fixes
#. CI makes magic


**Update documentation only:**

.. attention::

    It's possible to update the documentation without changing the code, but take
    care since the docs will reflect the code in main and not the latest release.

#. Create a PR to main with the documentation changes
#. Create a pull request linked to the issue.
#. Code is reviewed by at least one other person
#. Once merged you can manually trigger the CI workflow for documentation
@@ -25,6 +25,7 @@ AARE
   :maxdepth: 1

   pyFile
+   pycalibration
   pyCtbRawFile
   pyClusterFile
   pyClusterVector
@@ -63,4 +64,6 @@ AARE
   :caption: Developer
   :maxdepth: 3

+   Philosophy
+   Workflow
   Tests
@@ -2,9 +2,24 @@
ClusterFile
============


The :class:`ClusterFile` class is the main interface to read and write clusters in aare. Unfortunately the
old file format does not include metadata like the cluster size and the data type. This means that the
user has to know this information from other sources. Specifying the wrong cluster size or data type
will lead to garbage data being read.

.. py:currentmodule:: aare

.. autoclass:: ClusterFile
   :members:
   :undoc-members:
   :inherited-members:


Below is the API of ClusterFile_Cluster3x3i, but all variants share the same API.

.. autoclass:: aare._aare.ClusterFile_Cluster3x3i
   :special-members: __init__
   :members:
   :undoc-members:
   :show-inheritance:
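Since the old cluster file format carries no metadata, the caller has to pick the typed variant that matches how the file was written. A minimal sketch under that assumption follows; the constructor argument and the read_clusters() method name are assumptions for illustration, not taken from this diff, and the path is a placeholder.

.. code-block:: python

    from aare._aare import ClusterFile_Cluster3x3i  # 3x3 integer clusters

    # The format stores no cluster size or dtype, so choosing the wrong
    # variant silently yields garbage data.
    f = ClusterFile_Cluster3x3i("clusters.clust")   # placeholder path
    cv = f.read_clusters(1000)                      # assumed: read a chunk of clusters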
@@ -2,8 +2,10 @@ ClusterVector
================

The ClusterVector holds clusters from the ClusterFinder. Since it is templated
-in C++ we use a suffix indicating the data type in python. The suffix is
-``_i`` for integer, ``_f`` for float, and ``_d`` for double.
+in C++ we use a suffix indicating the type of cluster it holds. The suffix follows
+the same pattern as for ClusterFile, i.e. ``ClusterVector_Cluster3x3i``
+for a vector holding 3x3 integer clusters.


At the moment the functionality from python is limited and it is not supported
to push_back clusters to the vector. The intended use case is to pass it to
@@ -26,8 +28,22 @@ C++ functions that support the ClusterVector or to view it as a numpy array.

.. py:currentmodule:: aare

-.. autoclass:: ClusterVector_i
+.. autoclass:: aare._aare.ClusterVector_Cluster3x3i
+   :special-members: __init__
   :members:
   :undoc-members:
   :show-inheritance:
   :inherited-members:


**Free Functions:**

.. autofunction:: reduce_to_3x3
   :noindex:

   Reduce a single Cluster to 3x3 by taking the 3x3 subcluster with highest photon energy.

.. autofunction:: reduce_to_2x2
   :noindex:

   Reduce a single Cluster to 2x2 by taking the 2x2 subcluster with highest photon energy.
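To make the intended use case above concrete, a small sketch follows. It assumes reduce_to_3x3 is importable from the aare package and that numpy can take a zero-copy view of the vector, matching the "view it as a numpy array" wording; both are assumptions, not verified against the bindings.

.. code-block:: python

    import numpy as np
    from aare import reduce_to_3x3  # free function over a ClusterVector

    def to_numpy_3x3(cv):
        # cv: e.g. a ClusterVector_Cluster5x5i from a ClusterFinder or ClusterFile.
        small = reduce_to_3x3(cv)           # keeps the 3x3 subcluster with the highest sum
        return np.array(small, copy=False)  # assumed zero-copy numpy view of the records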
40  docs/src/pycalibration.rst (new file)

@@ -0,0 +1,40 @@

Calibration
==============

Functions for applying calibration to data.

.. code-block:: python

    import aare

    # Load calibration data for a single JF module (512x1024 pixels)
    calibration = aare.load_calibration('path/to/calibration/file.bin')

    raw_data = ... # Load your raw data here
    pedestal = ... # Load your pedestal data here

    # Apply calibration to raw data to convert from raw ADC values to keV
    data = aare.apply_calibration(raw_data, pd=pedestal, cal=calibration)

    # If you pass a 2D pedestal and calibration only G0 will be used for the conversion
    # Pixels that switched to G1 or G2 will be set to 0
    data = aare.apply_calibration(raw_data, pd=pedestal[0], cal=calibration[0])


.. py:currentmodule:: aare

.. autofunction:: apply_calibration

.. autofunction:: load_calibration

.. autofunction:: calculate_pedestal

.. autofunction:: calculate_pedestal_float

.. autofunction:: calculate_pedestal_g0

.. autofunction:: calculate_pedestal_g0_float

.. autofunction:: count_switching_pixels
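The pedestal helpers listed above are not covered by the example, so a hedged sketch follows. Only the function names come from the autofunction list; the exact signatures are assumptions, and the dark-frame array here is synthetic.

.. code-block:: python

    import numpy as np
    import aare

    # Synthetic dark-frame stack for one Jungfrau module: (n_frames, 512, 1024),
    # raw 16-bit samples where the two high bits encode the gain.
    dark = np.zeros((100, 512, 1024), dtype=np.uint16)

    pedestal = aare.calculate_pedestal(dark)        # assumed: per-gain pedestal from dark frames
    switching = aare.count_switching_pixels(dark)   # assumed: per-pixel count of non-G0 samples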
@@ -3,13 +3,14 @@ channels:
- conda-forge
dependencies:
- anaconda-client
- conda-build
- doxygen
- sphinx=7.1.2
- sphinx
- breathe
- pybind11
- sphinx_rtd_theme
- furo
- nlohmann_json
- zeromq
- fmt
- pybind11
- numpy
- matplotlib
@@ -1,22 +1,25 @@
#pragma once
-#include <cstdint> //int64_t
-#include <cstddef> //size_t
-#include <array>

#include "aare/defs.hpp"
+#include <array>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>

namespace aare {

-template <typename E, int64_t Ndim> class ArrayExpr {
+template <typename E, ssize_t Ndim> class ArrayExpr {
  public:
    static constexpr bool is_leaf = false;

    auto operator[](size_t i) const { return static_cast<E const &>(*this)[i]; }
    auto operator()(size_t i) const { return static_cast<E const &>(*this)[i]; }
    auto size() const { return static_cast<E const &>(*this).size(); }
-    std::array<int64_t, Ndim> shape() const { return static_cast<E const &>(*this).shape(); }
+    std::array<ssize_t, Ndim> shape() const {
+        return static_cast<E const &>(*this).shape();
+    }
};

-template <typename A, typename B, int64_t Ndim>
+template <typename A, typename B, ssize_t Ndim>
class ArrayAdd : public ArrayExpr<ArrayAdd<A, B, Ndim>, Ndim> {
    const A &arr1_;
    const B &arr2_;
@@ -27,10 +30,10 @@ class ArrayAdd : public ArrayExpr<ArrayAdd<A, B, Ndim>, Ndim> {
    }
    auto operator[](int i) const { return arr1_[i] + arr2_[i]; }
    size_t size() const { return arr1_.size(); }
-    std::array<int64_t, Ndim> shape() const { return arr1_.shape(); }
+    std::array<ssize_t, Ndim> shape() const { return arr1_.shape(); }
};

-template <typename A, typename B, int64_t Ndim>
+template <typename A, typename B, ssize_t Ndim>
class ArraySub : public ArrayExpr<ArraySub<A, B, Ndim>, Ndim> {
    const A &arr1_;
    const B &arr2_;
@@ -41,11 +44,11 @@ class ArraySub : public ArrayExpr<ArraySub<A, B, Ndim>, Ndim> {
    }
    auto operator[](int i) const { return arr1_[i] - arr2_[i]; }
    size_t size() const { return arr1_.size(); }
-    std::array<int64_t, Ndim> shape() const { return arr1_.shape(); }
+    std::array<ssize_t, Ndim> shape() const { return arr1_.shape(); }
};

-template <typename A, typename B, int64_t Ndim>
-class ArrayMul : public ArrayExpr<ArrayMul<A, B, Ndim>,Ndim> {
+template <typename A, typename B, ssize_t Ndim>
+class ArrayMul : public ArrayExpr<ArrayMul<A, B, Ndim>, Ndim> {
    const A &arr1_;
    const B &arr2_;

@@ -55,10 +58,10 @@ class ArrayMul : public ArrayExpr<ArrayMul<A, B, Ndim>,Ndim> {
    }
    auto operator[](int i) const { return arr1_[i] * arr2_[i]; }
    size_t size() const { return arr1_.size(); }
-    std::array<int64_t, Ndim> shape() const { return arr1_.shape(); }
+    std::array<ssize_t, Ndim> shape() const { return arr1_.shape(); }
};

-template <typename A, typename B, int64_t Ndim>
+template <typename A, typename B, ssize_t Ndim>
class ArrayDiv : public ArrayExpr<ArrayDiv<A, B, Ndim>, Ndim> {
    const A &arr1_;
    const B &arr2_;
@@ -69,31 +72,27 @@ class ArrayDiv : public ArrayExpr<ArrayDiv<A, B, Ndim>, Ndim> {
    }
    auto operator[](int i) const { return arr1_[i] / arr2_[i]; }
    size_t size() const { return arr1_.size(); }
-    std::array<int64_t, Ndim> shape() const { return arr1_.shape(); }
+    std::array<ssize_t, Ndim> shape() const { return arr1_.shape(); }
};

-template <typename A, typename B, int64_t Ndim>
+template <typename A, typename B, ssize_t Ndim>
auto operator+(const ArrayExpr<A, Ndim> &arr1, const ArrayExpr<B, Ndim> &arr2) {
    return ArrayAdd<ArrayExpr<A, Ndim>, ArrayExpr<B, Ndim>, Ndim>(arr1, arr2);
}

-template <typename A, typename B, int64_t Ndim>
-auto operator-(const ArrayExpr<A,Ndim> &arr1, const ArrayExpr<B, Ndim> &arr2) {
+template <typename A, typename B, ssize_t Ndim>
+auto operator-(const ArrayExpr<A, Ndim> &arr1, const ArrayExpr<B, Ndim> &arr2) {
    return ArraySub<ArrayExpr<A, Ndim>, ArrayExpr<B, Ndim>, Ndim>(arr1, arr2);
}

-template <typename A, typename B, int64_t Ndim>
+template <typename A, typename B, ssize_t Ndim>
auto operator*(const ArrayExpr<A, Ndim> &arr1, const ArrayExpr<B, Ndim> &arr2) {
    return ArrayMul<ArrayExpr<A, Ndim>, ArrayExpr<B, Ndim>, Ndim>(arr1, arr2);
}

-template <typename A, typename B, int64_t Ndim>
+template <typename A, typename B, ssize_t Ndim>
auto operator/(const ArrayExpr<A, Ndim> &arr1, const ArrayExpr<B, Ndim> &arr2) {
    return ArrayDiv<ArrayExpr<A, Ndim>, ArrayExpr<B, Ndim>, Ndim>(arr1, arr2);
}

} // namespace aare
@@ -28,7 +28,7 @@ enum class pixel : int {
template <typename T> struct Eta2 {
    double x;
    double y;
-    int c;
+    int c{0};
    T sum;
};

@@ -70,6 +70,8 @@ calculate_eta2(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cl) {
    size_t index_bottom_left_max_2x2_subcluster =
        (int(c / (ClusterSizeX - 1))) * ClusterSizeX + c % (ClusterSizeX - 1);

+    // calculate direction of gradient

    // check that cluster center is in max subcluster
    if (cluster_center_index != index_bottom_left_max_2x2_subcluster &&
        cluster_center_index != index_bottom_left_max_2x2_subcluster + 1 &&
@@ -128,12 +130,15 @@ Eta2<T> calculate_eta2(const Cluster<T, 2, 2, int16_t> &cl) {
    Eta2<T> eta{};

    if ((cl.data[0] + cl.data[1]) != 0)
-        eta.x = static_cast<double>(cl.data[1]) / (cl.data[0] + cl.data[1]);
+        eta.x = static_cast<double>(cl.data[1]) /
+                (cl.data[0] + cl.data[1]); // between (0,1) the closer to zero
+                                           // left value probably larger
    if ((cl.data[0] + cl.data[2]) != 0)
-        eta.y = static_cast<double>(cl.data[2]) / (cl.data[0] + cl.data[2]);
+        eta.y = static_cast<double>(cl.data[2]) /
+                (cl.data[0] + cl.data[2]); // between (0,1) the closer to zero
+                                           // bottom value probably larger
    eta.sum = cl.sum();
    eta.c = static_cast<int>(corner::cBottomLeft); // TODO! This is not correct,
                                                   // but need to put something

    return eta;
}

@@ -150,13 +155,11 @@ template <typename T> Eta2<T> calculate_eta3(const Cluster<T, 3, 3> &cl) {

    eta.sum = sum;

    eta.c = corner::cBottomLeft;

    if ((cl.data[3] + cl.data[4] + cl.data[5]) != 0)
        eta.x = static_cast<double>(-cl.data[3] + cl.data[3 + 2]) /
-            (cl.data[3] + cl.data[4] + cl.data[5]);
+            (cl.data[3] + cl.data[4] + cl.data[5]); // (-1,1)

    if ((cl.data[1] + cl.data[4] + cl.data[7]) != 0)
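Written out as formulas, the eta expressions in the hunks above are as follows, with q_i denoting cl.data[i] and each quotient only evaluated when its denominator is non-zero. The 3x3 eta_y line is completed by symmetry, since the hunk is cut off at that point.

.. math::

    \eta_x^{2\times2} = \frac{q_1}{q_0 + q_1} \in (0,1), \qquad
    \eta_y^{2\times2} = \frac{q_2}{q_0 + q_2} \in (0,1)

    \eta_x^{3\times3} = \frac{q_5 - q_3}{q_3 + q_4 + q_5} \in (-1,1), \qquad
    \eta_y^{3\times3} = \frac{q_7 - q_1}{q_1 + q_4 + q_7} \in (-1,1)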
@@ -17,7 +17,8 @@ template <class ItemType> class CircularFifo {

  public:
    CircularFifo() : CircularFifo(100){};
-    CircularFifo(uint32_t size) : fifo_size(size), free_slots(size + 1), filled_slots(size + 1) {
+    CircularFifo(uint32_t size)
+        : fifo_size(size), free_slots(size + 1), filled_slots(size + 1) {

        // TODO! how do we deal with alignment for writing? alignas???
        // Do we give the user a chance to provide memory locations?
@@ -55,7 +56,8 @@ template <class ItemType> class CircularFifo {

    bool try_pop_free(ItemType &v) { return free_slots.read(v); }

-    ItemType pop_value(std::chrono::nanoseconds wait, std::atomic<bool> &stopped) {
+    ItemType pop_value(std::chrono::nanoseconds wait,
+                       std::atomic<bool> &stopped) {
        ItemType v;
        while (!filled_slots.read(v) && !stopped) {
            std::this_thread::sleep_for(wait);
158
include/aare/Cluster.hpp
Normal file → Executable file
158
include/aare/Cluster.hpp
Normal file → Executable file
@@ -8,6 +8,7 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "logger.hpp"
|
||||
#include <algorithm>
|
||||
#include <array>
|
||||
#include <cstdint>
|
||||
@@ -74,6 +75,163 @@ struct Cluster {
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Reduce a cluster to a 2x2 cluster by selecting the 2x2 block with the
|
||||
* highest sum.
|
||||
* @param c Cluster to reduce
|
||||
* @return reduced cluster
|
||||
*/
|
||||
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType = int16_t>
|
||||
Cluster<T, 2, 2, CoordType>
|
||||
reduce_to_2x2(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &c) {
|
||||
|
||||
static_assert(ClusterSizeX >= 2 && ClusterSizeY >= 2,
|
||||
"Cluster sizes must be at least 2x2 for reduction to 2x2");
|
||||
|
||||
// TODO maybe add sanity check and check that center is in max subcluster
|
||||
Cluster<T, 2, 2, CoordType> result;
|
||||
|
||||
auto [sum, index] = c.max_sum_2x2();
|
||||
|
||||
int16_t cluster_center_index =
|
||||
(ClusterSizeX / 2) + (ClusterSizeY / 2) * ClusterSizeX;
|
||||
|
||||
int16_t index_bottom_left_max_2x2_subcluster =
|
||||
(int(index / (ClusterSizeX - 1))) * ClusterSizeX +
|
||||
index % (ClusterSizeX - 1);
|
||||
|
||||
result.x =
|
||||
c.x + (index_bottom_left_max_2x2_subcluster - cluster_center_index) %
|
||||
ClusterSizeX;
|
||||
|
||||
result.y =
|
||||
c.y - (index_bottom_left_max_2x2_subcluster - cluster_center_index) /
|
||||
ClusterSizeX;
|
||||
result.data = {
|
||||
c.data[index_bottom_left_max_2x2_subcluster],
|
||||
c.data[index_bottom_left_max_2x2_subcluster + 1],
|
||||
c.data[index_bottom_left_max_2x2_subcluster + ClusterSizeX],
|
||||
c.data[index_bottom_left_max_2x2_subcluster + ClusterSizeX + 1]};
|
||||
return result;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
Cluster<T, 2, 2, int16_t> reduce_to_2x2(const Cluster<T, 3, 3, int16_t> &c) {
|
||||
Cluster<T, 2, 2, int16_t> result;
|
||||
|
||||
auto [s, i] = c.max_sum_2x2();
|
||||
switch (i) {
|
||||
case 0:
|
||||
result.x = c.x - 1;
|
||||
result.y = c.y + 1;
|
||||
result.data = {c.data[0], c.data[1], c.data[3], c.data[4]};
|
||||
break;
|
||||
case 1:
|
||||
result.x = c.x;
|
||||
result.y = c.y + 1;
|
||||
result.data = {c.data[1], c.data[2], c.data[4], c.data[5]};
|
||||
break;
|
||||
case 2:
|
||||
result.x = c.x - 1;
|
||||
result.y = c.y;
|
||||
result.data = {c.data[3], c.data[4], c.data[6], c.data[7]};
|
||||
break;
|
||||
case 3:
|
||||
result.x = c.x;
|
||||
result.y = c.y;
|
||||
result.data = {c.data[4], c.data[5], c.data[7], c.data[8]};
|
||||
break;
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType = int16_t>
|
||||
inline std::pair<T, uint16_t>
|
||||
max_3x3_sum(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cluster) {
|
||||
|
||||
if constexpr (ClusterSizeX == 3 && ClusterSizeY == 3) {
|
||||
return std::make_pair(cluster.sum(), 0);
|
||||
} else {
|
||||
|
||||
size_t index = 0;
|
||||
T max_3x3_subcluster_sum = 0;
|
||||
for (size_t i = 0; i < ClusterSizeY - 2; ++i) {
|
||||
for (size_t j = 0; j < ClusterSizeX - 2; ++j) {
|
||||
|
||||
T sum = cluster.data[i * ClusterSizeX + j] +
|
||||
cluster.data[i * ClusterSizeX + j + 1] +
|
||||
cluster.data[i * ClusterSizeX + j + 2] +
|
||||
cluster.data[(i + 1) * ClusterSizeX + j] +
|
||||
cluster.data[(i + 1) * ClusterSizeX + j + 1] +
|
||||
cluster.data[(i + 1) * ClusterSizeX + j + 2] +
|
||||
cluster.data[(i + 2) * ClusterSizeX + j] +
|
||||
cluster.data[(i + 2) * ClusterSizeX + j + 1] +
|
||||
cluster.data[(i + 2) * ClusterSizeX + j + 2];
|
||||
if (sum > max_3x3_subcluster_sum) {
|
||||
max_3x3_subcluster_sum = sum;
|
||||
index = i * (ClusterSizeX - 2) + j;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return std::make_pair(max_3x3_subcluster_sum, index);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Reduce a cluster to a 3x3 cluster by selecting the 3x3 block with the
|
||||
* highest sum.
|
||||
* @param c Cluster to reduce
|
||||
* @return reduced cluster
|
||||
*/
|
||||
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType = int16_t>
|
||||
Cluster<T, 3, 3, CoordType>
|
||||
reduce_to_3x3(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &c) {
|
||||
|
||||
static_assert(ClusterSizeX >= 3 && ClusterSizeY >= 3,
|
||||
"Cluster sizes must be at least 3x3 for reduction to 3x3");
|
||||
|
||||
Cluster<T, 3, 3, CoordType> result;
|
||||
|
||||
// TODO maybe add sanity check and check that center is in max subcluster
|
||||
|
||||
auto [sum, index] = max_3x3_sum(c);
|
||||
|
||||
int16_t cluster_center_index =
|
||||
(ClusterSizeX / 2) + (ClusterSizeY / 2) * ClusterSizeX;
|
||||
|
||||
int16_t index_center_max_3x3_subcluster =
|
||||
(int(index / (ClusterSizeX - 2))) * ClusterSizeX + ClusterSizeX +
|
||||
index % (ClusterSizeX - 2) + 1;
|
||||
|
||||
int16_t index_3x3_subcluster_cluster_center =
|
||||
int((cluster_center_index - 1 - ClusterSizeX) / ClusterSizeX) *
|
||||
(ClusterSizeX - 2) +
|
||||
(cluster_center_index - 1 - ClusterSizeX) % ClusterSizeX;
|
||||
|
||||
result.x =
|
||||
c.x + (index % (ClusterSizeX - 2) -
|
||||
(index_3x3_subcluster_cluster_center % (ClusterSizeX - 2)));
|
||||
result.y =
|
||||
c.y - (index / (ClusterSizeX - 2) -
|
||||
(index_3x3_subcluster_cluster_center / (ClusterSizeX - 2)));
|
||||
|
||||
result.data = {c.data[index_center_max_3x3_subcluster - ClusterSizeX - 1],
|
||||
c.data[index_center_max_3x3_subcluster - ClusterSizeX],
|
||||
c.data[index_center_max_3x3_subcluster - ClusterSizeX + 1],
|
||||
c.data[index_center_max_3x3_subcluster - 1],
|
||||
c.data[index_center_max_3x3_subcluster],
|
||||
c.data[index_center_max_3x3_subcluster + 1],
|
||||
c.data[index_center_max_3x3_subcluster + ClusterSizeX - 1],
|
||||
c.data[index_center_max_3x3_subcluster + ClusterSizeX],
|
||||
c.data[index_center_max_3x3_subcluster + ClusterSizeX + 1]};
|
||||
return result;
|
||||
}
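// --- Editor's illustrative sketch, not part of the diff ---------------------
// reduce_to_3x3() picks the block found by max_3x3_sum() above. A rough usage
// example with invented pixel values and the same assumptions about the
// Cluster layout as in the 2x2 sketch earlier.
#include "aare/Cluster.hpp"
#include <cstdint>

inline aare::Cluster<int32_t, 3, 3> example_reduce_5x5_to_3x3() {
    aare::Cluster<int32_t, 5, 5> big{};
    big.x = 10;
    big.y = 20;
    for (int i = 0; i < 25; ++i)
        big.data[i] = 1;
    for (int row = 1; row <= 3; ++row)      // boost the central 3x3 region
        for (int col = 1; col <= 3; ++col)
            big.data[row * 5 + col] = 10;
    // max_3x3_sum() selects the central block here, so x and y are unchanged.
    return aare::reduce_to_3x3(big);
}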
|
||||
|
||||
// Type Traits for is_cluster_type
|
||||
template <typename T>
|
||||
struct is_cluster : std::false_type {}; // Default case: Not a Cluster
|
||||
|
||||
@@ -5,6 +5,8 @@
|
||||
#include "aare/GainMap.hpp"
|
||||
#include "aare/NDArray.hpp"
|
||||
#include "aare/defs.hpp"
|
||||
#include "aare/logger.hpp"
|
||||
|
||||
#include <filesystem>
|
||||
#include <fstream>
|
||||
#include <optional>
|
||||
@@ -12,7 +14,7 @@
|
||||
namespace aare {
|
||||
|
||||
/*
|
||||
Binary cluster file. Expects data to be layed out as:
|
||||
Binary cluster file. Expects data to be laid out as:
|
||||
int32_t frame_number
|
||||
uint32_t number_of_clusters
|
||||
int16_t x, int16_t y, int32_t data[9] x number_of_clusters
|
||||
@@ -369,11 +371,15 @@ ClusterFile<ClusterType, Enable>::read_frame_without_cut() {
|
||||
"Could not read number of clusters");
|
||||
}
|
||||
|
||||
LOG(logDEBUG1) << "Reading " << n_clusters << " clusters from frame "
|
||||
<< frame_number;
|
||||
|
||||
ClusterVector<ClusterType> clusters(n_clusters);
|
||||
clusters.set_frame_number(frame_number);
|
||||
|
||||
clusters.resize(n_clusters);
|
||||
|
||||
LOG(logDEBUG1) << "clusters.item_size(): " << clusters.item_size();
|
||||
|
||||
if (fread(clusters.data(), clusters.item_size(), n_clusters, fp) !=
|
||||
static_cast<size_t>(n_clusters)) {
|
||||
throw std::runtime_error(LOCATION + "Could not read clusters");
|
||||
|
||||
@@ -21,7 +21,7 @@ class ClusterFileSink {
|
||||
|
||||
void process() {
|
||||
m_stopped = false;
|
||||
fmt::print("ClusterFileSink started\n");
|
||||
LOG(logDEBUG) << "ClusterFileSink started";
|
||||
while (!m_stop_requested || !m_source->isEmpty()) {
|
||||
if (ClusterVector<ClusterType> *clusters = m_source->frontPtr();
|
||||
clusters != nullptr) {
|
||||
@@ -41,13 +41,16 @@ class ClusterFileSink {
|
||||
std::this_thread::sleep_for(m_default_wait);
|
||||
}
|
||||
}
|
||||
fmt::print("ClusterFileSink stopped\n");
|
||||
LOG(logDEBUG) << "ClusterFileSink stopped";
|
||||
m_stopped = true;
|
||||
}
|
||||
|
||||
public:
|
||||
ClusterFileSink(ClusterFinderMT<ClusterType, uint16_t, double> *source,
|
||||
const std::filesystem::path &fname) {
|
||||
LOG(logDEBUG) << "ClusterFileSink: "
|
||||
<< "source: " << source->sink()
|
||||
<< ", file: " << fname.string();
|
||||
m_source = source->sink();
|
||||
m_thread = std::thread(&ClusterFileSink::process, this);
|
||||
m_file.open(fname, std::ios::binary);
|
||||
|
||||
@@ -38,7 +38,11 @@ class ClusterFinder {
|
||||
: m_image_size(image_size), m_nSigma(nSigma),
|
||||
c2(sqrt((ClusterSizeY + 1) / 2 * (ClusterSizeX + 1) / 2)),
|
||||
c3(sqrt(ClusterSizeX * ClusterSizeY)),
|
||||
m_pedestal(image_size[0], image_size[1]), m_clusters(capacity) {};
|
||||
m_pedestal(image_size[0], image_size[1]), m_clusters(capacity) {
|
||||
LOG(logDEBUG) << "ClusterFinder: "
|
||||
<< "image_size: " << image_size[0] << "x" << image_size[1]
|
||||
<< ", nSigma: " << nSigma << ", capacity: " << capacity;
|
||||
}
|
||||
|
||||
void push_pedestal_frame(NDView<FRAME_TYPE, 2> frame) {
|
||||
m_pedestal.push(frame);
|
||||
@@ -140,9 +144,9 @@ class ClusterFinder {
|
||||
static_cast<CT>(
|
||||
m_pedestal.mean(iy + ir, ix + ic));
|
||||
cluster.data[i] =
|
||||
tmp; // Watch for out of bounds access
|
||||
i++;
|
||||
tmp; // Watch for out of bounds access
|
||||
}
|
||||
i++;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
#include "aare/ClusterFinder.hpp"
|
||||
#include "aare/NDArray.hpp"
|
||||
#include "aare/ProducerConsumerQueue.hpp"
|
||||
#include "aare/logger.hpp"
|
||||
|
||||
namespace aare {
|
||||
|
||||
@@ -123,6 +124,12 @@ class ClusterFinderMT {
|
||||
size_t capacity = 2000, size_t n_threads = 3)
|
||||
: m_n_threads(n_threads) {
|
||||
|
||||
LOG(logDEBUG1) << "ClusterFinderMT: "
|
||||
<< "image_size: " << image_size[0] << "x"
|
||||
<< image_size[1] << ", nSigma: " << nSigma
|
||||
<< ", capacity: " << capacity
|
||||
<< ", n_threads: " << n_threads;
|
||||
|
||||
for (size_t i = 0; i < n_threads; i++) {
|
||||
m_cluster_finders.push_back(
|
||||
std::make_unique<
|
||||
|
||||
@@ -122,6 +122,11 @@ class ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> {
|
||||
*/
|
||||
size_t size() const { return m_data.size(); }
|
||||
|
||||
/**
|
||||
* @brief Check if the vector is empty
|
||||
*/
|
||||
bool empty() const { return m_data.empty(); }
|
||||
|
||||
uint8_t cluster_size_x() const { return ClusterSizeX; }
|
||||
|
||||
uint8_t cluster_size_y() const { return ClusterSizeY; }
|
||||
@@ -133,9 +138,9 @@ class ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> {
|
||||
*/
|
||||
size_t capacity() const { return m_data.capacity(); }
|
||||
|
||||
const auto begin() const { return m_data.begin(); }
|
||||
auto begin() const { return m_data.begin(); }
|
||||
|
||||
const auto end() const { return m_data.end(); }
|
||||
auto end() const { return m_data.end(); }
|
||||
|
||||
/**
|
||||
* @brief Return the size in bytes of a single cluster
|
||||
@@ -167,4 +172,40 @@ class ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> {
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Reduce each cluster in the vector to a 2x2 cluster by selecting the
* 2x2 block with the highest sum.
* @param cv ClusterVector containing the clusters to reduce
* @return ClusterVector with the reduced clusters
|
||||
*/
|
||||
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType = uint16_t>
|
||||
ClusterVector<Cluster<T, 2, 2, CoordType>> reduce_to_2x2(
|
||||
const ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>>
|
||||
&cv) {
|
||||
ClusterVector<Cluster<T, 2, 2, CoordType>> result;
|
||||
for (const auto &c : cv) {
|
||||
result.push_back(reduce_to_2x2(c));
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Reduce each cluster in the vector to a 3x3 cluster by selecting the
* 3x3 block with the highest sum.
* @param cv ClusterVector containing the clusters to reduce
* @return ClusterVector with the reduced clusters
|
||||
*/
|
||||
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType = uint16_t>
|
||||
ClusterVector<Cluster<T, 3, 3, CoordType>> reduce_to_3x3(
|
||||
const ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>>
|
||||
&cv) {
|
||||
ClusterVector<Cluster<T, 3, 3, CoordType>> result;
|
||||
for (const auto &c : cv) {
|
||||
result.push_back(reduce_to_3x3(c));
|
||||
}
|
||||
return result;
|
||||
}
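// --- Editor's illustrative sketch, not part of the diff ---------------------
// Reducing a whole ClusterVector goes through the per-cluster overloads above;
// the cluster values here are placeholders and the include path for
// ClusterVector is assumed.
#include "aare/Cluster.hpp"
#include "aare/ClusterVector.hpp"
#include <cstdint>

inline void example_reduce_vector() {
    using namespace aare;
    ClusterVector<Cluster<int32_t, 5, 5>> cv;
    Cluster<int32_t, 5, 5> c{};
    c.x = 1;
    c.y = 2;
    for (int i = 0; i < 25; ++i)
        c.data[i] = i; // arbitrary test pattern
    cv.push_back(c);

    auto cv2x2 = reduce_to_2x2(cv); // ClusterVector<Cluster<int32_t, 2, 2>>
    auto cv3x3 = reduce_to_3x3(cv); // ClusterVector<Cluster<int32_t, 3, 3>>
    (void)cv2x2;
    (void)cv3x3;
}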
|
||||
|
||||
} // namespace aare
|
||||
@@ -1,27 +1,27 @@
|
||||
#pragma once
|
||||
|
||||
#include "aare/FileInterface.hpp"
|
||||
#include "aare/RawMasterFile.hpp"
|
||||
#include "aare/Frame.hpp"
|
||||
#include "aare/RawMasterFile.hpp"
|
||||
|
||||
#include <filesystem>
|
||||
#include <fstream>
|
||||
|
||||
namespace aare{
|
||||
namespace aare {
|
||||
|
||||
|
||||
class CtbRawFile{
|
||||
class CtbRawFile {
|
||||
RawMasterFile m_master;
|
||||
std::ifstream m_file;
|
||||
size_t m_current_frame{0};
|
||||
size_t m_current_subfile{0};
|
||||
size_t m_num_subfiles{0};
|
||||
public:
|
||||
|
||||
public:
|
||||
CtbRawFile(const std::filesystem::path &fname);
|
||||
|
||||
void read_into(std::byte *image_buf, DetectorHeader* header = nullptr);
|
||||
void seek(size_t frame_index); //!< seek to the given frame index
|
||||
size_t tell() const; //!< get the frame index of the file pointer
|
||||
void read_into(std::byte *image_buf, DetectorHeader *header = nullptr);
|
||||
void seek(size_t frame_index); //!< seek to the given frame index
|
||||
size_t tell() const; //!< get the frame index of the file pointer
|
||||
|
||||
// in the specific class we can expose more functionality
|
||||
|
||||
@@ -29,13 +29,13 @@ public:
|
||||
size_t frames_in_file() const;
|
||||
|
||||
RawMasterFile master() const;
|
||||
private:
|
||||
|
||||
private:
|
||||
void find_subfiles();
|
||||
size_t sub_file_index(size_t frame_index) const {
|
||||
return frame_index / m_master.max_frames_per_file();
|
||||
}
|
||||
void open_data_file(size_t subfile_index);
|
||||
|
||||
};
|
||||
|
||||
}
|
||||
} // namespace aare
|
||||
include/aare/DetectorGeometry.hpp (81 added lines, Normal file)
@@ -0,0 +1,81 @@
|
||||
#pragma once
|
||||
#include "aare/RawMasterFile.hpp" //ROI refactor away
|
||||
#include "aare/defs.hpp"
|
||||
namespace aare {
|
||||
|
||||
struct ModuleConfig {
|
||||
int module_gap_row{};
|
||||
int module_gap_col{};
|
||||
|
||||
bool operator==(const ModuleConfig &other) const {
|
||||
if (module_gap_col != other.module_gap_col)
|
||||
return false;
|
||||
if (module_gap_row != other.module_gap_row)
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Holds the geometry of a module: where pixel 0 is located and the
* size of the module.
|
||||
*/
|
||||
struct ModuleGeometry {
|
||||
int origin_x{};
|
||||
int origin_y{};
|
||||
int height{};
|
||||
int width{};
|
||||
int row_index{};
|
||||
int col_index{};
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Holds the geometry of a detector: the number of modules, their
* size, and where pixel 0 of each module is located.
|
||||
*/
|
||||
class DetectorGeometry {
|
||||
public:
|
||||
DetectorGeometry(const xy &geometry, const ssize_t module_pixels_x,
|
||||
const ssize_t module_pixels_y,
|
||||
const xy udp_interfaces_per_module = xy{1, 1},
|
||||
const bool quad = false);
|
||||
|
||||
~DetectorGeometry() = default;
|
||||
|
||||
/**
|
||||
* @brief Update the detector geometry given a region of interest
|
||||
*
|
||||
* @param roi region of interest used to update the geometry
|
||||
*/
|
||||
void update_geometry_with_roi(ROI roi);
|
||||
|
||||
size_t n_modules() const;
|
||||
|
||||
size_t n_modules_in_roi() const;
|
||||
|
||||
size_t pixels_x() const;
|
||||
size_t pixels_y() const;
|
||||
|
||||
size_t modules_x() const;
|
||||
size_t modules_y() const;
|
||||
|
||||
const std::vector<ssize_t> &get_modules_in_roi() const;
|
||||
|
||||
ssize_t get_modules_in_roi(const size_t index) const;
|
||||
|
||||
const std::vector<ModuleGeometry> &get_module_geometries() const;
|
||||
|
||||
const ModuleGeometry &get_module_geometries(const size_t index) const;
|
||||
|
||||
private:
|
||||
size_t m_modules_x{};
|
||||
size_t m_modules_y{};
|
||||
size_t m_pixels_x{};
|
||||
size_t m_pixels_y{};
|
||||
static constexpr ModuleConfig cfg{0, 0};
|
||||
std::vector<ModuleGeometry> module_geometries{};
|
||||
std::vector<ssize_t> modules_in_roi{};
|
||||
};
|
||||
|
||||
} // namespace aare
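// --- Editor's illustrative sketch, not part of the diff ---------------------
// Rough usage of DetectorGeometry as declared above. The xy{2, 1} module grid,
// its row/column ordering, and the 512x1024 module size are assumptions made
// for the example, not values taken from the diff.
#include "aare/DetectorGeometry.hpp"

inline void example_detector_geometry() {
    using namespace aare;
    // Two modules stacked in one column, each 1024 x 512 pixels (assumed).
    DetectorGeometry geo(xy{2, 1}, 1024, 512);
    auto nx = geo.pixels_x(); // total pixels in x once modules are assembled
    auto ny = geo.pixels_y(); // total pixels in y
    auto n = geo.n_modules();
    (void)nx;
    (void)ny;
    (void)n;
    // geo.update_geometry_with_roi(roi) would shrink the geometry to a region
    // of interest (ROI comes from RawMasterFile.hpp).
}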
|
||||
@@ -6,31 +6,37 @@
|
||||
|
||||
namespace aare {
|
||||
|
||||
// The format descriptor is a single character that specifies the type of the data
|
||||
// The format descriptor is a single character that specifies the type of the
|
||||
// data
|
||||
// - python documentation: https://docs.python.org/3/c-api/arg.html#numbers
|
||||
// - py::format_descriptor<T>::format() (in pybind11) does not return the same format as
|
||||
// - py::format_descriptor<T>::format() (in pybind11) does not return the same
|
||||
// format as
|
||||
// written in python.org documentation.
|
||||
// - numpy also doesn't use the same format. and also numpy associates the format
|
||||
// with variable bitdepth types. (e.g. long is int64 on linux64 and int32 on win64)
|
||||
// https://numpy.org/doc/stable/reference/arrays.scalars.html
|
||||
// - numpy also doesn't use the same format. and also numpy associates the
|
||||
// format
|
||||
// with variable bitdepth types. (e.g. long is int64 on linux64 and int32 on
|
||||
// win64) https://numpy.org/doc/stable/reference/arrays.scalars.html
|
||||
//
|
||||
// github issue discussing this:
|
||||
// https://github.com/pybind/pybind11/issues/1908#issuecomment-658358767
|
||||
//
|
||||
// [IN LINUX] the difference is for int64 (long) and uint64 (unsigned long). The format
|
||||
// descriptor is 'q' and 'Q' respectively and in the documentation it is 'l' and 'k'.
|
||||
// [IN LINUX] the difference is for int64 (long) and uint64 (unsigned long). The
|
||||
// format descriptor is 'q' and 'Q' respectively and in the documentation it is
|
||||
// 'l' and 'k'.
|
||||
|
||||
// in practice numpy doesn't seem to care when reading buffer info: the library
|
||||
// interprets 'q' or 'l' as int64 and 'Q' or 'L' as uint64.
|
||||
// for this reason we decided to use the same format descriptor as pybind to avoid
|
||||
// any further discrepancies.
|
||||
// for this reason we decided to use the same format descriptor as pybind to
|
||||
// avoid any further discrepancies.
|
||||
|
||||
// in the following order:
|
||||
// int8, uint8, int16, uint16, int32, uint32, int64, uint64, float, double
|
||||
const char DTYPE_FORMAT_DSC[] = {'b', 'B', 'h', 'H', 'i', 'I', 'q', 'Q', 'f', 'd'};
|
||||
const char DTYPE_FORMAT_DSC[] = {'b', 'B', 'h', 'H', 'i',
|
||||
'I', 'q', 'Q', 'f', 'd'};
|
||||
|
||||
// on linux64 & apple
|
||||
const char NUMPY_FORMAT_DSC[] = {'b', 'B', 'h', 'H', 'i', 'I', 'l', 'L', 'f', 'd'};
|
||||
const char NUMPY_FORMAT_DSC[] = {'b', 'B', 'h', 'H', 'i',
|
||||
'I', 'l', 'L', 'f', 'd'};
|
||||
/**
|
||||
* @brief enum class to define the endianess of the system
|
||||
*/
|
||||
@@ -52,12 +58,29 @@ enum class endian {
|
||||
*/
|
||||
class Dtype {
|
||||
public:
|
||||
enum TypeIndex { INT8, UINT8, INT16, UINT16, INT32, UINT32, INT64, UINT64, FLOAT, DOUBLE, ERROR, NONE };
|
||||
enum TypeIndex {
|
||||
INT8,
|
||||
UINT8,
|
||||
INT16,
|
||||
UINT16,
|
||||
INT32,
|
||||
UINT32,
|
||||
INT64,
|
||||
UINT64,
|
||||
FLOAT,
|
||||
DOUBLE,
|
||||
ERROR,
|
||||
NONE
|
||||
};
|
||||
|
||||
uint8_t bitdepth() const;
|
||||
size_t bytes() const;
|
||||
std::string format_descr() const { return std::string(1, DTYPE_FORMAT_DSC[static_cast<int>(m_type)]); }
|
||||
std::string numpy_descr() const { return std::string(1, NUMPY_FORMAT_DSC[static_cast<int>(m_type)]); }
|
||||
std::string format_descr() const {
|
||||
return std::string(1, DTYPE_FORMAT_DSC[static_cast<int>(m_type)]);
|
||||
}
|
||||
std::string numpy_descr() const {
|
||||
return std::string(1, NUMPY_FORMAT_DSC[static_cast<int>(m_type)]);
|
||||
}
|
||||
|
||||
explicit Dtype(const std::type_info &t);
|
||||
explicit Dtype(std::string_view sv);
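// --- Editor's illustrative sketch, not part of the diff ---------------------
// The comment block above boils down to: for 64-bit integers the pybind-style
// descriptor ('q'/'Q') differs from the numpy one ('l'/'L') on linux64, and
// both arrays keep the same type ordering, so the two accessors can disagree
// for the same Dtype. The include path is assumed to be aare/Dtype.hpp.
#include "aare/Dtype.hpp"
#include <cstdint>
#include <typeinfo>

inline void example_dtype_descriptors() {
    aare::Dtype dt(typeid(int64_t));
    auto fmt = dt.format_descr(); // "q" (pybind11 style), per DTYPE_FORMAT_DSC
    auto np = dt.numpy_descr();   // "l" on linux64/apple, per NUMPY_FORMAT_DSC
    (void)fmt;
    (void)np;
}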
|
||||
|
||||
@@ -5,12 +5,12 @@
|
||||
namespace aare {
|
||||
|
||||
/**
|
||||
* @brief RAII File class for reading, and in the future potentially writing
|
||||
* image files in various formats. Minimal generic interface. For specail fuctions
|
||||
* plase use the RawFile or NumpyFile classes directly.
|
||||
* Wraps FileInterface to abstract the underlying file format
|
||||
* @note **frame_number** refers the the frame number sent by the detector while **frame_index**
|
||||
* is the position of the frame in the file
|
||||
* @brief RAII File class for reading, and in the future potentially writing
* image files in various formats. Minimal generic interface. For special
* functions please use the RawFile or NumpyFile classes directly. Wraps
* FileInterface to abstract the underlying file format
* @note **frame_number** refers to the frame number sent by the detector while
* **frame_index** is the position of the frame in the file
|
||||
*/
|
||||
class File {
|
||||
std::unique_ptr<FileInterface> file_impl;
|
||||
@@ -25,42 +25,46 @@ class File {
|
||||
* @throws std::invalid_argument if the file mode is not supported
|
||||
*
|
||||
*/
|
||||
File(const std::filesystem::path &fname, const std::string &mode="r", const FileConfig &cfg = {});
|
||||
|
||||
/**Since the object is responsible for managing the file we disable copy construction */
|
||||
File(File const &other) = delete;
|
||||
File(const std::filesystem::path &fname, const std::string &mode = "r",
|
||||
const FileConfig &cfg = {});
|
||||
|
||||
/**Since the object is responsible for managing the file we disable copy
|
||||
* construction */
|
||||
File(File const &other) = delete;
|
||||
|
||||
/**The same goes for copy assignment */
|
||||
File& operator=(File const &other) = delete;
|
||||
File &operator=(File const &other) = delete;
|
||||
|
||||
File(File &&other) noexcept;
|
||||
File& operator=(File &&other) noexcept;
|
||||
File &operator=(File &&other) noexcept;
|
||||
~File() = default;
|
||||
|
||||
// void close(); //!< close the file
|
||||
|
||||
Frame read_frame(); //!< read one frame from the file at the current position
|
||||
Frame read_frame(size_t frame_index); //!< read one frame at the position given by frame number
|
||||
std::vector<Frame> read_n(size_t n_frames); //!< read n_frames from the file at the current position
|
||||
|
||||
Frame
|
||||
read_frame(); //!< read one frame from the file at the current position
|
||||
Frame read_frame(size_t frame_index); //!< read one frame at the position
|
||||
//!< given by frame number
|
||||
std::vector<Frame> read_n(size_t n_frames); //!< read n_frames from the file
|
||||
//!< at the current position
|
||||
|
||||
void read_into(std::byte *image_buf);
|
||||
void read_into(std::byte *image_buf, size_t n_frames);
|
||||
|
||||
size_t frame_number(); //!< get the frame number at the current position
|
||||
size_t frame_number(size_t frame_index); //!< get the frame number at the given frame index
|
||||
size_t bytes_per_frame() const;
|
||||
size_t pixels_per_frame() const;
|
||||
size_t bytes_per_pixel() const;
|
||||
|
||||
size_t frame_number(); //!< get the frame number at the current position
|
||||
size_t frame_number(
|
||||
size_t frame_index); //!< get the frame number at the given frame index
|
||||
size_t bytes_per_frame() const;
|
||||
size_t pixels_per_frame() const;
|
||||
size_t bytes_per_pixel() const;
|
||||
size_t bitdepth() const;
|
||||
void seek(size_t frame_index); //!< seek to the given frame index
|
||||
size_t tell() const; //!< get the frame index of the file pointer
|
||||
void seek(size_t frame_index); //!< seek to the given frame index
|
||||
size_t tell() const; //!< get the frame index of the file pointer
|
||||
size_t total_frames() const;
|
||||
size_t rows() const;
|
||||
size_t cols() const;
|
||||
|
||||
DetectorType detector_type() const;
|
||||
|
||||
|
||||
};
|
||||
|
||||
} // namespace aare
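// --- Editor's illustrative sketch, not part of the diff ---------------------
// A typical read loop against the File facade declared above. The file name
// is a placeholder and the uint16_t pixel type is an assumption about the
// data being read.
#include "aare/File.hpp"
#include <cstdint>
#include <fmt/core.h>

inline void example_read_file() {
    aare::File f("run_master_0.json"); // placeholder path, mode defaults to "r"
    fmt::print("{} frames of {}x{} pixels\n", f.total_frames(), f.rows(),
               f.cols());
    for (size_t i = 0; i < f.total_frames(); ++i) {
        auto frame = f.read_frame();        // advances the file position
        auto view = frame.view<uint16_t>(); // zero-copy 2D view of the pixels
        (void)view;
    }
}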
|
||||
@@ -20,8 +20,10 @@ struct FileConfig {
|
||||
uint64_t rows{};
|
||||
uint64_t cols{};
|
||||
bool operator==(const FileConfig &other) const {
|
||||
return dtype == other.dtype && rows == other.rows && cols == other.cols && geometry == other.geometry &&
|
||||
detector_type == other.detector_type && max_frames_per_file == other.max_frames_per_file;
|
||||
return dtype == other.dtype && rows == other.rows &&
|
||||
cols == other.cols && geometry == other.geometry &&
|
||||
detector_type == other.detector_type &&
|
||||
max_frames_per_file == other.max_frames_per_file;
|
||||
}
|
||||
bool operator!=(const FileConfig &other) const { return !(*this == other); }
|
||||
|
||||
@@ -32,8 +34,11 @@ struct FileConfig {
|
||||
int max_frames_per_file{};
|
||||
size_t total_frames{};
|
||||
std::string to_string() const {
|
||||
return "{ dtype: " + dtype.to_string() + ", rows: " + std::to_string(rows) + ", cols: " + std::to_string(cols) +
|
||||
", geometry: " + geometry.to_string() + ", detector_type: " + ToString(detector_type) +
|
||||
return "{ dtype: " + dtype.to_string() +
|
||||
", rows: " + std::to_string(rows) +
|
||||
", cols: " + std::to_string(cols) +
|
||||
", geometry: " + geometry.to_string() +
|
||||
", detector_type: " + ToString(detector_type) +
|
||||
", max_frames_per_file: " + std::to_string(max_frames_per_file) +
|
||||
", total_frames: " + std::to_string(total_frames) + " }";
|
||||
}
|
||||
@@ -42,7 +47,8 @@ struct FileConfig {
|
||||
/**
|
||||
* @brief FileInterface class to define the interface for file operations
|
||||
* @note parent class for NumpyFile and RawFile
|
||||
* @note all functions are pure virtual and must be implemented by the derived classes
|
||||
* @note all functions are pure virtual and must be implemented by the derived
|
||||
* classes
|
||||
*/
|
||||
class FileInterface {
|
||||
public:
|
||||
@@ -64,17 +70,20 @@ class FileInterface {
|
||||
* @param n_frames number of frames to read
|
||||
* @return vector of frames
|
||||
*/
|
||||
virtual std::vector<Frame> read_n(size_t n_frames) = 0; // Is this the right interface?
|
||||
virtual std::vector<Frame>
|
||||
read_n(size_t n_frames) = 0; // Is this the right interface?
|
||||
|
||||
/**
|
||||
* @brief read one frame from the file at the current position and store it in the provided buffer
|
||||
* @brief read one frame from the file at the current position and store it
|
||||
* in the provided buffer
|
||||
* @param image_buf buffer to store the frame
|
||||
* @return void
|
||||
*/
|
||||
virtual void read_into(std::byte *image_buf) = 0;
|
||||
|
||||
/**
|
||||
* @brief read n_frames from the file at the current position and store them in the provided buffer
|
||||
* @brief read n_frames from the file at the current position and store them
|
||||
* in the provided buffer
|
||||
* @param image_buf buffer to store the frames
|
||||
* @param n_frames number of frames to read
|
||||
* @return void
|
||||
@@ -134,7 +143,6 @@ class FileInterface {
|
||||
*/
|
||||
virtual size_t bitdepth() const = 0;
|
||||
|
||||
|
||||
virtual DetectorType detector_type() const = 0;
|
||||
|
||||
// function to query the data type of the file
|
||||
|
||||
@@ -12,14 +12,14 @@ class FilePtr {
|
||||
|
||||
public:
|
||||
FilePtr() = default;
|
||||
FilePtr(const std::filesystem::path& fname, const std::string& mode);
|
||||
FilePtr(const std::filesystem::path &fname, const std::string &mode);
|
||||
FilePtr(const FilePtr &) = delete; // we don't want a copy
|
||||
FilePtr &operator=(const FilePtr &) = delete; // since we handle a resource
|
||||
FilePtr(FilePtr &&other);
|
||||
FilePtr &operator=(FilePtr &&other);
|
||||
FILE *get();
|
||||
int64_t tell();
|
||||
void seek(int64_t offset, int whence = SEEK_SET) {
|
||||
ssize_t tell();
|
||||
void seek(ssize_t offset, int whence = SEEK_SET) {
|
||||
if (fseek(fp_, offset, whence) != 0)
|
||||
throw std::runtime_error("Error seeking in file");
|
||||
}
|
||||
|
||||
@@ -15,15 +15,27 @@ NDArray<double, 1> gaus(NDView<double, 1> x, NDView<double, 1> par);
|
||||
double pol1(const double x, const double *par);
|
||||
NDArray<double, 1> pol1(NDView<double, 1> x, NDView<double, 1> par);
|
||||
|
||||
} // namespace func
|
||||
double scurve(const double x, const double *par);
|
||||
NDArray<double, 1> scurve(NDView<double, 1> x, NDView<double, 1> par);
|
||||
|
||||
double scurve2(const double x, const double *par);
|
||||
NDArray<double, 1> scurve2(NDView<double, 1> x, NDView<double, 1> par);
|
||||
|
||||
} // namespace func
|
||||
|
||||
/**
|
||||
* @brief Estimate the initial parameters for a Gaussian fit
|
||||
*/
|
||||
std::array<double, 3> gaus_init_par(const NDView<double, 1> x, const NDView<double, 1> y);
|
||||
std::array<double, 3> gaus_init_par(const NDView<double, 1> x,
|
||||
const NDView<double, 1> y);
|
||||
|
||||
std::array<double, 2> pol1_init_par(const NDView<double, 1> x, const NDView<double, 1> y);
|
||||
std::array<double, 2> pol1_init_par(const NDView<double, 1> x,
|
||||
const NDView<double, 1> y);
|
||||
|
||||
std::array<double, 6> scurve_init_par(const NDView<double, 1> x,
|
||||
const NDView<double, 1> y);
|
||||
std::array<double, 6> scurve2_init_par(const NDView<double, 1> x,
|
||||
const NDView<double, 1> y);
|
||||
|
||||
static constexpr int DEFAULT_NUM_THREADS = 4;
|
||||
|
||||
@@ -34,46 +46,41 @@ static constexpr int DEFAULT_NUM_THREADS = 4;
|
||||
*/
|
||||
NDArray<double, 1> fit_gaus(NDView<double, 1> x, NDView<double, 1> y);
|
||||
|
||||
|
||||
/**
|
||||
* @brief Fit a 1D Gaussian to each pixel. Data layout [row, col, values]
|
||||
* @param x x values
|
||||
* @param y y vales, layout [row, col, values]
|
||||
* @param y y values, layout [row, col, values]
|
||||
* @param n_threads number of threads to use
|
||||
*/
|
||||
|
||||
NDArray<double, 3> fit_gaus(NDView<double, 1> x, NDView<double, 3> y,
|
||||
int n_threads = DEFAULT_NUM_THREADS);
|
||||
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* @brief Fit a 1D Gaussian with error estimates
|
||||
* @param x x values
|
||||
* @param y y vales, layout [row, col, values]
|
||||
* @param y y values, layout [row, col, values]
|
||||
* @param y_err error in y, layout [row, col, values]
|
||||
* @param par_out output parameters
|
||||
* @param par_err_out output error parameters
|
||||
*/
|
||||
void fit_gaus(NDView<double, 1> x, NDView<double, 1> y, NDView<double, 1> y_err,
|
||||
NDView<double, 1> par_out, NDView<double, 1> par_err_out,
|
||||
double& chi2);
|
||||
double &chi2);
|
||||
|
||||
/**
|
||||
* @brief Fit a 1D Gaussian to each pixel with error estimates. Data layout
|
||||
* [row, col, values]
|
||||
* @param x x values
|
||||
* @param y y vales, layout [row, col, values]
|
||||
* @param y y values, layout [row, col, values]
|
||||
* @param y_err error in y, layout [row, col, values]
|
||||
* @param par_out output parameters, layout [row, col, values]
|
||||
* @param par_err_out output parameter errors, layout [row, col, values]
|
||||
* @param n_threads number of threads to use
|
||||
*/
|
||||
void fit_gaus(NDView<double, 1> x, NDView<double, 3> y, NDView<double, 3> y_err,
|
||||
NDView<double, 3> par_out, NDView<double, 3> par_err_out, NDView<double, 2> chi2_out,
|
||||
int n_threads = DEFAULT_NUM_THREADS
|
||||
);
|
||||
NDView<double, 3> par_out, NDView<double, 3> par_err_out,
|
||||
NDView<double, 2> chi2_out, int n_threads = DEFAULT_NUM_THREADS);
|
||||
|
||||
NDArray<double, 1> fit_pol1(NDView<double, 1> x, NDView<double, 1> y);
|
||||
|
||||
@@ -81,12 +88,33 @@ NDArray<double, 3> fit_pol1(NDView<double, 1> x, NDView<double, 3> y,
|
||||
int n_threads = DEFAULT_NUM_THREADS);
|
||||
|
||||
void fit_pol1(NDView<double, 1> x, NDView<double, 1> y, NDView<double, 1> y_err,
|
||||
NDView<double, 1> par_out, NDView<double, 1> par_err_out, double& chi2);
|
||||
NDView<double, 1> par_out, NDView<double, 1> par_err_out,
|
||||
double &chi2);
|
||||
|
||||
// TODO! not sure we need to offer the different version in C++
|
||||
void fit_pol1(NDView<double, 1> x, NDView<double, 3> y, NDView<double, 3> y_err,
|
||||
NDView<double, 3> par_out, NDView<double, 3> par_err_out,NDView<double, 2> chi2_out,
|
||||
int n_threads = DEFAULT_NUM_THREADS);
|
||||
NDView<double, 3> par_out, NDView<double, 3> par_err_out,
|
||||
NDView<double, 2> chi2_out, int n_threads = DEFAULT_NUM_THREADS);
|
||||
|
||||
NDArray<double, 1> fit_scurve(NDView<double, 1> x, NDView<double, 1> y);
|
||||
NDArray<double, 3> fit_scurve(NDView<double, 1> x, NDView<double, 3> y,
|
||||
int n_threads);
|
||||
void fit_scurve(NDView<double, 1> x, NDView<double, 1> y,
|
||||
NDView<double, 1> y_err, NDView<double, 1> par_out,
|
||||
NDView<double, 1> par_err_out, double &chi2);
|
||||
void fit_scurve(NDView<double, 1> x, NDView<double, 3> y,
|
||||
NDView<double, 3> y_err, NDView<double, 3> par_out,
|
||||
NDView<double, 3> par_err_out, NDView<double, 2> chi2_out,
|
||||
int n_threads);
|
||||
|
||||
NDArray<double, 1> fit_scurve2(NDView<double, 1> x, NDView<double, 1> y);
|
||||
NDArray<double, 3> fit_scurve2(NDView<double, 1> x, NDView<double, 3> y,
|
||||
int n_threads);
|
||||
void fit_scurve2(NDView<double, 1> x, NDView<double, 1> y,
|
||||
NDView<double, 1> y_err, NDView<double, 1> par_out,
|
||||
NDView<double, 1> par_err_out, double &chi2);
|
||||
void fit_scurve2(NDView<double, 1> x, NDView<double, 3> y,
|
||||
NDView<double, 3> y_err, NDView<double, 3> par_out,
|
||||
NDView<double, 3> par_err_out, NDView<double, 2> chi2_out,
|
||||
int n_threads);
|
||||
} // namespace aare
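// --- Editor's illustrative sketch, not part of the diff ---------------------
// Calling the 1D fit_gaus() declared above on hand-made data. The meaning and
// order of the returned parameters follow func::gaus and are treated as an
// assumption here; wrapping raw vectors in NDView matches the NDView
// constructor shown later in this comparison.
#include "aare/Fit.hpp"
#include "aare/NDView.hpp"
#include <cmath>
#include <vector>

inline void example_fit_gaus() {
    std::vector<double> x(50), y(50);
    for (size_t i = 0; i < x.size(); ++i) {
        x[i] = static_cast<double>(i);
        y[i] = 100.0 * std::exp(-0.5 * std::pow((x[i] - 25.0) / 3.0, 2));
    }
    aare::NDView<double, 1> xv(x.data(), {static_cast<ssize_t>(x.size())});
    aare::NDView<double, 1> yv(y.data(), {static_cast<ssize_t>(y.size())});
    auto par = aare::fit_gaus(xv, yv); // NDArray<double, 1> of fitted parameters
    (void)par;
}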
|
||||
@@ -19,7 +19,7 @@ class Frame {
|
||||
uint32_t m_cols;
|
||||
Dtype m_dtype;
|
||||
std::byte *m_data;
|
||||
//TODO! Add frame number?
|
||||
// TODO! Add frame number?
|
||||
|
||||
public:
|
||||
/**
|
||||
@@ -39,7 +39,7 @@ class Frame {
|
||||
* @param dtype data type of the pixels
|
||||
*/
|
||||
Frame(const std::byte *bytes, uint32_t rows, uint32_t cols, Dtype dtype);
|
||||
~Frame(){ delete[] m_data; };
|
||||
~Frame() { delete[] m_data; };
|
||||
|
||||
/** @warning Copy is disabled to ensure performance when passing
|
||||
* frames around. Can discuss enabling it.
|
||||
@@ -52,7 +52,6 @@ class Frame {
|
||||
Frame &operator=(Frame &&other) noexcept;
|
||||
Frame(Frame &&other) noexcept;
|
||||
|
||||
|
||||
Frame clone() const; //<- Explicit copy
|
||||
|
||||
uint32_t rows() const;
|
||||
@@ -93,7 +92,7 @@ class Frame {
|
||||
if (row >= m_rows || col >= m_cols) {
|
||||
throw std::out_of_range("Invalid row or column index");
|
||||
}
|
||||
//TODO! add tests then reimplement using pixel_ptr
|
||||
// TODO! add tests then reimplement using pixel_ptr
|
||||
T data;
|
||||
std::memcpy(&data, m_data + (row * m_cols + col) * m_dtype.bytes(),
|
||||
m_dtype.bytes());
|
||||
@@ -102,18 +101,18 @@ class Frame {
|
||||
/**
|
||||
* @brief Return an NDView of the frame. This is the preferred way to access
|
||||
* data in the frame.
|
||||
*
|
||||
*
|
||||
* @tparam T type of the pixels
|
||||
* @return NDView<T, 2>
|
||||
* @return NDView<T, 2>
|
||||
*/
|
||||
template <typename T> NDView<T, 2> view() {
|
||||
std::array<int64_t, 2> shape = {static_cast<int64_t>(m_rows),
|
||||
static_cast<int64_t>(m_cols)};
|
||||
template <typename T> NDView<T, 2> view() & {
|
||||
std::array<ssize_t, 2> shape = {static_cast<ssize_t>(m_rows),
|
||||
static_cast<ssize_t>(m_cols)};
|
||||
T *data = reinterpret_cast<T *>(m_data);
|
||||
return NDView<T, 2>(data, shape);
|
||||
}
|
||||
|
||||
/**
|
||||
/**
|
||||
* @brief Copy the frame data into a new NDArray. This is a deep copy.
|
||||
*/
|
||||
template <typename T> NDArray<T> image() {
|
||||
|
||||
@@ -51,7 +51,7 @@ Interpolator::interpolate(const ClusterVector<ClusterType> &clusters) {
|
||||
Photon photon;
|
||||
photon.x = cluster.x;
|
||||
photon.y = cluster.y;
|
||||
photon.energy = eta.sum;
|
||||
photon.energy = static_cast<decltype(photon.energy)>(eta.sum);
|
||||
|
||||
// auto ie = nearest_index(m_energy_bins, photon.energy)-1;
|
||||
// auto ix = nearest_index(m_etabinsx, eta.x)-1;
|
||||
@@ -99,7 +99,7 @@ Interpolator::interpolate(const ClusterVector<ClusterType> &clusters) {
|
||||
Photon photon;
|
||||
photon.x = cluster.x;
|
||||
photon.y = cluster.y;
|
||||
photon.energy = eta.sum;
|
||||
photon.energy = static_cast<decltype(photon.energy)>(eta.sum);
|
||||
|
||||
// Now do some actual interpolation.
|
||||
// Find which energy bin the cluster is in
|
||||
|
||||
@@ -3,104 +3,113 @@
|
||||
#include <filesystem>
|
||||
#include <vector>
|
||||
|
||||
#include "aare/FilePtr.hpp"
|
||||
#include "aare/defs.hpp"
|
||||
#include "aare/NDArray.hpp"
|
||||
#include "aare/FileInterface.hpp"
|
||||
#include "aare/FilePtr.hpp"
|
||||
#include "aare/NDArray.hpp"
|
||||
#include "aare/defs.hpp"
|
||||
namespace aare {
|
||||
|
||||
|
||||
struct JungfrauDataHeader{
|
||||
struct JungfrauDataHeader {
|
||||
uint64_t framenum;
|
||||
uint64_t bunchid;
|
||||
};
|
||||
|
||||
class JungfrauDataFile : public FileInterface {
|
||||
|
||||
size_t m_rows{}; //!< number of rows in the image, from find_frame_size();
|
||||
size_t m_cols{}; //!< number of columns in the image, from find_frame_size();
|
||||
size_t m_rows{}; //!< number of rows in the image, from find_frame_size();
|
||||
size_t
|
||||
m_cols{}; //!< number of columns in the image, from find_frame_size();
|
||||
size_t m_bytes_per_frame{}; //!< number of bytes per frame excluding header
|
||||
size_t m_total_frames{}; //!< total number of frames in the series of files
|
||||
size_t m_offset{}; //!< file index of the first file, allow starting at non zero file
|
||||
size_t m_current_file_index{}; //!< The index of the open file
|
||||
size_t m_current_frame_index{}; //!< The index of the current frame (with reference to all files)
|
||||
size_t m_total_frames{}; //!< total number of frames in the series of files
|
||||
size_t m_offset{}; //!< file index of the first file, allow starting at non
|
||||
//!< zero file
|
||||
size_t m_current_file_index{}; //!< The index of the open file
|
||||
size_t m_current_frame_index{}; //!< The index of the current frame (with
|
||||
//!< reference to all files)
|
||||
|
||||
std::vector<size_t> m_last_frame_in_file{}; //!< Used for seeking to the correct file
|
||||
std::vector<size_t>
|
||||
m_last_frame_in_file{}; //!< Used for seeking to the correct file
|
||||
std::filesystem::path m_path; //!< path to the files
|
||||
std::string m_base_name; //!< base name used for formatting file names
|
||||
|
||||
FilePtr m_fp; //!< RAII wrapper for a FILE*
|
||||
FilePtr m_fp; //!< RAII wrapper for a FILE*
|
||||
|
||||
|
||||
using pixel_type = uint16_t;
|
||||
static constexpr size_t header_size = sizeof(JungfrauDataHeader);
|
||||
static constexpr size_t n_digits_in_file_index = 6; //!< to format file names
|
||||
static constexpr size_t header_size = sizeof(JungfrauDataHeader);
|
||||
static constexpr size_t n_digits_in_file_index =
|
||||
6; //!< to format file names
|
||||
|
||||
public:
|
||||
JungfrauDataFile(const std::filesystem::path &fname);
|
||||
|
||||
std::string base_name() const; //!< get the base name of the file (without path and extension)
|
||||
size_t bytes_per_frame() override;
|
||||
size_t pixels_per_frame() override;
|
||||
size_t bytes_per_pixel() const;
|
||||
std::string base_name()
|
||||
const; //!< get the base name of the file (without path and extension)
|
||||
size_t bytes_per_frame() override;
|
||||
size_t pixels_per_frame() override;
|
||||
size_t bytes_per_pixel() const;
|
||||
size_t bitdepth() const override;
|
||||
void seek(size_t frame_index) override; //!< seek to the given frame index (note not byte offset)
|
||||
size_t tell() override; //!< get the frame index of the file pointer
|
||||
void seek(size_t frame_index)
|
||||
override; //!< seek to the given frame index (note not byte offset)
|
||||
size_t tell() override; //!< get the frame index of the file pointer
|
||||
size_t total_frames() const override;
|
||||
size_t rows() const override;
|
||||
size_t cols() const override;
|
||||
std::array<ssize_t,2> shape() const;
|
||||
size_t n_files() const; //!< get the number of files in the series.
|
||||
std::array<ssize_t, 2> shape() const;
|
||||
size_t n_files() const; //!< get the number of files in the series.
|
||||
|
||||
// Extra functions needed for FileInterface
|
||||
Frame read_frame() override;
|
||||
Frame read_frame(size_t frame_number) override;
|
||||
std::vector<Frame> read_n(size_t n_frames=0) override;
|
||||
std::vector<Frame> read_n(size_t n_frames = 0) override;
|
||||
void read_into(std::byte *image_buf) override;
|
||||
void read_into(std::byte *image_buf, size_t n_frames) override;
|
||||
size_t frame_number(size_t frame_index) override;
|
||||
DetectorType detector_type() const override;
|
||||
|
||||
/**
|
||||
* @brief Read a single frame from the file into the given buffer.
|
||||
* @param image_buf buffer to read the frame into. (Note the caller is responsible for allocating the buffer)
|
||||
* @brief Read a single frame from the file into the given buffer.
|
||||
* @param image_buf buffer to read the frame into. (Note the caller is
|
||||
* responsible for allocating the buffer)
|
||||
* @param header pointer to a JungfrauDataHeader or nullptr to skip header)
|
||||
*/
|
||||
void read_into(std::byte *image_buf, JungfrauDataHeader *header = nullptr);
|
||||
|
||||
/**
|
||||
* @brief Read a multiple frames from the file into the given buffer.
|
||||
* @param image_buf buffer to read the frame into. (Note the caller is responsible for allocating the buffer)
|
||||
* @brief Read multiple frames from the file into the given buffer.
|
||||
* @param image_buf buffer to read the frame into. (Note the caller is
|
||||
* responsible for allocating the buffer)
|
||||
* @param n_frames number of frames to read
|
||||
* @param header pointer to a JungfrauDataHeader or nullptr to skip header)
|
||||
*/
|
||||
void read_into(std::byte *image_buf, size_t n_frames, JungfrauDataHeader *header = nullptr);
|
||||
|
||||
/**
|
||||
void read_into(std::byte *image_buf, size_t n_frames,
|
||||
JungfrauDataHeader *header = nullptr);
|
||||
|
||||
/**
|
||||
* @brief Read a single frame from the file into the given NDArray
|
||||
* @param image NDArray to read the frame into.
|
||||
*/
|
||||
void read_into(NDArray<uint16_t>* image, JungfrauDataHeader* header = nullptr);
|
||||
void read_into(NDArray<uint16_t> *image,
|
||||
JungfrauDataHeader *header = nullptr);
|
||||
|
||||
JungfrauDataHeader read_header();
|
||||
std::filesystem::path current_file() const { return fpath(m_current_file_index+m_offset); }
|
||||
std::filesystem::path current_file() const {
|
||||
return fpath(m_current_file_index + m_offset);
|
||||
}
|
||||
|
||||
|
||||
private:
|
||||
private:
|
||||
/**
|
||||
* @brief Find the size of the frame in the file. (256x256, 256x1024, 512x1024)
|
||||
* @brief Find the size of the frame in the file. (256x256, 256x1024,
|
||||
* 512x1024)
|
||||
* @param fname path to the file
|
||||
* @throws std::runtime_error if the file is empty or the size cannot be determined
|
||||
* @throws std::runtime_error if the file is empty or the size cannot be
|
||||
* determined
|
||||
*/
|
||||
void find_frame_size(const std::filesystem::path &fname);
|
||||
void find_frame_size(const std::filesystem::path &fname);
|
||||
|
||||
|
||||
void parse_fname(const std::filesystem::path &fname);
|
||||
void scan_files();
|
||||
void open_file(size_t file_index);
|
||||
std::filesystem::path fpath(size_t frame_index) const;
|
||||
|
||||
|
||||
};
|
||||
void parse_fname(const std::filesystem::path &fname);
|
||||
void scan_files();
|
||||
void open_file(size_t file_index);
|
||||
std::filesystem::path fpath(size_t frame_index) const;
|
||||
};
|
||||
|
||||
} // namespace aare
|
||||
@@ -21,12 +21,11 @@ TODO! Add expression templates for operators
|
||||
|
||||
namespace aare {
|
||||
|
||||
|
||||
template <typename T, int64_t Ndim = 2>
|
||||
template <typename T, ssize_t Ndim = 2>
|
||||
class NDArray : public ArrayExpr<NDArray<T, Ndim>, Ndim> {
|
||||
std::array<int64_t, Ndim> shape_;
|
||||
std::array<int64_t, Ndim> strides_;
|
||||
size_t size_{};
|
||||
std::array<ssize_t, Ndim> shape_;
|
||||
std::array<ssize_t, Ndim> strides_;
|
||||
size_t size_{}; //TODO! do we need to store size when we have shape?
|
||||
T *data_;
|
||||
|
||||
public:
|
||||
@@ -42,20 +41,18 @@ class NDArray : public ArrayExpr<NDArray<T, Ndim>, Ndim> {
|
||||
*
|
||||
* @param shape shape of the new NDArray
|
||||
*/
|
||||
explicit NDArray(std::array<int64_t, Ndim> shape)
|
||||
explicit NDArray(std::array<ssize_t, Ndim> shape)
|
||||
: shape_(shape), strides_(c_strides<Ndim>(shape_)),
|
||||
size_(std::accumulate(shape_.begin(), shape_.end(), 1,
|
||||
std::multiplies<>())),
|
||||
size_(num_elements(shape_)),
|
||||
data_(new T[size_]) {}
|
||||
|
||||
|
||||
/**
|
||||
* @brief Construct a new NDArray object with a shape and value.
|
||||
*
|
||||
* @param shape shape of the new array
|
||||
* @param value value to initialize the array with
|
||||
*/
|
||||
NDArray(std::array<int64_t, Ndim> shape, T value) : NDArray(shape) {
|
||||
NDArray(std::array<ssize_t, Ndim> shape, T value) : NDArray(shape) {
|
||||
this->operator=(value);
|
||||
}
|
||||
|
||||
@@ -69,8 +66,8 @@ class NDArray : public ArrayExpr<NDArray<T, Ndim>, Ndim> {
|
||||
std::copy(v.begin(), v.end(), begin());
|
||||
}
|
||||
|
||||
template<size_t Size>
|
||||
NDArray(const std::array<T, Size>& arr) : NDArray<T,1>({Size}) {
|
||||
template <size_t Size>
|
||||
NDArray(const std::array<T, Size> &arr) : NDArray<T, 1>({Size}) {
|
||||
std::copy(arr.begin(), arr.end(), begin());
|
||||
}
|
||||
|
||||
@@ -79,7 +76,24 @@ class NDArray : public ArrayExpr<NDArray<T, Ndim>, Ndim> {
|
||||
: shape_(other.shape_), strides_(c_strides<Ndim>(shape_)),
|
||||
size_(other.size_), data_(other.data_) {
|
||||
other.reset(); // TODO! is this necessary?
|
||||
}
|
||||
|
||||
|
||||
// Move constructor from an array with Ndim + 1
|
||||
template <ssize_t M, typename = std::enable_if_t<(M == Ndim + 1)>>
|
||||
NDArray(NDArray<T, M> &&other)
|
||||
: shape_(drop_first_dim(other.shape())),
|
||||
strides_(c_strides<Ndim>(shape_)), size_(num_elements(shape_)),
|
||||
data_(other.data()) {
|
||||
|
||||
// For now only allow the move if the sizes match, to avoid unreachable data.
// If the use case arises we can remove this check.
|
||||
if(size() != other.size()) {
|
||||
data_ = nullptr; // avoid double free; other will clean up the memory in its destructor
|
||||
throw std::runtime_error(LOCATION +
|
||||
"Size mismatch in move constructor of NDArray<T, Ndim-1>");
|
||||
}
|
||||
other.reset();
|
||||
}
|
||||
|
||||
// Copy constructor
|
||||
@@ -113,10 +127,10 @@ class NDArray : public ArrayExpr<NDArray<T, Ndim>, Ndim> {
|
||||
NDArray &operator-=(const NDArray &other);
|
||||
NDArray &operator*=(const NDArray &other);
|
||||
|
||||
//Write directly to the data array, or create a new one
|
||||
template<size_t Size>
|
||||
NDArray<T,1>& operator=(const std::array<T,Size> &other){
|
||||
if(Size != size_){
|
||||
// Write directly to the data array, or create a new one
|
||||
template <size_t Size>
|
||||
NDArray<T, 1> &operator=(const std::array<T, Size> &other) {
|
||||
if (Size != size_) {
|
||||
delete[] data_;
|
||||
size_ = Size;
|
||||
data_ = new T[size_];
|
||||
@@ -157,11 +171,6 @@ class NDArray : public ArrayExpr<NDArray<T, Ndim>, Ndim> {
|
||||
|
||||
NDArray &operator&=(const T & /*mask*/);
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
void sqrt() {
|
||||
for (int i = 0; i < size_; ++i) {
|
||||
data_[i] = std::sqrt(data_[i]);
|
||||
@@ -186,22 +195,22 @@ class NDArray : public ArrayExpr<NDArray<T, Ndim>, Ndim> {
|
||||
}
|
||||
|
||||
// TODO! is int the right type for index?
|
||||
T &operator()(int64_t i) { return data_[i]; }
|
||||
const T &operator()(int64_t i) const { return data_[i]; }
|
||||
T &operator()(ssize_t i) { return data_[i]; }
|
||||
const T &operator()(ssize_t i) const { return data_[i]; }
|
||||
|
||||
T &operator[](int64_t i) { return data_[i]; }
|
||||
const T &operator[](int64_t i) const { return data_[i]; }
|
||||
T &operator[](ssize_t i) { return data_[i]; }
|
||||
const T &operator[](ssize_t i) const { return data_[i]; }
|
||||
|
||||
T *data() { return data_; }
|
||||
std::byte *buffer() { return reinterpret_cast<std::byte *>(data_); }
|
||||
ssize_t size() const { return static_cast<ssize_t>(size_); }
|
||||
size_t total_bytes() const { return size_ * sizeof(T); }
|
||||
std::array<int64_t, Ndim> shape() const noexcept { return shape_; }
|
||||
int64_t shape(int64_t i) const noexcept { return shape_[i]; }
|
||||
std::array<int64_t, Ndim> strides() const noexcept { return strides_; }
|
||||
std::array<ssize_t, Ndim> shape() const noexcept { return shape_; }
|
||||
ssize_t shape(ssize_t i) const noexcept { return shape_[i]; }
|
||||
std::array<ssize_t, Ndim> strides() const noexcept { return strides_; }
|
||||
size_t bitdepth() const noexcept { return sizeof(T) * 8; }
|
||||
|
||||
std::array<int64_t, Ndim> byte_strides() const noexcept {
|
||||
std::array<ssize_t, Ndim> byte_strides() const noexcept {
|
||||
auto byte_strides = strides_;
|
||||
for (auto &val : byte_strides)
|
||||
val *= sizeof(T);
|
||||
@@ -228,7 +237,7 @@ class NDArray : public ArrayExpr<NDArray<T, Ndim>, Ndim> {
|
||||
};
|
||||
|
||||
// Move assign
|
||||
template <typename T, int64_t Ndim>
|
||||
template <typename T, ssize_t Ndim>
|
||||
NDArray<T, Ndim> &
|
||||
NDArray<T, Ndim>::operator=(NDArray<T, Ndim> &&other) noexcept {
|
||||
if (this != &other) {
|
||||
@@ -242,7 +251,7 @@ NDArray<T, Ndim>::operator=(NDArray<T, Ndim> &&other) noexcept {
|
||||
return *this;
|
||||
}
|
||||
|
||||
template <typename T, int64_t Ndim>
|
||||
template <typename T, ssize_t Ndim>
|
||||
NDArray<T, Ndim> &NDArray<T, Ndim>::operator+=(const NDArray<T, Ndim> &other) {
|
||||
// check shape
|
||||
if (shape_ == other.shape_) {
|
||||
@@ -254,7 +263,7 @@ NDArray<T, Ndim> &NDArray<T, Ndim>::operator+=(const NDArray<T, Ndim> &other) {
|
||||
throw(std::runtime_error("Shape of ImageDatas must match"));
|
||||
}
|
||||
|
||||
template <typename T, int64_t Ndim>
|
||||
template <typename T, ssize_t Ndim>
|
||||
NDArray<T, Ndim> &NDArray<T, Ndim>::operator-=(const NDArray<T, Ndim> &other) {
|
||||
// check shape
|
||||
if (shape_ == other.shape_) {
|
||||
@@ -266,7 +275,7 @@ NDArray<T, Ndim> &NDArray<T, Ndim>::operator-=(const NDArray<T, Ndim> &other) {
|
||||
throw(std::runtime_error("Shape of ImageDatas must match"));
|
||||
}
|
||||
|
||||
template <typename T, int64_t Ndim>
|
||||
template <typename T, ssize_t Ndim>
|
||||
NDArray<T, Ndim> &NDArray<T, Ndim>::operator*=(const NDArray<T, Ndim> &other) {
|
||||
// check shape
|
||||
if (shape_ == other.shape_) {
|
||||
@@ -278,14 +287,14 @@ NDArray<T, Ndim> &NDArray<T, Ndim>::operator*=(const NDArray<T, Ndim> &other) {
|
||||
throw(std::runtime_error("Shape of ImageDatas must match"));
|
||||
}
|
||||
|
||||
template <typename T, int64_t Ndim>
|
||||
template <typename T, ssize_t Ndim>
|
||||
NDArray<T, Ndim> &NDArray<T, Ndim>::operator&=(const T &mask) {
|
||||
for (auto it = begin(); it != end(); ++it)
|
||||
*it &= mask;
|
||||
return *this;
|
||||
}
|
||||
|
||||
template <typename T, int64_t Ndim>
|
||||
template <typename T, ssize_t Ndim>
|
||||
NDArray<bool, Ndim> NDArray<T, Ndim>::operator>(const NDArray &other) {
|
||||
if (shape_ == other.shape_) {
|
||||
NDArray<bool, Ndim> result{shape_};
|
||||
@@ -297,7 +306,7 @@ NDArray<bool, Ndim> NDArray<T, Ndim>::operator>(const NDArray &other) {
|
||||
throw(std::runtime_error("Shape of ImageDatas must match"));
|
||||
}
|
||||
|
||||
template <typename T, int64_t Ndim>
|
||||
template <typename T, ssize_t Ndim>
|
||||
NDArray<T, Ndim> &NDArray<T, Ndim>::operator=(const NDArray<T, Ndim> &other) {
|
||||
if (this != &other) {
|
||||
delete[] data_;
|
||||
@@ -310,7 +319,7 @@ NDArray<T, Ndim> &NDArray<T, Ndim>::operator=(const NDArray<T, Ndim> &other) {
|
||||
return *this;
|
||||
}
|
||||
|
||||
template <typename T, int64_t Ndim>
|
||||
template <typename T, ssize_t Ndim>
|
||||
bool NDArray<T, Ndim>::operator==(const NDArray<T, Ndim> &other) const {
|
||||
if (shape_ != other.shape_)
|
||||
return false;
|
||||
@@ -322,83 +331,74 @@ bool NDArray<T, Ndim>::operator==(const NDArray<T, Ndim> &other) const {
|
||||
return true;
|
||||
}
|
||||
|
||||
template <typename T, int64_t Ndim>
|
||||
template <typename T, ssize_t Ndim>
|
||||
bool NDArray<T, Ndim>::operator!=(const NDArray<T, Ndim> &other) const {
|
||||
return !((*this) == other);
|
||||
}
|
||||
template <typename T, int64_t Ndim>
|
||||
template <typename T, ssize_t Ndim>
|
||||
NDArray<T, Ndim> &NDArray<T, Ndim>::operator++() {
|
||||
for (uint32_t i = 0; i < size_; ++i)
|
||||
data_[i] += 1;
|
||||
return *this;
|
||||
}
|
||||
template <typename T, int64_t Ndim>
|
||||
template <typename T, ssize_t Ndim>
|
||||
NDArray<T, Ndim> &NDArray<T, Ndim>::operator=(const T &value) {
|
||||
std::fill_n(data_, size_, value);
|
||||
return *this;
|
||||
}
|
||||
|
||||
template <typename T, int64_t Ndim>
|
||||
template <typename T, ssize_t Ndim>
|
||||
NDArray<T, Ndim> &NDArray<T, Ndim>::operator+=(const T &value) {
|
||||
for (uint32_t i = 0; i < size_; ++i)
|
||||
data_[i] += value;
|
||||
return *this;
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
template <typename T, int64_t Ndim>
|
||||
template <typename T, ssize_t Ndim>
|
||||
NDArray<T, Ndim> NDArray<T, Ndim>::operator+(const T &value) {
|
||||
NDArray result = *this;
|
||||
result += value;
|
||||
return result;
|
||||
}
|
||||
template <typename T, int64_t Ndim>
|
||||
template <typename T, ssize_t Ndim>
|
||||
NDArray<T, Ndim> &NDArray<T, Ndim>::operator-=(const T &value) {
|
||||
for (uint32_t i = 0; i < size_; ++i)
|
||||
data_[i] -= value;
|
||||
return *this;
|
||||
}
|
||||
template <typename T, int64_t Ndim>
|
||||
template <typename T, ssize_t Ndim>
|
||||
NDArray<T, Ndim> NDArray<T, Ndim>::operator-(const T &value) {
|
||||
NDArray result = *this;
|
||||
result -= value;
|
||||
return result;
|
||||
}
|
||||
|
||||
template <typename T, int64_t Ndim>
|
||||
template <typename T, ssize_t Ndim>
|
||||
NDArray<T, Ndim> &NDArray<T, Ndim>::operator/=(const T &value) {
|
||||
for (uint32_t i = 0; i < size_; ++i)
|
||||
data_[i] /= value;
|
||||
return *this;
|
||||
}
|
||||
template <typename T, int64_t Ndim>
|
||||
template <typename T, ssize_t Ndim>
|
||||
NDArray<T, Ndim> NDArray<T, Ndim>::operator/(const T &value) {
|
||||
NDArray result = *this;
|
||||
result /= value;
|
||||
return result;
|
||||
}
|
||||
template <typename T, int64_t Ndim>
|
||||
template <typename T, ssize_t Ndim>
|
||||
NDArray<T, Ndim> &NDArray<T, Ndim>::operator*=(const T &value) {
|
||||
for (uint32_t i = 0; i < size_; ++i)
|
||||
data_[i] *= value;
|
||||
return *this;
|
||||
}
|
||||
template <typename T, int64_t Ndim>
|
||||
template <typename T, ssize_t Ndim>
|
||||
NDArray<T, Ndim> NDArray<T, Ndim>::operator*(const T &value) {
|
||||
NDArray result = *this;
|
||||
result *= value;
|
||||
return result;
|
||||
}
|
||||
// template <typename T, int64_t Ndim> void NDArray<T, Ndim>::Print() {
|
||||
// if (shape_[0] < 20 && shape_[1] < 20)
|
||||
// Print_all();
|
||||
// else
|
||||
// Print_some();
|
||||
// }
|
||||
|
||||
template <typename T, int64_t Ndim>
|
||||
template <typename T, ssize_t Ndim>
|
||||
std::ostream &operator<<(std::ostream &os, const NDArray<T, Ndim> &arr) {
|
||||
for (auto row = 0; row < arr.shape(0); ++row) {
|
||||
for (auto col = 0; col < arr.shape(1); ++col) {
|
||||
@@ -410,7 +410,7 @@ std::ostream &operator<<(std::ostream &os, const NDArray<T, Ndim> &arr) {
|
||||
return os;
|
||||
}
|
||||
|
||||
template <typename T, int64_t Ndim> void NDArray<T, Ndim>::Print_all() {
|
||||
template <typename T, ssize_t Ndim> void NDArray<T, Ndim>::Print_all() {
|
||||
for (auto row = 0; row < shape_[0]; ++row) {
|
||||
for (auto col = 0; col < shape_[1]; ++col) {
|
||||
std::cout << std::setw(3);
|
||||
@@ -419,7 +419,7 @@ template <typename T, int64_t Ndim> void NDArray<T, Ndim>::Print_all() {
|
||||
std::cout << "\n";
|
||||
}
|
||||
}
|
||||
template <typename T, int64_t Ndim> void NDArray<T, Ndim>::Print_some() {
|
||||
template <typename T, ssize_t Ndim> void NDArray<T, Ndim>::Print_some() {
|
||||
for (auto row = 0; row < 5; ++row) {
|
||||
for (auto col = 0; col < 5; ++col) {
|
||||
std::cout << std::setw(7);
|
||||
@@ -429,7 +429,7 @@ template <typename T, int64_t Ndim> void NDArray<T, Ndim>::Print_some() {
|
||||
}
|
||||
}
|
||||
|
||||
template <typename T, int64_t Ndim>
|
||||
template <typename T, ssize_t Ndim>
|
||||
void save(NDArray<T, Ndim> &img, std::string &pathname) {
|
||||
std::ofstream f;
|
||||
f.open(pathname, std::ios::binary);
|
||||
@@ -437,9 +437,9 @@ void save(NDArray<T, Ndim> &img, std::string &pathname) {
|
||||
f.close();
|
||||
}
|
||||
|
||||
template <typename T, int64_t Ndim>
|
||||
template <typename T, ssize_t Ndim>
|
||||
NDArray<T, Ndim> load(const std::string &pathname,
|
||||
std::array<int64_t, Ndim> shape) {
|
||||
std::array<ssize_t, Ndim> shape) {
|
||||
NDArray<T, Ndim> img{shape};
|
||||
std::ifstream f;
|
||||
f.open(pathname, std::ios::binary);
|
||||
@@ -448,6 +448,23 @@ NDArray<T, Ndim> load(const std::string &pathname,
|
||||
return img;
|
||||
}
|
||||
|
||||
|
||||
template <typename RT, typename NT, typename DT, ssize_t Ndim>
|
||||
NDArray<RT, Ndim> safe_divide(const NDArray<NT, Ndim> &numerator,
|
||||
const NDArray<DT, Ndim> &denominator) {
|
||||
if (numerator.shape() != denominator.shape()) {
|
||||
throw std::runtime_error(
|
||||
"Shapes of numerator and denominator must match");
|
||||
}
|
||||
NDArray<RT, Ndim> result(numerator.shape());
|
||||
for (ssize_t i = 0; i < numerator.size(); ++i) {
|
||||
if (denominator[i] != 0) {
|
||||
result[i] =
|
||||
static_cast<RT>(numerator[i]) / static_cast<RT>(denominator[i]);
|
||||
} else {
|
||||
result[i] = RT{0}; // or handle division by zero as needed
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
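// --- Editor's illustrative sketch, not part of the diff ---------------------
// safe_divide() above promotes to the requested result type and maps division
// by zero to RT{0}. A small element-wise example:
#include "aare/NDArray.hpp"
#include <array>

inline void example_safe_divide() {
    using namespace aare;
    NDArray<int, 1> num(std::array<int, 3>{10, 5, 7});
    NDArray<int, 1> den(std::array<int, 3>{2, 0, 7});
    auto ratio = safe_divide<double>(num, den); // NDArray<double, 1>
    // ratio holds {5.0, 0.0, 1.0}; the middle element takes the zero branch.
    (void)ratio;
}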
|
||||
|
||||
} // namespace aare
|
||||
@@ -1,6 +1,6 @@
|
||||
#pragma once
|
||||
#include "aare/defs.hpp"
|
||||
#include "aare/ArrayExpr.hpp"
|
||||
#include "aare/defs.hpp"
|
||||
|
||||
#include <algorithm>
|
||||
#include <array>
|
||||
@@ -14,10 +14,11 @@
|
||||
#include <vector>
|
||||
namespace aare {
|
||||
|
||||
template <int64_t Ndim> using Shape = std::array<int64_t, Ndim>;
|
||||
template <ssize_t Ndim> using Shape = std::array<ssize_t, Ndim>;
|
||||
|
||||
// TODO! fix mismatch between signed and unsigned
|
||||
template <int64_t Ndim> Shape<Ndim> make_shape(const std::vector<size_t> &shape) {
|
||||
template <ssize_t Ndim>
|
||||
Shape<Ndim> make_shape(const std::vector<size_t> &shape) {
|
||||
if (shape.size() != Ndim)
|
||||
throw std::runtime_error("Shape size mismatch");
|
||||
Shape<Ndim> arr;
|
||||
@@ -25,67 +26,130 @@ template <int64_t Ndim> Shape<Ndim> make_shape(const std::vector<size_t> &shape)
|
||||
return arr;
|
||||
}
|
||||
|
||||
template <int64_t Dim = 0, typename Strides> int64_t element_offset(const Strides & /*unused*/) { return 0; }
|
||||
|
||||
template <int64_t Dim = 0, typename Strides, typename... Ix>
|
||||
int64_t element_offset(const Strides &strides, int64_t i, Ix... index) {
|
||||
/**
|
||||
* @brief Helper function to drop the first dimension of a shape.
|
||||
* This is useful when you want to create a 2D view from a 3D array.
|
||||
* @param shape The shape to drop the first dimension from.
|
||||
* @return A new shape with the first dimension dropped.
|
||||
*/
|
||||
template<size_t Ndim>
|
||||
Shape<Ndim-1> drop_first_dim(const Shape<Ndim> &shape) {
|
||||
static_assert(Ndim > 1, "Cannot drop first dimension from a 1D shape");
|
||||
Shape<Ndim - 1> new_shape;
|
||||
std::copy(shape.begin() + 1, shape.end(), new_shape.begin());
|
||||
return new_shape;
|
||||
}
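drop_first_dim only reshapes; to actually view one frame of a 3D stack it is combined with a pointer offset along the first stride. A hedged sketch (buffer size and shape are invented):

#include "aare/NDView.hpp"
#include <cstdint>
#include <vector>

int main() {
    using namespace aare;
    std::vector<uint16_t> buf(10 * 512 * 1024); // 10 frames of 512x1024
    Shape<3> shape{10, 512, 1024};
    NDView<uint16_t, 3> stack(buf.data(), shape);

    // Shape of a single frame: the first dimension is dropped
    Shape<2> frame_shape = drop_first_dim(shape); // {512, 1024}

    // 2D view of frame 3: offset by 3 * the stride of the frame dimension
    NDView<uint16_t, 2> frame(buf.data() + 3 * c_strides<3>(shape)[0], frame_shape);
}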
|
||||
|
||||
/**
|
||||
* @brief Helper function when constructing NDArray/NDView. Calculates the number
|
||||
* of elements in the resulting array from a shape.
|
||||
* @param shape The shape to calculate the number of elements for.
|
||||
* @return The number of elements in an NDArray/NDView of that shape.
|
||||
*/
|
||||
template <size_t Ndim>
|
||||
size_t num_elements(const Shape<Ndim> &shape) {
|
||||
return std::accumulate(shape.begin(), shape.end(), 1,
|
||||
std::multiplies<size_t>());
|
||||
}
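num_elements is just the product of all dimensions; a one-line check, assuming the header above:

#include "aare/NDView.hpp"
#include <cassert>

int main() {
    aare::Shape<3> s{3, 512, 1024};
    assert(aare::num_elements(s) == 3u * 512u * 1024u); // product over all dims
}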
|
||||
|
||||
template <ssize_t Dim = 0, typename Strides>
|
||||
ssize_t element_offset(const Strides & /*unused*/) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
template <ssize_t Dim = 0, typename Strides, typename... Ix>
|
||||
ssize_t element_offset(const Strides &strides, ssize_t i, Ix... index) {
|
||||
return i * strides[Dim] + element_offset<Dim + 1>(strides, index...);
|
||||
}
|
||||
|
||||
template <int64_t Ndim> std::array<int64_t, Ndim> c_strides(const std::array<int64_t, Ndim> &shape) {
|
||||
std::array<int64_t, Ndim> strides{};
|
||||
template <ssize_t Ndim>
|
||||
std::array<ssize_t, Ndim> c_strides(const std::array<ssize_t, Ndim> &shape) {
|
||||
std::array<ssize_t, Ndim> strides{};
|
||||
std::fill(strides.begin(), strides.end(), 1);
|
||||
for (int64_t i = Ndim - 1; i > 0; --i) {
|
||||
for (ssize_t i = Ndim - 1; i > 0; --i) {
|
||||
strides[i - 1] = strides[i] * shape[i];
|
||||
}
|
||||
return strides;
|
||||
}
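c_strides returns row-major (C order) strides in units of elements, with the last dimension contiguous. For example, for an invented {3, 4, 5} shape:

#include "aare/NDView.hpp"
#include <cassert>

int main() {
    auto strides = aare::c_strides<3>(aare::Shape<3>{3, 4, 5});
    // Each stride is the product of the dimensions to its right: {4*5, 5, 1}
    assert(strides[0] == 20 && strides[1] == 5 && strides[2] == 1);
}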
|
||||
|
||||
template <int64_t Ndim> std::array<int64_t, Ndim> make_array(const std::vector<int64_t> &vec) {
|
||||
template <ssize_t Ndim>
|
||||
std::array<ssize_t, Ndim> make_array(const std::vector<ssize_t> &vec) {
|
||||
assert(vec.size() == Ndim);
|
||||
std::array<int64_t, Ndim> arr{};
|
||||
std::array<ssize_t, Ndim> arr{};
|
||||
std::copy_n(vec.begin(), Ndim, arr.begin());
|
||||
return arr;
|
||||
}
|
||||
|
||||
template <typename T, int64_t Ndim = 2> class NDView : public ArrayExpr<NDView<T, Ndim>, Ndim> {
|
||||
template <typename T, ssize_t Ndim = 2>
|
||||
class NDView : public ArrayExpr<NDView<T, Ndim>, Ndim> {
|
||||
public:
|
||||
NDView() = default;
|
||||
~NDView() = default;
|
||||
NDView(const NDView &) = default;
|
||||
NDView(NDView &&) = default;
|
||||
|
||||
NDView(T *buffer, std::array<int64_t, Ndim> shape)
|
||||
NDView(T *buffer, std::array<ssize_t, Ndim> shape)
|
||||
: buffer_(buffer), strides_(c_strides<Ndim>(shape)), shape_(shape),
|
||||
size_(std::accumulate(std::begin(shape), std::end(shape), 1, std::multiplies<>())) {}
|
||||
size_(std::accumulate(std::begin(shape), std::end(shape), 1,
|
||||
std::multiplies<>())) {}
|
||||
|
||||
// NDView(T *buffer, const std::vector<int64_t> &shape)
|
||||
// : buffer_(buffer), strides_(c_strides<Ndim>(make_array<Ndim>(shape))), shape_(make_array<Ndim>(shape)),
|
||||
// size_(std::accumulate(std::begin(shape), std::end(shape), 1, std::multiplies<>())) {}
|
||||
|
||||
template <typename... Ix> std::enable_if_t<sizeof...(Ix) == Ndim, T &> operator()(Ix... index) {
|
||||
// stride-aware constructor
|
||||
NDView(T* buffer, std::array<ssize_t, Ndim> shape, std::array<ssize_t, Ndim> strides)
|
||||
: buffer_(buffer), shape_(shape), strides_(strides),
|
||||
size_(std::accumulate(std::begin(shape), std::end(shape), 1,
|
||||
std::multiplies<>())) {}
|
||||
|
||||
template <typename... Ix>
|
||||
std::enable_if_t<sizeof...(Ix) == Ndim, T &> operator()(Ix... index) {
|
||||
return buffer_[element_offset(strides_, index...)];
|
||||
}
|
||||
|
||||
template <typename... Ix> std::enable_if_t<sizeof...(Ix) == Ndim, T &> operator()(Ix... index) const {
|
||||
template <typename... Ix>
|
||||
std::enable_if_t<sizeof...(Ix) == 1 && (Ndim > 1), NDView<T, Ndim - 1>> operator()(Ix... index) {
|
||||
// return a view of the next dimension
|
||||
std::array<ssize_t, Ndim - 1> new_shape{};
|
||||
std::copy_n(shape_.begin() + 1, Ndim - 1, new_shape.begin());
|
||||
return NDView<T, Ndim - 1>(&buffer_[element_offset(strides_, index...)],
|
||||
new_shape);
|
||||
|
||||
}
|
||||
|
||||
template <typename... Ix>
|
||||
std::enable_if_t<sizeof...(Ix) == Ndim, const T &> operator()(Ix... index) const {
|
||||
return buffer_[element_offset(strides_, index...)];
|
||||
}
|
||||
|
||||
|
||||
ssize_t size() const { return static_cast<ssize_t>(size_); }
|
||||
size_t total_bytes() const { return size_ * sizeof(T); }
|
||||
std::array<int64_t, Ndim> strides() const noexcept { return strides_; }
|
||||
std::array<ssize_t, Ndim> strides() const noexcept { return strides_; }
|
||||
|
||||
T *begin() { return buffer_; }
|
||||
T *end() { return buffer_ + size_; }
|
||||
T const *begin() const { return buffer_; }
|
||||
T const *end() const { return buffer_ + size_; }
|
||||
T &operator()(int64_t i) const { return buffer_[i]; }
|
||||
T &operator[](int64_t i) const { return buffer_[i]; }
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* @brief Access element at index i.
|
||||
*/
|
||||
T &operator[](ssize_t i) { return buffer_[i]; }
|
||||
|
||||
/**
|
||||
* @brief Access element at index i.
|
||||
*/
|
||||
const T &operator[](ssize_t i) const { return buffer_[i]; }
|
||||
|
||||
bool operator==(const NDView &other) const {
|
||||
if (size_ != other.size_)
|
||||
return false;
|
||||
for (uint64_t i = 0; i != size_; ++i) {
|
||||
if (shape_ != other.shape_)
|
||||
return false;
|
||||
for (size_t i = 0; i != size_; ++i) {
|
||||
if (buffer_[i] != other.buffer_[i])
|
||||
return false;
|
||||
}
|
||||
@@ -94,16 +158,21 @@ template <typename T, int64_t Ndim = 2> class NDView : public ArrayExpr<NDView<T
|
||||
|
||||
NDView &operator+=(const T val) { return elemenwise(val, std::plus<T>()); }
|
||||
NDView &operator-=(const T val) { return elemenwise(val, std::minus<T>()); }
|
||||
NDView &operator*=(const T val) { return elemenwise(val, std::multiplies<T>()); }
|
||||
NDView &operator/=(const T val) { return elemenwise(val, std::divides<T>()); }
|
||||
NDView &operator*=(const T val) {
|
||||
return elemenwise(val, std::multiplies<T>());
|
||||
}
|
||||
NDView &operator/=(const T val) {
|
||||
return elemenwise(val, std::divides<T>());
|
||||
}
|
||||
|
||||
NDView &operator/=(const NDView &other) { return elemenwise(other, std::divides<T>()); }
|
||||
NDView &operator/=(const NDView &other) {
|
||||
return elemenwise(other, std::divides<T>());
|
||||
}
|
||||
|
||||
|
||||
template<size_t Size>
|
||||
NDView& operator=(const std::array<T, Size> &arr) {
|
||||
if(size() != static_cast<ssize_t>(arr.size()))
|
||||
throw std::runtime_error(LOCATION + "Array and NDView size mismatch");
|
||||
template <size_t Size> NDView &operator=(const std::array<T, Size> &arr) {
|
||||
if (size() != static_cast<ssize_t>(arr.size()))
|
||||
throw std::runtime_error(LOCATION +
|
||||
"Array and NDView size mismatch");
|
||||
std::copy(arr.begin(), arr.end(), begin());
|
||||
return *this;
|
||||
}
|
||||
@@ -136,31 +205,51 @@ template <typename T, int64_t Ndim = 2> class NDView : public ArrayExpr<NDView<T
|
||||
}
|
||||
|
||||
auto &shape() const { return shape_; }
|
||||
auto shape(int64_t i) const { return shape_[i]; }
|
||||
auto shape(ssize_t i) const { return shape_[i]; }
|
||||
|
||||
T *data() { return buffer_; }
|
||||
const T *data() const { return buffer_; }
|
||||
void print_all() const;
|
||||
|
||||
/**
|
||||
* @brief Create a subview of a range of the first dimension.
|
||||
* This is useful for splitting batches of frames for parallel processing.
|
||||
* @param first The first index of the subview (inclusive).
|
||||
* @param last The last index of the subview (exclusive).
|
||||
* @return A new NDView that is a subview of the current view.
|
||||
* @throws std::runtime_error if the range is invalid.
|
||||
*/
|
||||
NDView sub_view(ssize_t first, ssize_t last) const {
|
||||
if (first < 0 || last > shape_[0] || first >= last)
|
||||
throw std::runtime_error(LOCATION + "Invalid sub_view range");
|
||||
auto new_shape = shape_;
|
||||
new_shape[0] = last - first;
|
||||
return NDView(buffer_ + first * strides_[0], new_shape);
|
||||
}
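Because sub_view only slices the first (frame) dimension, each worker can be handed a contiguous block of frames without copying. A hedged sketch (buffer size is invented):

#include "aare/NDView.hpp"
#include <cstdint>
#include <vector>

int main() {
    using namespace aare;
    std::vector<uint16_t> buf(100 * 256 * 256);
    NDView<uint16_t, 3> frames(buf.data(), Shape<3>{100, 256, 256});

    // e.g. one half per worker thread; both views alias the original buffer
    auto first_half = frames.sub_view(0, 50);    // frames [0, 50)
    auto second_half = frames.sub_view(50, 100); // frames [50, 100)
    // first_half.shape(0) == 50, second_half.shape(0) == 50
}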
|
||||
|
||||
private:
|
||||
T *buffer_{nullptr};
|
||||
std::array<int64_t, Ndim> strides_{};
|
||||
std::array<int64_t, Ndim> shape_{};
|
||||
std::array<ssize_t, Ndim> strides_{};
|
||||
std::array<ssize_t, Ndim> shape_{};
|
||||
uint64_t size_{};
|
||||
|
||||
template <class BinaryOperation> NDView &elemenwise(T val, BinaryOperation op) {
|
||||
template <class BinaryOperation>
|
||||
NDView &elemenwise(T val, BinaryOperation op) {
|
||||
for (uint64_t i = 0; i != size_; ++i) {
|
||||
buffer_[i] = op(buffer_[i], val);
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
template <class BinaryOperation> NDView &elemenwise(const NDView &other, BinaryOperation op) {
|
||||
template <class BinaryOperation>
|
||||
NDView &elemenwise(const NDView &other, BinaryOperation op) {
|
||||
for (uint64_t i = 0; i != size_; ++i) {
|
||||
buffer_[i] = op(buffer_[i], other.buffer_[i]);
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
};
|
||||
template <typename T, int64_t Ndim> void NDView<T, Ndim>::print_all() const {
|
||||
|
||||
template <typename T, ssize_t Ndim> void NDView<T, Ndim>::print_all() const {
|
||||
for (auto row = 0; row < shape_[0]; ++row) {
|
||||
for (auto col = 0; col < shape_[1]; ++col) {
|
||||
std::cout << std::setw(3);
|
||||
@@ -170,9 +259,8 @@ template <typename T, int64_t Ndim> void NDView<T, Ndim>::print_all() const {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
template <typename T, int64_t Ndim>
|
||||
std::ostream& operator <<(std::ostream& os, const NDView<T, Ndim>& arr){
|
||||
template <typename T, ssize_t Ndim>
|
||||
std::ostream &operator<<(std::ostream &os, const NDView<T, Ndim> &arr) {
|
||||
for (auto row = 0; row < arr.shape(0); ++row) {
|
||||
for (auto col = 0; col < arr.shape(1); ++col) {
|
||||
os << std::setw(3);
|
||||
@@ -183,10 +271,8 @@ std::ostream& operator <<(std::ostream& os, const NDView<T, Ndim>& arr){
|
||||
return os;
|
||||
}
|
||||
|
||||
|
||||
template <typename T>
|
||||
NDView<T,1> make_view(std::vector<T>& vec){
|
||||
return NDView<T,1>(vec.data(), {static_cast<int64_t>(vec.size())});
|
||||
template <typename T> NDView<T, 1> make_view(std::vector<T> &vec) {
|
||||
return NDView<T, 1>(vec.data(), {static_cast<ssize_t>(vec.size())});
|
||||
}
|
||||
|
||||
} // namespace aare
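make_view wraps an existing std::vector in a 1D NDView without copying, so the vector must outlive the view. A short sketch:

#include "aare/NDView.hpp"
#include <vector>

int main() {
    std::vector<double> data{1.0, 2.0, 3.0};
    auto view = aare::make_view(data); // NDView<double, 1> over data's storage
    view *= 2.0;                       // modifies the vector in place
    // data is now {2.0, 4.0, 6.0}
}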
|
||||
@@ -1,9 +1,8 @@
|
||||
#pragma once
|
||||
#include "aare/Dtype.hpp"
|
||||
#include "aare/defs.hpp"
|
||||
#include "aare/FileInterface.hpp"
|
||||
#include "aare/NumpyHelpers.hpp"
|
||||
|
||||
#include "aare/defs.hpp"
|
||||
|
||||
#include <filesystem>
|
||||
#include <iostream>
|
||||
@@ -11,13 +10,12 @@
|
||||
|
||||
namespace aare {
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* @brief NumpyFile class to read and write numpy files
|
||||
* @note derived from FileInterface
|
||||
* @note implements all the pure virtual functions from FileInterface
|
||||
* @note documentation for the functions can also be found in the FileInterface class
|
||||
* @note documentation for the functions can also be found in the FileInterface
|
||||
* class
|
||||
*/
|
||||
class NumpyFile : public FileInterface {
|
||||
|
||||
@@ -28,26 +26,35 @@ class NumpyFile : public FileInterface {
|
||||
* @param mode file mode (r, w)
|
||||
* @param cfg file configuration
|
||||
*/
|
||||
explicit NumpyFile(const std::filesystem::path &fname, const std::string &mode = "r", FileConfig cfg = {});
|
||||
explicit NumpyFile(const std::filesystem::path &fname,
|
||||
const std::string &mode = "r", FileConfig cfg = {});
|
||||
|
||||
void write(Frame &frame);
|
||||
Frame read_frame() override { return get_frame(this->current_frame++); }
|
||||
Frame read_frame(size_t frame_number) override { return get_frame(frame_number); }
|
||||
Frame read_frame(size_t frame_number) override {
|
||||
return get_frame(frame_number);
|
||||
}
|
||||
|
||||
std::vector<Frame> read_n(size_t n_frames) override;
|
||||
void read_into(std::byte *image_buf) override { return get_frame_into(this->current_frame++, image_buf); }
|
||||
void read_into(std::byte *image_buf) override {
|
||||
return get_frame_into(this->current_frame++, image_buf);
|
||||
}
|
||||
void read_into(std::byte *image_buf, size_t n_frames) override;
|
||||
size_t frame_number(size_t frame_index) override { return frame_index; };
|
||||
size_t bytes_per_frame() override;
|
||||
size_t pixels_per_frame() override;
|
||||
void seek(size_t frame_number) override { this->current_frame = frame_number; }
|
||||
void seek(size_t frame_number) override {
|
||||
this->current_frame = frame_number;
|
||||
}
|
||||
size_t tell() override { return this->current_frame; }
|
||||
size_t total_frames() const override { return m_header.shape[0]; }
|
||||
size_t rows() const override { return m_header.shape[1]; }
|
||||
size_t cols() const override { return m_header.shape[2]; }
|
||||
size_t bitdepth() const override { return m_header.dtype.bitdepth(); }
|
||||
|
||||
DetectorType detector_type() const override { return DetectorType::Unknown; }
|
||||
DetectorType detector_type() const override {
|
||||
return DetectorType::Unknown;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief get the data type of the numpy file
|
||||
@@ -69,8 +76,9 @@ class NumpyFile : public FileInterface {
|
||||
*/
|
||||
template <typename T, size_t NDim> NDArray<T, NDim> load() {
|
||||
NDArray<T, NDim> arr(make_shape<NDim>(m_header.shape));
|
||||
if (fseek(fp, static_cast<int64_t>(header_size), SEEK_SET)) {
|
||||
throw std::runtime_error(LOCATION + "Error seeking to the start of the data");
|
||||
if (fseek(fp, static_cast<long>(header_size), SEEK_SET)) {
|
||||
throw std::runtime_error(LOCATION +
|
||||
"Error seeking to the start of the data");
|
||||
}
|
||||
size_t rc = fread(arr.data(), sizeof(T), arr.size(), fp);
|
||||
if (rc != static_cast<size_t>(arr.size())) {
|
||||
@@ -78,16 +86,20 @@ class NumpyFile : public FileInterface {
|
||||
}
|
||||
return arr;
|
||||
}
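NumpyFile::load<T, NDim>() seeks past the .npy header and reads the whole array in a single fread, so T and NDim have to match the dtype and rank stored in the file. A hedged usage sketch (the file name is hypothetical):

#include "aare/NDArray.hpp"
#include "aare/NumpyFile.hpp"

int main() {
    aare::NumpyFile f("pedestal.npy"); // hypothetical input file, opened read-only
    auto ped = f.load<double, 3>();    // throws on seek/read errors
    // ped.shape() mirrors the shape recorded in the numpy header
}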
|
||||
template <typename A, typename TYPENAME, A Ndim> void write(NDView<TYPENAME, Ndim> &frame) {
|
||||
template <typename A, typename TYPENAME, A Ndim>
|
||||
void write(NDView<TYPENAME, Ndim> &frame) {
|
||||
write_impl(frame.data(), frame.total_bytes());
|
||||
}
|
||||
template <typename A, typename TYPENAME, A Ndim> void write(NDArray<TYPENAME, Ndim> &frame) {
|
||||
template <typename A, typename TYPENAME, A Ndim>
|
||||
void write(NDArray<TYPENAME, Ndim> &frame) {
|
||||
write_impl(frame.data(), frame.total_bytes());
|
||||
}
|
||||
template <typename A, typename TYPENAME, A Ndim> void write(NDView<TYPENAME, Ndim> &&frame) {
|
||||
template <typename A, typename TYPENAME, A Ndim>
|
||||
void write(NDView<TYPENAME, Ndim> &&frame) {
|
||||
write_impl(frame.data(), frame.total_bytes());
|
||||
}
|
||||
template <typename A, typename TYPENAME, A Ndim> void write(NDArray<TYPENAME, Ndim> &&frame) {
|
||||
template <typename A, typename TYPENAME, A Ndim>
|
||||
void write(NDArray<TYPENAME, Ndim> &&frame) {
|
||||
write_impl(frame.data(), frame.total_bytes());
|
||||
}
|
||||
|
||||
|
||||
@@ -40,15 +40,18 @@ bool parse_bool(const std::string &in);
|
||||
|
||||
std::string get_value_from_map(const std::string &mapstr);
|
||||
|
||||
std::unordered_map<std::string, std::string> parse_dict(std::string in, const std::vector<std::string> &keys);
|
||||
std::unordered_map<std::string, std::string>
|
||||
parse_dict(std::string in, const std::vector<std::string> &keys);
|
||||
|
||||
template <typename T, size_t N> bool in_array(T val, const std::array<T, N> &arr) {
|
||||
template <typename T, size_t N>
|
||||
bool in_array(T val, const std::array<T, N> &arr) {
|
||||
return std::find(std::begin(arr), std::end(arr), val) != std::end(arr);
|
||||
}
|
||||
bool is_digits(const std::string &str);
|
||||
|
||||
aare::Dtype parse_descr(std::string typestring);
|
||||
size_t write_header(const std::filesystem::path &fname, const NumpyHeader &header);
|
||||
size_t write_header(const std::filesystem::path &fname,
|
||||
const NumpyHeader &header);
|
||||
size_t write_header(std::ostream &out, const NumpyHeader &header);
|
||||
|
||||
} // namespace NumpyHelpers
|
||||
|
||||
@@ -18,15 +18,15 @@ template <typename SUM_TYPE = double> class Pedestal {
|
||||
|
||||
uint32_t m_samples;
|
||||
NDArray<uint32_t, 2> m_cur_samples;
|
||||
|
||||
//TODO! in case of int needs to be changed to uint64_t
|
||||
|
||||
// TODO! in case of int needs to be changed to uint64_t
|
||||
NDArray<SUM_TYPE, 2> m_sum;
|
||||
NDArray<SUM_TYPE, 2> m_sum2;
|
||||
|
||||
//Cache mean since it is used over and over in the ClusterFinder
|
||||
//This optimization is related to the access pattern of the ClusterFinder
|
||||
//Relies on having more reads than pushes to the pedestal
|
||||
NDArray<SUM_TYPE, 2> m_mean;
|
||||
// Cache mean since it is used over and over in the ClusterFinder
|
||||
// This optimization is related to the access pattern of the ClusterFinder
|
||||
// Relies on having more reads than pushes to the pedestal
|
||||
NDArray<SUM_TYPE, 2> m_mean;
|
||||
|
||||
public:
|
||||
Pedestal(uint32_t rows, uint32_t cols, uint32_t n_samples = 1000)
|
||||
@@ -42,9 +42,7 @@ template <typename SUM_TYPE = double> class Pedestal {
|
||||
}
|
||||
~Pedestal() = default;
|
||||
|
||||
NDArray<SUM_TYPE, 2> mean() {
|
||||
return m_mean;
|
||||
}
|
||||
NDArray<SUM_TYPE, 2> mean() { return m_mean; }
|
||||
|
||||
SUM_TYPE mean(const uint32_t row, const uint32_t col) const {
|
||||
return m_mean(row, col);
|
||||
@@ -71,8 +69,6 @@ template <typename SUM_TYPE = double> class Pedestal {
|
||||
return variance_array;
|
||||
}
|
||||
|
||||
|
||||
|
||||
NDArray<SUM_TYPE, 2> std() {
|
||||
NDArray<SUM_TYPE, 2> standard_deviation_array({m_rows, m_cols});
|
||||
for (uint32_t i = 0; i < m_rows * m_cols; i++) {
|
||||
@@ -83,8 +79,6 @@ template <typename SUM_TYPE = double> class Pedestal {
|
||||
return standard_deviation_array;
|
||||
}
|
||||
|
||||
|
||||
|
||||
void clear() {
|
||||
m_sum = 0;
|
||||
m_sum2 = 0;
|
||||
@@ -92,22 +86,18 @@ template <typename SUM_TYPE = double> class Pedestal {
|
||||
m_mean = 0;
|
||||
}
|
||||
|
||||
|
||||
|
||||
void clear(const uint32_t row, const uint32_t col) {
|
||||
m_sum(row, col) = 0;
|
||||
m_sum2(row, col) = 0;
|
||||
m_cur_samples(row, col) = 0;
|
||||
m_mean(row, col) = 0;
|
||||
}
|
||||
|
||||
|
||||
|
||||
template <typename T> void push(NDView<T, 2> frame) {
|
||||
assert(frame.size() == m_rows * m_cols);
|
||||
|
||||
// TODO! move away from m_rows, m_cols
|
||||
if (frame.shape() != std::array<int64_t, 2>{m_rows, m_cols}) {
|
||||
if (frame.shape() != std::array<ssize_t, 2>{m_rows, m_cols}) {
|
||||
throw std::runtime_error(
|
||||
"Frame shape does not match pedestal shape");
|
||||
}
|
||||
@@ -122,13 +112,13 @@ template <typename SUM_TYPE = double> class Pedestal {
|
||||
/**
|
||||
* Push but don't update the cached mean. Speeds up the process
|
||||
* when initializing the pedestal.
|
||||
*
|
||||
*
|
||||
*/
|
||||
template <typename T> void push_no_update(NDView<T, 2> frame) {
|
||||
assert(frame.size() == m_rows * m_cols);
|
||||
|
||||
// TODO! move away from m_rows, m_cols
|
||||
if (frame.shape() != std::array<int64_t, 2>{m_rows, m_cols}) {
|
||||
if (frame.shape() != std::array<ssize_t, 2>{m_rows, m_cols}) {
|
||||
throw std::runtime_error(
|
||||
"Frame shape does not match pedestal shape");
|
||||
}
|
||||
@@ -140,9 +130,6 @@ template <typename SUM_TYPE = double> class Pedestal {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
template <typename T> void push(Frame &frame) {
|
||||
assert(frame.rows() == static_cast<size_t>(m_rows) &&
|
||||
frame.cols() == static_cast<size_t>(m_cols));
|
||||
@@ -170,7 +157,8 @@ template <typename SUM_TYPE = double> class Pedestal {
|
||||
m_sum(row, col) += val - m_sum(row, col) / m_samples;
|
||||
m_sum2(row, col) += val * val - m_sum2(row, col) / m_samples;
|
||||
}
|
||||
//Since we just did a push we know that m_cur_samples(row, col) is at least 1
|
||||
// Since we just did a push we know that m_cur_samples(row, col) is at
|
||||
// least 1
|
||||
m_mean(row, col) = m_sum(row, col) / m_cur_samples(row, col);
|
||||
}
|
||||
|
||||
@@ -183,7 +171,8 @@ template <typename SUM_TYPE = double> class Pedestal {
|
||||
m_cur_samples(row, col)++;
|
||||
} else {
|
||||
m_sum(row, col) += val - m_sum(row, col) / m_cur_samples(row, col);
|
||||
m_sum2(row, col) += val * val - m_sum2(row, col) / m_cur_samples(row, col);
|
||||
m_sum2(row, col) +=
|
||||
val * val - m_sum2(row, col) / m_cur_samples(row, col);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -191,19 +180,16 @@ template <typename SUM_TYPE = double> class Pedestal {
|
||||
* @brief Update the mean of the pedestal. This is used after having done
|
||||
* push_no_update. It is not necessary to call this function after push.
|
||||
*/
|
||||
void update_mean(){
|
||||
m_mean = m_sum / m_cur_samples;
|
||||
}
|
||||
void update_mean() { m_mean = m_sum / m_cur_samples; }
|
||||
|
||||
template<typename T>
|
||||
void push_fast(const uint32_t row, const uint32_t col, const T val_){
|
||||
//Assume we reached the steady state where all pixels have
|
||||
//m_samples samples
|
||||
template <typename T>
|
||||
void push_fast(const uint32_t row, const uint32_t col, const T val_) {
|
||||
// Assume we reached the steady state where all pixels have
|
||||
// m_samples samples
|
||||
SUM_TYPE val = static_cast<SUM_TYPE>(val_);
|
||||
m_sum(row, col) += val - m_sum(row, col) / m_samples;
|
||||
m_sum2(row, col) += val * val - m_sum2(row, col) / m_samples;
|
||||
m_mean(row, col) = m_sum(row, col) / m_samples;
|
||||
}
|
||||
|
||||
};
} // namespace aare
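For context on the update rule used throughout this class: once a pixel has m_samples entries, push() maintains sum += val - sum/N and the cached mean is sum/N, i.e. an exponentially weighted running average with weight 1/N per new sample; caching m_mean is what keeps ClusterFinder reads cheap. A hedged usage sketch (detector size and data are invented, and NDArray::view() is assumed to return the matching NDView as elsewhere in the library):

#include "aare/NDArray.hpp"
#include "aare/Pedestal.hpp"
#include <cstdint>

int main() {
    using namespace aare;
    Pedestal<double> ped(256, 256, 1000); // rows, cols, samples per pixel

    NDArray<uint16_t, 2> dark(Shape<2>{256, 256}, 100); // synthetic dark frame
    for (int i = 0; i < 500; ++i)
        ped.push_no_update(dark.view()); // accumulate without refreshing the mean
    ped.update_mean();                   // refresh the cached mean once at the end

    double m = ped.mean(10, 10);         // ~100 for this synthetic input
}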
|
||||
@@ -1,7 +1,7 @@
|
||||
#pragma once
|
||||
|
||||
#include "aare/defs.hpp"
|
||||
#include "aare/NDArray.hpp"
|
||||
#include "aare/defs.hpp"
|
||||
|
||||
namespace aare {
|
||||
|
||||
@@ -10,11 +10,11 @@ NDArray<ssize_t, 2> GenerateMoench05PixelMap();
|
||||
NDArray<ssize_t, 2> GenerateMoench05PixelMap1g();
|
||||
NDArray<ssize_t, 2> GenerateMoench05PixelMapOld();
|
||||
|
||||
//Matterhorn02
|
||||
NDArray<ssize_t, 2>GenerateMH02SingleCounterPixelMap();
|
||||
// Matterhorn02
|
||||
NDArray<ssize_t, 2> GenerateMH02SingleCounterPixelMap();
|
||||
NDArray<ssize_t, 3> GenerateMH02FourCounterPixelMap();
|
||||
|
||||
//Eiger
|
||||
NDArray<ssize_t, 2>GenerateEigerFlipRowsPixelMap();
|
||||
// Eiger
|
||||
NDArray<ssize_t, 2> GenerateEigerFlipRowsPixelMap();
|
||||
|
||||
} // namespace aare
|
||||
@@ -18,9 +18,9 @@
|
||||
// @author Jordan DeLong (delong.j@fb.com)
|
||||
|
||||
// Changes made by PSD Detector Group:
|
||||
// Copied: Line 34 constexpr std::size_t hardware_destructive_interference_size = 128; from folly/lang/Align.h
|
||||
// Changed extension to .hpp
|
||||
// Changed namespace to aare
|
||||
// Copied: Line 34 constexpr std::size_t hardware_destructive_interference_size
|
||||
// = 128; from folly/lang/Align.h Changed extension to .hpp Changed namespace to
|
||||
// aare
|
||||
|
||||
#pragma once
|
||||
|
||||
@@ -45,15 +45,14 @@ template <class T> struct ProducerConsumerQueue {
|
||||
ProducerConsumerQueue(const ProducerConsumerQueue &) = delete;
|
||||
ProducerConsumerQueue &operator=(const ProducerConsumerQueue &) = delete;
|
||||
|
||||
|
||||
ProducerConsumerQueue(ProducerConsumerQueue &&other){
|
||||
ProducerConsumerQueue(ProducerConsumerQueue &&other) {
|
||||
size_ = other.size_;
|
||||
records_ = other.records_;
|
||||
other.records_ = nullptr;
|
||||
readIndex_ = other.readIndex_.load(std::memory_order_acquire);
|
||||
writeIndex_ = other.writeIndex_.load(std::memory_order_acquire);
|
||||
}
|
||||
ProducerConsumerQueue &operator=(ProducerConsumerQueue &&other){
|
||||
ProducerConsumerQueue &operator=(ProducerConsumerQueue &&other) {
|
||||
size_ = other.size_;
|
||||
records_ = other.records_;
|
||||
other.records_ = nullptr;
|
||||
@@ -61,16 +60,17 @@ template <class T> struct ProducerConsumerQueue {
|
||||
writeIndex_ = other.writeIndex_.load(std::memory_order_acquire);
|
||||
return *this;
|
||||
}
|
||||
|
||||
|
||||
ProducerConsumerQueue():ProducerConsumerQueue(2){};
|
||||
|
||||
ProducerConsumerQueue() : ProducerConsumerQueue(2){};
|
||||
// size must be >= 2.
|
||||
//
|
||||
// Also, note that the number of usable slots in the queue at any
|
||||
// given time is actually (size-1), so if you start with an empty queue,
|
||||
// isFull() will return true after size-1 insertions.
|
||||
explicit ProducerConsumerQueue(uint32_t size)
|
||||
: size_(size), records_(static_cast<T *>(std::malloc(sizeof(T) * size))), readIndex_(0), writeIndex_(0) {
|
||||
: size_(size),
|
||||
records_(static_cast<T *>(std::malloc(sizeof(T) * size))),
|
||||
readIndex_(0), writeIndex_(0) {
|
||||
assert(size >= 2);
|
||||
if (!records_) {
|
||||
throw std::bad_alloc();
|
||||
@@ -154,7 +154,8 @@ template <class T> struct ProducerConsumerQueue {
|
||||
}
|
||||
|
||||
bool isEmpty() const {
|
||||
return readIndex_.load(std::memory_order_acquire) == writeIndex_.load(std::memory_order_acquire);
|
||||
return readIndex_.load(std::memory_order_acquire) ==
|
||||
writeIndex_.load(std::memory_order_acquire);
|
||||
}
|
||||
|
||||
bool isFull() const {
|
||||
@@ -175,7 +176,8 @@ template <class T> struct ProducerConsumerQueue {
|
||||
// be removing items concurrently).
|
||||
// * It is undefined to call this from any other thread.
|
||||
size_t sizeGuess() const {
|
||||
int ret = writeIndex_.load(std::memory_order_acquire) - readIndex_.load(std::memory_order_acquire);
|
||||
int ret = writeIndex_.load(std::memory_order_acquire) -
|
||||
readIndex_.load(std::memory_order_acquire);
|
||||
if (ret < 0) {
|
||||
ret += size_;
|
||||
}
|
||||
@@ -192,7 +194,7 @@ template <class T> struct ProducerConsumerQueue {
|
||||
// const uint32_t size_;
|
||||
uint32_t size_;
|
||||
// T *const records_;
|
||||
T* records_;
|
||||
T *records_;
|
||||
|
||||
alignas(hardware_destructive_interference_size) AtomicIndex readIndex_;
|
||||
alignas(hardware_destructive_interference_size) AtomicIndex writeIndex_;
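This is folly's single-producer/single-consumer ring buffer; one slot is always left unused, so a queue constructed with size N holds at most N-1 items. A hedged sketch of the intended usage pattern (element type, size and item count are invented; write()/read() are the folly API this copy keeps):

#include "aare/ProducerConsumerQueue.hpp"
#include <thread>

int main() {
    aare::ProducerConsumerQueue<int> q(1024); // usable capacity: 1023

    std::thread producer([&q] {
        for (int i = 0; i < 10000; ++i)
            while (!q.write(i)) {} // spin until a slot is free
    });
    std::thread consumer([&q] {
        int value = 0;
        for (int i = 0; i < 10000; ++i)
            while (!q.read(value)) {} // spin until an item is available
    });
    producer.join();
    consumer.join();
}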
|
||||
|
||||
@@ -1,28 +1,19 @@
|
||||
#pragma once
|
||||
#include "aare/DetectorGeometry.hpp"
|
||||
#include "aare/FileInterface.hpp"
|
||||
#include "aare/RawMasterFile.hpp"
|
||||
#include "aare/Frame.hpp"
|
||||
#include "aare/NDArray.hpp" //for pixel map
|
||||
#include "aare/RawMasterFile.hpp"
|
||||
#include "aare/RawSubFile.hpp"
|
||||
|
||||
#ifdef AARE_TESTS
|
||||
#include "../tests/friend_test.hpp"
|
||||
#endif
|
||||
|
||||
#include <optional>
|
||||
|
||||
namespace aare {
|
||||
|
||||
struct ModuleConfig {
|
||||
int module_gap_row{};
|
||||
int module_gap_col{};
|
||||
|
||||
bool operator==(const ModuleConfig &other) const {
|
||||
if (module_gap_col != other.module_gap_col)
|
||||
return false;
|
||||
if (module_gap_row != other.module_gap_row)
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Class to read .raw files. The class will parse the master file
|
||||
* to find the correct geometry for the frames.
|
||||
@@ -30,22 +21,12 @@ struct ModuleConfig {
|
||||
* Consider using that unless you need raw file specific functionality.
|
||||
*/
|
||||
class RawFile : public FileInterface {
|
||||
size_t n_subfiles{}; //f0,f1...fn
|
||||
size_t n_subfile_parts{}; // d0,d1...dn
|
||||
//TODO! move to vector of SubFile instead of pointers
|
||||
std::vector<std::vector<RawSubFile *>> subfiles; //subfiles[f0,f1...fn][d0,d1...dn]
|
||||
// std::vector<xy> positions;
|
||||
|
||||
ModuleConfig cfg{0, 0};
|
||||
|
||||
std::vector<std::unique_ptr<RawSubFile>> m_subfiles;
|
||||
|
||||
RawMasterFile m_master;
|
||||
|
||||
size_t m_current_frame{};
|
||||
|
||||
// std::vector<ModuleGeometry> m_module_pixel_0;
|
||||
// size_t m_rows{};
|
||||
// size_t m_cols{};
|
||||
|
||||
DetectorGeometry m_geometry;
|
||||
|
||||
public:
|
||||
@@ -56,7 +37,7 @@ class RawFile : public FileInterface {
|
||||
|
||||
*/
|
||||
RawFile(const std::filesystem::path &fname, const std::string &mode = "r");
|
||||
virtual ~RawFile() override;
|
||||
virtual ~RawFile() override = default;
|
||||
|
||||
Frame read_frame() override;
|
||||
Frame read_frame(size_t frame_number) override;
|
||||
@@ -64,10 +45,10 @@ class RawFile : public FileInterface {
|
||||
void read_into(std::byte *image_buf) override;
|
||||
void read_into(std::byte *image_buf, size_t n_frames) override;
|
||||
|
||||
//TODO! do we need to adapt the API?
|
||||
// TODO! do we need to adapt the API?
|
||||
void read_into(std::byte *image_buf, DetectorHeader *header);
|
||||
void read_into(std::byte *image_buf, size_t n_frames, DetectorHeader *header);
|
||||
|
||||
void read_into(std::byte *image_buf, size_t n_frames,
|
||||
DetectorHeader *header);
|
||||
|
||||
size_t frame_number(size_t frame_index) override;
|
||||
size_t bytes_per_frame() override;
|
||||
@@ -79,35 +60,14 @@ class RawFile : public FileInterface {
|
||||
size_t rows() const override;
|
||||
size_t cols() const override;
|
||||
size_t bitdepth() const override;
|
||||
xy geometry();
|
||||
size_t n_mod() const;
|
||||
|
||||
size_t n_modules() const;
|
||||
size_t n_modules_in_roi() const;
|
||||
xy geometry() const;
|
||||
|
||||
RawMasterFile master() const;
|
||||
|
||||
|
||||
|
||||
DetectorType detector_type() const override;
|
||||
|
||||
private:
|
||||
|
||||
/**
|
||||
* @brief read the frame at the given frame index into the image buffer
|
||||
* @param frame_number frame number to read
|
||||
* @param image_buf buffer to store the frame
|
||||
*/
|
||||
|
||||
void get_frame_into(size_t frame_index, std::byte *frame_buffer, DetectorHeader *header = nullptr);
|
||||
|
||||
|
||||
/**
|
||||
* @brief get the frame at the given frame index
|
||||
* @param frame_number frame number to read
|
||||
* @return Frame
|
||||
*/
|
||||
Frame get_frame(size_t frame_index);
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* @brief read the header of the file
|
||||
* @param fname path to the data subfile
|
||||
@@ -115,12 +75,24 @@ class RawFile : public FileInterface {
|
||||
*/
|
||||
static DetectorHeader read_header(const std::filesystem::path &fname);
|
||||
|
||||
// void update_geometry_with_roi();
|
||||
int find_number_of_subfiles();
|
||||
private:
|
||||
/**
|
||||
* @brief read the frame at the given frame index into the image buffer
|
||||
* @param frame_number frame number to read
|
||||
* @param image_buf buffer to store the frame
|
||||
*/
|
||||
|
||||
void get_frame_into(size_t frame_index, std::byte *frame_buffer,
|
||||
DetectorHeader *header = nullptr);
|
||||
|
||||
/**
|
||||
* @brief get the frame at the given frame index
|
||||
* @param frame_number frame number to read
|
||||
* @return Frame
|
||||
*/
|
||||
Frame get_frame(size_t frame_index);
|
||||
|
||||
void open_subfiles();
|
||||
void find_geometry();
|
||||
};
|
||||
|
||||
|
||||
} // namespace aare
|
||||
@@ -1,5 +1,6 @@
|
||||
#pragma once
|
||||
#include "aare/defs.hpp"
|
||||
#include <algorithm>
|
||||
#include <filesystem>
|
||||
#include <fmt/format.h>
|
||||
#include <fstream>
|
||||
@@ -41,14 +42,16 @@ class RawFileNameComponents {
|
||||
|
||||
class ScanParameters {
|
||||
bool m_enabled = false;
|
||||
std::string m_dac;
|
||||
DACIndex m_dac{};
|
||||
int m_start = 0;
|
||||
int m_stop = 0;
|
||||
int m_step = 0;
|
||||
//TODO! add settleTime, requires string to time conversion
|
||||
int64_t m_settleTime = 0; // [ns]
|
||||
|
||||
public:
|
||||
ScanParameters(const std::string &par);
|
||||
ScanParameters(const bool enabled, const DACIndex dac, const int start,
|
||||
const int stop, const int step, const int64_t settleTime);
|
||||
ScanParameters() = default;
|
||||
ScanParameters(const ScanParameters &) = default;
|
||||
ScanParameters &operator=(const ScanParameters &) = default;
|
||||
@@ -56,12 +59,12 @@ class ScanParameters {
|
||||
int start() const;
|
||||
int stop() const;
|
||||
int step() const;
|
||||
const std::string &dac() const;
|
||||
DACIndex dac() const;
|
||||
bool enabled() const;
|
||||
int64_t settleTime() const;
|
||||
void increment_stop();
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
* @brief Class for parsing a master file either in our .json format or the old
|
||||
* .raw format
|
||||
@@ -78,8 +81,10 @@ class RawMasterFile {
|
||||
size_t m_pixels_y{};
|
||||
size_t m_pixels_x{};
|
||||
size_t m_bitdepth{};
|
||||
uint8_t m_quad = 0;
|
||||
|
||||
xy m_geometry{};
|
||||
xy m_udp_interfaces_per_module{1, 1};
|
||||
|
||||
size_t m_max_frames_per_file{};
|
||||
// uint32_t m_adc_mask{}; // TODO! implement reading
|
||||
@@ -97,11 +102,9 @@ class RawMasterFile {
|
||||
std::optional<size_t> m_digital_samples;
|
||||
std::optional<size_t> m_transceiver_samples;
|
||||
std::optional<size_t> m_number_of_rows;
|
||||
std::optional<uint8_t> m_quad;
|
||||
|
||||
std::optional<ROI> m_roi;
|
||||
|
||||
|
||||
public:
|
||||
RawMasterFile(const std::filesystem::path &fpath);
|
||||
|
||||
@@ -117,26 +120,27 @@ class RawMasterFile {
|
||||
size_t max_frames_per_file() const;
|
||||
size_t bitdepth() const;
|
||||
size_t frame_padding() const;
|
||||
xy udp_interfaces_per_module() const;
|
||||
const FrameDiscardPolicy &frame_discard_policy() const;
|
||||
|
||||
size_t total_frames_expected() const;
|
||||
xy geometry() const;
|
||||
size_t n_modules() const;
|
||||
uint8_t quad() const;
|
||||
|
||||
std::optional<size_t> analog_samples() const;
|
||||
std::optional<size_t> digital_samples() const;
|
||||
std::optional<size_t> transceiver_samples() const;
|
||||
std::optional<size_t> number_of_rows() const;
|
||||
std::optional<uint8_t> quad() const;
|
||||
|
||||
|
||||
std::optional<ROI> roi() const;
|
||||
|
||||
|
||||
ScanParameters scan_parameters() const;
|
||||
|
||||
private:
|
||||
void parse_json(const std::filesystem::path &fpath);
|
||||
void parse_raw(const std::filesystem::path &fpath);
|
||||
void retrieve_geometry();
|
||||
};
|
||||
|
||||
} // namespace aare
|
||||
@@ -10,23 +10,34 @@
|
||||
namespace aare {
|
||||
|
||||
/**
|
||||
* @brief Class to read a single subfile written in .raw format. Used from RawFile to read
* the entire detector. Can be used directly to read part of the image.
* @brief Class to read a single subfile written in .raw format. Used from
* RawFile to read the entire detector. Can be used directly to read part of the
* image.
|
||||
*/
|
||||
class RawSubFile {
|
||||
protected:
|
||||
std::ifstream m_file;
|
||||
DetectorType m_detector_type;
|
||||
size_t m_bitdepth;
|
||||
std::filesystem::path m_fname;
|
||||
std::filesystem::path m_path; //!< path to the subfile
|
||||
std::string m_base_name; //!< base name used for formatting file names
|
||||
size_t m_offset{}; //!< file index of the first file, allow starting at non
|
||||
//!< zero file
|
||||
size_t m_total_frames{}; //!< total number of frames in the series of files
|
||||
size_t m_rows{};
|
||||
size_t m_cols{};
|
||||
size_t m_bytes_per_frame{};
|
||||
size_t m_num_frames{};
|
||||
|
||||
int m_module_index{};
|
||||
size_t m_current_file_index{}; //!< The index of the open file
|
||||
size_t m_current_frame_index{}; //!< The index of the current frame (with
|
||||
//!< reference to all files)
|
||||
std::vector<size_t>
|
||||
m_last_frame_in_file{}; //!< Used for seeking to the correct file
|
||||
|
||||
uint32_t m_pos_row{};
|
||||
uint32_t m_pos_col{};
|
||||
|
||||
|
||||
|
||||
std::optional<NDArray<ssize_t, 2>> m_pixel_map;
|
||||
|
||||
public:
|
||||
@@ -40,12 +51,14 @@ class RawSubFile {
|
||||
* @throws std::invalid_argument if the detector,type pair is not supported
|
||||
*/
|
||||
RawSubFile(const std::filesystem::path &fname, DetectorType detector,
|
||||
size_t rows, size_t cols, size_t bitdepth, uint32_t pos_row = 0, uint32_t pos_col = 0);
|
||||
size_t rows, size_t cols, size_t bitdepth, uint32_t pos_row = 0,
|
||||
uint32_t pos_col = 0);
|
||||
|
||||
~RawSubFile() = default;
|
||||
/**
|
||||
* @brief Seek to the given frame number
|
||||
* @note Puts the file pointer at the start of the header, not the start of the data
|
||||
* @note Puts the file pointer at the start of the header, not the start of
|
||||
* the data
|
||||
* @param frame_index frame position in file to seek to
|
||||
* @throws std::runtime_error if the frame number is out of range
|
||||
*/
|
||||
@@ -53,26 +66,30 @@ class RawSubFile {
|
||||
size_t tell();
|
||||
|
||||
void read_into(std::byte *image_buf, DetectorHeader *header = nullptr);
|
||||
void read_into(std::byte *image_buf, size_t n_frames, DetectorHeader *header= nullptr);
|
||||
void read_into(std::byte *image_buf, size_t n_frames,
|
||||
DetectorHeader *header = nullptr);
|
||||
void get_part(std::byte *buffer, size_t frame_index);
|
||||
|
||||
|
||||
void read_header(DetectorHeader *header);
|
||||
|
||||
|
||||
size_t rows() const;
|
||||
size_t cols() const;
|
||||
|
||||
|
||||
size_t frame_number(size_t frame_index);
|
||||
|
||||
size_t bytes_per_frame() const { return m_bytes_per_frame; }
|
||||
size_t pixels_per_frame() const { return m_rows * m_cols; }
|
||||
size_t bytes_per_pixel() const { return m_bitdepth / bits_per_byte; }
|
||||
|
||||
size_t frames_in_file() const { return m_num_frames; }
|
||||
size_t frames_in_file() const { return m_total_frames; }
|
||||
|
||||
private:
|
||||
template <typename T>
|
||||
void read_with_map(std::byte *image_buf);
|
||||
private:
|
||||
template <typename T> void read_with_map(std::byte *image_buf);
|
||||
|
||||
void parse_fname(const std::filesystem::path &fname);
|
||||
void scan_files();
|
||||
void open_file(size_t file_index);
|
||||
std::filesystem::path fpath(size_t file_index) const;
|
||||
};
|
||||
|
||||
} // namespace aare
|
||||
@@ -28,7 +28,7 @@ template <typename T> class VarClusterFinder {
|
||||
};
|
||||
|
||||
private:
|
||||
const std::array<int64_t, 2> shape_;
|
||||
const std::array<ssize_t, 2> shape_;
|
||||
NDView<T, 2> original_;
|
||||
NDArray<int, 2> labeled_;
|
||||
NDArray<int, 2> peripheral_labeled_;
|
||||
@@ -38,11 +38,13 @@ template <typename T> class VarClusterFinder {
|
||||
bool use_noise_map = false;
|
||||
int peripheralThresholdFactor_ = 5;
|
||||
int current_label;
|
||||
const std::array<int, 4> di{{0, -1, -1, -1}}; // row ### 8-neighbour by scanning from left to right
const std::array<int, 4> dj{{-1, -1, 0, 1}}; // col ### 8-neighbour by scanning from top to bottom
const std::array<int, 4> di{
{0, -1, -1, -1}}; // row ### 8-neighbour by scanning from left to right
const std::array<int, 4> dj{
{-1, -1, 0, 1}}; // col ### 8-neighbour by scanning from top to bottom
|
||||
const std::array<int, 8> di_{{0, 0, -1, 1, -1, 1, -1, 1}}; // row
|
||||
const std::array<int, 8> dj_{{-1, 1, 0, 0, 1, -1, -1, 1}}; // col
|
||||
std::map<int, int> child; // hierarchy: key: child; val: parent
std::map<int, int> child; // hierarchy: key: child; val: parent
|
||||
std::unordered_map<int, Hit> h_size;
|
||||
std::vector<Hit> hits;
|
||||
// std::vector<std::vector<int16_t>> row
|
||||
@@ -50,7 +52,8 @@ template <typename T> class VarClusterFinder {
|
||||
|
||||
public:
|
||||
VarClusterFinder(Shape<2> shape, T threshold)
|
||||
: shape_(shape), labeled_(shape, 0), peripheral_labeled_(shape, 0), binary_(shape), threshold_(threshold) {
|
||||
: shape_(shape), labeled_(shape, 0), peripheral_labeled_(shape, 0),
|
||||
binary_(shape), threshold_(threshold) {
|
||||
hits.reserve(2000);
|
||||
}
|
||||
|
||||
@@ -60,7 +63,9 @@ template <typename T> class VarClusterFinder {
|
||||
noiseMap = noise_map;
|
||||
use_noise_map = true;
|
||||
}
|
||||
void set_peripheralThresholdFactor(int factor) { peripheralThresholdFactor_ = factor; }
|
||||
void set_peripheralThresholdFactor(int factor) {
|
||||
peripheralThresholdFactor_ = factor;
|
||||
}
|
||||
void find_clusters(NDView<T, 2> img);
|
||||
void find_clusters_X(NDView<T, 2> img);
|
||||
void rec_FillHit(int clusterIndex, int i, int j);
|
||||
@@ -144,7 +149,8 @@ template <typename T> int VarClusterFinder<T>::check_neighbours(int i, int j) {
|
||||
}
|
||||
}
|
||||
|
||||
template <typename T> void VarClusterFinder<T>::find_clusters(NDView<T, 2> img) {
|
||||
template <typename T>
|
||||
void VarClusterFinder<T>::find_clusters(NDView<T, 2> img) {
|
||||
original_ = img;
|
||||
labeled_ = 0;
|
||||
peripheral_labeled_ = 0;
|
||||
@@ -156,7 +162,8 @@ template <typename T> void VarClusterFinder<T>::find_clusters(NDView<T, 2> img)
|
||||
store_clusters();
|
||||
}
|
||||
|
||||
template <typename T> void VarClusterFinder<T>::find_clusters_X(NDView<T, 2> img) {
|
||||
template <typename T>
|
||||
void VarClusterFinder<T>::find_clusters_X(NDView<T, 2> img) {
|
||||
original_ = img;
|
||||
int clusterIndex = 0;
|
||||
for (int i = 0; i < shape_[0]; ++i) {
|
||||
@@ -175,7 +182,8 @@ template <typename T> void VarClusterFinder<T>::find_clusters_X(NDView<T, 2> img
|
||||
h_size.clear();
|
||||
}
|
||||
|
||||
template <typename T> void VarClusterFinder<T>::rec_FillHit(int clusterIndex, int i, int j) {
|
||||
template <typename T>
|
||||
void VarClusterFinder<T>::rec_FillHit(int clusterIndex, int i, int j) {
|
||||
// printf("original_(%d, %d)=%f\n", i, j, original_(i,j));
|
||||
// printf("h_size[%d].size=%d\n", clusterIndex, h_size[clusterIndex].size);
|
||||
if (h_size[clusterIndex].size < MAX_CLUSTER_SIZE) {
|
||||
@@ -203,11 +211,15 @@ template <typename T> void VarClusterFinder<T>::rec_FillHit(int clusterIndex, in
|
||||
} else {
|
||||
// if (h_size[clusterIndex].size < MAX_CLUSTER_SIZE){
|
||||
// h_size[clusterIndex].size += 1;
|
||||
// h_size[clusterIndex].rows[h_size[clusterIndex].size] = row;
|
||||
// h_size[clusterIndex].cols[h_size[clusterIndex].size] = col;
|
||||
// h_size[clusterIndex].enes[h_size[clusterIndex].size] = original_(row, col);
|
||||
// h_size[clusterIndex].rows[h_size[clusterIndex].size] =
|
||||
// row; h_size[clusterIndex].cols[h_size[clusterIndex].size]
|
||||
// = col;
|
||||
// h_size[clusterIndex].enes[h_size[clusterIndex].size] =
|
||||
// original_(row, col);
|
||||
// }// ? whether to include peripheral pixels
|
||||
original_(row, col) = 0; // remove peripheral pixels, to avoid potential influence for pedestal updating
|
||||
original_(row, col) =
|
||||
0; // remove peripheral pixels, to avoid potential influence
|
||||
// for pedestal updating
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -228,14 +240,14 @@ template <typename T> void VarClusterFinder<T>::first_pass() {
|
||||
|
||||
for (ssize_t i = 0; i < original_.size(); ++i) {
|
||||
if (use_noise_map)
|
||||
threshold_ = 5 * noiseMap(i);
|
||||
binary_(i) = (original_(i) > threshold_);
|
||||
threshold_ = 5 * noiseMap[i];
|
||||
binary_[i] = (original_[i] > threshold_);
|
||||
}
|
||||
|
||||
for (int i = 0; i < shape_[0]; ++i) {
|
||||
for (int j = 0; j < shape_[1]; ++j) {
|
||||
|
||||
// do we have someting to process?
|
||||
// do we have something to process?
|
||||
if (binary_(i, j)) {
|
||||
auto tmp = check_neighbours(i, j);
|
||||
if (tmp != 0) {
|
||||
@@ -275,8 +287,8 @@ template <typename T> void VarClusterFinder<T>::store_clusters() {
|
||||
for (int i = 0; i < shape_[0]; ++i) {
|
||||
for (int j = 0; j < shape_[1]; ++j) {
|
||||
if (labeled_(i, j) != 0 || false
|
||||
// (i-1 >= 0 and labeled_(i-1, j) != 0) or // another circle of peripheral pixels
|
||||
// (j-1 >= 0 and labeled_(i, j-1) != 0) or
|
||||
// (i-1 >= 0 and labeled_(i-1, j) != 0) or // another circle of
|
||||
// peripheral pixels (j-1 >= 0 and labeled_(i, j-1) != 0) or
|
||||
// (i+1 < shape_[0] and labeled_(i+1, j) != 0) or
|
||||
// (j+1 < shape_[1] and labeled_(i, j+1) != 0)
|
||||
) {
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
|
||||
#pragma once
|
||||
#include <aare/NDArray.hpp>
|
||||
#include <algorithm>
|
||||
#include <array>
|
||||
#include <vector>
|
||||
#include <aare/NDArray.hpp>
|
||||
|
||||
namespace aare {
|
||||
/**
|
||||
@@ -15,26 +15,24 @@ namespace aare {
|
||||
* @param last iterator to the last element
|
||||
* @param val value to compare
|
||||
* @return index of the last element that is smaller than val
|
||||
*
|
||||
*
|
||||
*/
|
||||
template <typename T>
|
||||
size_t last_smaller(const T* first, const T* last, T val) {
|
||||
for (auto iter = first+1; iter != last; ++iter) {
|
||||
size_t last_smaller(const T *first, const T *last, T val) {
|
||||
for (auto iter = first + 1; iter != last; ++iter) {
|
||||
if (*iter >= val) {
|
||||
return std::distance(first, iter-1);
|
||||
return std::distance(first, iter - 1);
|
||||
}
|
||||
}
|
||||
return std::distance(first, last-1);
|
||||
return std::distance(first, last - 1);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
size_t last_smaller(const NDArray<T, 1>& arr, T val) {
|
||||
template <typename T> size_t last_smaller(const NDArray<T, 1> &arr, T val) {
|
||||
return last_smaller(arr.begin(), arr.end(), val);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
size_t last_smaller(const std::vector<T>& vec, T val) {
|
||||
return last_smaller(vec.data(), vec.data()+vec.size(), val);
|
||||
template <typename T> size_t last_smaller(const std::vector<T> &vec, T val) {
|
||||
return last_smaller(vec.data(), vec.data() + vec.size(), val);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -48,64 +46,67 @@ size_t last_smaller(const std::vector<T>& vec, T val) {
|
||||
* @return index of the first element that is larger than val
|
||||
*/
|
||||
template <typename T>
|
||||
size_t first_larger(const T* first, const T* last, T val) {
|
||||
size_t first_larger(const T *first, const T *last, T val) {
|
||||
for (auto iter = first; iter != last; ++iter) {
|
||||
if (*iter > val) {
|
||||
return std::distance(first, iter);
|
||||
}
|
||||
}
|
||||
return std::distance(first, last-1);
|
||||
return std::distance(first, last - 1);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
size_t first_larger(const NDArray<T, 1>& arr, T val) {
|
||||
template <typename T> size_t first_larger(const NDArray<T, 1> &arr, T val) {
|
||||
return first_larger(arr.begin(), arr.end(), val);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
size_t first_larger(const std::vector<T>& vec, T val) {
|
||||
return first_larger(vec.data(), vec.data()+vec.size(), val);
|
||||
template <typename T> size_t first_larger(const std::vector<T> &vec, T val) {
|
||||
return first_larger(vec.data(), vec.data() + vec.size(), val);
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Index of the nearest element to val.
|
||||
* Requires a sorted array. If there is no difference it takes the first element.
|
||||
* Requires a sorted array. If there is no difference it takes the first
|
||||
* element.
|
||||
* @param first iterator to the first element
|
||||
* @param last iterator to the last element
|
||||
* @param val value to compare
|
||||
* @return index of the nearest element
|
||||
*/
|
||||
template <typename T>
|
||||
size_t nearest_index(const T* first, const T* last, T val) {
|
||||
auto iter = std::min_element(first, last,
|
||||
[val](T a, T b) {
|
||||
size_t nearest_index(const T *first, const T *last, T val) {
|
||||
auto iter = std::min_element(first, last, [val](T a, T b) {
|
||||
return std::abs(a - val) < std::abs(b - val);
|
||||
});
|
||||
return std::distance(first, iter);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
size_t nearest_index(const NDArray<T, 1>& arr, T val) {
|
||||
template <typename T> size_t nearest_index(const NDArray<T, 1> &arr, T val) {
|
||||
return nearest_index(arr.begin(), arr.end(), val);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
size_t nearest_index(const std::vector<T>& vec, T val) {
|
||||
return nearest_index(vec.data(), vec.data()+vec.size(), val);
|
||||
template <typename T> size_t nearest_index(const std::vector<T> &vec, T val) {
|
||||
return nearest_index(vec.data(), vec.data() + vec.size(), val);
|
||||
}
|
||||
|
||||
template <typename T, size_t N>
|
||||
size_t nearest_index(const std::array<T,N>& arr, T val) {
|
||||
return nearest_index(arr.data(), arr.data()+arr.size(), val);
|
||||
size_t nearest_index(const std::array<T, N> &arr, T val) {
|
||||
return nearest_index(arr.data(), arr.data() + arr.size(), val);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
std::vector<T> cumsum(const std::vector<T>& vec) {
|
||||
template <typename T> std::vector<T> cumsum(const std::vector<T> &vec) {
|
||||
std::vector<T> result(vec.size());
|
||||
std::partial_sum(vec.begin(), vec.end(), result.begin());
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
template <typename Container> bool all_equal(const Container &c) {
|
||||
if (!c.empty() &&
|
||||
std::all_of(begin(c), end(c),
|
||||
[c](const typename Container::value_type &element) {
|
||||
return element == c.front();
|
||||
}))
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
} // namespace aare
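These helpers are linear scans over sorted data (bin edges, thresholds and the like). A quick illustration of what they return; the include path is an assumption since the flattened diff does not show the file name:

#include <aare/algorithm.hpp>
#include <vector>

int main() {
    std::vector<double> edges{0.0, 1.0, 2.0, 3.0};

    auto a = aare::last_smaller(edges, 1.5);  // 1: edges[1] is the last value < 1.5
    auto b = aare::first_larger(edges, 1.5);  // 2: edges[2] is the first value > 1.5
    auto c = aare::nearest_index(edges, 1.4); // 1: edges[1] is closest to 1.4
    auto d = aare::cumsum(edges);             // {0.0, 1.0, 3.0, 6.0}
    (void)a; (void)b; (void)c; (void)d;
}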
include/aare/calibration.hpp (new file, 209 lines)
@@ -0,0 +1,209 @@
|
||||
#pragma once
|
||||
|
||||
#include "aare/NDArray.hpp"
|
||||
#include "aare/NDView.hpp"
|
||||
#include "aare/defs.hpp"
|
||||
#include "aare/utils/par.hpp"
|
||||
#include "aare/utils/task.hpp"
|
||||
#include <cstdint>
|
||||
#include <future>
|
||||
|
||||
namespace aare {
|
||||
|
||||
// Really try to convince the compiler to inline this function
|
||||
// TODO! Clang?
|
||||
#if (defined(_MSC_VER) || defined(__INTEL_COMPILER))
|
||||
#define STRONG_INLINE __forceinline
|
||||
#else
|
||||
#define STRONG_INLINE inline
|
||||
#endif
|
||||
|
||||
#if defined(__GNUC__)
|
||||
#define ALWAYS_INLINE __attribute__((always_inline)) inline
|
||||
#else
|
||||
#define ALWAYS_INLINE STRONG_INLINE
|
||||
#endif
|
||||
|
||||
/**
|
||||
* @brief Get the gain from the raw ADC value. In Jungfrau the gain is
|
||||
* encoded in the two leftmost bits of the raw value.
|
||||
* 00 -> gain 0
|
||||
* 01 -> gain 1
|
||||
* 11 -> gain 2
|
||||
* @param raw the raw ADC value
|
||||
* @return the gain as an integer
|
||||
*/
|
||||
ALWAYS_INLINE int get_gain(uint16_t raw) {
|
||||
switch (raw >> 14) {
|
||||
case 0:
|
||||
return 0;
|
||||
case 1:
|
||||
return 1;
|
||||
case 3:
|
||||
return 2;
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
ALWAYS_INLINE uint16_t get_value(uint16_t raw) { return raw & ADC_MASK; }
|
||||
|
||||
ALWAYS_INLINE std::pair<uint16_t, int16_t> get_value_and_gain(uint16_t raw) {
|
||||
static_assert(
|
||||
sizeof(std::pair<uint16_t, int16_t>) ==
|
||||
sizeof(uint16_t) + sizeof(int16_t),
|
||||
"Size of pair<uint16_t, int16_t> does not match expected size");
|
||||
return {get_value(raw), get_gain(raw)};
|
||||
}
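The two gain bits live in bits 15:14 and the ADC value in the low 14 bits (which is what ADC_MASK is assumed to mask), so a raw Jungfrau word can be unpacked as below (the sample word is invented):

#include "aare/calibration.hpp"
#include <cstdint>

int main() {
    uint16_t raw = (0b11u << 14) | 1234u;               // gain bits "11", ADC value 1234
    auto [value, gain] = aare::get_value_and_gain(raw);
    // value == 1234, gain == 2 (the "11" pattern maps to G2)
}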
|
||||
|
||||
template <class T>
|
||||
void apply_calibration_impl(NDView<T, 3> res, NDView<uint16_t, 3> raw_data,
|
||||
NDView<T, 3> ped, NDView<T, 3> cal, int start,
|
||||
int stop) {
|
||||
|
||||
for (int frame_nr = start; frame_nr != stop; ++frame_nr) {
|
||||
for (int row = 0; row != raw_data.shape(1); ++row) {
|
||||
for (int col = 0; col != raw_data.shape(2); ++col) {
|
||||
auto [value, gain] =
|
||||
get_value_and_gain(raw_data(frame_nr, row, col));
|
||||
|
||||
// Using multiplication does not seem to speed up the code here
|
||||
// ADU/keV is the standard unit for the calibration which
|
||||
// means rewriting the formula is not worth it.
|
||||
res(frame_nr, row, col) =
|
||||
(value - ped(gain, row, col)) / cal(gain, row, col);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template <class T>
|
||||
void apply_calibration_impl(NDView<T, 3> res, NDView<uint16_t, 3> raw_data,
|
||||
NDView<T, 2> ped, NDView<T, 2> cal, int start,
|
||||
int stop) {
|
||||
|
||||
for (int frame_nr = start; frame_nr != stop; ++frame_nr) {
|
||||
for (int row = 0; row != raw_data.shape(1); ++row) {
|
||||
for (int col = 0; col != raw_data.shape(2); ++col) {
|
||||
auto [value, gain] =
|
||||
get_value_and_gain(raw_data(frame_nr, row, col));
|
||||
|
||||
// Using multiplication does not seem to speed up the code here
|
||||
// ADU/keV is the standard unit for the calibration which
|
||||
// means rewriting the formula is not worth it.
|
||||
|
||||
// Set the value to 0 if the gain is not 0
|
||||
if (gain == 0)
|
||||
res(frame_nr, row, col) =
|
||||
(value - ped(row, col)) / cal(row, col);
|
||||
else
|
||||
res(frame_nr, row, col) = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template <class T, ssize_t Ndim = 3>
|
||||
void apply_calibration(NDView<T, 3> res, NDView<uint16_t, 3> raw_data,
|
||||
NDView<T, Ndim> ped, NDView<T, Ndim> cal,
|
||||
ssize_t n_threads = 4) {
|
||||
std::vector<std::future<void>> futures;
|
||||
futures.reserve(n_threads);
|
||||
auto limits = split_task(0, raw_data.shape(0), n_threads);
|
||||
for (const auto &lim : limits)
|
||||
futures.push_back(std::async(
|
||||
static_cast<void (*)(NDView<T, 3>, NDView<uint16_t, 3>,
|
||||
NDView<T, Ndim>, NDView<T, Ndim>, int, int)>(
|
||||
apply_calibration_impl),
|
||||
res, raw_data, ped, cal, lim.first, lim.second));
|
||||
for (auto &f : futures)
|
||||
f.get();
|
||||
}
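apply_calibration splits the frame axis over n_threads std::async tasks and converts every raw word to (value - ped[gain]) / cal[gain]. A hedged call sketch (shapes and contents are invented; real pedestal and calibration maps would be loaded from file, and NDArray::view() is assumed to return the matching NDView):

#include "aare/NDArray.hpp"
#include "aare/calibration.hpp"

int main() {
    using namespace aare;
    Shape<3> frames{100, 512, 1024}; // n_frames, rows, cols
    Shape<3> per_gain{3, 512, 1024}; // one plane per gain

    NDArray<uint16_t, 3> raw(frames, 0);
    NDArray<double, 3> ped(per_gain, 0.0); // pedestal in ADU
    NDArray<double, 3> cal(per_gain, 1.0); // calibration in ADU/keV
    NDArray<double, 3> energy(frames);

    apply_calibration<double>(energy.view(), raw.view(), ped.view(), cal.view(), 4);
}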
|
||||
|
||||
template <bool only_gain0>
|
||||
std::pair<NDArray<size_t, 3>, NDArray<size_t, 3>>
|
||||
sum_and_count_per_gain(NDView<uint16_t, 3> raw_data) {
|
||||
constexpr ssize_t num_gains = only_gain0 ? 1 : 3;
|
||||
NDArray<size_t, 3> accumulator(
|
||||
std::array<ssize_t, 3>{num_gains, raw_data.shape(1), raw_data.shape(2)},
|
||||
0);
|
||||
NDArray<size_t, 3> count(
|
||||
std::array<ssize_t, 3>{num_gains, raw_data.shape(1), raw_data.shape(2)},
|
||||
0);
|
||||
for (int frame_nr = 0; frame_nr != raw_data.shape(0); ++frame_nr) {
|
||||
for (int row = 0; row != raw_data.shape(1); ++row) {
|
||||
for (int col = 0; col != raw_data.shape(2); ++col) {
|
||||
auto [value, gain] =
|
||||
get_value_and_gain(raw_data(frame_nr, row, col));
|
||||
if (gain != 0 && only_gain0)
|
||||
continue;
|
||||
accumulator(gain, row, col) += value;
|
||||
count(gain, row, col) += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return {std::move(accumulator), std::move(count)};
|
||||
}
|
||||
|
||||
template <typename T, bool only_gain0 = false>
|
||||
NDArray<T, 3 - static_cast<ssize_t>(only_gain0)>
|
||||
calculate_pedestal(NDView<uint16_t, 3> raw_data, ssize_t n_threads) {
|
||||
|
||||
constexpr ssize_t num_gains = only_gain0 ? 1 : 3;
|
||||
std::vector<std::future<std::pair<NDArray<size_t, 3>, NDArray<size_t, 3>>>>
|
||||
futures;
|
||||
futures.reserve(n_threads);
|
||||
|
||||
auto subviews = make_subviews(raw_data, n_threads);
|
||||
|
||||
for (auto view : subviews) {
|
||||
futures.push_back(std::async(
|
||||
static_cast<std::pair<NDArray<size_t, 3>, NDArray<size_t, 3>> (*)(
|
||||
NDView<uint16_t, 3>)>(&sum_and_count_per_gain<only_gain0>),
|
||||
view));
|
||||
}
|
||||
Shape<3> shape{num_gains, raw_data.shape(1), raw_data.shape(2)};
|
||||
NDArray<size_t, 3> accumulator(shape, 0);
|
||||
NDArray<size_t, 3> count(shape, 0);
|
||||
|
||||
// Combine the results from the futures
|
||||
for (auto &f : futures) {
|
||||
auto [acc, cnt] = f.get();
|
||||
accumulator += acc;
|
||||
count += cnt;
|
||||
}
|
||||
|
||||
|
||||
// Will move to a NDArray<T, 3 - static_cast<ssize_t>(only_gain0)>
|
||||
// if only_gain0 is true
|
||||
return safe_divide<T>(accumulator, count);
|
||||
|
||||
}
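calculate_pedestal fans the frames out over per-thread subviews, accumulates a per-gain sum and count, merges the partial results and finishes with safe_divide, so pixels that never saw a given gain come out as 0 rather than NaN; with only_gain0 the gain axis collapses and the return type is 2D. A sketch (data invented, NDArray::view() assumed as above):

#include "aare/NDArray.hpp"
#include "aare/calibration.hpp"

int main() {
    using namespace aare;
    NDArray<uint16_t, 3> raw(Shape<3>{1000, 512, 1024}, 0); // synthetic dark run

    auto ped_all = calculate_pedestal<double>(raw.view(), 4);   // {3, 512, 1024}
    auto ped_g0 = calculate_pedestal_g0<double>(raw.view(), 4); // gain-0 only, 2D
}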
|
||||
|
||||
/**
|
||||
* @brief Count the number of switching pixels in the raw data.
|
||||
* This function counts the number of pixels that switch between G1 and G2 gain.
|
||||
* It returns an NDArray with the number of switching pixels per pixel.
|
||||
* @param raw_data The NDView containing the raw data
|
||||
* @return An NDArray with the number of switching pixels per pixel
|
||||
*/
|
||||
NDArray<int, 2> count_switching_pixels(NDView<uint16_t, 3> raw_data);
|
||||
|
||||
/**
|
||||
* @brief Count the number of switching pixels in the raw data.
|
||||
* This function counts the number of pixels that switch between G1 and G2 gain.
|
||||
* It returns an NDArray with the number of switching pixels per pixel.
|
||||
* @param raw_data The NDView containing the raw data
|
||||
* @param n_threads The number of threads to use for parallel processing
|
||||
* @return An NDArray with the number of switching pixels per pixel
|
||||
*/
|
||||
NDArray<int, 2> count_switching_pixels(NDView<uint16_t, 3> raw_data,
|
||||
ssize_t n_threads);
|
||||
|
||||
template <typename T>
|
||||
auto calculate_pedestal_g0(NDView<uint16_t, 3> raw_data, ssize_t n_threads) {
|
||||
return calculate_pedestal<T, true>(raw_data, n_threads);
|
||||
}
|
||||
|
||||
} // namespace aare
|
||||
@@ -1,26 +1,27 @@
|
||||
#pragma once
|
||||
|
||||
#include <aare/NDView.hpp>
|
||||
#include <cstdint>
|
||||
#include <vector>
|
||||
#include <aare/NDView.hpp>
|
||||
namespace aare {
|
||||
|
||||
|
||||
uint16_t adc_sar_05_decode64to16(uint64_t input);
|
||||
uint16_t adc_sar_04_decode64to16(uint64_t input);
|
||||
void adc_sar_05_decode64to16(NDView<uint64_t, 2> input, NDView<uint16_t,2> output);
|
||||
void adc_sar_04_decode64to16(NDView<uint64_t, 2> input, NDView<uint16_t,2> output);
|
||||
|
||||
void adc_sar_05_decode64to16(NDView<uint64_t, 2> input,
|
||||
NDView<uint16_t, 2> output);
|
||||
void adc_sar_04_decode64to16(NDView<uint64_t, 2> input,
|
||||
NDView<uint16_t, 2> output);
|
||||
|
||||
/**
|
||||
* @brief Apply custom weights to a 16-bit input value. Will sum up weights[i]**i
|
||||
* for each bit i that is set in the input value.
|
||||
* @brief Apply custom weights to a 16-bit input value. Will sum up
|
||||
* weights[i]**i for each bit i that is set in the input value.
|
||||
* @throws std::out_of_range if weights.size() < 16
|
||||
* @param input 16-bit input value
|
||||
* @param weights vector of weights, must contain at least 16 elements
|
||||
*/
|
||||
double apply_custom_weights(uint16_t input, const NDView<double, 1> weights);
|
||||
|
||||
void apply_custom_weights(NDView<uint16_t, 1> input, NDView<double, 1> output, const NDView<double, 1> weights);
|
||||
void apply_custom_weights(NDView<uint16_t, 1> input, NDView<double, 1> output,
|
||||
const NDView<double, 1> weights);
|
||||
|
||||
} // namespace aare
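A pure-Python sketch of the documented formula (sum of weights[i]**i over the set bits i), meant only to illustrate the doxygen above, not to stand in for the C++ implementation:

    def custom_weighted_value(x, weights):
        # For each bit i set in the 16-bit input, add weights[i]**i
        assert len(weights) >= 16
        return sum(weights[i] ** i for i in range(16) if (x >> i) & 1)

    # Sanity check: with every weight equal to 2.0 the result is the plain binary value
    assert custom_weighted_value(0b1011, [2.0] * 16) == 0b1011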
|
||||
|
||||
@@ -3,16 +3,15 @@
|
||||
#include "aare/Dtype.hpp"
|
||||
|
||||
#include <array>
|
||||
#include <stdexcept>
|
||||
#include <cassert>
|
||||
#include <cstdint>
|
||||
#include <cstring>
|
||||
#include <stdexcept>
|
||||
#include <string>
|
||||
#include <string_view>
|
||||
#include <variant>
|
||||
#include <vector>
|
||||
|
||||
|
||||
/**
|
||||
* @brief LOCATION macro to get the current location in the code
|
||||
*/
|
||||
@@ -20,28 +19,24 @@
|
||||
std::string(__FILE__) + std::string(":") + std::to_string(__LINE__) + \
|
||||
":" + std::string(__func__) + ":"
|
||||
|
||||
|
||||
|
||||
#ifdef AARE_CUSTOM_ASSERT
|
||||
#define AARE_ASSERT(expr)\
|
||||
if (expr)\
|
||||
{}\
|
||||
else\
|
||||
#define AARE_ASSERT(expr) \
|
||||
if (expr) { \
|
||||
} else \
|
||||
aare::assert_failed(LOCATION + " Assertion failed: " + #expr + "\n");
|
||||
#else
|
||||
#define AARE_ASSERT(cond)\
|
||||
do { (void)sizeof(cond); } while(0)
|
||||
#define AARE_ASSERT(cond) \
|
||||
do { \
|
||||
(void)sizeof(cond); \
|
||||
} while (0)
|
||||
#endif
|
||||
|
||||
|
||||
namespace aare {
|
||||
|
||||
inline constexpr size_t bits_per_byte = 8;
|
||||
|
||||
void assert_failed(const std::string &msg);
|
||||
|
||||
|
||||
|
||||
class DynamicCluster {
|
||||
public:
|
||||
int cluster_sizeX;
|
||||
@@ -55,7 +50,7 @@ class DynamicCluster {
|
||||
|
||||
public:
|
||||
DynamicCluster(int cluster_sizeX_, int cluster_sizeY_,
|
||||
Dtype dt_ = Dtype(typeid(int32_t)))
|
||||
Dtype dt_ = Dtype(typeid(int32_t)))
|
||||
: cluster_sizeX(cluster_sizeX_), cluster_sizeY(cluster_sizeY_),
|
||||
dt(dt_) {
|
||||
m_data = new std::byte[cluster_sizeX * cluster_sizeY * dt.bytes()]{};
|
||||
@@ -179,58 +174,30 @@ template <typename T> struct t_xy {
|
||||
};
|
||||
using xy = t_xy<uint32_t>;
|
||||
|
||||
struct ROI {
|
||||
ssize_t xmin{};
|
||||
ssize_t xmax{};
|
||||
ssize_t ymin{};
|
||||
ssize_t ymax{};
|
||||
|
||||
/**
|
||||
* @brief Class to hold the geometry of a module: where pixel 0 is located and the size of the module
|
||||
*/
|
||||
struct ModuleGeometry{
|
||||
int origin_x{};
|
||||
int origin_y{};
|
||||
int height{};
|
||||
int width{};
|
||||
int row_index{};
|
||||
int col_index{};
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Class to hold the geometry of a detector: number of modules, their
* size and where pixel 0 for each module is located
|
||||
*/
|
||||
struct DetectorGeometry{
|
||||
int modules_x{};
|
||||
int modules_y{};
|
||||
int pixels_x{};
|
||||
int pixels_y{};
|
||||
int module_gap_row{};
|
||||
int module_gap_col{};
|
||||
std::vector<ModuleGeometry> module_pixel_0;
|
||||
};
|
||||
|
||||
struct ROI{
|
||||
int64_t xmin{};
|
||||
int64_t xmax{};
|
||||
int64_t ymin{};
|
||||
int64_t ymax{};
|
||||
|
||||
int64_t height() const { return ymax - ymin; }
|
||||
int64_t width() const { return xmax - xmin; }
|
||||
bool contains(int64_t x, int64_t y) const {
|
||||
ssize_t height() const { return ymax - ymin; }
|
||||
ssize_t width() const { return xmax - xmin; }
|
||||
bool contains(ssize_t x, ssize_t y) const {
|
||||
return x >= xmin && x < xmax && y >= ymin && y < ymax;
|
||||
}
|
||||
};
|
||||
};
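The bounds are half-open (xmin/ymin inclusive, xmax/ymax exclusive), as contains() above shows. A small Python illustration of the same logic (a plain dict stands in for the struct; this is not the pybind interface):

    def roi_contains(roi, x, y):
        # mirrors ROI::contains: xmin <= x < xmax and ymin <= y < ymax
        return roi["xmin"] <= x < roi["xmax"] and roi["ymin"] <= y < roi["ymax"]

    roi = {"xmin": 0, "xmax": 10, "ymin": 0, "ymax": 10}
    assert roi_contains(roi, 9, 9) and not roi_contains(roi, 10, 10)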
|
||||
|
||||
using dynamic_shape = std::vector<ssize_t>;
|
||||
|
||||
using dynamic_shape = std::vector<int64_t>;
|
||||
|
||||
//TODO! Can we uniform enums between the libraries?
|
||||
// TODO! Can we uniform enums between the libraries?
|
||||
|
||||
/**
|
||||
* @brief Enum class to identify different detectors.
|
||||
* @brief Enum class to identify different detectors.
|
||||
* The values are the same as in slsDetectorPackage
|
||||
* Different spelling to avoid confusion with the slsDetectorPackage
|
||||
*/
|
||||
enum class DetectorType {
|
||||
//Standard detectors match the enum values from slsDetectorPackage
|
||||
// Standard detectors match the enum values from slsDetectorPackage
|
||||
Generic,
|
||||
Eiger,
|
||||
Gotthard,
|
||||
@@ -241,12 +208,129 @@ enum class DetectorType {
|
||||
Gotthard2,
|
||||
Xilinx_ChipTestBoard,
|
||||
|
||||
//Additional detectors used for defining processing. Variants of the standard ones.
|
||||
Moench03=100,
|
||||
// Additional detectors used for defining processing. Variants of the
|
||||
// standard ones.
|
||||
Moench03 = 100,
|
||||
Moench03_old,
|
||||
Unknown
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Enum class to define the Digital to Analog converter
|
||||
* The values are the same as in slsDetectorPackage
|
||||
*/
|
||||
enum DACIndex {
|
||||
DAC_0,
|
||||
DAC_1,
|
||||
DAC_2,
|
||||
DAC_3,
|
||||
DAC_4,
|
||||
DAC_5,
|
||||
DAC_6,
|
||||
DAC_7,
|
||||
DAC_8,
|
||||
DAC_9,
|
||||
DAC_10,
|
||||
DAC_11,
|
||||
DAC_12,
|
||||
DAC_13,
|
||||
DAC_14,
|
||||
DAC_15,
|
||||
DAC_16,
|
||||
DAC_17,
|
||||
VSVP,
|
||||
VTRIM,
|
||||
VRPREAMP,
|
||||
VRSHAPER,
|
||||
VSVN,
|
||||
VTGSTV,
|
||||
VCMP_LL,
|
||||
VCMP_LR,
|
||||
VCAL,
|
||||
VCMP_RL,
|
||||
RXB_RB,
|
||||
RXB_LB,
|
||||
VCMP_RR,
|
||||
VCP,
|
||||
VCN,
|
||||
VISHAPER,
|
||||
VTHRESHOLD,
|
||||
IO_DELAY,
|
||||
VREF_DS,
|
||||
VOUT_CM,
|
||||
VIN_CM,
|
||||
VREF_COMP,
|
||||
VB_COMP,
|
||||
VDD_PROT,
|
||||
VIN_COM,
|
||||
VREF_PRECH,
|
||||
VB_PIXBUF,
|
||||
VB_DS,
|
||||
VREF_H_ADC,
|
||||
VB_COMP_FE,
|
||||
VB_COMP_ADC,
|
||||
VCOM_CDS,
|
||||
VREF_RSTORE,
|
||||
VB_OPA_1ST,
|
||||
VREF_COMP_FE,
|
||||
VCOM_ADC1,
|
||||
VREF_L_ADC,
|
||||
VREF_CDS,
|
||||
VB_CS,
|
||||
VB_OPA_FD,
|
||||
VCOM_ADC2,
|
||||
VCASSH,
|
||||
VTH2,
|
||||
VRSHAPER_N,
|
||||
VIPRE_OUT,
|
||||
VTH3,
|
||||
VTH1,
|
||||
VICIN,
|
||||
VCAS,
|
||||
VCAL_N,
|
||||
VIPRE,
|
||||
VCAL_P,
|
||||
VDCSH,
|
||||
VBP_COLBUF,
|
||||
VB_SDA,
|
||||
VCASC_SFP,
|
||||
VIPRE_CDS,
|
||||
IBIAS_SFP,
|
||||
ADC_VPP,
|
||||
HIGH_VOLTAGE,
|
||||
TEMPERATURE_ADC,
|
||||
TEMPERATURE_FPGA,
|
||||
TEMPERATURE_FPGAEXT,
|
||||
TEMPERATURE_10GE,
|
||||
TEMPERATURE_DCDC,
|
||||
TEMPERATURE_SODL,
|
||||
TEMPERATURE_SODR,
|
||||
TEMPERATURE_FPGA2,
|
||||
TEMPERATURE_FPGA3,
|
||||
TRIMBIT_SCAN,
|
||||
V_POWER_A = 100,
|
||||
V_POWER_B = 101,
|
||||
V_POWER_C = 102,
|
||||
V_POWER_D = 103,
|
||||
V_POWER_IO = 104,
|
||||
V_POWER_CHIP = 105,
|
||||
I_POWER_A = 106,
|
||||
I_POWER_B = 107,
|
||||
I_POWER_C = 108,
|
||||
I_POWER_D = 109,
|
||||
I_POWER_IO = 110,
|
||||
V_LIMIT = 111,
|
||||
SLOW_ADC0 = 1000,
|
||||
SLOW_ADC1,
|
||||
SLOW_ADC2,
|
||||
SLOW_ADC3,
|
||||
SLOW_ADC4,
|
||||
SLOW_ADC5,
|
||||
SLOW_ADC6,
|
||||
SLOW_ADC7,
|
||||
SLOW_ADC_TEMP
|
||||
};
|
||||
|
||||
enum class TimingMode { Auto, Trigger };
|
||||
enum class FrameDiscardPolicy { NoDiscard, Discard, DiscardPartial };
|
||||
|
||||
@@ -263,4 +347,15 @@ template <> FrameDiscardPolicy StringTo(const std::string & /*mode*/);
|
||||
|
||||
using DataTypeVariants = std::variant<uint16_t, uint32_t>;
|
||||
|
||||
constexpr uint16_t ADC_MASK =
|
||||
0x3FFF; // used to mask out the gain bits in Jungfrau
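A worked example of how such a mask is typically applied to one 16-bit Jungfrau sample (the placement of the 2-bit gain field in the upper bits is an assumption, not spelled out in this header):

    raw = 0xC123                 # one raw 16-bit sample
    value = raw & 0x3FFF         # ADC value with the gain bits masked out -> 291
    gain_bits = raw >> 14        # remaining two bits encode the gain stage -> 3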
|
||||
|
||||
/**
|
||||
* @brief Convert a string to a DACIndex
|
||||
* @param arg string representation of the dacIndex
|
||||
* @return DACIndex
|
||||
* @throw invalid argument error if the string does not match any DACIndex
|
||||
*/
|
||||
template <> DACIndex StringTo(const std::string &arg);
|
||||
|
||||
} // namespace aare
|
||||
@@ -1,16 +0,0 @@
|
||||
#pragma once
|
||||
#include "aare/defs.hpp"
|
||||
#include "aare/RawMasterFile.hpp" //ROI refactor away
|
||||
namespace aare{
|
||||
|
||||
/**
|
||||
* @brief Update the detector geometry given a region of interest
|
||||
*
|
||||
* @param geo
|
||||
* @param roi
|
||||
* @return DetectorGeometry
|
||||
*/
|
||||
DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, ROI roi);
|
||||
|
||||
|
||||
} // namespace aare
|
||||
141
include/aare/logger.hpp
Normal file
141
include/aare/logger.hpp
Normal file
@@ -0,0 +1,141 @@
|
||||
#pragma once
|
||||
/*Utility to log to console*/
|
||||
|
||||
#include <iostream>
|
||||
#include <sstream>
|
||||
#include <sys/time.h>
|
||||
|
||||
namespace aare {
|
||||
|
||||
#define RED "\x1b[31m"
|
||||
#define GREEN "\x1b[32m"
|
||||
#define YELLOW "\x1b[33m"
|
||||
#define BLUE "\x1b[34m"
|
||||
#define MAGENTA "\x1b[35m"
|
||||
#define CYAN "\x1b[36m"
|
||||
#define GRAY "\x1b[37m"
|
||||
#define DARKGRAY "\x1b[30m"
|
||||
|
||||
#define BG_BLACK "\x1b[48;5;232m"
|
||||
#define BG_RED "\x1b[41m"
|
||||
#define BG_GREEN "\x1b[42m"
|
||||
#define BG_YELLOW "\x1b[43m"
|
||||
#define BG_BLUE "\x1b[44m"
|
||||
#define BG_MAGENTA "\x1b[45m"
|
||||
#define BG_CYAN "\x1b[46m"
|
||||
#define RESET "\x1b[0m"
|
||||
#define BOLD "\x1b[1m"
|
||||
|
||||
enum TLogLevel {
|
||||
logERROR,
|
||||
logWARNING,
|
||||
logINFOBLUE,
|
||||
logINFOGREEN,
|
||||
logINFORED,
|
||||
logINFOCYAN,
|
||||
logINFOMAGENTA,
|
||||
logINFO,
|
||||
logDEBUG, // constructors, destructors etc. should still give too much
|
||||
// output
|
||||
logDEBUG1,
|
||||
logDEBUG2,
|
||||
logDEBUG3,
|
||||
logDEBUG4,
|
||||
logDEBUG5
|
||||
};
|
||||
|
||||
// Compiler should optimize away anything below this value
|
||||
#ifndef AARE_LOG_LEVEL
|
||||
#define AARE_LOG_LEVEL \
|
||||
"LOG LEVEL NOT SET IN CMAKE" // This is configured in the main
|
||||
// CMakeLists.txt
|
||||
#endif
|
||||
|
||||
#define __AT__ \
|
||||
std::string(__FILE__) + std::string("::") + std::string(__func__) + \
|
||||
std::string("(): ")
|
||||
#define __SHORT_FORM_OF_FILE__ \
|
||||
(strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
|
||||
#define __SHORT_AT__ \
|
||||
std::string(__SHORT_FORM_OF_FILE__) + std::string("::") + \
|
||||
std::string(__func__) + std::string("(): ")
|
||||
|
||||
class Logger {
|
||||
std::ostringstream os;
|
||||
TLogLevel m_level = AARE_LOG_LEVEL;
|
||||
|
||||
public:
|
||||
Logger() = default;
|
||||
explicit Logger(TLogLevel level) : m_level(level){};
|
||||
~Logger() {
|
||||
// output in the destructor to allow for << syntax
|
||||
os << RESET << '\n';
|
||||
std::clog << os.str() << std::flush; // Single write
|
||||
}
|
||||
|
||||
static TLogLevel &
|
||||
ReportingLevel() { // singleton, eh? TODO! Do we need a runtime option?
|
||||
static TLogLevel reportingLevel = logDEBUG5;
|
||||
return reportingLevel;
|
||||
}
|
||||
|
||||
// Danger: this buffer needs as many elements as TLogLevel
|
||||
static const char *Color(TLogLevel level) noexcept {
|
||||
static const char *const colors[] = {
|
||||
RED BOLD, YELLOW BOLD, BLUE, GREEN, RED, CYAN, MAGENTA,
|
||||
RESET, RESET, RESET, RESET, RESET, RESET, RESET};
|
||||
// out of bounds
|
||||
if (level < 0 || level >= sizeof(colors) / sizeof(colors[0])) {
|
||||
return RESET;
|
||||
}
|
||||
return colors[level];
|
||||
}
|
||||
|
||||
// Danger: this buffer needs as many elements as TLogLevel
|
||||
static std::string ToString(TLogLevel level) {
|
||||
static const char *const buffer[] = {
|
||||
"ERROR", "WARNING", "INFO", "INFO", "INFO",
|
||||
"INFO", "INFO", "INFO", "DEBUG", "DEBUG1",
|
||||
"DEBUG2", "DEBUG3", "DEBUG4", "DEBUG5"};
|
||||
// out of bounds
|
||||
if (level < 0 || level >= sizeof(buffer) / sizeof(buffer[0])) {
|
||||
return "UNKNOWN";
|
||||
}
|
||||
return buffer[level];
|
||||
}
|
||||
|
||||
std::ostringstream &Get() {
|
||||
os << Color(m_level) << "- " << Timestamp() << " " << ToString(m_level)
|
||||
<< ": ";
|
||||
return os;
|
||||
}
|
||||
|
||||
static std::string Timestamp() {
|
||||
constexpr size_t buffer_len = 12;
|
||||
char buffer[buffer_len];
|
||||
time_t t;
|
||||
::time(&t);
|
||||
tm r;
|
||||
strftime(buffer, buffer_len, "%X", localtime_r(&t, &r));
|
||||
buffer[buffer_len - 1] = '\0';
|
||||
struct timeval tv;
|
||||
gettimeofday(&tv, nullptr);
|
||||
constexpr size_t result_len = 100;
|
||||
char result[result_len];
|
||||
snprintf(result, result_len, "%s.%03ld", buffer,
|
||||
static_cast<long>(tv.tv_usec) / 1000);
|
||||
result[result_len - 1] = '\0';
|
||||
return result;
|
||||
}
|
||||
};
|
||||
|
||||
// TODO! Do we need to keep the runtime option?
|
||||
#define LOG(level) \
|
||||
if (level > AARE_LOG_LEVEL) \
|
||||
; \
|
||||
else if (level > aare::Logger::ReportingLevel()) \
|
||||
; \
|
||||
else \
|
||||
aare::Logger(level).Get()
|
||||
|
||||
} // namespace aare
|
||||
@@ -4,9 +4,9 @@
|
||||
#include <string>
|
||||
namespace aare {
|
||||
|
||||
/**
|
||||
/**
|
||||
* @brief Get the error message from an ifstream object
|
||||
*/
|
||||
*/
|
||||
std::string ifstream_error_msg(std::ifstream &ifs);
|
||||
|
||||
} // namespace aare
|
||||
@@ -1,18 +1,34 @@
|
||||
#pragma once
|
||||
#include <thread>
|
||||
#include <vector>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
#include "aare/utils/task.hpp"
|
||||
|
||||
namespace aare {
|
||||
|
||||
template<typename F>
|
||||
void RunInParallel(F func, const std::vector<std::pair<int, int>>& tasks) {
|
||||
// auto tasks = split_task(0, y.shape(0), n_threads);
|
||||
std::vector<std::thread> threads;
|
||||
for (auto &task : tasks) {
|
||||
threads.push_back(std::thread(func, task.first, task.second));
|
||||
}
|
||||
for (auto &thread : threads) {
|
||||
thread.join();
|
||||
}
|
||||
template <typename F>
|
||||
void RunInParallel(F func, const std::vector<std::pair<int, int>> &tasks) {
|
||||
// auto tasks = split_task(0, y.shape(0), n_threads);
|
||||
std::vector<std::thread> threads;
|
||||
for (auto &task : tasks) {
|
||||
threads.push_back(std::thread(func, task.first, task.second));
|
||||
}
|
||||
for (auto &thread : threads) {
|
||||
thread.join();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
template <typename T>
|
||||
std::vector<NDView<T,3>> make_subviews(NDView<T, 3> &data, ssize_t n_threads) {
|
||||
std::vector<NDView<T, 3>> subviews;
|
||||
subviews.reserve(n_threads);
|
||||
auto limits = split_task(0, data.shape(0), n_threads);
|
||||
for (const auto &lim : limits) {
|
||||
subviews.push_back(data.sub_view(lim.first, lim.second));
|
||||
}
|
||||
return subviews;
|
||||
}
|
||||
|
||||
} // namespace aare
|
||||
@@ -1,4 +1,4 @@
|
||||
|
||||
#pragma once
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
|
||||
@@ -1,10 +1,16 @@
|
||||
[tool.scikit-build.metadata.version]
|
||||
provider = "scikit_build_core.metadata.regex"
|
||||
input = "VERSION"
|
||||
regex = '^(?P<version>\d+(?:\.\d+)*(?:[\.\+\w]+)?)$'
|
||||
result = "{version}"
|
||||
|
||||
[build-system]
|
||||
requires = ["scikit-build-core>=0.10", "pybind11", "numpy"]
|
||||
build-backend = "scikit_build_core.build"
|
||||
|
||||
[project]
|
||||
name = "aare"
|
||||
version = "2025.4.22"
|
||||
dynamic = ["version"]
|
||||
requires-python = ">=3.11"
|
||||
dependencies = [
|
||||
"numpy",
|
||||
|
||||
@@ -32,6 +32,7 @@ set( PYTHON_FILES
|
||||
aare/ClusterFinder.py
|
||||
aare/ClusterVector.py
|
||||
|
||||
aare/calibration.py
|
||||
aare/func.py
|
||||
aare/RawFile.py
|
||||
aare/transform.py
|
||||
|
||||
@@ -1,22 +1,39 @@
|
||||
|
||||
from ._aare import ClusterFinder_Cluster3x3i, ClusterFinder_Cluster2x2i, ClusterFinderMT_Cluster3x3i, ClusterFinderMT_Cluster2x2i, ClusterCollector_Cluster3x3i, ClusterCollector_Cluster2x2i
|
||||
|
||||
|
||||
from ._aare import ClusterFileSink_Cluster3x3i, ClusterFileSink_Cluster2x2i
|
||||
from . import _aare
|
||||
import numpy as np
|
||||
|
||||
_supported_cluster_sizes = [(2,2), (3,3), (5,5), (7,7), (9,9),]
|
||||
|
||||
def _type_to_char(dtype):
|
||||
if dtype == np.int32:
|
||||
return 'i'
|
||||
elif dtype == np.float32:
|
||||
return 'f'
|
||||
elif dtype == np.float64:
|
||||
return 'd'
|
||||
else:
|
||||
raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32, np.float32, and np.float64 are supported.")
|
||||
|
||||
def _get_class(name, cluster_size, dtype):
|
||||
"""
|
||||
Helper function to get the class based on the name, cluster size, and dtype.
|
||||
"""
|
||||
try:
|
||||
class_name = f"{name}_Cluster{cluster_size[0]}x{cluster_size[1]}{_type_to_char(dtype)}"
|
||||
cls = getattr(_aare, class_name)
|
||||
except AttributeError:
|
||||
raise ValueError(f"Unsupported combination of type and cluster size: {dtype}/{cluster_size} when requesting {class_name}")
|
||||
return cls
|
||||
|
||||
|
||||
|
||||
def ClusterFinder(image_size, cluster_size, n_sigma=5, dtype = np.int32, capacity = 1024):
|
||||
"""
|
||||
Factory function to create a ClusterFinder object. Provides a cleaner syntax for
|
||||
the templated ClusterFinder in C++.
|
||||
"""
|
||||
if dtype == np.int32 and cluster_size == (3,3):
|
||||
return ClusterFinder_Cluster3x3i(image_size, n_sigma = n_sigma, capacity=capacity)
|
||||
elif dtype == np.int32 and cluster_size == (2,2):
|
||||
return ClusterFinder_Cluster2x2i(image_size, n_sigma = n_sigma, capacity=capacity)
|
||||
else:
|
||||
#TODO! add the other formats
|
||||
raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32 is supported.")
|
||||
cls = _get_class("ClusterFinder", cluster_size, dtype)
|
||||
return cls(image_size, n_sigma=n_sigma, capacity=capacity)
|
||||
|
||||
|
||||
|
||||
def ClusterFinderMT(image_size, cluster_size = (3,3), dtype=np.int32, n_sigma=5, capacity = 1024, n_threads = 3):
|
||||
@@ -25,31 +42,18 @@ def ClusterFinderMT(image_size, cluster_size = (3,3), dtype=np.int32, n_sigma=5,
|
||||
the templated ClusterFinderMT in C++.
|
||||
"""
|
||||
|
||||
if dtype == np.int32 and cluster_size == (3,3):
|
||||
return ClusterFinderMT_Cluster3x3i(image_size, n_sigma = n_sigma,
|
||||
capacity = capacity, n_threads = n_threads)
|
||||
elif dtype == np.int32 and cluster_size == (2,2):
|
||||
return ClusterFinderMT_Cluster2x2i(image_size, n_sigma = n_sigma,
|
||||
capacity = capacity, n_threads = n_threads)
|
||||
else:
|
||||
#TODO! add the other formats
|
||||
raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32 is supported.")
|
||||
cls = _get_class("ClusterFinderMT", cluster_size, dtype)
|
||||
return cls(image_size, n_sigma=n_sigma, capacity=capacity, n_threads=n_threads)
|
||||
|
||||
|
||||
def ClusterCollector(clusterfindermt, cluster_size = (3,3), dtype=np.int32):
|
||||
def ClusterCollector(clusterfindermt, dtype=np.int32):
|
||||
"""
|
||||
Factory function to create a ClusterCollector object. Provides a cleaner syntax for
|
||||
the templated ClusterCollector in C++.
|
||||
"""
|
||||
|
||||
if dtype == np.int32 and cluster_size == (3,3):
|
||||
return ClusterCollector_Cluster3x3i(clusterfindermt)
|
||||
elif dtype == np.int32 and cluster_size == (2,2):
|
||||
return ClusterCollector_Cluster2x2i(clusterfindermt)
|
||||
|
||||
else:
|
||||
#TODO! add the other formats
|
||||
raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32 is supported.")
|
||||
|
||||
cls = _get_class("ClusterCollector", clusterfindermt.cluster_size, dtype)
|
||||
return cls(clusterfindermt)
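A short usage sketch of the two factories together (image size and thread count are illustrative):

    mt_finder = ClusterFinderMT((512, 1024), cluster_size=(3, 3), dtype=np.int32,
                                n_sigma=5, capacity=1024, n_threads=4)
    collector = ClusterCollector(mt_finder)  # cluster_size is taken from the finder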
|
||||
|
||||
def ClusterFileSink(clusterfindermt, cluster_file, dtype=np.int32):
|
||||
"""
|
||||
@@ -57,11 +61,26 @@ def ClusterFileSink(clusterfindermt, cluster_file, dtype=np.int32):
|
||||
the templated ClusterFileSink in C++.
|
||||
"""
|
||||
|
||||
if dtype == np.int32 and clusterfindermt.cluster_size == (3,3):
|
||||
return ClusterFileSink_Cluster3x3i(clusterfindermt, cluster_file)
|
||||
elif dtype == np.int32 and clusterfindermt.cluster_size == (2,2):
|
||||
return ClusterFileSink_Cluster2x2i(clusterfindermt, cluster_file)
|
||||
|
||||
else:
|
||||
#TODO! add the other formats
|
||||
raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32 is supported.")
|
||||
cls = _get_class("ClusterFileSink", clusterfindermt.cluster_size, dtype)
|
||||
return cls(clusterfindermt, cluster_file)
|
||||
|
||||
|
||||
def ClusterFile(fname, cluster_size=(3,3), dtype=np.int32, chunk_size = 1000):
|
||||
"""
|
||||
Factory function to create a ClusterFile object. Provides a cleaner syntax for
|
||||
the templated ClusterFile in C++.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from aare import ClusterFile
|
||||
|
||||
with ClusterFile("clusters.clust", cluster_size=(3,3), dtype=np.int32) as cf:
|
||||
# cf is now a ClusterFile_Cluster3x3i object but you don't need to know that.
|
||||
for clusters in cf:
|
||||
# Loop over clusters in chunks of 1000
|
||||
# The type of clusters will be a ClusterVector_Cluster3x3i in this case
|
||||
|
||||
"""
|
||||
|
||||
cls = _get_class("ClusterFile", cluster_size, dtype)
|
||||
return cls(fname, chunk_size=chunk_size)
|
||||
|
||||
@@ -5,20 +5,19 @@ from . import _aare
|
||||
from ._aare import File, RawMasterFile, RawSubFile, JungfrauDataFile
|
||||
from ._aare import Pedestal_d, Pedestal_f, ClusterFinder_Cluster3x3i, VarClusterFinder
|
||||
from ._aare import DetectorType
|
||||
from ._aare import ClusterFile_Cluster3x3i as ClusterFile
|
||||
from ._aare import hitmap
|
||||
from ._aare import ROI
|
||||
|
||||
# from ._aare import ClusterFinderMT, ClusterCollector, ClusterFileSink, ClusterVector_i
|
||||
|
||||
from .ClusterFinder import ClusterFinder, ClusterCollector, ClusterFinderMT, ClusterFileSink
|
||||
from .ClusterFinder import ClusterFinder, ClusterCollector, ClusterFinderMT, ClusterFileSink, ClusterFile
|
||||
from .ClusterVector import ClusterVector
|
||||
|
||||
|
||||
from ._aare import fit_gaus, fit_pol1
|
||||
from ._aare import fit_gaus, fit_pol1, fit_scurve, fit_scurve2
|
||||
from ._aare import Interpolator
|
||||
from ._aare import calculate_eta2
|
||||
|
||||
from ._aare import reduce_to_2x2, reduce_to_3x3
|
||||
|
||||
from ._aare import apply_custom_weights
|
||||
|
||||
@@ -31,3 +30,9 @@ from .utils import random_pixels, random_pixel, flat_list, add_colorbar
|
||||
|
||||
# make functions available in the top-level API
|
||||
from .func import *
|
||||
|
||||
from .calibration import *
|
||||
from ._aare import apply_calibration, count_switching_pixels
|
||||
from ._aare import calculate_pedestal, calculate_pedestal_float, calculate_pedestal_g0, calculate_pedestal_g0_float
|
||||
|
||||
from ._aare import VarClusterFinder
|
||||
|
||||
21
python/aare/calibration.py
Normal file
21
python/aare/calibration.py
Normal file
@@ -0,0 +1,21 @@
|
||||
# Calibration-related functions
|
||||
import numpy as np
|
||||
def load_calibration(fname, hg0=False):
|
||||
"""
|
||||
Load calibration data from a file.
|
||||
|
||||
Parameters:
|
||||
fname (str): Path to the calibration file.
|
||||
hg0 (bool): If True, load HG0 calibration data instead of G0.
|
||||
|
||||
"""
|
||||
gains = 3
|
||||
rows = 512
|
||||
cols = 1024
|
||||
with open(fname, 'rb') as f:
|
||||
cal = np.fromfile(f, count=gains * rows * cols, dtype=np.double).reshape(
|
||||
gains, rows, cols
|
||||
)
|
||||
if hg0:
|
||||
cal[0] = np.fromfile(f, count=rows * cols, dtype=np.double).reshape(rows, cols)
|
||||
return cal
|
||||
@@ -1 +1 @@
|
||||
from ._aare import gaus, pol1
|
||||
from ._aare import gaus, pol1, scurve, scurve2
|
||||
@@ -1,79 +1,8 @@
|
||||
import sys
|
||||
sys.path.append('/home/l_msdetect/erik/aare/build')
|
||||
|
||||
from aare._aare import ClusterVector_i, Interpolator
|
||||
|
||||
import pickle
|
||||
from aare import apply_calibration
|
||||
import numpy as np
|
||||
import matplotlib.pyplot as plt
|
||||
import boost_histogram as bh
|
||||
import torch
|
||||
import math
|
||||
import time
|
||||
raw = np.zeros((5,10,10), dtype=np.uint16)
|
||||
pedestal = np.zeros((3,10,10), dtype=np.float32)
|
||||
calibration = np.ones((3,10,10), dtype=np.float32)
|
||||
calibrated = apply_calibration(raw, pedestal, calibration,)
|
||||
|
||||
|
||||
|
||||
def gaussian_2d(mx, my, sigma = 1, res=100, grid_size = 2):
|
||||
"""
|
||||
Generate a 2D gaussian at position mx, my, with sigma=sigma.
The gaussian is placed on a grid_size x grid_size pixel matrix with
resolution res in one dimension.
|
||||
"""
|
||||
x = torch.linspace(0, pixel_size*grid_size, res)
|
||||
x,y = torch.meshgrid(x,x, indexing="ij")
|
||||
return 1 / (2*math.pi*sigma**2) * \
|
||||
torch.exp(-((x - my)**2 / (2*sigma**2) + (y - mx)**2 / (2*sigma**2)))
|
||||
|
||||
scale = 1000 #Scale factor when converting to integer
|
||||
pixel_size = 25 #um
|
||||
grid = 2
|
||||
resolution = 100
|
||||
sigma_um = 10
|
||||
xa = np.linspace(0,grid*pixel_size,resolution)
|
||||
ticks = [0, 25, 50]
|
||||
|
||||
hit = np.array((20,20))
|
||||
etahist_fname = "/home/l_msdetect/erik/tmp/test_hist.pkl"
|
||||
|
||||
local_resolution = 99
|
||||
grid_size = 3
|
||||
xaxis = np.linspace(0,grid_size*pixel_size, local_resolution)
|
||||
t = gaussian_2d(hit[0],hit[1], grid_size = grid_size, sigma = 10, res = local_resolution)
|
||||
pixels = t.reshape(grid_size, t.shape[0] // grid_size, grid_size, t.shape[1] // grid_size).sum(axis = 3).sum(axis = 1)
|
||||
pixels = pixels.numpy()
|
||||
pixels = (pixels*scale).astype(np.int32)
|
||||
v = ClusterVector_i(3,3)
|
||||
v.push_back(1,1, pixels)
|
||||
|
||||
with open(etahist_fname, "rb") as f:
|
||||
hist = pickle.load(f)
|
||||
eta = hist.view().copy()
|
||||
etabinsx = np.array(hist.axes.edges.T[0].flat)
|
||||
etabinsy = np.array(hist.axes.edges.T[1].flat)
|
||||
ebins = np.array(hist.axes.edges.T[2].flat)
|
||||
p = Interpolator(eta, etabinsx[0:-1], etabinsy[0:-1], ebins[0:-1])
|
||||
|
||||
|
||||
|
||||
|
||||
#Generate the hit
|
||||
|
||||
|
||||
|
||||
|
||||
tmp = p.interpolate(v)
|
||||
print(f'tmp:{tmp}')
|
||||
pos = np.array((tmp['x'], tmp['y']))*25
|
||||
|
||||
|
||||
print(pixels)
|
||||
fig, ax = plt.subplots(figsize = (7,7))
|
||||
ax.pcolormesh(xaxis, xaxis, t)
|
||||
ax.plot(*pos, 'o')
|
||||
ax.set_xticks([0,25,50,75])
|
||||
ax.set_yticks([0,25,50,75])
|
||||
ax.set_xlim(0,75)
|
||||
ax.set_ylim(0,75)
|
||||
ax.grid()
|
||||
print(f'{hit=}')
|
||||
print(f'{pos=}')
|
||||
92
python/src/bind_Cluster.hpp
Normal file
92
python/src/bind_Cluster.hpp
Normal file
@@ -0,0 +1,92 @@
|
||||
#include "aare/Cluster.hpp"
|
||||
|
||||
#include <cstdint>
|
||||
#include <filesystem>
|
||||
#include <fmt/format.h>
|
||||
#include <pybind11/numpy.h>
|
||||
#include <pybind11/pybind11.h>
|
||||
#include <pybind11/stl.h>
|
||||
#include <pybind11/stl_bind.h>
|
||||
|
||||
namespace py = pybind11;
|
||||
using pd_type = double;
|
||||
|
||||
using namespace aare;
|
||||
|
||||
#pragma GCC diagnostic push
|
||||
#pragma GCC diagnostic ignored "-Wunused-parameter"
|
||||
|
||||
template <typename Type, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType>
|
||||
void define_Cluster(py::module &m, const std::string &typestr) {
|
||||
auto class_name = fmt::format("Cluster{}", typestr);
|
||||
|
||||
py::class_<Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>>(
|
||||
m, class_name.c_str(), py::buffer_protocol())
|
||||
|
||||
.def(py::init([](uint8_t x, uint8_t y,
|
||||
py::array_t<Type, py::array::forcecast> data) {
|
||||
py::buffer_info buf_info = data.request();
|
||||
Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType> cluster;
|
||||
cluster.x = x;
|
||||
cluster.y = y;
|
||||
auto r = data.template unchecked<1>(); // no bounds checks
|
||||
for (py::ssize_t i = 0; i < data.size(); ++i) {
|
||||
cluster.data[i] = r(i);
|
||||
}
|
||||
return cluster;
|
||||
}))
|
||||
|
||||
// TODO! Review if to keep or not
|
||||
.def_property_readonly(
|
||||
"data",
|
||||
[](Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType> &c)
|
||||
-> py::array {
|
||||
return py::array(py::buffer_info(
|
||||
c.data.data(), sizeof(Type),
|
||||
py::format_descriptor<Type>::format(), // Type
|
||||
// format
|
||||
2, // Number of dimensions
|
||||
{static_cast<ssize_t>(ClusterSizeX),
|
||||
static_cast<ssize_t>(ClusterSizeY)}, // Shape (flattened)
|
||||
{sizeof(Type) * ClusterSizeY, sizeof(Type)}
|
||||
// Stride (step size between elements)
|
||||
));
|
||||
})
|
||||
|
||||
.def_readonly("x",
|
||||
&Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>::x)
|
||||
|
||||
.def_readonly("y",
|
||||
&Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>::y);
|
||||
}
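Assuming the generated class is exported as e.g. _aare.Cluster3x3i (the exact name depends on the typestr passed where define_Cluster is instantiated, which is not shown in this diff), construction and readback look like:

    import numpy as np
    from aare import _aare

    c = _aare.Cluster3x3i(5, 7, np.arange(9, dtype=np.int32))  # x, y, flattened 3x3 data
    print(c.x, c.y, c.data.shape)  # 5 7 (3, 3)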
|
||||
|
||||
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType = int16_t>
|
||||
void reduce_to_3x3(py::module &m) {
|
||||
|
||||
m.def(
|
||||
"reduce_to_3x3",
|
||||
[](const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cl) {
|
||||
return reduce_to_3x3(cl);
|
||||
},
|
||||
py::return_value_policy::move,
|
||||
"Reduce cluster to 3x3 subcluster by taking the 3x3 subcluster with "
|
||||
"the highest photon energy.");
|
||||
}
|
||||
|
||||
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType = int16_t>
|
||||
void reduce_to_2x2(py::module &m) {
|
||||
|
||||
m.def(
|
||||
"reduce_to_2x2",
|
||||
[](const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cl) {
|
||||
return reduce_to_2x2(cl);
|
||||
},
|
||||
py::return_value_policy::move,
|
||||
"Reduce cluster to 2x2 subcluster by taking the 2x2 subcluster with "
|
||||
"the highest photon energy.");
|
||||
}
|
||||
|
||||
#pragma GCC diagnostic pop
|
||||
44
python/src/bind_ClusterCollector.hpp
Normal file
44
python/src/bind_ClusterCollector.hpp
Normal file
@@ -0,0 +1,44 @@
|
||||
#include "aare/ClusterCollector.hpp"
|
||||
#include "aare/ClusterFileSink.hpp"
|
||||
#include "aare/ClusterFinder.hpp"
|
||||
#include "aare/ClusterFinderMT.hpp"
|
||||
#include "aare/ClusterVector.hpp"
|
||||
#include "aare/NDView.hpp"
|
||||
#include "aare/Pedestal.hpp"
|
||||
#include "np_helper.hpp"
|
||||
|
||||
#include <cstdint>
|
||||
#include <filesystem>
|
||||
#include <pybind11/pybind11.h>
|
||||
#include <pybind11/stl.h>
|
||||
#include <pybind11/stl_bind.h>
|
||||
|
||||
namespace py = pybind11;
|
||||
using pd_type = double;
|
||||
|
||||
using namespace aare;
|
||||
|
||||
#pragma GCC diagnostic push
|
||||
#pragma GCC diagnostic ignored "-Wunused-parameter"
|
||||
|
||||
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType = uint16_t>
|
||||
void define_ClusterCollector(py::module &m, const std::string &typestr) {
|
||||
auto class_name = fmt::format("ClusterCollector_{}", typestr);
|
||||
|
||||
using ClusterType = Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>;
|
||||
|
||||
py::class_<ClusterCollector<ClusterType>>(m, class_name.c_str())
|
||||
.def(py::init<ClusterFinderMT<ClusterType, uint16_t, double> *>())
|
||||
.def("stop", &ClusterCollector<ClusterType>::stop)
|
||||
.def(
|
||||
"steal_clusters",
|
||||
[](ClusterCollector<ClusterType> &self) {
|
||||
auto v = new std::vector<ClusterVector<ClusterType>>(
|
||||
self.steal_clusters());
|
||||
return v; // TODO change!!!
|
||||
},
|
||||
py::return_value_policy::take_ownership);
|
||||
}
|
||||
|
||||
#pragma GCC diagnostic pop
|
||||
@@ -21,8 +21,7 @@ using namespace ::aare;
|
||||
|
||||
template <typename Type, uint8_t CoordSizeX, uint8_t CoordSizeY,
|
||||
typename CoordType = uint16_t>
|
||||
void define_cluster_file_io_bindings(py::module &m,
|
||||
const std::string &typestr) {
|
||||
void define_ClusterFile(py::module &m, const std::string &typestr) {
|
||||
|
||||
using ClusterType = Cluster<Type, CoordSizeX, CoordSizeY, CoordType>;
|
||||
|
||||
@@ -39,19 +38,20 @@ void define_cluster_file_io_bindings(py::module &m,
|
||||
self.read_clusters(n_clusters));
|
||||
return v;
|
||||
},
|
||||
py::return_value_policy::take_ownership)
|
||||
py::return_value_policy::take_ownership, py::arg("n_clusters"))
|
||||
.def("read_frame",
|
||||
[](ClusterFile<ClusterType> &self) {
|
||||
auto v = new ClusterVector<ClusterType>(self.read_frame());
|
||||
return v;
|
||||
})
|
||||
.def("set_roi", &ClusterFile<ClusterType>::set_roi)
|
||||
.def("set_roi", &ClusterFile<ClusterType>::set_roi,
|
||||
py::arg("roi"))
|
||||
.def(
|
||||
"set_noise_map",
|
||||
[](ClusterFile<ClusterType> &self, py::array_t<int32_t> noise_map) {
|
||||
auto view = make_view_2d(noise_map);
|
||||
self.set_noise_map(view);
|
||||
})
|
||||
}, py::arg("noise_map"))
|
||||
|
||||
.def("set_gain_map",
|
||||
[](ClusterFile<ClusterType> &self, py::array_t<double> gain_map) {
|
||||
37
python/src/bind_ClusterFileSink.hpp
Normal file
37
python/src/bind_ClusterFileSink.hpp
Normal file
@@ -0,0 +1,37 @@
|
||||
#include "aare/ClusterCollector.hpp"
|
||||
#include "aare/ClusterFileSink.hpp"
|
||||
#include "aare/ClusterFinder.hpp"
|
||||
#include "aare/ClusterFinderMT.hpp"
|
||||
#include "aare/ClusterVector.hpp"
|
||||
#include "aare/NDView.hpp"
|
||||
#include "aare/Pedestal.hpp"
|
||||
#include "np_helper.hpp"
|
||||
|
||||
#include <cstdint>
|
||||
#include <filesystem>
|
||||
#include <pybind11/pybind11.h>
|
||||
#include <pybind11/stl.h>
|
||||
#include <pybind11/stl_bind.h>
|
||||
|
||||
namespace py = pybind11;
|
||||
using pd_type = double;
|
||||
|
||||
using namespace aare;
|
||||
|
||||
#pragma GCC diagnostic push
|
||||
#pragma GCC diagnostic ignored "-Wunused-parameter"
|
||||
|
||||
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType = uint16_t>
|
||||
void define_ClusterFileSink(py::module &m, const std::string &typestr) {
|
||||
auto class_name = fmt::format("ClusterFileSink_{}", typestr);
|
||||
|
||||
using ClusterType = Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>;
|
||||
|
||||
py::class_<ClusterFileSink<ClusterType>>(m, class_name.c_str())
|
||||
.def(py::init<ClusterFinderMT<ClusterType, uint16_t, double> *,
|
||||
const std::filesystem::path &>())
|
||||
.def("stop", &ClusterFileSink<ClusterType>::stop);
|
||||
}
|
||||
|
||||
#pragma GCC diagnostic pop
|
||||
77
python/src/bind_ClusterFinder.hpp
Normal file
77
python/src/bind_ClusterFinder.hpp
Normal file
@@ -0,0 +1,77 @@
|
||||
#include "aare/ClusterCollector.hpp"
|
||||
#include "aare/ClusterFileSink.hpp"
|
||||
#include "aare/ClusterFinder.hpp"
|
||||
#include "aare/ClusterFinderMT.hpp"
|
||||
#include "aare/ClusterVector.hpp"
|
||||
#include "aare/NDView.hpp"
|
||||
#include "aare/Pedestal.hpp"
|
||||
#include "np_helper.hpp"
|
||||
|
||||
#include <cstdint>
|
||||
#include <filesystem>
|
||||
#include <pybind11/pybind11.h>
|
||||
#include <pybind11/stl.h>
|
||||
#include <pybind11/stl_bind.h>
|
||||
|
||||
namespace py = pybind11;
|
||||
using pd_type = double;
|
||||
|
||||
using namespace aare;
|
||||
|
||||
#pragma GCC diagnostic push
|
||||
#pragma GCC diagnostic ignored "-Wunused-parameter"
|
||||
|
||||
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType = uint16_t>
|
||||
void define_ClusterFinder(py::module &m, const std::string &typestr) {
|
||||
auto class_name = fmt::format("ClusterFinder_{}", typestr);
|
||||
|
||||
using ClusterType = Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>;
|
||||
|
||||
py::class_<ClusterFinder<ClusterType, uint16_t, pd_type>>(
|
||||
m, class_name.c_str())
|
||||
.def(py::init<Shape<2>, pd_type, size_t>(), py::arg("image_size"),
|
||||
py::arg("n_sigma") = 5.0, py::arg("capacity") = 1'000'000)
|
||||
.def("push_pedestal_frame",
|
||||
[](ClusterFinder<ClusterType, uint16_t, pd_type> &self,
|
||||
py::array_t<uint16_t> frame) {
|
||||
auto view = make_view_2d(frame);
|
||||
self.push_pedestal_frame(view);
|
||||
})
|
||||
.def("clear_pedestal",
|
||||
&ClusterFinder<ClusterType, uint16_t, pd_type>::clear_pedestal)
|
||||
.def_property_readonly(
|
||||
"pedestal",
|
||||
[](ClusterFinder<ClusterType, uint16_t, pd_type> &self) {
|
||||
auto pd = new NDArray<pd_type, 2>{};
|
||||
*pd = self.pedestal();
|
||||
return return_image_data(pd);
|
||||
})
|
||||
.def_property_readonly(
|
||||
"noise",
|
||||
[](ClusterFinder<ClusterType, uint16_t, pd_type> &self) {
|
||||
auto arr = new NDArray<pd_type, 2>{};
|
||||
*arr = self.noise();
|
||||
return return_image_data(arr);
|
||||
})
|
||||
.def(
|
||||
"steal_clusters",
|
||||
[](ClusterFinder<ClusterType, uint16_t, pd_type> &self,
|
||||
bool realloc_same_capacity) {
|
||||
ClusterVector<ClusterType> clusters =
|
||||
self.steal_clusters(realloc_same_capacity);
|
||||
return clusters;
|
||||
},
|
||||
py::arg("realloc_same_capacity") = false)
|
||||
.def(
|
||||
"find_clusters",
|
||||
[](ClusterFinder<ClusterType, uint16_t, pd_type> &self,
|
||||
py::array_t<uint16_t> frame, uint64_t frame_number) {
|
||||
auto view = make_view_2d(frame);
|
||||
self.find_clusters(view, frame_number);
|
||||
return;
|
||||
},
|
||||
py::arg(), py::arg("frame_number") = 0);
|
||||
}
|
||||
|
||||
#pragma GCC diagnostic pop
|
||||
81
python/src/bind_ClusterFinderMT.hpp
Normal file
81
python/src/bind_ClusterFinderMT.hpp
Normal file
@@ -0,0 +1,81 @@
|
||||
#include "aare/ClusterCollector.hpp"
|
||||
#include "aare/ClusterFileSink.hpp"
|
||||
#include "aare/ClusterFinder.hpp"
|
||||
#include "aare/ClusterFinderMT.hpp"
|
||||
#include "aare/ClusterVector.hpp"
|
||||
#include "aare/NDView.hpp"
|
||||
#include "aare/Pedestal.hpp"
|
||||
#include "np_helper.hpp"
|
||||
|
||||
#include <cstdint>
|
||||
#include <filesystem>
|
||||
#include <pybind11/pybind11.h>
|
||||
#include <pybind11/stl.h>
|
||||
#include <pybind11/stl_bind.h>
|
||||
|
||||
namespace py = pybind11;
|
||||
using pd_type = double;
|
||||
|
||||
using namespace aare;
|
||||
|
||||
#pragma GCC diagnostic push
|
||||
#pragma GCC diagnostic ignored "-Wunused-parameter"
|
||||
|
||||
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType = uint16_t>
|
||||
void define_ClusterFinderMT(py::module &m, const std::string &typestr) {
|
||||
auto class_name = fmt::format("ClusterFinderMT_{}", typestr);
|
||||
|
||||
using ClusterType = Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>;
|
||||
|
||||
py::class_<ClusterFinderMT<ClusterType, uint16_t, pd_type>>(
|
||||
m, class_name.c_str())
|
||||
.def(py::init<Shape<2>, pd_type, size_t, size_t>(),
|
||||
py::arg("image_size"), py::arg("n_sigma") = 5.0,
|
||||
py::arg("capacity") = 2048, py::arg("n_threads") = 3)
|
||||
.def("push_pedestal_frame",
|
||||
[](ClusterFinderMT<ClusterType, uint16_t, pd_type> &self,
|
||||
py::array_t<uint16_t> frame) {
|
||||
auto view = make_view_2d(frame);
|
||||
self.push_pedestal_frame(view);
|
||||
})
|
||||
.def(
|
||||
"find_clusters",
|
||||
[](ClusterFinderMT<ClusterType, uint16_t, pd_type> &self,
|
||||
py::array_t<uint16_t> frame, uint64_t frame_number) {
|
||||
auto view = make_view_2d(frame);
|
||||
self.find_clusters(view, frame_number);
|
||||
return;
|
||||
},
|
||||
py::arg(), py::arg("frame_number") = 0)
|
||||
.def_property_readonly(
|
||||
"cluster_size",
|
||||
[](ClusterFinderMT<ClusterType, uint16_t, pd_type> &self) {
|
||||
return py::make_tuple(ClusterSizeX, ClusterSizeY);
|
||||
})
|
||||
.def("clear_pedestal",
|
||||
&ClusterFinderMT<ClusterType, uint16_t, pd_type>::clear_pedestal)
|
||||
.def("sync", &ClusterFinderMT<ClusterType, uint16_t, pd_type>::sync)
|
||||
.def("stop", &ClusterFinderMT<ClusterType, uint16_t, pd_type>::stop)
|
||||
.def("start", &ClusterFinderMT<ClusterType, uint16_t, pd_type>::start)
|
||||
.def(
|
||||
"pedestal",
|
||||
[](ClusterFinderMT<ClusterType, uint16_t, pd_type> &self,
|
||||
size_t thread_index) {
|
||||
auto pd = new NDArray<pd_type, 2>{};
|
||||
*pd = self.pedestal(thread_index);
|
||||
return return_image_data(pd);
|
||||
},
|
||||
py::arg("thread_index") = 0)
|
||||
.def(
|
||||
"noise",
|
||||
[](ClusterFinderMT<ClusterType, uint16_t, pd_type> &self,
|
||||
size_t thread_index) {
|
||||
auto arr = new NDArray<pd_type, 2>{};
|
||||
*arr = self.noise(thread_index);
|
||||
return return_image_data(arr);
|
||||
},
|
||||
py::arg("thread_index") = 0);
|
||||
}
|
||||
|
||||
#pragma GCC diagnostic pop
|
||||
@@ -44,10 +44,11 @@ void define_ClusterVector(py::module &m, const std::string &typestr) {
|
||||
auto *vec = new std::vector<Type>(self.sum());
|
||||
return return_vector(vec);
|
||||
})
|
||||
.def("sum_2x2", [](ClusterVector<ClusterType> &self){
|
||||
auto *vec = new std::vector<Type>(self.sum_2x2());
|
||||
return return_vector(vec);
|
||||
})
|
||||
.def("sum_2x2",
|
||||
[](ClusterVector<ClusterType> &self) {
|
||||
auto *vec = new std::vector<Type>(self.sum_2x2());
|
||||
return return_vector(vec);
|
||||
})
|
||||
.def_property_readonly("size", &ClusterVector<ClusterType>::size)
|
||||
.def("item_size", &ClusterVector<ClusterType>::item_size)
|
||||
.def_property_readonly("fmt",
|
||||
@@ -101,4 +102,49 @@ void define_ClusterVector(py::module &m, const std::string &typestr) {
|
||||
|
||||
return hitmap;
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
template <typename Type, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType = uint16_t>
|
||||
void define_2x2_reduction(py::module &m) {
|
||||
m.def(
|
||||
"reduce_to_2x2",
|
||||
[](const ClusterVector<
|
||||
Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>> &cv) {
|
||||
return new ClusterVector<Cluster<Type, 2, 2, CoordType>>(
|
||||
reduce_to_2x2(cv));
|
||||
},
|
||||
R"(
|
||||
|
||||
Reduce each cluster to the 2x2 subcluster with
the highest photon energy.
|
||||
Parameters
|
||||
----------
|
||||
cv : ClusterVector
|
||||
)",
|
||||
py::arg("clustervector"));
|
||||
}
|
||||
|
||||
template <typename Type, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType = uint16_t>
|
||||
void define_3x3_reduction(py::module &m) {
|
||||
|
||||
m.def(
|
||||
"reduce_to_3x3",
|
||||
[](const ClusterVector<
|
||||
Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>> &cv) {
|
||||
return new ClusterVector<Cluster<Type, 3, 3, CoordType>>(
|
||||
reduce_to_3x3(cv));
|
||||
},
|
||||
R"(
|
||||
|
||||
Reduce each cluster to the 3x3 subcluster with
the highest photon energy.
|
||||
Parameters
|
||||
----------
|
||||
cv : ClusterVector
|
||||
)",
|
||||
py::arg("clustervector"));
|
||||
}
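A hedged usage sketch of these vector-level reductions from Python (cv5, a ClusterVector of 5x5 clusters, is assumed to exist):

    from aare import reduce_to_2x2, reduce_to_3x3

    cv3 = reduce_to_3x3(cv5)  # keeps the 3x3 subcluster with the highest photon energy
    cv2 = reduce_to_2x2(cv5)  # same idea, down to 2x2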
|
||||
|
||||
#pragma GCC diagnostic pop
|
||||
153
python/src/bind_calibration.hpp
Normal file
153
python/src/bind_calibration.hpp
Normal file
@@ -0,0 +1,153 @@
|
||||
#include "aare/calibration.hpp"
|
||||
|
||||
#include <cstdint>
|
||||
#include <filesystem>
|
||||
#include <pybind11/numpy.h>
|
||||
#include <pybind11/pybind11.h>
|
||||
#include <pybind11/stl.h>
|
||||
#include <pybind11/stl_bind.h>
|
||||
|
||||
namespace py = pybind11;
|
||||
|
||||
template <typename DataType>
|
||||
py::array_t<DataType> pybind_apply_calibration(
|
||||
py::array_t<uint16_t, py::array::c_style | py::array::forcecast> data,
|
||||
py::array_t<DataType, py::array::c_style | py::array::forcecast> pedestal,
|
||||
py::array_t<DataType, py::array::c_style | py::array::forcecast>
|
||||
calibration,
|
||||
int n_threads = 4) {
|
||||
|
||||
auto data_span = make_view_3d(data); // data is always 3D
|
||||
/* No pointer is passed, so NumPy will allocate the buffer */
|
||||
auto result = py::array_t<DataType>(data_span.shape());
|
||||
auto res = make_view_3d(result);
|
||||
if (data.ndim() == 3 && pedestal.ndim() == 3 && calibration.ndim() == 3) {
|
||||
auto ped = make_view_3d(pedestal);
|
||||
auto cal = make_view_3d(calibration);
|
||||
aare::apply_calibration<DataType, 3>(res, data_span, ped, cal,
|
||||
n_threads);
|
||||
} else if (data.ndim() == 3 && pedestal.ndim() == 2 &&
|
||||
calibration.ndim() == 2) {
|
||||
auto ped = make_view_2d(pedestal);
|
||||
auto cal = make_view_2d(calibration);
|
||||
aare::apply_calibration<DataType, 2>(res, data_span, ped, cal,
|
||||
n_threads);
|
||||
} else {
|
||||
throw std::runtime_error(
|
||||
"Invalid number of dimensions for data, pedestal or calibration");
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
py::array_t<int> pybind_count_switching_pixels(
|
||||
py::array_t<uint16_t, py::array::c_style | py::array::forcecast> data,
|
||||
ssize_t n_threads = 4) {
|
||||
|
||||
auto data_span = make_view_3d(data);
|
||||
auto arr = new NDArray<int, 2>{};
|
||||
*arr = aare::count_switching_pixels(data_span, n_threads);
|
||||
return return_image_data(arr);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
py::array_t<T> pybind_calculate_pedestal(
|
||||
py::array_t<uint16_t, py::array::c_style | py::array::forcecast> data,
|
||||
ssize_t n_threads) {
|
||||
|
||||
auto data_span = make_view_3d(data);
|
||||
auto arr = new NDArray<T, 3>{};
|
||||
*arr = aare::calculate_pedestal<T, false>(data_span, n_threads);
|
||||
return return_image_data(arr);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
py::array_t<T> pybind_calculate_pedestal_g0(
|
||||
py::array_t<uint16_t, py::array::c_style | py::array::forcecast> data,
|
||||
ssize_t n_threads) {
|
||||
|
||||
auto data_span = make_view_3d(data);
|
||||
auto arr = new NDArray<T, 2>{};
|
||||
*arr = aare::calculate_pedestal<T, true>(data_span, n_threads);
|
||||
return return_image_data(arr);
|
||||
}
|
||||
|
||||
void bind_calibration(py::module &m) {
|
||||
m.def("apply_calibration", &pybind_apply_calibration<double>,
|
||||
py::arg("raw_data").noconvert(), py::kw_only(),
|
||||
py::arg("pd").noconvert(), py::arg("cal").noconvert(),
|
||||
py::arg("n_threads") = 4);
|
||||
|
||||
m.def("apply_calibration", &pybind_apply_calibration<float>,
|
||||
py::arg("raw_data").noconvert(), py::kw_only(),
|
||||
py::arg("pd").noconvert(), py::arg("cal").noconvert(),
|
||||
py::arg("n_threads") = 4);
|
||||
|
||||
m.def("count_switching_pixels", &pybind_count_switching_pixels,
|
||||
R"(
|
||||
Count the number of times each pixel switches to G1 or G2.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
raw_data : array_like
|
||||
3D array of shape (frames, rows, cols) to count the switching pixels from.
|
||||
n_threads : int
|
||||
The number of threads to use for the calculation.
|
||||
)",
|
||||
py::arg("raw_data").noconvert(), py::kw_only(),
|
||||
py::arg("n_threads") = 4);
|
||||
|
||||
m.def("calculate_pedestal", &pybind_calculate_pedestal<double>,
|
||||
R"(
|
||||
Calculate the pedestal for all three gains and return the result as a 3D array of doubles.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
raw_data : array_like
|
||||
3D array of shape (frames, rows, cols) to calculate the pedestal from.
|
||||
Needs to contain data for all three gains (G0, G1, G2).
|
||||
n_threads : int
|
||||
The number of threads to use for the calculation.
|
||||
)",
|
||||
py::arg("raw_data").noconvert(), py::arg("n_threads") = 4);
|
||||
|
||||
m.def("calculate_pedestal_float", &pybind_calculate_pedestal<float>,
|
||||
R"(
|
||||
Same as `calculate_pedestal` but returns a 3D array of floats.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
raw_data : array_like
|
||||
3D array of shape (frames, rows, cols) to calculate the pedestal from.
|
||||
Needs to contain data for all three gains (G0, G1, G2).
|
||||
n_threads : int
|
||||
The number of threads to use for the calculation.
|
||||
)",
|
||||
py::arg("raw_data").noconvert(), py::arg("n_threads") = 4);
|
||||
|
||||
m.def("calculate_pedestal_g0", &pybind_calculate_pedestal_g0<double>,
|
||||
R"(
|
||||
Calculate the pedestal for G0 and return the result as a 2D array of doubles.
|
||||
Pixels in G1 and G2 are ignored.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
raw_data : array_like
|
||||
3D array of shape (frames, rows, cols) to calculate the pedestal from.
|
||||
n_threads : int
|
||||
The number of threads to use for the calculation.
|
||||
)",
|
||||
py::arg("raw_data").noconvert(), py::arg("n_threads") = 4);
|
||||
|
||||
m.def("calculate_pedestal_g0_float", &pybind_calculate_pedestal_g0<float>,
|
||||
R"(
|
||||
Same as `calculate_pedestal_g0` but returns a 2D array of floats.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
raw_data : array_like
|
||||
3D array of shape (frames, rows, cols) to calculate the pedestal from.
|
||||
n_threads : int
|
||||
The number of threads to use for the calculation.
|
||||
)",
|
||||
py::arg("raw_data").noconvert(), py::arg("n_threads") = 4);
|
||||
}
|
||||
@@ -1,211 +0,0 @@
|
||||
#include "aare/ClusterCollector.hpp"
|
||||
#include "aare/ClusterFileSink.hpp"
|
||||
#include "aare/ClusterFinder.hpp"
|
||||
#include "aare/ClusterFinderMT.hpp"
|
||||
#include "aare/ClusterVector.hpp"
|
||||
#include "aare/NDView.hpp"
|
||||
#include "aare/Pedestal.hpp"
|
||||
#include "np_helper.hpp"
|
||||
|
||||
#include <cstdint>
|
||||
#include <filesystem>
|
||||
#include <pybind11/pybind11.h>
|
||||
#include <pybind11/stl.h>
|
||||
#include <pybind11/stl_bind.h>
|
||||
|
||||
namespace py = pybind11;
|
||||
using pd_type = double;
|
||||
|
||||
using namespace aare;
|
||||
|
||||
#pragma GCC diagnostic push
|
||||
#pragma GCC diagnostic ignored "-Wunused-parameter"
|
||||
|
||||
template <typename Type, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType>
|
||||
void define_cluster(py::module &m, const std::string &typestr) {
|
||||
auto class_name = fmt::format("Cluster{}", typestr);
|
||||
|
||||
py::class_<Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>>(
|
||||
m, class_name.c_str(), py::buffer_protocol())
|
||||
|
||||
.def(py::init([](uint8_t x, uint8_t y, py::array_t<Type> data) {
|
||||
py::buffer_info buf_info = data.request();
|
||||
Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType> cluster;
|
||||
cluster.x = x;
|
||||
cluster.y = y;
|
||||
auto r = data.template unchecked<1>(); // no bounds checks
|
||||
for (py::ssize_t i = 0; i < data.size(); ++i) {
|
||||
cluster.data[i] = r(i);
|
||||
}
|
||||
return cluster;
|
||||
}));
|
||||
|
||||
/*
|
||||
.def_property(
|
||||
"data",
|
||||
[](ClusterType &c) -> py::array {
|
||||
return py::array(py::buffer_info(
|
||||
c.data, sizeof(Type),
|
||||
py::format_descriptor<Type>::format(), // Type
|
||||
// format
|
||||
1, // Number of dimensions
|
||||
{static_cast<ssize_t>(ClusterSizeX *
|
||||
ClusterSizeY)}, // Shape (flattened)
|
||||
{sizeof(Type)} // Stride (step size between elements)
|
||||
));
|
||||
},
|
||||
[](ClusterType &c, py::array_t<Type> arr) {
|
||||
py::buffer_info buf_info = arr.request();
|
||||
Type *ptr = static_cast<Type *>(buf_info.ptr);
|
||||
std::copy(ptr, ptr + ClusterSizeX * ClusterSizeY,
|
||||
c.data); // TODO dont iterate over centers!!!
|
||||
|
||||
});
|
||||
*/
|
||||
}
|
||||
|
||||
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType = uint16_t>
|
||||
void define_cluster_finder_mt_bindings(py::module &m,
|
||||
const std::string &typestr) {
|
||||
auto class_name = fmt::format("ClusterFinderMT_{}", typestr);
|
||||
|
||||
using ClusterType = Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>;
|
||||
|
||||
py::class_<ClusterFinderMT<ClusterType, uint16_t, pd_type>>(
|
||||
m, class_name.c_str())
|
||||
.def(py::init<Shape<2>, pd_type, size_t, size_t>(),
|
||||
py::arg("image_size"), py::arg("n_sigma") = 5.0,
|
||||
py::arg("capacity") = 2048, py::arg("n_threads") = 3)
|
||||
.def("push_pedestal_frame",
|
||||
[](ClusterFinderMT<ClusterType, uint16_t, pd_type> &self,
|
||||
py::array_t<uint16_t> frame) {
|
||||
auto view = make_view_2d(frame);
|
||||
self.push_pedestal_frame(view);
|
||||
})
|
||||
.def(
|
||||
"find_clusters",
|
||||
[](ClusterFinderMT<ClusterType, uint16_t, pd_type> &self,
|
||||
py::array_t<uint16_t> frame, uint64_t frame_number) {
|
||||
auto view = make_view_2d(frame);
|
||||
self.find_clusters(view, frame_number);
|
||||
return;
|
||||
},
|
||||
py::arg(), py::arg("frame_number") = 0)
|
||||
.def_property_readonly("cluster_size", [](ClusterFinderMT<ClusterType, uint16_t, pd_type> &self){
|
||||
return py::make_tuple(ClusterSizeX, ClusterSizeY);
|
||||
})
|
||||
.def("clear_pedestal",
|
||||
&ClusterFinderMT<ClusterType, uint16_t, pd_type>::clear_pedestal)
|
||||
.def("sync", &ClusterFinderMT<ClusterType, uint16_t, pd_type>::sync)
|
||||
.def("stop", &ClusterFinderMT<ClusterType, uint16_t, pd_type>::stop)
|
||||
.def("start", &ClusterFinderMT<ClusterType, uint16_t, pd_type>::start)
|
||||
.def(
|
||||
"pedestal",
|
||||
[](ClusterFinderMT<ClusterType, uint16_t, pd_type> &self,
|
||||
size_t thread_index) {
|
||||
auto pd = new NDArray<pd_type, 2>{};
|
||||
*pd = self.pedestal(thread_index);
|
||||
return return_image_data(pd);
|
||||
},
|
||||
py::arg("thread_index") = 0)
|
||||
.def(
|
||||
"noise",
|
||||
[](ClusterFinderMT<ClusterType, uint16_t, pd_type> &self,
|
||||
size_t thread_index) {
|
||||
auto arr = new NDArray<pd_type, 2>{};
|
||||
*arr = self.noise(thread_index);
|
||||
return return_image_data(arr);
|
||||
},
|
||||
py::arg("thread_index") = 0);
|
||||
}
|
||||
|
||||
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType = uint16_t>
|
||||
void define_cluster_collector_bindings(py::module &m,
|
||||
const std::string &typestr) {
|
||||
auto class_name = fmt::format("ClusterCollector_{}", typestr);
|
||||
|
||||
using ClusterType = Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>;
|
||||
|
||||
py::class_<ClusterCollector<ClusterType>>(m, class_name.c_str())
|
||||
.def(py::init<ClusterFinderMT<ClusterType, uint16_t, double> *>())
|
||||
.def("stop", &ClusterCollector<ClusterType>::stop)
|
||||
.def(
|
||||
"steal_clusters",
|
||||
[](ClusterCollector<ClusterType> &self) {
|
||||
auto v = new std::vector<ClusterVector<ClusterType>>(
|
||||
self.steal_clusters());
|
||||
return v; // TODO change!!!
|
||||
},
|
||||
py::return_value_policy::take_ownership);
|
||||
}
|
||||
|
||||
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType = uint16_t>
|
||||
void define_cluster_file_sink_bindings(py::module &m,
|
||||
const std::string &typestr) {
|
||||
auto class_name = fmt::format("ClusterFileSink_{}", typestr);
|
||||
|
||||
using ClusterType = Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>;
|
||||
|
||||
py::class_<ClusterFileSink<ClusterType>>(m, class_name.c_str())
|
||||
.def(py::init<ClusterFinderMT<ClusterType, uint16_t, double> *,
|
||||
const std::filesystem::path &>())
|
||||
.def("stop", &ClusterFileSink<ClusterType>::stop);
|
||||
}
|
||||
|
||||
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
          typename CoordType = uint16_t>
void define_cluster_finder_bindings(py::module &m, const std::string &typestr) {
    auto class_name = fmt::format("ClusterFinder_{}", typestr);

    using ClusterType = Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>;

    py::class_<ClusterFinder<ClusterType, uint16_t, pd_type>>(
        m, class_name.c_str())
        .def(py::init<Shape<2>, pd_type, size_t>(), py::arg("image_size"),
             py::arg("n_sigma") = 5.0, py::arg("capacity") = 1'000'000)
        .def("push_pedestal_frame",
             [](ClusterFinder<ClusterType, uint16_t, pd_type> &self,
                py::array_t<uint16_t> frame) {
                 auto view = make_view_2d(frame);
                 self.push_pedestal_frame(view);
             })
        .def("clear_pedestal",
             &ClusterFinder<ClusterType, uint16_t, pd_type>::clear_pedestal)
        .def_property_readonly(
            "pedestal",
            [](ClusterFinder<ClusterType, uint16_t, pd_type> &self) {
                auto pd = new NDArray<pd_type, 2>{};
                *pd = self.pedestal();
                return return_image_data(pd);
            })
        .def_property_readonly(
            "noise",
            [](ClusterFinder<ClusterType, uint16_t, pd_type> &self) {
                auto arr = new NDArray<pd_type, 2>{};
                *arr = self.noise();
                return return_image_data(arr);
            })
        .def(
            "steal_clusters",
            [](ClusterFinder<ClusterType, uint16_t, pd_type> &self,
               bool realloc_same_capacity) {
                ClusterVector<ClusterType> clusters =
                    self.steal_clusters(realloc_same_capacity);
                return clusters;
            },
            py::arg("realloc_same_capacity") = false)
        .def(
            "find_clusters",
            [](ClusterFinder<ClusterType, uint16_t, pd_type> &self,
               py::array_t<uint16_t> frame, uint64_t frame_number) {
                auto view = make_view_2d(frame);
                self.find_clusters(view, frame_number);
                return;
            },
            py::arg(), py::arg("frame_number") = 0);
}
|
||||
#pragma GCC diagnostic pop
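For orientation, a minimal Python-side sketch of driving the ClusterFinder bindings above. The class name follows the ClusterFinder_{typestr} pattern from this file; the 400x400 geometry, the tuple-to-Shape<2> conversion and the zero-filled frames are illustrative assumptions, not part of this change.

import numpy as np
from aare import _aare

# Assumed 400x400 detector; n_sigma and capacity keep the bound defaults.
finder = _aare.ClusterFinder_Cluster3x3i((400, 400), n_sigma=5.0)

# Build up pedestal and noise estimates from (here: dummy) dark frames.
for _ in range(100):
    finder.push_pedestal_frame(np.zeros((400, 400), dtype=np.uint16))

# Search one data frame, then take ownership of the accumulated clusters.
finder.find_clusters(np.zeros((400, 400), dtype=np.uint16), frame_number=1)
clusters = finder.steal_clusters(realloc_same_capacity=False)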
|
||||
@@ -6,8 +6,8 @@
#include "aare/RawMasterFile.hpp"
#include "aare/RawSubFile.hpp"

#include "aare/defs.hpp"
#include "aare/decode.hpp"
#include "aare/defs.hpp"
// #include "aare/fClusterFileV2.hpp"

#include "np_helper.hpp"
@@ -26,95 +26,103 @@ using namespace ::aare;

void define_ctb_raw_file_io_bindings(py::module &m) {

    m.def("adc_sar_05_decode64to16", [](py::array_t<uint8_t> input) {
        if (input.ndim() != 2) {
            throw std::runtime_error(
                "Only 2D arrays are supported at this moment");
        }

        // Create a 2D output array with the same shape as the input
        std::vector<ssize_t> shape{input.shape(0),
                                   input.shape(1) /
                                       static_cast<ssize_t>(bits_per_byte)};
        py::array_t<uint16_t> output(shape);

        // Create a view of the input and output arrays
        NDView<uint64_t, 2> input_view(
            reinterpret_cast<uint64_t *>(input.mutable_data()),
            {output.shape(0), output.shape(1)});
        NDView<uint16_t, 2> output_view(output.mutable_data(),
                                        {output.shape(0), output.shape(1)});

        adc_sar_05_decode64to16(input_view, output_view);

        return output;
    });

    m.def("adc_sar_04_decode64to16", [](py::array_t<uint8_t> input) {
        if (input.ndim() != 2) {
            throw std::runtime_error(
                "Only 2D arrays are supported at this moment");
        }

        // Create a 2D output array with the same shape as the input
        std::vector<ssize_t> shape{input.shape(0),
                                   input.shape(1) /
                                       static_cast<ssize_t>(bits_per_byte)};
        py::array_t<uint16_t> output(shape);

        // Create a view of the input and output arrays
        NDView<uint64_t, 2> input_view(
            reinterpret_cast<uint64_t *>(input.mutable_data()),
            {output.shape(0), output.shape(1)});
        NDView<uint16_t, 2> output_view(output.mutable_data(),
                                        {output.shape(0), output.shape(1)});

        adc_sar_04_decode64to16(input_view, output_view);

        return output;
    });

    m.def("apply_custom_weights",
          [](py::array_t<uint16_t, py::array::c_style | py::array::forcecast>
                 &input,
             py::array_t<double, py::array::c_style | py::array::forcecast>
                 &weights) {
              // Create new array with same shape as the input array
              // (uninitialized values)
              py::buffer_info buf = input.request();
              py::array_t<double> output(buf.shape);

              // Use NDViews to call into the C++ library
              auto weights_view = make_view_1d(weights);
              NDView<uint16_t, 1> input_view(input.mutable_data(),
                                             {input.size()});
              NDView<double, 1> output_view(output.mutable_data(),
                                            {output.size()});

              apply_custom_weights(input_view, output_view, weights_view);
              return output;
          });

    py::class_<CtbRawFile>(m, "CtbRawFile")
        .def(py::init<const std::filesystem::path &>())
        .def("read_frame",
             [](CtbRawFile &self) {
                 size_t image_size = self.image_size_in_bytes();
                 py::array image;
                 std::vector<ssize_t> shape;
                 shape.reserve(2);
                 shape.push_back(1);
                 shape.push_back(image_size);

                 py::array_t<DetectorHeader> header(1);

                 // always read bytes
                 image = py::array_t<uint8_t>(shape);

                 self.read_into(
                     reinterpret_cast<std::byte *>(image.mutable_data()),
                     header.mutable_data());

                 return py::make_tuple(header, image);
             })
        .def("seek", &CtbRawFile::seek)
        .def("tell", &CtbRawFile::tell)
        .def("master", &CtbRawFile::master)

        .def_property_readonly("image_size_in_bytes",
                               &CtbRawFile::image_size_in_bytes)

        .def_property_readonly("frames_in_file", &CtbRawFile::frames_in_file);
}
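A hedged sketch of the Python side of the CtbRawFile binding above; the file name is a placeholder, and the str-to-std::filesystem::path conversion is assumed to be enabled elsewhere in the module.

from aare import _aare

f = _aare.CtbRawFile("run_master_0.json")   # placeholder CTB master file
header, data = f.read_frame()               # data: (1, image_size_in_bytes) uint8 array
print(f.frames_in_file, f.image_size_in_bytes)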
|
||||
|
||||
@@ -20,17 +20,13 @@
|
||||
namespace py = pybind11;
|
||||
using namespace ::aare;
|
||||
|
||||
|
||||
|
||||
|
||||
//Disable warnings for unused parameters, as we ignore some
|
||||
//in the __exit__ method
|
||||
// Disable warnings for unused parameters, as we ignore some
|
||||
// in the __exit__ method
|
||||
#pragma GCC diagnostic push
|
||||
#pragma GCC diagnostic ignored "-Wunused-parameter"
|
||||
|
||||
void define_file_io_bindings(py::module &m) {
|
||||
|
||||
|
||||
py::enum_<DetectorType>(m, "DetectorType")
|
||||
.value("Jungfrau", DetectorType::Jungfrau)
|
||||
.value("Eiger", DetectorType::Eiger)
|
||||
@@ -41,13 +37,10 @@ void define_file_io_bindings(py::module &m) {
|
||||
.value("ChipTestBoard", DetectorType::ChipTestBoard)
|
||||
.value("Unknown", DetectorType::Unknown);
|
||||
|
||||
|
||||
PYBIND11_NUMPY_DTYPE(DetectorHeader, frameNumber, expLength, packetNumber,
|
||||
bunchId, timestamp, modId, row, column, reserved,
|
||||
debug, roundRNumber, detType, version, packetMask);
|
||||
|
||||
|
||||
|
||||
py::class_<File>(m, "File")
|
||||
.def(py::init([](const std::filesystem::path &fname) {
|
||||
return File(fname, "r", {});
|
||||
@@ -112,45 +105,18 @@ void define_file_io_bindings(py::module &m) {
|
||||
reinterpret_cast<std::byte *>(image.mutable_data()));
|
||||
return image;
|
||||
})
|
||||
.def("read_n", [](File &self, size_t n_frames) {
|
||||
//adjust for actual frames left in the file
|
||||
n_frames = std::min(n_frames, self.total_frames()-self.tell());
|
||||
if(n_frames == 0){
|
||||
throw std::runtime_error("No frames left in file");
|
||||
}
|
||||
std::vector<size_t> shape{n_frames, self.rows(), self.cols()};
|
||||
|
||||
py::array image;
|
||||
const uint8_t item_size = self.bytes_per_pixel();
|
||||
if (item_size == 1) {
|
||||
image = py::array_t<uint8_t>(shape);
|
||||
} else if (item_size == 2) {
|
||||
image = py::array_t<uint16_t>(shape);
|
||||
} else if (item_size == 4) {
|
||||
image = py::array_t<uint32_t>(shape);
|
||||
}
|
||||
self.read_into(reinterpret_cast<std::byte *>(image.mutable_data()),
|
||||
n_frames);
|
||||
return image;
|
||||
})
|
||||
.def("__enter__", [](File &self) { return &self; })
|
||||
.def("__exit__",
|
||||
[](File &self,
|
||||
const std::optional<pybind11::type> &exc_type,
|
||||
const std::optional<pybind11::object> &exc_value,
|
||||
const std::optional<pybind11::object> &traceback) {
|
||||
// self.close();
|
||||
})
|
||||
.def("__iter__", [](File &self) { return &self; })
|
||||
.def("__next__", [](File &self) {
|
||||
.def("read_n",
|
||||
[](File &self, size_t n_frames) {
|
||||
// adjust for actual frames left in the file
|
||||
n_frames =
|
||||
std::min(n_frames, self.total_frames() - self.tell());
|
||||
if (n_frames == 0) {
|
||||
throw std::runtime_error("No frames left in file");
|
||||
}
|
||||
std::vector<size_t> shape{n_frames, self.rows(), self.cols()};
|
||||
|
||||
try{
|
||||
const uint8_t item_size = self.bytes_per_pixel();
|
||||
py::array image;
|
||||
std::vector<ssize_t> shape;
|
||||
shape.reserve(2);
|
||||
shape.push_back(self.rows());
|
||||
shape.push_back(self.cols());
|
||||
const uint8_t item_size = self.bytes_per_pixel();
|
||||
if (item_size == 1) {
|
||||
image = py::array_t<uint8_t>(shape);
|
||||
} else if (item_size == 2) {
|
||||
@@ -159,14 +125,41 @@ void define_file_io_bindings(py::module &m) {
|
||||
image = py::array_t<uint32_t>(shape);
|
||||
}
|
||||
self.read_into(
|
||||
reinterpret_cast<std::byte *>(image.mutable_data()));
|
||||
reinterpret_cast<std::byte *>(image.mutable_data()),
|
||||
n_frames);
|
||||
return image;
|
||||
}catch(std::runtime_error &e){
|
||||
})
|
||||
.def("__enter__", [](File &self) { return &self; })
|
||||
.def("__exit__",
|
||||
[](File &self, const std::optional<pybind11::type> &exc_type,
|
||||
const std::optional<pybind11::object> &exc_value,
|
||||
const std::optional<pybind11::object> &traceback) {
|
||||
// self.close();
|
||||
})
|
||||
.def("__iter__", [](File &self) { return &self; })
|
||||
.def("__next__", [](File &self) {
|
||||
try {
|
||||
const uint8_t item_size = self.bytes_per_pixel();
|
||||
py::array image;
|
||||
std::vector<ssize_t> shape;
|
||||
shape.reserve(2);
|
||||
shape.push_back(self.rows());
|
||||
shape.push_back(self.cols());
|
||||
if (item_size == 1) {
|
||||
image = py::array_t<uint8_t>(shape);
|
||||
} else if (item_size == 2) {
|
||||
image = py::array_t<uint16_t>(shape);
|
||||
} else if (item_size == 4) {
|
||||
image = py::array_t<uint32_t>(shape);
|
||||
}
|
||||
self.read_into(
|
||||
reinterpret_cast<std::byte *>(image.mutable_data()));
|
||||
return image;
|
||||
} catch (std::runtime_error &e) {
|
||||
throw py::stop_iteration();
|
||||
}
|
||||
});
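What the read_n and __iter__/__next__ bindings above give you on the Python side, as a short sketch; the master-file name is a placeholder and File is assumed to be re-exported by the aare package like the other readers.

from aare import File

with File("some_master_0.json") as f:   # placeholder file name
    block = f.read_n(10)                # 3D array: (n_frames, rows, cols)
    for frame in f:                     # stops via py::stop_iteration at end of file
        pass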
|
||||
|
||||
|
||||
py::class_<FileConfig>(m, "FileConfig")
|
||||
.def(py::init<>())
|
||||
.def_readwrite("rows", &FileConfig::rows)
|
||||
@@ -183,8 +176,6 @@ void define_file_io_bindings(py::module &m) {
|
||||
return "<FileConfig: " + a.to_string() + ">";
|
||||
});
|
||||
|
||||
|
||||
|
||||
py::class_<ScanParameters>(m, "ScanParameters")
|
||||
.def(py::init<const std::string &>())
|
||||
.def(py::init<const ScanParameters &>())
|
||||
@@ -195,32 +186,29 @@ void define_file_io_bindings(py::module &m) {
|
||||
.def_property_readonly("stop", &ScanParameters::stop)
|
||||
.def_property_readonly("step", &ScanParameters::step);
|
||||
|
||||
|
||||
py::class_<ROI>(m, "ROI")
|
||||
.def(py::init<>())
|
||||
.def(py::init<int64_t, int64_t, int64_t, int64_t>(), py::arg("xmin"),
|
||||
.def(py::init<ssize_t, ssize_t, ssize_t, ssize_t>(), py::arg("xmin"),
|
||||
py::arg("xmax"), py::arg("ymin"), py::arg("ymax"))
|
||||
.def_readwrite("xmin", &ROI::xmin)
|
||||
.def_readwrite("xmax", &ROI::xmax)
|
||||
.def_readwrite("ymin", &ROI::ymin)
|
||||
.def_readwrite("ymax", &ROI::ymax)
|
||||
.def("__str__", [](const ROI& self){
|
||||
return fmt::format("ROI: xmin: {} xmax: {} ymin: {} ymax: {}", self.xmin, self.xmax, self.ymin, self.ymax);
|
||||
})
|
||||
.def("__repr__", [](const ROI& self){
|
||||
return fmt::format("<ROI: xmin: {} xmax: {} ymin: {} ymax: {}>", self.xmin, self.xmax, self.ymin, self.ymax);
|
||||
})
|
||||
.def("__str__",
|
||||
[](const ROI &self) {
|
||||
return fmt::format("ROI: xmin: {} xmax: {} ymin: {} ymax: {}",
|
||||
self.xmin, self.xmax, self.ymin, self.ymax);
|
||||
})
|
||||
.def("__repr__",
|
||||
[](const ROI &self) {
|
||||
return fmt::format(
|
||||
"<ROI: xmin: {} xmax: {} ymin: {} ymax: {}>", self.xmin,
|
||||
self.xmax, self.ymin, self.ymax);
|
||||
})
|
||||
.def("__iter__", [](const ROI &self) {
|
||||
return py::make_iterator(&self.xmin, &self.ymax+1); //NOLINT
|
||||
return py::make_iterator(&self.xmin, &self.ymax + 1); // NOLINT
|
||||
});
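Because __iter__ above walks the four members in declaration order, an ROI unpacks directly in Python; a brief illustrative example:

from aare import _aare

roi = _aare.ROI(xmin=0, xmax=100, ymin=10, ymax=50)
xmin, xmax, ymin, ymax = roi   # iteration spans &xmin .. &ymax
print(roi)                     # ROI: xmin: 0 xmax: 100 ymin: 10 ymax: 50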
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
#pragma GCC diagnostic pop
|
||||
// py::class_<ClusterHeader>(m, "ClusterHeader")
|
||||
// .def(py::init<>())
|
||||
|
||||
@@ -9,7 +9,6 @@
|
||||
namespace py = pybind11;
|
||||
using namespace pybind11::literals;
|
||||
|
||||
|
||||
void define_fit_bindings(py::module &m) {
|
||||
|
||||
// TODO! Evaluate without converting to double
|
||||
@@ -55,6 +54,49 @@ void define_fit_bindings(py::module &m) {
|
||||
)",
|
||||
py::arg("x"), py::arg("par"));
|
||||
|
||||
m.def(
|
||||
"scurve",
|
||||
[](py::array_t<double, py::array::c_style | py::array::forcecast> x,
|
||||
py::array_t<double, py::array::c_style | py::array::forcecast> par) {
|
||||
auto x_view = make_view_1d(x);
|
||||
auto par_view = make_view_1d(par);
|
||||
auto y =
|
||||
new NDArray<double, 1>{aare::func::scurve(x_view, par_view)};
|
||||
return return_image_data(y);
|
||||
},
|
||||
R"(
|
||||
Evaluate a 1D scurve function for all points in x using parameters par.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x : array_like
|
||||
The points at which to evaluate the scurve function.
|
||||
par : array_like
|
||||
The parameters of the scurve function. The first element is the background slope, the second element is the background intercept, the third element is the mean, the fourth element is the standard deviation, the fifth element is inflexion point count number, and the sixth element is C.
|
||||
)",
|
||||
py::arg("x"), py::arg("par"));
|
||||
|
||||
m.def(
|
||||
"scurve2",
|
||||
[](py::array_t<double, py::array::c_style | py::array::forcecast> x,
|
||||
py::array_t<double, py::array::c_style | py::array::forcecast> par) {
|
||||
auto x_view = make_view_1d(x);
|
||||
auto par_view = make_view_1d(par);
|
||||
auto y =
|
||||
new NDArray<double, 1>{aare::func::scurve2(x_view, par_view)};
|
||||
return return_image_data(y);
|
||||
},
|
||||
R"(
|
||||
Evaluate a 1D scurve2 function for all points in x using parameters par.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x : array_like
|
||||
The points at which to evaluate the scurve function.
|
||||
par : array_like
|
||||
The parameters of the scurve2 function. The first element is the background slope, the second element is the background intercept, the third element is the mean, the fourth element is the standard deviation, the fifth element is inflexion point count number, and the sixth element is C.
|
||||
)",
|
||||
py::arg("x"), py::arg("par"));
|
||||
|
||||
m.def(
|
||||
"fit_gaus",
|
||||
@@ -98,7 +140,6 @@ n_threads : int, optional
|
||||
py::array_t<double, py::array::c_style | py::array::forcecast> y,
|
||||
py::array_t<double, py::array::c_style | py::array::forcecast> y_err,
|
||||
int n_threads) {
|
||||
|
||||
if (y.ndim() == 3) {
|
||||
// Allocate memory for the output
|
||||
// Need to have pointers to allow python to manage
|
||||
@@ -132,7 +173,6 @@ n_threads : int, optional
|
||||
auto y_view_err = make_view_1d(y_err);
|
||||
auto x_view = make_view_1d(x);
|
||||
|
||||
|
||||
double chi2 = 0;
|
||||
aare::fit_gaus(x_view, y_view, y_view_err, par->view(),
|
||||
par_err->view(), chi2);
|
||||
@@ -207,11 +247,10 @@ n_threads : int, optional
|
||||
aare::fit_pol1(x_view, y_view, y_view_err, par->view(),
|
||||
par_err->view(), chi2->view(), n_threads);
|
||||
return py::dict("par"_a = return_image_data(par),
|
||||
"par_err"_a = return_image_data(par_err),
|
||||
"par_err"_a = return_image_data(par_err),
|
||||
"chi2"_a = return_image_data(chi2),
|
||||
"Ndf"_a = y.shape(2) - 2);
|
||||
|
||||
|
||||
} else if (y.ndim() == 1) {
|
||||
auto par = new NDArray<double, 1>({2});
|
||||
auto par_err = new NDArray<double, 1>({2});
|
||||
@@ -235,6 +274,177 @@ n_threads : int, optional
|
||||
R"(
|
||||
Fit a 1D polynomial to data with error estimates.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x : array_like
|
||||
The x values.
|
||||
y : array_like
|
||||
The y values.
|
||||
y_err : array_like
|
||||
The error in the y values.
|
||||
n_threads : int, optional
|
||||
The number of threads to use. Default is 4.
|
||||
)",
|
||||
py::arg("x"), py::arg("y"), py::arg("y_err"), py::arg("n_threads") = 4);
|
||||
|
||||
//=========
|
||||
m.def(
|
||||
"fit_scurve",
|
||||
[](py::array_t<double, py::array::c_style | py::array::forcecast> x,
|
||||
py::array_t<double, py::array::c_style | py::array::forcecast> y,
|
||||
int n_threads) {
|
||||
if (y.ndim() == 3) {
|
||||
auto par = new NDArray<double, 3>{};
|
||||
|
||||
auto x_view = make_view_1d(x);
|
||||
auto y_view = make_view_3d(y);
|
||||
*par = aare::fit_scurve(x_view, y_view, n_threads);
|
||||
return return_image_data(par);
|
||||
} else if (y.ndim() == 1) {
|
||||
auto par = new NDArray<double, 1>{};
|
||||
auto x_view = make_view_1d(x);
|
||||
auto y_view = make_view_1d(y);
|
||||
*par = aare::fit_scurve(x_view, y_view);
|
||||
return return_image_data(par);
|
||||
} else {
|
||||
throw std::runtime_error("Data must be 1D or 3D");
|
||||
}
|
||||
},
|
||||
py::arg("x"), py::arg("y"), py::arg("n_threads") = 4);
|
||||
|
||||
m.def(
|
||||
"fit_scurve",
|
||||
[](py::array_t<double, py::array::c_style | py::array::forcecast> x,
|
||||
py::array_t<double, py::array::c_style | py::array::forcecast> y,
|
||||
py::array_t<double, py::array::c_style | py::array::forcecast> y_err,
|
||||
int n_threads) {
|
||||
if (y.ndim() == 3) {
|
||||
auto par = new NDArray<double, 3>({y.shape(0), y.shape(1), 6});
|
||||
|
||||
auto par_err =
|
||||
new NDArray<double, 3>({y.shape(0), y.shape(1), 6});
|
||||
|
||||
auto y_view = make_view_3d(y);
|
||||
auto y_view_err = make_view_3d(y_err);
|
||||
auto x_view = make_view_1d(x);
|
||||
|
||||
auto chi2 = new NDArray<double, 2>({y.shape(0), y.shape(1)});
|
||||
|
||||
aare::fit_scurve(x_view, y_view, y_view_err, par->view(),
|
||||
par_err->view(), chi2->view(), n_threads);
|
||||
return py::dict("par"_a = return_image_data(par),
|
||||
"par_err"_a = return_image_data(par_err),
|
||||
"chi2"_a = return_image_data(chi2),
|
||||
"Ndf"_a = y.shape(2) - 2);
|
||||
|
||||
} else if (y.ndim() == 1) {
|
||||
auto par = new NDArray<double, 1>({2});
|
||||
auto par_err = new NDArray<double, 1>({2});
|
||||
|
||||
auto y_view = make_view_1d(y);
|
||||
auto y_view_err = make_view_1d(y_err);
|
||||
auto x_view = make_view_1d(x);
|
||||
|
||||
double chi2 = 0;
|
||||
|
||||
aare::fit_scurve(x_view, y_view, y_view_err, par->view(),
|
||||
par_err->view(), chi2);
|
||||
return py::dict("par"_a = return_image_data(par),
|
||||
"par_err"_a = return_image_data(par_err),
|
||||
"chi2"_a = chi2, "Ndf"_a = y.size() - 2);
|
||||
|
||||
} else {
|
||||
throw std::runtime_error("Data must be 1D or 3D");
|
||||
}
|
||||
},
|
||||
R"(
|
||||
Fit a 1D scurve to data with error estimates.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x : array_like
|
||||
The x values.
|
||||
y : array_like
|
||||
The y values.
|
||||
y_err : array_like
|
||||
The error in the y values.
|
||||
n_threads : int, optional
|
||||
The number of threads to use. Default is 4.
|
||||
)",
|
||||
py::arg("x"), py::arg("y"), py::arg("y_err"), py::arg("n_threads") = 4);
|
||||
|
||||
m.def(
|
||||
"fit_scurve2",
|
||||
[](py::array_t<double, py::array::c_style | py::array::forcecast> x,
|
||||
py::array_t<double, py::array::c_style | py::array::forcecast> y,
|
||||
int n_threads) {
|
||||
if (y.ndim() == 3) {
|
||||
auto par = new NDArray<double, 3>{};
|
||||
|
||||
auto x_view = make_view_1d(x);
|
||||
auto y_view = make_view_3d(y);
|
||||
*par = aare::fit_scurve2(x_view, y_view, n_threads);
|
||||
return return_image_data(par);
|
||||
} else if (y.ndim() == 1) {
|
||||
auto par = new NDArray<double, 1>{};
|
||||
auto x_view = make_view_1d(x);
|
||||
auto y_view = make_view_1d(y);
|
||||
*par = aare::fit_scurve2(x_view, y_view);
|
||||
return return_image_data(par);
|
||||
} else {
|
||||
throw std::runtime_error("Data must be 1D or 3D");
|
||||
}
|
||||
},
|
||||
py::arg("x"), py::arg("y"), py::arg("n_threads") = 4);
|
||||
|
||||
m.def(
|
||||
"fit_scurve2",
|
||||
[](py::array_t<double, py::array::c_style | py::array::forcecast> x,
|
||||
py::array_t<double, py::array::c_style | py::array::forcecast> y,
|
||||
py::array_t<double, py::array::c_style | py::array::forcecast> y_err,
|
||||
int n_threads) {
|
||||
if (y.ndim() == 3) {
|
||||
auto par = new NDArray<double, 3>({y.shape(0), y.shape(1), 6});
|
||||
|
||||
auto par_err =
|
||||
new NDArray<double, 3>({y.shape(0), y.shape(1), 6});
|
||||
|
||||
auto y_view = make_view_3d(y);
|
||||
auto y_view_err = make_view_3d(y_err);
|
||||
auto x_view = make_view_1d(x);
|
||||
|
||||
auto chi2 = new NDArray<double, 2>({y.shape(0), y.shape(1)});
|
||||
|
||||
aare::fit_scurve2(x_view, y_view, y_view_err, par->view(),
|
||||
par_err->view(), chi2->view(), n_threads);
|
||||
return py::dict("par"_a = return_image_data(par),
|
||||
"par_err"_a = return_image_data(par_err),
|
||||
"chi2"_a = return_image_data(chi2),
|
||||
"Ndf"_a = y.shape(2) - 2);
|
||||
|
||||
} else if (y.ndim() == 1) {
|
||||
auto par = new NDArray<double, 1>({6});
|
||||
auto par_err = new NDArray<double, 1>({6});
|
||||
|
||||
auto y_view = make_view_1d(y);
|
||||
auto y_view_err = make_view_1d(y_err);
|
||||
auto x_view = make_view_1d(x);
|
||||
|
||||
double chi2 = 0;
|
||||
|
||||
aare::fit_scurve2(x_view, y_view, y_view_err, par->view(),
|
||||
par_err->view(), chi2);
|
||||
return py::dict("par"_a = return_image_data(par),
|
||||
"par_err"_a = return_image_data(par_err),
|
||||
"chi2"_a = chi2, "Ndf"_a = y.size() - 2);
|
||||
|
||||
} else {
|
||||
throw std::runtime_error("Data must be 1D or 3D");
|
||||
}
|
||||
},
|
||||
R"(
|
||||
Fit a 1D scurve2 to data with error estimates.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x : array_like
|
||||
|
||||
@@ -21,10 +21,7 @@ using namespace ::aare;
|
||||
|
||||
auto read_dat_frame(JungfrauDataFile &self) {
|
||||
py::array_t<JungfrauDataHeader> header(1);
|
||||
py::array_t<uint16_t> image({
|
||||
self.rows(),
|
||||
self.cols()
|
||||
});
|
||||
py::array_t<uint16_t> image({self.rows(), self.cols()});
|
||||
|
||||
self.read_into(reinterpret_cast<std::byte *>(image.mutable_data()),
|
||||
header.mutable_data());
|
||||
@@ -40,9 +37,7 @@ auto read_n_dat_frames(JungfrauDataFile &self, size_t n_frames) {
|
||||
}
|
||||
|
||||
py::array_t<JungfrauDataHeader> header(n_frames);
|
||||
py::array_t<uint16_t> image({
|
||||
n_frames, self.rows(),
|
||||
self.cols()});
|
||||
py::array_t<uint16_t> image({n_frames, self.rows(), self.cols()});
|
||||
|
||||
self.read_into(reinterpret_cast<std::byte *>(image.mutable_data()),
|
||||
n_frames, header.mutable_data());
|
||||
|
||||
@@ -1,22 +1,27 @@
|
||||
// Files with bindings to the different classes
|
||||
|
||||
//New style file naming
|
||||
// New style file naming
|
||||
#include "bind_Cluster.hpp"
|
||||
#include "bind_ClusterCollector.hpp"
|
||||
#include "bind_ClusterFile.hpp"
|
||||
#include "bind_ClusterFileSink.hpp"
|
||||
#include "bind_ClusterFinder.hpp"
|
||||
#include "bind_ClusterFinderMT.hpp"
|
||||
#include "bind_ClusterVector.hpp"
|
||||
#include "bind_calibration.hpp"
|
||||
|
||||
//TODO! migrate the other names
|
||||
#include "cluster.hpp"
|
||||
#include "cluster_file.hpp"
|
||||
// TODO! migrate the other names
|
||||
#include "ctb_raw_file.hpp"
|
||||
#include "file.hpp"
|
||||
#include "fit.hpp"
|
||||
#include "interpolation.hpp"
|
||||
#include "raw_sub_file.hpp"
|
||||
#include "raw_master_file.hpp"
|
||||
#include "raw_file.hpp"
|
||||
#include "pixel_map.hpp"
|
||||
#include "var_cluster.hpp"
|
||||
#include "pedestal.hpp"
|
||||
#include "jungfrau_data_file.hpp"
|
||||
#include "pedestal.hpp"
|
||||
#include "pixel_map.hpp"
|
||||
#include "raw_file.hpp"
|
||||
#include "raw_master_file.hpp"
|
||||
#include "raw_sub_file.hpp"
|
||||
#include "var_cluster.hpp"
|
||||
|
||||
// Pybind stuff
|
||||
#include <pybind11/pybind11.h>
|
||||
@@ -24,6 +29,28 @@
|
||||
|
||||
namespace py = pybind11;
|
||||
|
||||
/* MACRO that defines Cluster bindings for a specific size and type
|
||||
|
||||
T - Storage type of the cluster data (int, float, double)
|
||||
N - Number of rows in the cluster
|
||||
M - Number of columns in the cluster
|
||||
U - Type of the pixel data (e.g., uint16_t)
|
||||
TYPE_CODE - A character representing the type code (e.g., 'i' for int, 'd' for
|
||||
double, 'f' for float)
|
||||
|
||||
*/
|
||||
#define DEFINE_CLUSTER_BINDINGS(T, N, M, U, TYPE_CODE) \
|
||||
define_ClusterFile<T, N, M, U>(m, "Cluster" #N "x" #M #TYPE_CODE); \
|
||||
define_ClusterVector<T, N, M, U>(m, "Cluster" #N "x" #M #TYPE_CODE); \
|
||||
define_ClusterFinder<T, N, M, U>(m, "Cluster" #N "x" #M #TYPE_CODE); \
|
||||
define_ClusterFinderMT<T, N, M, U>(m, "Cluster" #N "x" #M #TYPE_CODE); \
|
||||
define_ClusterFileSink<T, N, M, U>(m, "Cluster" #N "x" #M #TYPE_CODE); \
|
||||
define_ClusterCollector<T, N, M, U>(m, "Cluster" #N "x" #M #TYPE_CODE); \
|
||||
define_Cluster<T, N, M, U>(m, #N "x" #M #TYPE_CODE); \
|
||||
register_calculate_eta<T, N, M, U>(m); \
|
||||
define_2x2_reduction<T, N, M, U>(m); \
|
||||
reduce_to_2x2<T, N, M, U>(m);
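Assuming the bind_* helpers use the same naming scheme the tests below rely on (e.g. ClusterVector_Cluster5x5d), one DEFINE_CLUSTER_BINDINGS(int, 3, 3, uint16_t, i) invocation exposes a whole family of types to Python; a hedged sketch:

import numpy as np
from aare import _aare

# 3x3 integer cluster at pixel (5, 5), matching the tests in this change
c = _aare.Cluster3x3i(5, 5, np.arange(9, dtype=np.int32))

# Companion container; the name pattern is inferred from ClusterVector_Cluster5x5d used in the tests
cv = _aare.ClusterVector_Cluster3x3i()
cv.push_back(c)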
|
||||
|
||||
PYBIND11_MODULE(_aare, m) {
|
||||
define_file_io_bindings(m);
|
||||
define_raw_file_io_bindings(m);
|
||||
@@ -38,59 +65,51 @@ PYBIND11_MODULE(_aare, m) {
|
||||
define_interpolation_bindings(m);
|
||||
define_jungfrau_data_file_io_bindings(m);
|
||||
|
||||
define_cluster_file_io_bindings<int, 3, 3, uint16_t>(m, "Cluster3x3i");
|
||||
define_cluster_file_io_bindings<double, 3, 3, uint16_t>(m, "Cluster3x3d");
|
||||
define_cluster_file_io_bindings<float, 3, 3, uint16_t>(m, "Cluster3x3f");
|
||||
define_cluster_file_io_bindings<int, 2, 2, uint16_t>(m, "Cluster2x2i");
|
||||
define_cluster_file_io_bindings<float, 2, 2, uint16_t>(m, "Cluster2x2f");
|
||||
define_cluster_file_io_bindings<double, 2, 2, uint16_t>(m, "Cluster2x2d");
|
||||
bind_calibration(m);
|
||||
|
||||
define_ClusterVector<int, 3, 3, uint16_t>(m, "Cluster3x3i");
|
||||
define_ClusterVector<double, 3, 3, uint16_t>(m, "Cluster3x3d");
|
||||
define_ClusterVector<float, 3, 3, uint16_t>(m, "Cluster3x3f");
|
||||
define_ClusterVector<int, 2, 2, uint16_t>(m, "Cluster2x2i");
|
||||
define_ClusterVector<double, 2, 2, uint16_t>(m, "Cluster2x2d");
|
||||
define_ClusterVector<float, 2, 2, uint16_t>(m, "Cluster2x2f");
|
||||
DEFINE_CLUSTER_BINDINGS(int, 3, 3, uint16_t, i);
|
||||
DEFINE_CLUSTER_BINDINGS(double, 3, 3, uint16_t, d);
|
||||
DEFINE_CLUSTER_BINDINGS(float, 3, 3, uint16_t, f);
|
||||
|
||||
define_cluster_finder_bindings<int, 3, 3, uint16_t>(m, "Cluster3x3i");
|
||||
define_cluster_finder_bindings<double, 3, 3, uint16_t>(m, "Cluster3x3d");
|
||||
define_cluster_finder_bindings<float, 3, 3, uint16_t>(m, "Cluster3x3f");
|
||||
define_cluster_finder_bindings<int, 2, 2, uint16_t>(m, "Cluster2x2i");
|
||||
define_cluster_finder_bindings<double, 2, 2, uint16_t>(m, "Cluster2x2d");
|
||||
define_cluster_finder_bindings<float, 2, 2, uint16_t>(m, "Cluster2x2f");
|
||||
DEFINE_CLUSTER_BINDINGS(int, 2, 2, uint16_t, i);
|
||||
DEFINE_CLUSTER_BINDINGS(double, 2, 2, uint16_t, d);
|
||||
DEFINE_CLUSTER_BINDINGS(float, 2, 2, uint16_t, f);
|
||||
|
||||
define_cluster_finder_mt_bindings<int, 3, 3, uint16_t>(m, "Cluster3x3i");
|
||||
define_cluster_finder_mt_bindings<double, 3, 3, uint16_t>(m, "Cluster3x3d");
|
||||
define_cluster_finder_mt_bindings<float, 3, 3, uint16_t>(m, "Cluster3x3f");
|
||||
define_cluster_finder_mt_bindings<int, 2, 2, uint16_t>(m, "Cluster2x2i");
|
||||
define_cluster_finder_mt_bindings<double, 2, 2, uint16_t>(m, "Cluster2x2d");
|
||||
define_cluster_finder_mt_bindings<float, 2, 2, uint16_t>(m, "Cluster2x2f");
|
||||
DEFINE_CLUSTER_BINDINGS(int, 5, 5, uint16_t, i);
|
||||
DEFINE_CLUSTER_BINDINGS(double, 5, 5, uint16_t, d);
|
||||
DEFINE_CLUSTER_BINDINGS(float, 5, 5, uint16_t, f);
|
||||
|
||||
define_cluster_file_sink_bindings<int, 3, 3, uint16_t>(m, "Cluster3x3i");
|
||||
define_cluster_file_sink_bindings<double, 3, 3, uint16_t>(m, "Cluster3x3d");
|
||||
define_cluster_file_sink_bindings<float, 3, 3, uint16_t>(m, "Cluster3x3f");
|
||||
define_cluster_file_sink_bindings<int, 2, 2, uint16_t>(m, "Cluster2x2i");
|
||||
define_cluster_file_sink_bindings<double, 2, 2, uint16_t>(m, "Cluster2x2d");
|
||||
define_cluster_file_sink_bindings<float, 2, 2, uint16_t>(m, "Cluster2x2f");
|
||||
DEFINE_CLUSTER_BINDINGS(int, 7, 7, uint16_t, i);
|
||||
DEFINE_CLUSTER_BINDINGS(double, 7, 7, uint16_t, d);
|
||||
DEFINE_CLUSTER_BINDINGS(float, 7, 7, uint16_t, f);
|
||||
|
||||
define_cluster_collector_bindings<int, 3, 3, uint16_t>(m, "Cluster3x3i");
|
||||
define_cluster_collector_bindings<double, 3, 3, uint16_t>(m, "Cluster3x3f");
|
||||
define_cluster_collector_bindings<float, 3, 3, uint16_t>(m, "Cluster3x3d");
|
||||
define_cluster_collector_bindings<int, 2, 2, uint16_t>(m, "Cluster2x2i");
|
||||
define_cluster_collector_bindings<double, 2, 2, uint16_t>(m, "Cluster2x2f");
|
||||
define_cluster_collector_bindings<float, 2, 2, uint16_t>(m, "Cluster2x2d");
|
||||
DEFINE_CLUSTER_BINDINGS(int, 9, 9, uint16_t, i);
|
||||
DEFINE_CLUSTER_BINDINGS(double, 9, 9, uint16_t, d);
|
||||
DEFINE_CLUSTER_BINDINGS(float, 9, 9, uint16_t, f);
|
||||
|
||||
define_cluster<int, 3, 3, uint16_t>(m, "3x3i");
|
||||
define_cluster<float, 3, 3, uint16_t>(m, "3x3f");
|
||||
define_cluster<double, 3, 3, uint16_t>(m, "3x3d");
|
||||
define_cluster<int, 2, 2, uint16_t>(m, "2x2i");
|
||||
define_cluster<float, 2, 2, uint16_t>(m, "2x2f");
|
||||
define_cluster<double, 2, 2, uint16_t>(m, "2x2d");
|
||||
define_3x3_reduction<int, 3, 3, uint16_t>(m);
|
||||
define_3x3_reduction<double, 3, 3, uint16_t>(m);
|
||||
define_3x3_reduction<float, 3, 3, uint16_t>(m);
|
||||
define_3x3_reduction<int, 5, 5, uint16_t>(m);
|
||||
define_3x3_reduction<double, 5, 5, uint16_t>(m);
|
||||
define_3x3_reduction<float, 5, 5, uint16_t>(m);
|
||||
define_3x3_reduction<int, 7, 7, uint16_t>(m);
|
||||
define_3x3_reduction<double, 7, 7, uint16_t>(m);
|
||||
define_3x3_reduction<float, 7, 7, uint16_t>(m);
|
||||
define_3x3_reduction<int, 9, 9, uint16_t>(m);
|
||||
define_3x3_reduction<double, 9, 9, uint16_t>(m);
|
||||
define_3x3_reduction<float, 9, 9, uint16_t>(m);
|
||||
|
||||
register_calculate_eta<int, 3, 3, uint16_t>(m);
|
||||
register_calculate_eta<float, 3, 3, uint16_t>(m);
|
||||
register_calculate_eta<double, 3, 3, uint16_t>(m);
|
||||
register_calculate_eta<int, 2, 2, uint16_t>(m);
|
||||
register_calculate_eta<float, 2, 2, uint16_t>(m);
|
||||
register_calculate_eta<double, 2, 2, uint16_t>(m);
|
||||
reduce_to_3x3<int, 3, 3, uint16_t>(m);
|
||||
reduce_to_3x3<double, 3, 3, uint16_t>(m);
|
||||
reduce_to_3x3<float, 3, 3, uint16_t>(m);
|
||||
reduce_to_3x3<int, 5, 5, uint16_t>(m);
|
||||
reduce_to_3x3<double, 5, 5, uint16_t>(m);
|
||||
reduce_to_3x3<float, 5, 5, uint16_t>(m);
|
||||
reduce_to_3x3<int, 7, 7, uint16_t>(m);
|
||||
reduce_to_3x3<double, 7, 7, uint16_t>(m);
|
||||
reduce_to_3x3<float, 7, 7, uint16_t>(m);
|
||||
reduce_to_3x3<int, 9, 9, uint16_t>(m);
|
||||
reduce_to_3x3<double, 9, 9, uint16_t>(m);
|
||||
reduce_to_3x3<float, 9, 9, uint16_t>(m);
|
||||
}
|
||||
|
||||
@@ -13,7 +13,7 @@ namespace py = pybind11;
|
||||
using namespace aare;
|
||||
|
||||
// Pass image data back to python as a numpy array
|
||||
template <typename T, int64_t Ndim>
|
||||
template <typename T, ssize_t Ndim>
|
||||
py::array return_image_data(aare::NDArray<T, Ndim> *image) {
|
||||
|
||||
py::capsule free_when_done(image, [](void *f) {
|
||||
|
||||
@@ -9,7 +9,8 @@
|
||||
|
||||
namespace py = pybind11;
|
||||
|
||||
template <typename SUM_TYPE> void define_pedestal_bindings(py::module &m, const std::string &name) {
|
||||
template <typename SUM_TYPE>
|
||||
void define_pedestal_bindings(py::module &m, const std::string &name) {
|
||||
py::class_<Pedestal<SUM_TYPE>>(m, name.c_str())
|
||||
.def(py::init<int, int, int>())
|
||||
.def(py::init<int, int>())
|
||||
@@ -19,16 +20,18 @@ template <typename SUM_TYPE> void define_pedestal_bindings(py::module &m, const
|
||||
*mea = self.mean();
|
||||
return return_image_data(mea);
|
||||
})
|
||||
.def("variance", [](Pedestal<SUM_TYPE> &self) {
|
||||
auto var = new NDArray<SUM_TYPE, 2>{};
|
||||
*var = self.variance();
|
||||
return return_image_data(var);
|
||||
})
|
||||
.def("std", [](Pedestal<SUM_TYPE> &self) {
|
||||
auto std = new NDArray<SUM_TYPE, 2>{};
|
||||
*std = self.std();
|
||||
return return_image_data(std);
|
||||
})
|
||||
.def("variance",
|
||||
[](Pedestal<SUM_TYPE> &self) {
|
||||
auto var = new NDArray<SUM_TYPE, 2>{};
|
||||
*var = self.variance();
|
||||
return return_image_data(var);
|
||||
})
|
||||
.def("std",
|
||||
[](Pedestal<SUM_TYPE> &self) {
|
||||
auto std = new NDArray<SUM_TYPE, 2>{};
|
||||
*std = self.std();
|
||||
return return_image_data(std);
|
||||
})
|
||||
.def("clear", py::overload_cast<>(&Pedestal<SUM_TYPE>::clear))
|
||||
.def_property_readonly("rows", &Pedestal<SUM_TYPE>::rows)
|
||||
.def_property_readonly("cols", &Pedestal<SUM_TYPE>::cols)
|
||||
@@ -39,14 +42,19 @@ template <typename SUM_TYPE> void define_pedestal_bindings(py::module &m, const
|
||||
[&](Pedestal<SUM_TYPE> &pedestal) {
|
||||
return Pedestal<SUM_TYPE>(pedestal);
|
||||
})
|
||||
//TODO! add push for other data types
|
||||
.def("push", [](Pedestal<SUM_TYPE> &pedestal, py::array_t<uint16_t> &f) {
|
||||
auto v = make_view_2d(f);
|
||||
pedestal.push(v);
|
||||
})
|
||||
.def("push_no_update", [](Pedestal<SUM_TYPE> &pedestal, py::array_t<uint16_t, py::array::c_style> &f) {
|
||||
auto v = make_view_2d(f);
|
||||
pedestal.push_no_update(v);
|
||||
}, py::arg().noconvert())
|
||||
// TODO! add push for other data types
|
||||
.def("push",
|
||||
[](Pedestal<SUM_TYPE> &pedestal, py::array_t<uint16_t> &f) {
|
||||
auto v = make_view_2d(f);
|
||||
pedestal.push(v);
|
||||
})
|
||||
.def(
|
||||
"push_no_update",
|
||||
[](Pedestal<SUM_TYPE> &pedestal,
|
||||
py::array_t<uint16_t, py::array::c_style> &f) {
|
||||
auto v = make_view_2d(f);
|
||||
pedestal.push_no_update(v);
|
||||
},
|
||||
py::arg().noconvert())
|
||||
.def("update_mean", &Pedestal<SUM_TYPE>::update_mean);
|
||||
}
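For context, a heavily hedged Python sketch of these Pedestal bindings; the concrete class name depends on the name string passed to define_pedestal_bindings and is assumed here to be Pedestal_d.

import numpy as np
from aare import _aare

pd = _aare.Pedestal_d(512, 1024)   # rows, cols; the class name is an assumption
for _ in range(100):
    pd.push(np.zeros((512, 1024), dtype=np.uint16))

mean = pd.mean()   # 2D numpy arrays via return_image_data
noise = pd.std()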
|
||||
@@ -1,41 +1,46 @@
|
||||
#include "aare/PixelMap.hpp"
|
||||
#include "np_helper.hpp"
|
||||
|
||||
|
||||
#include <cstdint>
|
||||
#include <pybind11/numpy.h>
|
||||
#include <pybind11/pybind11.h>
|
||||
#include <pybind11/stl.h>
|
||||
|
||||
|
||||
namespace py = pybind11;
|
||||
using namespace::aare;
|
||||
|
||||
using namespace ::aare;
|
||||
|
||||
void define_pixel_map_bindings(py::module &m) {
|
||||
m.def("GenerateMoench03PixelMap", []() {
|
||||
auto ptr = new NDArray<ssize_t,2>(GenerateMoench03PixelMap());
|
||||
return return_image_data(ptr);
|
||||
})
|
||||
.def("GenerateMoench05PixelMap", []() {
|
||||
auto ptr = new NDArray<ssize_t,2>(GenerateMoench05PixelMap());
|
||||
return return_image_data(ptr);
|
||||
})
|
||||
.def("GenerateMoench05PixelMap1g", []() {
|
||||
auto ptr = new NDArray<ssize_t,2>(GenerateMoench05PixelMap1g());
|
||||
return return_image_data(ptr);
|
||||
})
|
||||
.def("GenerateMoench05PixelMapOld", []() {
|
||||
auto ptr = new NDArray<ssize_t,2>(GenerateMoench05PixelMapOld());
|
||||
return return_image_data(ptr);
|
||||
})
|
||||
.def("GenerateMH02SingleCounterPixelMap", []() {
|
||||
auto ptr = new NDArray<ssize_t,2>(GenerateMH02SingleCounterPixelMap());
|
||||
return return_image_data(ptr);
|
||||
})
|
||||
.def("GenerateMH02FourCounterPixelMap", []() {
|
||||
auto ptr = new NDArray<ssize_t,3>(GenerateMH02FourCounterPixelMap());
|
||||
return return_image_data(ptr);
|
||||
});
|
||||
|
||||
m.def("GenerateMoench03PixelMap",
|
||||
[]() {
|
||||
auto ptr = new NDArray<ssize_t, 2>(GenerateMoench03PixelMap());
|
||||
return return_image_data(ptr);
|
||||
})
|
||||
.def("GenerateMoench05PixelMap",
|
||||
[]() {
|
||||
auto ptr = new NDArray<ssize_t, 2>(GenerateMoench05PixelMap());
|
||||
return return_image_data(ptr);
|
||||
})
|
||||
.def("GenerateMoench05PixelMap1g",
|
||||
[]() {
|
||||
auto ptr =
|
||||
new NDArray<ssize_t, 2>(GenerateMoench05PixelMap1g());
|
||||
return return_image_data(ptr);
|
||||
})
|
||||
.def("GenerateMoench05PixelMapOld",
|
||||
[]() {
|
||||
auto ptr =
|
||||
new NDArray<ssize_t, 2>(GenerateMoench05PixelMapOld());
|
||||
return return_image_data(ptr);
|
||||
})
|
||||
.def("GenerateMH02SingleCounterPixelMap",
|
||||
[]() {
|
||||
auto ptr = new NDArray<ssize_t, 2>(
|
||||
GenerateMH02SingleCounterPixelMap());
|
||||
return return_image_data(ptr);
|
||||
})
|
||||
.def("GenerateMH02FourCounterPixelMap", []() {
|
||||
auto ptr =
|
||||
new NDArray<ssize_t, 3>(GenerateMH02FourCounterPixelMap());
|
||||
return return_image_data(ptr);
|
||||
});
|
||||
}
|
||||
@@ -32,7 +32,7 @@ void define_raw_file_io_bindings(py::module &m) {
|
||||
shape.push_back(self.cols());
|
||||
|
||||
// return headers from all subfiles
|
||||
py::array_t<DetectorHeader> header(self.n_mod());
|
||||
py::array_t<DetectorHeader> header(self.n_modules());
|
||||
|
||||
const uint8_t item_size = self.bytes_per_pixel();
|
||||
if (item_size == 1) {
|
||||
@@ -58,13 +58,14 @@ void define_raw_file_io_bindings(py::module &m) {
|
||||
throw std::runtime_error("No frames left in file");
|
||||
}
|
||||
std::vector<size_t> shape{n_frames, self.rows(), self.cols()};
|
||||
|
||||
|
||||
// return headers from all subfiles
|
||||
py::array_t<DetectorHeader> header;
|
||||
if (self.n_mod() == 1) {
|
||||
if (self.n_modules() == 1) {
|
||||
header = py::array_t<DetectorHeader>(n_frames);
|
||||
} else {
|
||||
header = py::array_t<DetectorHeader>({self.n_mod(), n_frames});
|
||||
header = py::array_t<DetectorHeader>(
|
||||
{self.n_modules_in_roi(), n_frames});
|
||||
}
|
||||
// py::array_t<DetectorHeader> header({self.n_mod(), n_frames});
|
||||
|
||||
@@ -100,7 +101,8 @@ void define_raw_file_io_bindings(py::module &m) {
|
||||
.def_property_readonly("cols", &RawFile::cols)
|
||||
.def_property_readonly("bitdepth", &RawFile::bitdepth)
|
||||
.def_property_readonly("geometry", &RawFile::geometry)
|
||||
.def_property_readonly("n_mod", &RawFile::n_mod)
|
||||
.def_property_readonly("detector_type", &RawFile::detector_type)
|
||||
.def_property_readonly("master", &RawFile::master);
|
||||
.def_property_readonly("master", &RawFile::master)
|
||||
.def_property_readonly("n_modules", &RawFile::n_modules)
|
||||
.def_property_readonly("n_modules_in_roi", &RawFile::n_modules_in_roi);
|
||||
}
|
||||
@@ -57,7 +57,10 @@ void define_raw_master_file_bindings(py::module &m) {
|
||||
.def_property_readonly("total_frames_expected",
|
||||
&RawMasterFile::total_frames_expected)
|
||||
.def_property_readonly("geometry", &RawMasterFile::geometry)
|
||||
.def_property_readonly("analog_samples", &RawMasterFile::analog_samples, R"(
|
||||
.def_property_readonly("udp_interfaces_per_module",
|
||||
&RawMasterFile::udp_interfaces_per_module)
|
||||
.def_property_readonly("analog_samples", &RawMasterFile::analog_samples,
|
||||
R"(
|
||||
Number of analog samples
|
||||
|
||||
Returns
|
||||
@@ -66,7 +69,7 @@ void define_raw_master_file_bindings(py::module &m) {
|
||||
The number of analog samples in the file (or None if not enabled)
|
||||
)")
|
||||
.def_property_readonly("digital_samples",
|
||||
&RawMasterFile::digital_samples, R"(
|
||||
&RawMasterFile::digital_samples, R"(
|
||||
Number of digital samples
|
||||
|
||||
Returns
|
||||
|
||||
@@ -24,8 +24,8 @@ auto read_frame_from_RawSubFile(RawSubFile &self) {
|
||||
py::array_t<DetectorHeader> header(1);
|
||||
const uint8_t item_size = self.bytes_per_pixel();
|
||||
std::vector<ssize_t> shape{static_cast<ssize_t>(self.rows()),
|
||||
static_cast<ssize_t>(self.cols())};
|
||||
|
||||
static_cast<ssize_t>(self.cols())};
|
||||
|
||||
py::array image;
|
||||
if (item_size == 1) {
|
||||
image = py::array_t<uint8_t>(shape);
|
||||
@@ -43,12 +43,10 @@ auto read_frame_from_RawSubFile(RawSubFile &self) {
|
||||
auto read_n_frames_from_RawSubFile(RawSubFile &self, size_t n_frames) {
|
||||
py::array_t<DetectorHeader> header(n_frames);
|
||||
const uint8_t item_size = self.bytes_per_pixel();
|
||||
std::vector<ssize_t> shape{
|
||||
static_cast<ssize_t>(n_frames),
|
||||
static_cast<ssize_t>(self.rows()),
|
||||
static_cast<ssize_t>(self.cols())
|
||||
};
|
||||
|
||||
std::vector<ssize_t> shape{static_cast<ssize_t>(n_frames),
|
||||
static_cast<ssize_t>(self.rows()),
|
||||
static_cast<ssize_t>(self.cols())};
|
||||
|
||||
py::array image;
|
||||
if (item_size == 1) {
|
||||
image = py::array_t<uint8_t>(shape);
|
||||
@@ -57,15 +55,14 @@ auto read_n_frames_from_RawSubFile(RawSubFile &self, size_t n_frames) {
|
||||
} else if (item_size == 4) {
|
||||
image = py::array_t<uint32_t>(shape);
|
||||
}
|
||||
self.read_into(reinterpret_cast<std::byte *>(image.mutable_data()), n_frames,
|
||||
header.mutable_data());
|
||||
self.read_into(reinterpret_cast<std::byte *>(image.mutable_data()),
|
||||
n_frames, header.mutable_data());
|
||||
|
||||
return py::make_tuple(header, image);
|
||||
}
|
||||
|
||||
|
||||
//Disable warnings for unused parameters, as we ignore some
|
||||
//in the __exit__ method
|
||||
// Disable warnings for unused parameters, as we ignore some
|
||||
// in the __exit__ method
|
||||
#pragma GCC diagnostic push
|
||||
#pragma GCC diagnostic ignored "-Wunused-parameter"
|
||||
|
||||
@@ -76,7 +73,7 @@ void define_raw_sub_file_io_bindings(py::module &m) {
|
||||
.def_property_readonly("bytes_per_frame", &RawSubFile::bytes_per_frame)
|
||||
.def_property_readonly("pixels_per_frame",
|
||||
&RawSubFile::pixels_per_frame)
|
||||
.def_property_readonly("bytes_per_pixel", &RawSubFile::bytes_per_pixel)
|
||||
.def_property_readonly("bytes_per_pixel", &RawSubFile::bytes_per_pixel)
|
||||
.def("seek", &RawSubFile::seek)
|
||||
.def("tell", &RawSubFile::tell)
|
||||
.def_property_readonly("rows", &RawSubFile::rows)
|
||||
@@ -84,18 +81,17 @@ void define_raw_sub_file_io_bindings(py::module &m) {
|
||||
.def_property_readonly("frames_in_file", &RawSubFile::frames_in_file)
|
||||
.def("read_frame", &read_frame_from_RawSubFile)
|
||||
.def("read_n", &read_n_frames_from_RawSubFile)
|
||||
.def("read", [](RawSubFile &self){
|
||||
self.seek(0);
|
||||
auto n_frames = self.frames_in_file();
|
||||
return read_n_frames_from_RawSubFile(self, n_frames);
|
||||
})
|
||||
.def("read",
|
||||
[](RawSubFile &self) {
|
||||
self.seek(0);
|
||||
auto n_frames = self.frames_in_file();
|
||||
return read_n_frames_from_RawSubFile(self, n_frames);
|
||||
})
|
||||
.def("__enter__", [](RawSubFile &self) { return &self; })
|
||||
.def("__exit__",
|
||||
[](RawSubFile &self,
|
||||
const std::optional<pybind11::type> &exc_type,
|
||||
[](RawSubFile &self, const std::optional<pybind11::type> &exc_type,
|
||||
const std::optional<pybind11::object> &exc_value,
|
||||
const std::optional<pybind11::object> &traceback) {
|
||||
})
|
||||
const std::optional<pybind11::object> &traceback) {})
|
||||
.def("__iter__", [](RawSubFile &self) { return &self; })
|
||||
.def("__next__", [](RawSubFile &self) {
|
||||
try {
|
||||
@@ -104,7 +100,6 @@ void define_raw_sub_file_io_bindings(py::module &m) {
|
||||
throw py::stop_iteration();
|
||||
}
|
||||
});
|
||||
|
||||
}
|
||||
|
||||
#pragma GCC diagnostic pop
|
||||
@@ -12,10 +12,8 @@
|
||||
// #include <pybind11/stl/filesystem.h>
|
||||
// #include <string>
|
||||
|
||||
|
||||
namespace py = pybind11;
|
||||
using namespace::aare;
|
||||
|
||||
using namespace ::aare;
|
||||
|
||||
void define_var_cluster_finder_bindings(py::module &m) {
|
||||
PYBIND11_NUMPY_DTYPE(VarClusterFinder<double>::Hit, size, row, col,
|
||||
@@ -29,12 +27,12 @@ void define_var_cluster_finder_bindings(py::module &m) {
|
||||
return return_image_data(ptr);
|
||||
})
|
||||
.def("set_noiseMap",
|
||||
[](VarClusterFinder<double> &self,
|
||||
[](VarClusterFinder<double> &self,
|
||||
py::array_t<double, py::array::c_style | py::array::forcecast>
|
||||
noise_map) {
|
||||
auto noise_map_span = make_view_2d(noise_map);
|
||||
self.set_noiseMap(noise_map_span);
|
||||
})
|
||||
auto noise_map_span = make_view_2d(noise_map);
|
||||
self.set_noiseMap(noise_map_span);
|
||||
})
|
||||
.def("set_peripheralThresholdFactor",
|
||||
&VarClusterFinder<double>::set_peripheralThresholdFactor)
|
||||
.def("find_clusters",
|
||||
@@ -65,9 +63,7 @@ void define_var_cluster_finder_bindings(py::module &m) {
|
||||
return return_vector(ptr);
|
||||
})
|
||||
.def("clear_hits",
|
||||
[](VarClusterFinder<double> &self) {
|
||||
self.clear_hits();
|
||||
})
|
||||
[](VarClusterFinder<double> &self) { self.clear_hits(); })
|
||||
.def("steal_hits",
|
||||
[](VarClusterFinder<double> &self) {
|
||||
auto ptr = new std::vector<VarClusterFinder<double>::Hit>(
|
||||
@@ -75,5 +71,4 @@ void define_var_cluster_finder_bindings(py::module &m) {
|
||||
return return_vector(ptr);
|
||||
})
|
||||
.def("total_clusters", &VarClusterFinder<double>::total_clusters);
|
||||
|
||||
}
|
||||
@@ -6,20 +6,20 @@ import pytest
|
||||
|
||||
def pytest_addoption(parser):
|
||||
parser.addoption(
|
||||
"--files", action="store_true", default=False, help="run slow tests"
|
||||
"--with-data", action="store_true", default=False, help="Run tests that require additional data"
|
||||
)
|
||||
|
||||
|
||||
def pytest_configure(config):
|
||||
config.addinivalue_line("markers", "files: mark test as needing image files to run")
|
||||
config.addinivalue_line("markers", "withdata: mark test as needing image files to run")
|
||||
|
||||
|
||||
def pytest_collection_modifyitems(config, items):
|
||||
if config.getoption("--files"):
|
||||
if config.getoption("--with-data"):
|
||||
return
|
||||
skip = pytest.mark.skip(reason="need --files option to run")
|
||||
skip = pytest.mark.skip(reason="need --with-data option to run")
|
||||
for item in items:
|
||||
if "files" in item.keywords:
|
||||
if "withdata" in item.keywords:
|
||||
item.add_marker(skip)
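# Illustration only: how the renamed option and marker pair up in a test module.
# Tests marked "withdata" are skipped unless the suite is run as:  pytest --with-data
import pytest

@pytest.mark.withdata
def test_needs_external_files(test_data_path):
    assert (test_data_path / "raw").exists()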
|
||||
|
||||
|
||||
|
||||
@@ -101,6 +101,27 @@ def test_cluster_finder():
|
||||
assert clusters.size == 0
|
||||
|
||||
|
||||
def test_2x2_reduction():
|
||||
"""Test 2x2 Reduction"""
|
||||
cluster = _aare.Cluster3x3i(5,5,np.array([1, 1, 1, 2, 3, 1, 2, 2, 1], dtype=np.int32))
|
||||
|
||||
reduced_cluster = _aare.reduce_to_2x2(cluster)
|
||||
|
||||
assert reduced_cluster.x == 4
|
||||
assert reduced_cluster.y == 5
|
||||
assert (reduced_cluster.data == np.array([[2, 3], [2, 2]], dtype=np.int32)).all()
|
||||
|
||||
|
||||
def test_3x3_reduction():
|
||||
"""Test 3x3 Reduction"""
|
||||
cluster = _aare.Cluster5x5d(5,5,np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 2.0, 2.0, 3.0,
|
||||
1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], dtype=np.double))
|
||||
|
||||
reduced_cluster = _aare.reduce_to_3x3(cluster)
|
||||
|
||||
assert reduced_cluster.x == 4
|
||||
assert reduced_cluster.y == 5
|
||||
assert (reduced_cluster.data == np.array([[1.0, 2.0, 1.0], [2.0, 2.0, 3.0], [1.0, 2.0, 1.0]], dtype=np.double)).all()
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -9,7 +9,7 @@ import pickle
|
||||
from aare import ClusterFile
|
||||
from conftest import test_data_path
|
||||
|
||||
@pytest.mark.files
|
||||
@pytest.mark.withdata
|
||||
def test_cluster_file(test_data_path):
|
||||
"""Test ClusterFile"""
|
||||
f = ClusterFile(test_data_path / "clust/single_frame_97_clustrers.clust")
|
||||
@@ -39,7 +39,7 @@ def test_cluster_file(test_data_path):
|
||||
for i in range(10):
|
||||
assert arr[i]['x'] == i+1
|
||||
|
||||
@pytest.mark.files
|
||||
@pytest.mark.withdata
|
||||
def test_read_clusters_and_fill_histogram(test_data_path):
|
||||
# Create the histogram
|
||||
n_bins = 100
|
||||
|
||||
@@ -5,7 +5,7 @@ import time
|
||||
from pathlib import Path
|
||||
import pickle
|
||||
|
||||
from aare import ClusterFile
|
||||
from aare import ClusterFile, ClusterVector
|
||||
from aare import _aare
|
||||
from conftest import test_data_path
|
||||
|
||||
@@ -51,4 +51,36 @@ def test_make_a_hitmap_from_cluster_vector():
|
||||
# print(img)
|
||||
# print(ref)
|
||||
assert (img == ref).all()
|
||||
|
||||
|
||||
|
||||
def test_2x2_reduction():
|
||||
cv = ClusterVector((3,3))
|
||||
|
||||
cv.push_back(_aare.Cluster3x3i(5, 5, np.array([1, 1, 1, 2, 3, 1, 2, 2, 1], dtype=np.int32)))
|
||||
cv.push_back(_aare.Cluster3x3i(5, 5, np.array([2, 2, 1, 2, 3, 1, 1, 1, 1], dtype=np.int32)))
|
||||
|
||||
reduced_cv = np.array(_aare.reduce_to_2x2(cv), copy=False)
|
||||
|
||||
assert reduced_cv.size == 2
|
||||
assert reduced_cv[0]["x"] == 4
|
||||
assert reduced_cv[0]["y"] == 5
|
||||
assert (reduced_cv[0]["data"] == np.array([[2, 3], [2, 2]], dtype=np.int32)).all()
|
||||
assert reduced_cv[1]["x"] == 4
|
||||
assert reduced_cv[1]["y"] == 6
|
||||
assert (reduced_cv[1]["data"] == np.array([[2, 2], [2, 3]], dtype=np.int32)).all()
|
||||
|
||||
|
||||
def test_3x3_reduction():
|
||||
cv = _aare.ClusterVector_Cluster5x5d()
|
||||
|
||||
cv.push_back(_aare.Cluster5x5d(5,5,np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 2.0, 2.0, 3.0,
|
||||
1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], dtype=np.double)))
|
||||
cv.push_back(_aare.Cluster5x5d(5,5,np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 2.0, 2.0, 3.0,
|
||||
1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], dtype=np.double)))
|
||||
|
||||
reduced_cv = np.array(_aare.reduce_to_3x3(cv), copy=False)
|
||||
|
||||
assert reduced_cv.size == 2
|
||||
assert reduced_cv[0]["x"] == 4
|
||||
assert reduced_cv[0]["y"] == 5
|
||||
assert (reduced_cv[0]["data"] == np.array([[1.0, 2.0, 1.0], [2.0, 2.0, 3.0], [1.0, 2.0, 1.0]], dtype=np.double)).all()
|
||||
73
python/tests/test_RawFile.py
Normal file
@@ -0,0 +1,73 @@
|
||||
import pytest
|
||||
from aare import RawFile
|
||||
import numpy as np
|
||||
|
||||
@pytest.mark.withdata
|
||||
def test_read_rawfile_with_roi(test_data_path):
|
||||
|
||||
with RawFile(test_data_path / "raw/SingleChipROI/Data_master_0.json") as f:
|
||||
headers, frames = f.read()
|
||||
|
||||
assert headers.size == 10100
|
||||
assert frames.shape == (10100, 256, 256)
|
||||
|
||||
@pytest.mark.withdata
|
||||
def test_read_rawfile_quad_eiger_and_compare_to_numpy(test_data_path):
|
||||
|
||||
d0 = test_data_path/'raw/eiger_quad_data/W13_vrpreampscan_m21C_300V_800eV_vthre2000_d0_f0_0.raw'
|
||||
d1 = test_data_path/'raw/eiger_quad_data/W13_vrpreampscan_m21C_300V_800eV_vthre2000_d1_f0_0.raw'
|
||||
|
||||
image = np.zeros((512,512), dtype=np.uint32)
|
||||
|
||||
with open(d0) as f:
|
||||
raw = np.fromfile(f, dtype=np.uint32, count = 256*512, offset = 20*256*512*4 + 112*21).reshape(256,512)
|
||||
|
||||
image[256:,:] = raw
|
||||
|
||||
with open(d1) as f:
|
||||
raw = np.fromfile(f, dtype=np.uint32, count = 256*512, offset = 20*256*512*4 + 112*21).reshape(256,512)
|
||||
|
||||
image[0:256,:] = raw[::-1,:]
|
||||
|
||||
with RawFile(test_data_path/'raw/eiger_quad_data/W13_vrpreampscan_m21C_300V_800eV_vthre2000_master_0.json') as f:
|
||||
f.seek(20)
|
||||
header, image1 = f.read_frame()
|
||||
|
||||
assert (image == image1).all()
|
||||
|
||||
|
||||
@pytest.mark.withdata
|
||||
def test_read_rawfile_eiger_and_compare_to_numpy(test_data_path):
|
||||
d0 = test_data_path/'raw/eiger/Lab6_20500eV_2deg_20240629_d0_f0_7.raw'
|
||||
d1 = test_data_path/'raw/eiger/Lab6_20500eV_2deg_20240629_d1_f0_7.raw'
|
||||
d2 = test_data_path/'raw/eiger/Lab6_20500eV_2deg_20240629_d2_f0_7.raw'
|
||||
d3 = test_data_path/'raw/eiger/Lab6_20500eV_2deg_20240629_d3_f0_7.raw'
|
||||
|
||||
image = np.zeros((512,1024), dtype=np.uint32)
|
||||
|
||||
#TODO why is there no header offset?
|
||||
with open(d0) as f:
|
||||
raw = np.fromfile(f, dtype=np.uint32, count = 256*512, offset=112).reshape(256,512)
|
||||
|
||||
image[0:256,0:512] = raw[::-1]
|
||||
|
||||
with open(d1) as f:
|
||||
raw = np.fromfile(f, dtype=np.uint32, count = 256*512, offset=112).reshape(256,512)
|
||||
|
||||
image[0:256,512:] = raw[::-1]
|
||||
|
||||
with open(d2) as f:
|
||||
raw = np.fromfile(f, dtype=np.uint32, count = 256*512, offset=112).reshape(256,512)
|
||||
|
||||
image[256:,0:512] = raw
|
||||
|
||||
with open(d3) as f:
|
||||
raw = np.fromfile(f, dtype=np.uint32, count = 256*512, offset=112).reshape(256,512)
|
||||
|
||||
image[256:,512:] = raw
|
||||
|
||||
|
||||
with RawFile(test_data_path/'raw/eiger/Lab6_20500eV_2deg_20240629_master_7.json') as f:
|
||||
header, image1 = f.read_frame()
|
||||
|
||||
assert (image == image1).all()
|
||||
@@ -3,34 +3,37 @@ import numpy as np
|
||||
from aare import RawSubFile, DetectorType
|
||||
|
||||
|
||||
@pytest.mark.files
|
||||
@pytest.mark.withdata
|
||||
def test_read_a_jungfrau_RawSubFile(test_data_path):
|
||||
|
||||
# Starting with f1 there is now 7 frames left in the series of files
|
||||
with RawSubFile(test_data_path / "raw/jungfrau/jungfrau_single_d0_f1_0.raw", DetectorType.Jungfrau, 512, 1024, 16) as f:
|
||||
assert f.frames_in_file == 3
|
||||
assert f.frames_in_file == 7
|
||||
|
||||
headers, frames = f.read()
|
||||
|
||||
assert headers.size == 3
|
||||
assert frames.shape == (3, 512, 1024)
|
||||
assert headers.size == 7
|
||||
assert frames.shape == (7, 512, 1024)
|
||||
|
||||
# Frame numbers in this file should be 4, 5, 6
|
||||
for i,h in zip(range(4,7,1), headers):
|
||||
|
||||
for i,h in zip(range(4,11,1), headers):
|
||||
assert h["frameNumber"] == i
|
||||
|
||||
# Compare to canned data using numpy
|
||||
data = np.load(test_data_path / "raw/jungfrau/jungfrau_single_0.npy")
|
||||
assert np.all(data[3:6] == frames)
|
||||
assert np.all(data[3:] == frames)
|
||||
|
||||
@pytest.mark.files
|
||||
@pytest.mark.withdata
|
||||
def test_iterate_over_a_jungfrau_RawSubFile(test_data_path):
|
||||
|
||||
data = np.load(test_data_path / "raw/jungfrau/jungfrau_single_0.npy")
|
||||
|
||||
# Given the first subfile in a series we can read all frames from f0, f1, f2...fN
|
||||
with RawSubFile(test_data_path / "raw/jungfrau/jungfrau_single_d0_f0_0.raw", DetectorType.Jungfrau, 512, 1024, 16) as f:
|
||||
i = 0
|
||||
for header, frame in f:
|
||||
assert header["frameNumber"] == i+1
|
||||
assert np.all(frame == data[i])
|
||||
i += 1
|
||||
assert i == 3
|
||||
assert header["frameNumber"] == 3
|
||||
assert i == 10
|
||||
assert header["frameNumber"] == 10
|
||||
|
||||
135
python/tests/test_calibration.py
Normal file
@@ -0,0 +1,135 @@
import pytest
import numpy as np

import aare

def test_apply_calibration_small_data():
    # The raw data consists of 10 4x5 images
    raw = np.zeros((10, 4, 5), dtype=np.uint16)

    # We need a pedestal for each gain, so 3
    pedestal = np.zeros((3, 4, 5), dtype=np.float32)

    # And the same for calibration
    calibration = np.ones((3, 4, 5), dtype=np.float32)

    # Set the known values, probing one pixel in each gain
    raw[0, 0, 0] = 100 #ADC value of 100, gain 0
    pedestal[0, 0, 0] = 10
    calibration[0, 0, 0] = 43.7

    raw[2, 3, 3] = (1<<14) + 1000 #ADC value of 1000, gain 1
    pedestal[1, 3, 3] = 500
    calibration[1, 3, 3] = 2.0

    raw[1, 1, 4] = (3<<14) + 857 #ADC value of 857, gain 2
    pedestal[2, 1, 4] = 100
    calibration[2, 1, 4] = 3.0

    data = aare.apply_calibration(raw, pd = pedestal, cal = calibration)

    # The formula that is applied is:
    # calibrated = (raw - pedestal) / calibration
    assert data.shape == (10, 4, 5)
    assert data[0, 0, 0] == (100 - 10) / 43.7
    assert data[2, 3, 3] == (1000 - 500) / 2.0
    assert data[1, 1, 4] == (857 - 100) / 3.0

    # Other pixels should be zero
    assert data[2, 2, 2] == 0
    assert data[0, 1, 1] == 0
    assert data[1, 3, 0] == 0

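The constants in this test imply that the upper two bits of each 16-bit raw word select the gain (0b00 -> gain 0, 0b01 -> gain 1, 0b11 -> gain 2) and that the lower 14 bits carry the ADC value. A rough NumPy sketch of the calibrated = (raw - pedestal) / calibration formula under that assumption is given below; it is a reading aid, not aare's actual implementation, and the unused 0b10 code is left unhandled:

import numpy as np

def apply_calibration_reference(raw, pd, cal):
    # Illustrative reference only; assumes gain bits 0b00/0b01/0b11 map to
    # pedestal/calibration planes 0/1/2 and that ADC is the lower 14 bits.
    gain_bits = (raw >> 14).astype(np.int64)       # 0, 1 or 3
    gain = np.where(gain_bits == 3, 2, gain_bits)  # map 0b11 to plane 2
    adc = (raw & 0x3FFF).astype(np.float32)
    idx_r, idx_c = np.ogrid[:raw.shape[1], :raw.shape[2]]
    return (adc - pd[gain, idx_r, idx_c]) / cal[gain, idx_r, idx_c]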
@pytest.fixture
def raw_data_3x2x2():
    raw = np.zeros((3, 2, 2), dtype=np.uint16)
    raw[0, 0, 0] = 100
    raw[1, 0, 0] = 200
    raw[2, 0, 0] = 300

    raw[0, 0, 1] = (1<<14) + 100
    raw[1, 0, 1] = (1<<14) + 200
    raw[2, 0, 1] = (1<<14) + 300

    raw[0, 1, 0] = (1<<14) + 37
    raw[1, 1, 0] = 38
    raw[2, 1, 0] = (3<<14) + 39

    raw[0, 1, 1] = (3<<14) + 100
    raw[1, 1, 1] = (3<<14) + 200
    raw[2, 1, 1] = (3<<14) + 300
    return raw

def test_calculate_pedestal(raw_data_3x2x2):
    # Calculate the pedestal
    pd = aare.calculate_pedestal(raw_data_3x2x2)
    assert pd.shape == (3, 2, 2)
    assert pd.dtype == np.float64
    assert pd[0, 0, 0] == 200
    assert pd[1, 0, 0] == 0
    assert pd[2, 0, 0] == 0

    assert pd[0, 0, 1] == 0
    assert pd[1, 0, 1] == 200
    assert pd[2, 0, 1] == 0

    assert pd[0, 1, 0] == 38
    assert pd[1, 1, 0] == 37
    assert pd[2, 1, 0] == 39

    assert pd[0, 1, 1] == 0
    assert pd[1, 1, 1] == 0
    assert pd[2, 1, 1] == 200

def test_calculate_pedestal_float(raw_data_3x2x2):
    #results should be the same for float
    pd2 = aare.calculate_pedestal_float(raw_data_3x2x2)
    assert pd2.shape == (3, 2, 2)
    assert pd2.dtype == np.float32
    assert pd2[0, 0, 0] == 200
    assert pd2[1, 0, 0] == 0
    assert pd2[2, 0, 0] == 0

    assert pd2[0, 0, 1] == 0
    assert pd2[1, 0, 1] == 200
    assert pd2[2, 0, 1] == 0

    assert pd2[0, 1, 0] == 38
    assert pd2[1, 1, 0] == 37
    assert pd2[2, 1, 0] == 39

    assert pd2[0, 1, 1] == 0
    assert pd2[1, 1, 1] == 0
    assert pd2[2, 1, 1] == 200

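The pedestal asserted above is, per pixel and per gain, the mean of the 14-bit ADC values of the frames that fall into that gain, with gains that never occur for a pixel left at 0. A small assumption-based NumPy sketch of that behaviour (again a reading aid, not the library code):

import numpy as np

def calculate_pedestal_reference(raw):
    # Illustrative: mean ADC value per gain and pixel; empty gain bins stay 0.
    gain_bits = raw >> 14                                     # 0, 1 or 3
    gain = np.where(gain_bits == 3, 2, gain_bits).astype(np.int64)
    adc = (raw & 0x3FFF).astype(np.float64)
    pd = np.zeros((3,) + raw.shape[1:], dtype=np.float64)
    for g in range(3):
        mask = gain == g
        counts = mask.sum(axis=0)
        sums = np.where(mask, adc, 0.0).sum(axis=0)
        # Divide only where at least one sample exists; other entries remain 0
        np.divide(sums, counts, out=pd[g], where=counts > 0)
    return pd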
def test_calculate_pedestal_g0(raw_data_3x2x2):
    pd = aare.calculate_pedestal_g0(raw_data_3x2x2)
    assert pd.shape == (2, 2)
    assert pd.dtype == np.float64
    assert pd[0, 0] == 200
    assert pd[1, 0] == 38
    assert pd[0, 1] == 0
    assert pd[1, 1] == 0

def test_calculate_pedestal_g0_float(raw_data_3x2x2):
    pd = aare.calculate_pedestal_g0_float(raw_data_3x2x2)
    assert pd.shape == (2, 2)
    assert pd.dtype == np.float32
    assert pd[0, 0] == 200
    assert pd[1, 0] == 38
    assert pd[0, 1] == 0
    assert pd[1, 1] == 0

def test_count_switching_pixels(raw_data_3x2x2):
    # Count the number of pixels that switched gain
    count = aare.count_switching_pixels(raw_data_3x2x2)
    assert count.shape == (2, 2)
    assert count.sum() == 8
    assert count[0, 0] == 0
    assert count[1, 0] == 2
    assert count[0, 1] == 3
    assert count[1, 1] == 3
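Given the fixture, the expected counts follow directly from the gain bits: pixel (0,0) never leaves gain 0, pixel (1,0) is outside gain 0 in 2 of its 3 samples, and the other two pixels in all 3, giving the total of 8. Under the same gain-bit assumption as above, the per-pixel count of switched samples reduces to roughly the following sketch (illustrative only, not aare's implementation):

import numpy as np

def count_switching_pixels_reference(raw):
    # Number of frames, per pixel, whose gain bits (top two bits) are non-zero
    return np.count_nonzero(raw >> 14, axis=0)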
@@ -2,7 +2,7 @@ import pytest
import numpy as np
from aare import JungfrauDataFile

@pytest.mark.files
@pytest.mark.withdata
def test_jfungfrau_dat_read_number_of_frames(test_data_path):
    with JungfrauDataFile(test_data_path / "dat/AldoJF500k_000000.dat") as dat_file:
        assert dat_file.total_frames == 24
@@ -14,7 +14,7 @@ def test_jfungfrau_dat_read_number_of_frames(test_data_path):
        assert dat_file.total_frames == 113


@pytest.mark.files
@pytest.mark.withdata
def test_jfungfrau_dat_read_number_of_file(test_data_path):
    with JungfrauDataFile(test_data_path / "dat/AldoJF500k_000000.dat") as dat_file:
        assert dat_file.n_files == 4
@@ -26,7 +26,7 @@ def test_jfungfrau_dat_read_number_of_file(test_data_path):
        assert dat_file.n_files == 7


@pytest.mark.files
@pytest.mark.withdata
def test_read_module(test_data_path):
    """
    Read all frames from the series of .dat files. Compare to canned data in npz format.
@@ -50,7 +50,7 @@ def test_read_module(test_data_path):
    assert np.all(ref_header == header)
    assert np.all(ref_data == data)

@pytest.mark.files
@pytest.mark.withdata
def test_read_half_module(test_data_path):

    # Read all frames from the .dat file
@@ -71,7 +71,7 @@ def test_read_half_module(test_data_path):
    assert np.all(ref_data == data)


@pytest.mark.files
@pytest.mark.withdata
def test_read_single_chip(test_data_path):

    # Read all frames from the .dat file
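The recurring change in these hunks replaces the files marker with withdata on tests that need the external data set, so they can be selected or skipped with pytest -m. How this repository registers the marker is not shown in the diff; a hypothetical registration in a conftest.py would look like:

# Hypothetical conftest.py snippet (not part of this diff): register the marker
# so `pytest -m withdata` runs only the data-dependent tests and
# `pytest -m "not withdata"` skips them.
def pytest_configure(config):
    config.addinivalue_line(
        "markers", "withdata: tests that require the external test data set"
    )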
Some files were not shown because too many files have changed in this diff.