Mirror of https://github.com/slsdetectorgroup/aare.git (synced 2025-06-14 08:17:13 +02:00)

Compare commits: fix/api_cl ... angle_cali (51 commits)
Commit SHAs:
c35f4a7746, ba8778cf44, 6bc8b0c4a7, 1369bc780e, 9cfe1ac5e6, 6f4cc219b7, 0d5c6fed61, 54f76100c2,
e04bf6be30, bd0ff3d7da, df1335529c, b94be4cbe8, 6328369ce9, 67b94eefb0, 81588fba3b, 276283ff14,
cf158e2dcd, 12ae1424fb, 6db201f397, d5226909fe, eb6862ff99, f06e722dce, 2e0424254c, 7b5e32a824,
86d343f5f5, 129e7e9f9d, 58c934d9cf, 4088b0889d, d5f8daf194, c6e8e5f6a1, b501c31e38, 326941e2b4,
84aafa75f6, 177459c98a, c49a2fdf8e, 14211047ff, acd9d5d487, d4050ec557, fca9d5d2fa, 1174f7f434,
2bb7d360bf, a90e532b21, 8d8182c632, 5f34ab6df1, 5c8a5099fd, 7c93632605, 54def26334, a59e9656be,
3f753ec900, 6e4db45b57, e1533282f1
@@ -1,18 +1,24 @@
name: Build on RHEL8

on:
  push:
  workflow_dispatch:

permissions:
  contents: read

jobs:
  buildh:
  build:
    runs-on: "ubuntu-latest"
    container:
      image: gitea.psi.ch/images/rhel8-developer-gitea-actions
    steps:
      - uses: actions/checkout@v4
      # workaround until actions/checkout@v4 is available for RH8
      # - uses: actions/checkout@v4
      - name: Clone repository
        run: |
          echo Cloning ${{ github.ref_name }}
          git clone https://${{secrets.GITHUB_TOKEN}}@gitea.psi.ch/${{ github.repository }}.git --branch=${{ github.ref_name }} .


      - name: Install dependencies
@@ -22,7 +28,7 @@ jobs:
      - name: Build library
        run: |
          mkdir build && cd build
          cmake .. -DAARE_PYTHON_BINDINGS=ON -DAARE_TESTS=ON
          cmake .. -DAARE_PYTHON_BINDINGS=ON -DAARE_TESTS=ON -DPython_FIND_VIRTUALENV=FIRST
          make -j 2

      - name: C++ unit tests
@@ -8,7 +8,7 @@ permissions:
  contents: read

jobs:
  buildh:
  build:
    runs-on: "ubuntu-latest"
    container:
      image: gitea.psi.ch/images/rhel9-developer-gitea-actions

.github/workflows/build_and_deploy_conda.yml (vendored), 8 lines changed
@@ -24,13 +24,13 @@ jobs:
      - uses: actions/checkout@v4

      - name: Get conda
        uses: conda-incubator/setup-miniconda@v3.0.4
        uses: conda-incubator/setup-miniconda@v3
        with:
          python-version: ${{ matrix.python-version }}
          environment-file: etc/dev-env.yml
          miniforge-version: latest
          channels: conda-forge

      - name: Prepare
        run: conda install conda-build=24.9 conda-verify pytest anaconda-client
          conda-remove-defaults: "true"

      - name: Enable upload
        run: conda config --set anaconda_upload yes

.github/workflows/build_conda.yml (vendored), 9 lines changed

@@ -24,14 +24,15 @@ jobs:
      - uses: actions/checkout@v4

      - name: Get conda
        uses: conda-incubator/setup-miniconda@v3.0.4
        uses: conda-incubator/setup-miniconda@v3
        with:
          python-version: ${{ matrix.python-version }}
          environment-file: etc/dev-env.yml
          miniforge-version: latest
          channels: conda-forge
          conda-remove-defaults: "true"

      - name: Prepare
        run: conda install conda-build=24.9 conda-verify pytest anaconda-client


      - name: Disable upload
        run: conda config --set anaconda_upload no

.github/workflows/build_wheel.yml (vendored, new file), 64 lines

@@ -0,0 +1,64 @@
name: Build wheel

on:
  workflow_dispatch:
  pull_request:
  push:
    branches:
      - main
  release:
    types:
      - published


jobs:
  build_wheels:
    name: Build wheels on ${{ matrix.os }}
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest,]

    steps:
      - uses: actions/checkout@v4

      - name: Build wheels
        run: pipx run cibuildwheel==2.23.0

      - uses: actions/upload-artifact@v4
        with:
          name: cibw-wheels-${{ matrix.os }}-${{ strategy.job-index }}
          path: ./wheelhouse/*.whl

  build_sdist:
    name: Build source distribution
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Build sdist
        run: pipx run build --sdist

      - uses: actions/upload-artifact@v4
        with:
          name: cibw-sdist
          path: dist/*.tar.gz

  upload_pypi:
    needs: [build_wheels, build_sdist]
    runs-on: ubuntu-latest
    environment: pypi
    permissions:
      id-token: write
    if: github.event_name == 'release' && github.event.action == 'published'
    # or, alternatively, upload to PyPI on every tag starting with 'v' (remove on: release above to use this)
    # if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v')
    steps:
      - uses: actions/download-artifact@v4
        with:
          # unpacks all CIBW artifacts into dist/
          pattern: cibw-*
          path: dist
          merge-multiple: true

      - uses: pypa/gh-action-pypi-publish@release/v1

.gitignore (vendored), 3 lines changed

@@ -17,7 +17,8 @@ Testing/
ctbDict.cpp
ctbDict.h


wheelhouse/
dist/

*.pyc
*/__pycache__/*
@@ -1,16 +1,29 @@
cmake_minimum_required(VERSION 3.14)
cmake_minimum_required(VERSION 3.15)

project(aare
    VERSION 1.0.0
    DESCRIPTION "Data processing library for PSI detectors"
    HOMEPAGE_URL "https://github.com/slsdetectorgroup/aare"
    LANGUAGES C CXX
)

# Read VERSION file into project version
set(VERSION_FILE "${CMAKE_CURRENT_SOURCE_DIR}/VERSION")
file(READ "${VERSION_FILE}" VERSION_CONTENT)
string(STRIP "${VERSION_CONTENT}" PROJECT_VERSION_STRING)
set(PROJECT_VERSION ${PROJECT_VERSION_STRING})

set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF)

execute_process(
    COMMAND git log -1 --format=%h
    WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
    OUTPUT_VARIABLE GIT_HASH
    OUTPUT_STRIP_TRAILING_WHITESPACE
)
message(STATUS "Building from git hash: ${GIT_HASH}")

if (${CMAKE_VERSION} VERSION_GREATER "3.24")
    cmake_policy(SET CMP0135 NEW) #Fetch content download timestamp
endif()
@ -49,6 +62,7 @@ option(AARE_FETCH_CATCH "Use FetchContent to download catch2" ON)
|
||||
option(AARE_FETCH_JSON "Use FetchContent to download nlohmann::json" ON)
|
||||
option(AARE_FETCH_ZMQ "Use FetchContent to download libzmq" ON)
|
||||
option(AARE_FETCH_LMFIT "Use FetchContent to download lmfit" ON)
|
||||
option(AARE_FETCH_HDF5 "Use FetchContent to download hdf5-devel" OFF)
|
||||
|
||||
|
||||
#Convenience option to use system libraries only (no FetchContent)
|
||||
@ -60,6 +74,7 @@ if(AARE_SYSTEM_LIBRARIES)
|
||||
set(AARE_FETCH_CATCH OFF CACHE BOOL "Disabled FetchContent for catch2" FORCE)
|
||||
set(AARE_FETCH_JSON OFF CACHE BOOL "Disabled FetchContent for nlohmann::json" FORCE)
|
||||
set(AARE_FETCH_ZMQ OFF CACHE BOOL "Disabled FetchContent for libzmq" FORCE)
|
||||
set(AARE_FETCH_HDF5 OFF CACHE BOOL "Disabled FetchContent for hdf5" FORCE)
|
||||
# Still fetch lmfit when setting AARE_SYSTEM_LIBRARIES since this is not available
|
||||
# on conda-forge
|
||||
endif()
|
||||
@ -206,6 +221,23 @@ else()
|
||||
find_package(nlohmann_json 3.11.3 REQUIRED)
|
||||
endif()
|
||||
|
||||
if(AARE_FETCH_HDF5)
|
||||
message(FATAL_ERROR "Fetching HDF5 via FetchContent is not supported here. Please install it via your system.
|
||||
For Ubuntu: sudo apt install libhdf5-dev
|
||||
For Red Hat: sudo dnf install hdf5-devel
|
||||
For MacOS: brew install hdf5")
|
||||
else()
|
||||
find_package(HDF5 QUIET COMPONENTS CXX)
|
||||
if (HDF5_FOUND)
|
||||
message(STATUS "Found HDF5: ${HDF5_INCLUDE_DIRS}")
|
||||
else()
|
||||
message(FATAL_ERROR "HDF5 was NOT found! Please install it via your system
|
||||
For Ubuntu: sudo apt install libhdf5-dev
|
||||
For Red Hat: sudo dnf install hdf5-devel
|
||||
For MacOS: brew install hdf5")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
include(GNUInstallDirs)
|
||||
|
||||
# If conda build, always set lib dir to 'lib'
|
||||
@ -318,10 +350,6 @@ if(AARE_ASAN)
|
||||
)
|
||||
endif()
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
if(AARE_TESTS)
|
||||
enable_testing()
|
||||
add_subdirectory(tests)
|
||||
@ -331,6 +359,7 @@ endif()
|
||||
###------------------------------------------------------------------------------------------
|
||||
|
||||
set(PUBLICHEADERS
|
||||
include/aare/AngleCalibration.hpp
|
||||
include/aare/ArrayExpr.hpp
|
||||
include/aare/CalculateEta.hpp
|
||||
include/aare/Cluster.hpp
|
||||
@ -345,10 +374,14 @@ set(PUBLICHEADERS
|
||||
include/aare/Fit.hpp
|
||||
include/aare/FileInterface.hpp
|
||||
include/aare/FilePtr.hpp
|
||||
include/aare/FlatField.hpp
|
||||
include/aare/Frame.hpp
|
||||
include/aare/GainMap.hpp
|
||||
include/aare/geo_helpers.hpp
|
||||
include/aare/Hdf5FileReader.hpp
|
||||
include/aare/JungfrauDataFile.hpp
|
||||
include/aare/MythenDetectorSpecifications.hpp
|
||||
include/aare/MythenFileReader.hpp
|
||||
include/aare/NDArray.hpp
|
||||
include/aare/NDView.hpp
|
||||
include/aare/NumpyFile.hpp
|
||||
@ -364,6 +397,7 @@ set(PUBLICHEADERS
|
||||
|
||||
|
||||
set(SourceFiles
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/AngleCalibration.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/CtbRawFile.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/defs.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/Dtype.cpp
|
||||
@ -381,9 +415,10 @@ set(SourceFiles
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/RawFile.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/RawSubFile.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/RawMasterFile.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/utils/task.cpp
|
||||
)
|
||||
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/utils/task.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/utils/ifstream_helpers.cpp
|
||||
)
|
||||
|
||||
add_library(aare_core STATIC ${SourceFiles})
|
||||
target_include_directories(aare_core PUBLIC
|
||||
@ -391,14 +426,19 @@ target_include_directories(aare_core PUBLIC
|
||||
"$<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>"
|
||||
)
|
||||
|
||||
set(THREADS_PREFER_PTHREAD_FLAG ON)
|
||||
find_package(Threads REQUIRED)
|
||||
|
||||
target_link_libraries(
|
||||
aare_core
|
||||
PUBLIC
|
||||
fmt::fmt
|
||||
nlohmann_json::nlohmann_json
|
||||
HDF5::HDF5
|
||||
${STD_FS_LIB} # from helpers.cmake
|
||||
PRIVATE
|
||||
aare_compiler_flags
|
||||
Threads::Threads
|
||||
$<BUILD_INTERFACE:lmfit>
|
||||
|
||||
)
|
||||
@ -415,11 +455,14 @@ endif()
|
||||
if(AARE_TESTS)
|
||||
set(TestSources
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/algorithm.test.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/AngleCalibration.test.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/defs.test.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/decode.test.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/Dtype.test.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/Frame.test.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/geo_helpers.test.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/RawMasterFile.test.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/Hdf5FileReader.test.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/NDArray.test.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/NDView.test.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFinder.test.cpp
|
||||
@ -427,8 +470,10 @@ if(AARE_TESTS)
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/Cluster.test.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/CalculateEta.test.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFile.test.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFinderMT.test.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/Pedestal.test.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/JungfrauDataFile.test.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/MythenFileReader.test.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.test.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyHelpers.test.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/RawFile.test.cpp
|
||||
|
@@ -1,28 +1,5 @@
python:
  - 3.11
  - 3.11
  - 3.11
  - 3.12
  - 3.12
  - 3.12
  - 3.13



numpy:
  - 1.26
  - 2.0
  - 2.1
  - 1.26
  - 2.0
  - 2.1
  - 2.1


zip_keys:
  - python
  - numpy

pin_run_as_build:
  numpy: x.x
  python: x.x
@ -1,10 +1,10 @@
|
||||
source:
|
||||
path: ../
|
||||
|
||||
{% set version = load_file_regex(load_file = 'VERSION', regex_pattern = '(\d+(?:\.\d+)*(?:[\+\w\.]+))').group(1) %}
|
||||
package:
|
||||
name: aare
|
||||
version: 2025.4.1 #TODO! how to not duplicate this?
|
||||
|
||||
|
||||
|
||||
|
||||
version: {{version}}
|
||||
|
||||
source:
|
||||
path: ..
|
||||
@ -12,44 +12,39 @@ source:
|
||||
build:
|
||||
number: 0
|
||||
script:
|
||||
- unset CMAKE_GENERATOR && {{ PYTHON }} -m pip install . -vv # [not win]
|
||||
- {{ PYTHON }} -m pip install . -vv # [win]
|
||||
- unset CMAKE_GENERATOR && {{ PYTHON }} -m pip install . -vv
|
||||
|
||||
requirements:
|
||||
build:
|
||||
- python {{python}}
|
||||
- numpy {{ numpy }}
|
||||
- {{ compiler('cxx') }}
|
||||
|
||||
|
||||
host:
|
||||
- cmake
|
||||
- ninja
|
||||
- python {{python}}
|
||||
- numpy {{ numpy }}
|
||||
|
||||
host:
|
||||
- python
|
||||
- pip
|
||||
- numpy=2.1
|
||||
- scikit-build-core
|
||||
- pybind11 >=2.13.0
|
||||
- fmt
|
||||
- zeromq
|
||||
- nlohmann_json
|
||||
- catch2
|
||||
- matplotlib # needed in host to solve the environment for run
|
||||
|
||||
run:
|
||||
- python {{python}}
|
||||
- numpy {{ numpy }}
|
||||
- python
|
||||
- {{ pin_compatible('numpy') }}
|
||||
- matplotlib
|
||||
|
||||
|
||||
|
||||
test:
|
||||
imports:
|
||||
- aare
|
||||
# requires:
|
||||
# - pytest
|
||||
# source_files:
|
||||
# - tests
|
||||
# commands:
|
||||
# - pytest tests
|
||||
requires:
|
||||
- pytest
|
||||
- boost-histogram
|
||||
source_files:
|
||||
- python/tests
|
||||
commands:
|
||||
- python -m pytest python/tests
|
||||
|
||||
about:
|
||||
summary: An example project built with pybind11 and scikit-build.
|
||||
# license_file: LICENSE
|
||||
summary: Data analysis library for hybrid pixel detectors from PSI
|
||||
|
@@ -3,13 +3,11 @@ channels:
  - conda-forge
dependencies:
  - anaconda-client
  - conda-build
  - doxygen
  - sphinx=7.1.2
  - breathe
  - pybind11
  - sphinx_rtd_theme
  - furo
  - nlohmann_json
  - zeromq
  - fmt
  - numpy

include/aare/AngleCalibration.hpp (new file), 164 lines
@ -0,0 +1,164 @@
|
||||
#pragma once
|
||||
#include <algorithm>
|
||||
#include <cmath>
|
||||
#include <cstdint>
|
||||
|
||||
#include <fstream>
|
||||
#include <iomanip>
|
||||
#include <iostream>
|
||||
#include <memory>
|
||||
#include <sstream>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
#include "FlatField.hpp"
|
||||
#include "MythenDetectorSpecifications.hpp"
|
||||
#include "MythenFileReader.hpp"
|
||||
#include "NDArray.hpp"
|
||||
|
||||
namespace aare {
|
||||
|
||||
using parameters =
|
||||
std::tuple<std::vector<double>, std::vector<double>, std::vector<double>>;
|
||||
|
||||
class AngleCalibration {
|
||||
|
||||
public:
|
||||
AngleCalibration(
|
||||
std::shared_ptr<MythenDetectorSpecifications> mythen_detector_,
|
||||
std::shared_ptr<FlatField> flat_field_,
|
||||
std::shared_ptr<MythenFileReader> mythen_file_reader_);
|
||||
|
||||
/** set the histogram bin width [degrees] */
|
||||
void set_histogram_bin_width(double bin_width);
|
||||
|
||||
double get_histogram_bin_width() const;
|
||||
|
||||
ssize_t get_new_num_bins() const;
|
||||
|
||||
/** reads the historical Detector Group (DG) parameters from file **/
|
||||
void read_initial_calibration_from_file(const std::string &filename);
|
||||
|
||||
std::vector<double> get_centers() const;
|
||||
std::vector<double> get_conversions() const;
|
||||
|
||||
std::vector<double> get_offsets() const;
|
||||
|
||||
NDView<double, 1> get_new_photon_counts() const;
|
||||
|
||||
NDView<double, 1> get_new_statistical_errors() const;
|
||||
|
||||
/** converts DG parameters to easy EE parameters, e.g. geometric
|
||||
* parameters */
|
||||
parameters convert_to_EE_parameters() const;
|
||||
|
||||
std::tuple<double, double, double>
|
||||
convert_to_EE_parameters(const size_t module_index) const;
|
||||
|
||||
std::tuple<double, double, double>
|
||||
convert_to_EE_parameters(const double center, const double conversion,
|
||||
const double offset) const;
|
||||
|
||||
/** converts DG parameters to BC parameters e.g. best computing
|
||||
* parameters */
|
||||
parameters convert_to_BC_parameters() const;
|
||||
|
||||
/**
|
||||
* calculates new histogram with fixed sized angle bins
|
||||
* for several acquisitions at different detector angles for given frame
|
||||
* indices
|
||||
* @param start_frame_index, end_frame_index gives range of frames
|
||||
*/
|
||||
void
|
||||
calculate_fixed_bin_angle_width_histogram(const size_t start_frame_index,
|
||||
const size_t end_frame_index);
|
||||
|
||||
void write_to_file(const std::string &filename,
|
||||
const bool store_nonzero_bins = false,
|
||||
const std::filesystem::path &filepath =
|
||||
std::filesystem::current_path()) const;
|
||||
|
||||
/** calculates diffraction angle from EE module parameters (used in
|
||||
* Beer's Law)
|
||||
* @param strip_index local strip index of module
|
||||
*/
|
||||
double diffraction_angle_from_EE_parameters(
|
||||
const double module_center_distance, const double normal_distance,
|
||||
const double angle, const size_t strip_index,
|
||||
const double distance_to_strip = 0) const;
|
||||
|
||||
/** calculates diffraction angle from EE module parameters (used in
|
||||
* Beer's Law)
|
||||
* @param center module center
|
||||
* @param conversion module conversion
|
||||
* @param offset module offset
|
||||
* @param strip_index local strip index of module
|
||||
* @param distance_to_strip distance to strip given by strip_index and
|
||||
* module -> note needs to be small enough to be in the respective module
|
||||
*/
|
||||
double diffraction_angle_from_DG_parameters(
|
||||
const double center, const double conversion, const double offset,
|
||||
const size_t strip_index, const double distance_to_strip = 0) const;
|
||||
|
||||
/** calculated the strip width expressed as angle [degrees]
|
||||
* @param strip_index local strip index of module
|
||||
*/
|
||||
double angular_strip_width_from_DG_parameters(
|
||||
const double center, const double conversion, const double offset,
|
||||
const size_t local_strip_index) const;
|
||||
|
||||
double angular_strip_width_from_EE_parameters(
|
||||
const double module_center_distance, const double normal_distance,
|
||||
const double angle, const size_t local_strip_index) const;
|
||||
|
||||
protected:
|
||||
/** converts global strip index to local strip index of that module */
|
||||
size_t global_to_local_strip_index_conversion(
|
||||
const size_t global_strip_index) const;
|
||||
|
||||
/**
|
||||
* redistributes photon counts with of histogram using one bin per strip
|
||||
* to histogram with fixed size angle bins
|
||||
* @param frame MythenFrame storing data from image
|
||||
* @param bin_counts accumulate new photon counts
|
||||
* @param new_statistical_weights accumulate new statistical weights
|
||||
* @param new_errors accumulate new_errors
|
||||
*/
|
||||
void redistribute_photon_counts_to_fixed_angle_bins(
|
||||
const MythenFrame &frame, NDView<double, 1> bin_counts,
|
||||
NDView<double, 1> new_statistical_weights, NDView<double, 1> new_errors,
|
||||
NDView<double, 1> inverse_nromalized_flatfield) const;
|
||||
|
||||
private:
|
||||
// TODO: Design maybe have a struct with three vectors, store all three
|
||||
// sets of parameters as member variables
|
||||
|
||||
// TODO: check if interpretation and units are correct
|
||||
// historical DG parameters
|
||||
// TODO change to NDArray
|
||||
std::vector<double> centers; // orthogonal projection of sample onto
|
||||
// detector (given in strip number) [mm]
|
||||
// D/pitch
|
||||
std::vector<double> conversions; // pitch/(normal distance from sample
|
||||
// to detector (R)) [mm]
|
||||
// //used for easy conversion
|
||||
std::vector<double>
|
||||
offsets; // position of strip zero relative to sample [degrees] phi
|
||||
// - 180/pi*D/R TODO: expected an arcsin(D/R)?
|
||||
|
||||
std::shared_ptr<MythenDetectorSpecifications> mythen_detector;
|
||||
|
||||
std::shared_ptr<FlatField> flat_field;
|
||||
|
||||
NDArray<double, 1> new_photon_counts;
|
||||
NDArray<double, 1> new_photon_count_errors;
|
||||
|
||||
double histogram_bin_width = 0.0036; // [degrees]
|
||||
|
||||
ssize_t num_bins{};
|
||||
|
||||
std::shared_ptr<MythenFileReader>
|
||||
mythen_file_reader; // TODO replace by FileInterface ptr
|
||||
};
|
||||
|
||||
} // namespace aare
|
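The new AngleCalibration header above is declaration-only in this diff. Below is a minimal driving sketch: the constructor arguments for MythenDetectorSpecifications, FlatField and MythenFileReader are assumptions (their headers are not part of this compare view), while the AngleCalibration calls themselves follow the declarations above.

```cpp
// Sketch only: collaborator constructors are placeholders, not the real API.
#include <memory>

#include "aare/AngleCalibration.hpp"

int main() {
    using namespace aare;

    // Assumed construction of the three collaborators (signatures not shown in this diff).
    auto detector = std::make_shared<MythenDetectorSpecifications>();
    auto flat_field = std::make_shared<FlatField>(detector);
    auto reader = std::make_shared<MythenFileReader>("/path/to/acquisitions");

    AngleCalibration calib(detector, flat_field, reader);

    // Historical DG parameters (centers, conversions, offsets) read from file.
    calib.read_initial_calibration_from_file("initial_calibration.off");

    // Fixed-size angle bins; 0.0036 degrees is the default declared in the header.
    calib.set_histogram_bin_width(0.0036);

    // Accumulate the fixed-bin histogram over a range of acquisition frames.
    calib.calculate_fixed_bin_angle_width_histogram(0, 100);

    // Redistributed photon counts, one value per fixed angle bin.
    NDView<double, 1> counts = calib.get_new_photon_counts();
    (void)counts;

    // Optionally persist only the non-empty bins.
    calib.write_to_file("fixed_bin_histogram.txt", /*store_nonzero_bins=*/true);
    return 0;
}
```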
@ -1,22 +1,24 @@
|
||||
#pragma once
|
||||
#include <cstdint> //int64_t
|
||||
#include <cstddef> //size_t
|
||||
#include <cstdint>
|
||||
#include <cstddef>
|
||||
#include <array>
|
||||
|
||||
#include <cassert>
|
||||
#include "aare/defs.hpp"
|
||||
|
||||
|
||||
namespace aare {
|
||||
|
||||
template <typename E, int64_t Ndim> class ArrayExpr {
|
||||
template <typename E, ssize_t Ndim> class ArrayExpr {
|
||||
public:
|
||||
static constexpr bool is_leaf = false;
|
||||
|
||||
auto operator[](size_t i) const { return static_cast<E const &>(*this)[i]; }
|
||||
auto operator()(size_t i) const { return static_cast<E const &>(*this)[i]; }
|
||||
auto size() const { return static_cast<E const &>(*this).size(); }
|
||||
std::array<int64_t, Ndim> shape() const { return static_cast<E const &>(*this).shape(); }
|
||||
std::array<ssize_t, Ndim> shape() const { return static_cast<E const &>(*this).shape(); }
|
||||
};
|
||||
|
||||
template <typename A, typename B, int64_t Ndim>
|
||||
template <typename A, typename B, ssize_t Ndim>
|
||||
class ArrayAdd : public ArrayExpr<ArrayAdd<A, B, Ndim>, Ndim> {
|
||||
const A &arr1_;
|
||||
const B &arr2_;
|
||||
@ -27,10 +29,10 @@ class ArrayAdd : public ArrayExpr<ArrayAdd<A, B, Ndim>, Ndim> {
|
||||
}
|
||||
auto operator[](int i) const { return arr1_[i] + arr2_[i]; }
|
||||
size_t size() const { return arr1_.size(); }
|
||||
std::array<int64_t, Ndim> shape() const { return arr1_.shape(); }
|
||||
std::array<ssize_t, Ndim> shape() const { return arr1_.shape(); }
|
||||
};
|
||||
|
||||
template <typename A, typename B, int64_t Ndim>
|
||||
template <typename A, typename B, ssize_t Ndim>
|
||||
class ArraySub : public ArrayExpr<ArraySub<A, B, Ndim>, Ndim> {
|
||||
const A &arr1_;
|
||||
const B &arr2_;
|
||||
@ -41,10 +43,10 @@ class ArraySub : public ArrayExpr<ArraySub<A, B, Ndim>, Ndim> {
|
||||
}
|
||||
auto operator[](int i) const { return arr1_[i] - arr2_[i]; }
|
||||
size_t size() const { return arr1_.size(); }
|
||||
std::array<int64_t, Ndim> shape() const { return arr1_.shape(); }
|
||||
std::array<ssize_t, Ndim> shape() const { return arr1_.shape(); }
|
||||
};
|
||||
|
||||
template <typename A, typename B, int64_t Ndim>
|
||||
template <typename A, typename B, ssize_t Ndim>
|
||||
class ArrayMul : public ArrayExpr<ArrayMul<A, B, Ndim>,Ndim> {
|
||||
const A &arr1_;
|
||||
const B &arr2_;
|
||||
@ -55,10 +57,10 @@ class ArrayMul : public ArrayExpr<ArrayMul<A, B, Ndim>,Ndim> {
|
||||
}
|
||||
auto operator[](int i) const { return arr1_[i] * arr2_[i]; }
|
||||
size_t size() const { return arr1_.size(); }
|
||||
std::array<int64_t, Ndim> shape() const { return arr1_.shape(); }
|
||||
std::array<ssize_t, Ndim> shape() const { return arr1_.shape(); }
|
||||
};
|
||||
|
||||
template <typename A, typename B, int64_t Ndim>
|
||||
template <typename A, typename B, ssize_t Ndim>
|
||||
class ArrayDiv : public ArrayExpr<ArrayDiv<A, B, Ndim>, Ndim> {
|
||||
const A &arr1_;
|
||||
const B &arr2_;
|
||||
@ -69,27 +71,27 @@ class ArrayDiv : public ArrayExpr<ArrayDiv<A, B, Ndim>, Ndim> {
|
||||
}
|
||||
auto operator[](int i) const { return arr1_[i] / arr2_[i]; }
|
||||
size_t size() const { return arr1_.size(); }
|
||||
std::array<int64_t, Ndim> shape() const { return arr1_.shape(); }
|
||||
std::array<ssize_t, Ndim> shape() const { return arr1_.shape(); }
|
||||
};
|
||||
|
||||
|
||||
|
||||
template <typename A, typename B, int64_t Ndim>
|
||||
template <typename A, typename B, ssize_t Ndim>
|
||||
auto operator+(const ArrayExpr<A, Ndim> &arr1, const ArrayExpr<B, Ndim> &arr2) {
|
||||
return ArrayAdd<ArrayExpr<A, Ndim>, ArrayExpr<B, Ndim>, Ndim>(arr1, arr2);
|
||||
}
|
||||
|
||||
template <typename A, typename B, int64_t Ndim>
|
||||
template <typename A, typename B, ssize_t Ndim>
|
||||
auto operator-(const ArrayExpr<A,Ndim> &arr1, const ArrayExpr<B, Ndim> &arr2) {
|
||||
return ArraySub<ArrayExpr<A, Ndim>, ArrayExpr<B, Ndim>, Ndim>(arr1, arr2);
|
||||
}
|
||||
|
||||
template <typename A, typename B, int64_t Ndim>
|
||||
template <typename A, typename B, ssize_t Ndim>
|
||||
auto operator*(const ArrayExpr<A, Ndim> &arr1, const ArrayExpr<B, Ndim> &arr2) {
|
||||
return ArrayMul<ArrayExpr<A, Ndim>, ArrayExpr<B, Ndim>, Ndim>(arr1, arr2);
|
||||
}
|
||||
|
||||
template <typename A, typename B, int64_t Ndim>
|
||||
template <typename A, typename B, ssize_t Ndim>
|
||||
auto operator/(const ArrayExpr<A, Ndim> &arr1, const ArrayExpr<B, Ndim> &arr2) {
|
||||
return ArrayDiv<ArrayExpr<A, Ndim>, ArrayExpr<B, Ndim>, Ndim>(arr1, arr2);
|
||||
}
|
||||
|
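The ArrayExpr changes above only swap int64_t for ssize_t, but the surrounding CRTP expression-template pattern is easy to miss in diff form. The condensed, self-contained sketch below mirrors that pattern (names simplified, 1-D only, no shape()) to show how operator+ builds a lazy node instead of allocating a temporary array; it is an illustration, not aare code.

```cpp
#include <array>
#include <cstddef>
#include <iostream>

// Base expression: forwards indexing to the concrete type via CRTP,
// exactly like aare::ArrayExpr does.
template <typename E> struct Expr {
    double operator[](std::size_t i) const { return static_cast<const E &>(*this)[i]; }
    std::size_t size() const { return static_cast<const E &>(*this).size(); }
};

// A concrete "leaf" holding real storage.
struct Vec3 : Expr<Vec3> {
    std::array<double, 3> v{};
    double operator[](std::size_t i) const { return v[i]; }
    std::size_t size() const { return v.size(); }
};

// Node representing a pending element-wise addition (like ArrayAdd).
template <typename A, typename B> struct Add : Expr<Add<A, B>> {
    const A &a;
    const B &b;
    Add(const A &a_, const B &b_) : a(a_), b(b_) {}
    double operator[](std::size_t i) const { return a[i] + b[i]; } // evaluated lazily
    std::size_t size() const { return a.size(); }
};

template <typename A, typename B>
auto operator+(const Expr<A> &a, const Expr<B> &b) {
    return Add<Expr<A>, Expr<B>>(a, b); // no temporary array is created here
}

int main() {
    Vec3 a, b;
    a.v = {1.0, 2.0, 3.0};
    b.v = {10.0, 20.0, 30.0};
    auto sum = a + b;               // expression node, not a materialized array
    for (std::size_t i = 0; i < sum.size(); ++i)
        std::cout << sum[i] << ' '; // prints 11 22 33
    std::cout << '\n';
    return 0;
}
```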
@ -6,14 +6,14 @@
|
||||
|
||||
namespace aare {
|
||||
|
||||
typedef enum {
|
||||
enum class corner : int {
|
||||
cBottomLeft = 0,
|
||||
cBottomRight = 1,
|
||||
cTopLeft = 2,
|
||||
cTopRight = 3
|
||||
} corner;
|
||||
};
|
||||
|
||||
typedef enum {
|
||||
enum class pixel : int {
|
||||
pBottomLeft = 0,
|
||||
pBottom = 1,
|
||||
pBottomRight = 2,
|
||||
@ -23,7 +23,7 @@ typedef enum {
|
||||
pTopLeft = 6,
|
||||
pTop = 7,
|
||||
pTopRight = 8
|
||||
} pixel;
|
||||
};
|
||||
|
||||
template <typename T> struct Eta2 {
|
||||
double x;
|
||||
@ -33,7 +33,7 @@ template <typename T> struct Eta2 {
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Calculate the eta2 values for all clusters in a Clsutervector
|
||||
* @brief Calculate the eta2 values for all clusters in a Clustervector
|
||||
*/
|
||||
template <typename ClusterType,
|
||||
typename = std::enable_if_t<is_cluster_v<ClusterType>>>
|
||||
@ -41,7 +41,7 @@ NDArray<double, 2> calculate_eta2(const ClusterVector<ClusterType> &clusters) {
|
||||
NDArray<double, 2> eta2({static_cast<int64_t>(clusters.size()), 2});
|
||||
|
||||
for (size_t i = 0; i < clusters.size(); i++) {
|
||||
auto e = calculate_eta2(clusters.at(i));
|
||||
auto e = calculate_eta2(clusters[i]);
|
||||
eta2(i, 0) = e.x;
|
||||
eta2(i, 1) = e.y;
|
||||
}
|
||||
@ -64,31 +64,79 @@ calculate_eta2(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cl) {
|
||||
eta.sum = max_sum.first;
|
||||
auto c = max_sum.second;
|
||||
|
||||
size_t cluster_center_index =
|
||||
(ClusterSizeX / 2) + (ClusterSizeY / 2) * ClusterSizeX;
|
||||
|
||||
size_t index_bottom_left_max_2x2_subcluster =
|
||||
(int(c / (ClusterSizeX - 1))) * ClusterSizeX + c % (ClusterSizeX - 1);
|
||||
|
||||
if ((cl.data[index_bottom_left_max_2x2_subcluster] +
|
||||
cl.data[index_bottom_left_max_2x2_subcluster + 1]) != 0)
|
||||
eta.x = static_cast<double>(
|
||||
cl.data[index_bottom_left_max_2x2_subcluster + 1]) /
|
||||
static_cast<double>(
|
||||
(cl.data[index_bottom_left_max_2x2_subcluster] +
|
||||
cl.data[index_bottom_left_max_2x2_subcluster + 1]));
|
||||
// check that cluster center is in max subcluster
|
||||
if (cluster_center_index != index_bottom_left_max_2x2_subcluster &&
|
||||
cluster_center_index != index_bottom_left_max_2x2_subcluster + 1 &&
|
||||
cluster_center_index !=
|
||||
index_bottom_left_max_2x2_subcluster + ClusterSizeX &&
|
||||
cluster_center_index !=
|
||||
index_bottom_left_max_2x2_subcluster + ClusterSizeX + 1)
|
||||
throw std::runtime_error("Photon center is not in max 2x2_subcluster");
|
||||
|
||||
if ((cl.data[index_bottom_left_max_2x2_subcluster] +
|
||||
cl.data[index_bottom_left_max_2x2_subcluster + ClusterSizeX]) != 0)
|
||||
eta.y =
|
||||
static_cast<double>(
|
||||
cl.data[index_bottom_left_max_2x2_subcluster + ClusterSizeX]) /
|
||||
static_cast<double>(
|
||||
(cl.data[index_bottom_left_max_2x2_subcluster] +
|
||||
cl.data[index_bottom_left_max_2x2_subcluster + ClusterSizeX]));
|
||||
if ((cluster_center_index - index_bottom_left_max_2x2_subcluster) %
|
||||
ClusterSizeX ==
|
||||
0) {
|
||||
if ((cl.data[cluster_center_index + 1] +
|
||||
cl.data[cluster_center_index]) != 0)
|
||||
|
||||
eta.x = static_cast<double>(cl.data[cluster_center_index + 1]) /
|
||||
static_cast<double>((cl.data[cluster_center_index + 1] +
|
||||
cl.data[cluster_center_index]));
|
||||
} else {
|
||||
if ((cl.data[cluster_center_index] +
|
||||
cl.data[cluster_center_index - 1]) != 0)
|
||||
|
||||
eta.x = static_cast<double>(cl.data[cluster_center_index]) /
|
||||
static_cast<double>((cl.data[cluster_center_index - 1] +
|
||||
cl.data[cluster_center_index]));
|
||||
}
|
||||
if ((cluster_center_index - index_bottom_left_max_2x2_subcluster) /
|
||||
ClusterSizeX <
|
||||
1) {
|
||||
assert(cluster_center_index + ClusterSizeX <
|
||||
ClusterSizeX * ClusterSizeY); // suppress warning
|
||||
if ((cl.data[cluster_center_index] +
|
||||
cl.data[cluster_center_index + ClusterSizeX]) != 0)
|
||||
eta.y = static_cast<double>(
|
||||
cl.data[cluster_center_index + ClusterSizeX]) /
|
||||
static_cast<double>(
|
||||
(cl.data[cluster_center_index] +
|
||||
cl.data[cluster_center_index + ClusterSizeX]));
|
||||
} else {
|
||||
if ((cl.data[cluster_center_index] +
|
||||
cl.data[cluster_center_index - ClusterSizeX]) != 0)
|
||||
eta.y = static_cast<double>(cl.data[cluster_center_index]) /
|
||||
static_cast<double>(
|
||||
(cl.data[cluster_center_index] +
|
||||
cl.data[cluster_center_index - ClusterSizeX]));
|
||||
}
|
||||
|
||||
eta.c = c; // TODO only supported for 2x2 and 3x3 clusters -> at least no
|
||||
// underlying enum class
|
||||
return eta;
|
||||
}
|
||||
|
||||
// TODO! Look up eta2 calculation - photon center should be top right corner
|
||||
template <typename T>
|
||||
Eta2<T> calculate_eta2(const Cluster<T, 2, 2, int16_t> &cl) {
|
||||
Eta2<T> eta{};
|
||||
|
||||
if ((cl.data[0] + cl.data[1]) != 0)
|
||||
eta.x = static_cast<double>(cl.data[1]) / (cl.data[0] + cl.data[1]);
|
||||
if ((cl.data[0] + cl.data[2]) != 0)
|
||||
eta.y = static_cast<double>(cl.data[2]) / (cl.data[0] + cl.data[2]);
|
||||
eta.sum = cl.sum();
|
||||
eta.c = static_cast<int>(corner::cBottomLeft); // TODO! This is not correct,
|
||||
// but need to put something
|
||||
return eta;
|
||||
}
|
||||
|
||||
// calculates Eta3 for 3x3 cluster based on code from analyze_cluster
|
||||
// TODO only supported for 3x3 Clusters
|
||||
template <typename T> Eta2<T> calculate_eta3(const Cluster<T, 3, 3> &cl) {
|
||||
|
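For the 2x2 specialization shown above, the eta values reduce to simple charge-sharing ratios: eta_x = d1 / (d0 + d1) and eta_y = d2 / (d0 + d2). A standalone check with plain arrays and arbitrary example charges:

```cpp
// Re-statement of the 2x2 eta2 ratios from the specialization above; the 2x2
// cluster is stored row-major as {d0, d1, d2, d3}, with eta_x pairing d0/d1
// (horizontal neighbours) and eta_y pairing d0/d2 (vertical neighbours).
#include <array>
#include <iostream>

int main() {
    std::array<int, 4> d{100, 50, 30, 10}; // example charges in ADU
    double eta_x = (d[0] + d[1]) ? static_cast<double>(d[1]) / (d[0] + d[1]) : 0.0;
    double eta_y = (d[0] + d[2]) ? static_cast<double>(d[2]) / (d[0] + d[2]) : 0.0;
    std::cout << "eta_x=" << eta_x << " eta_y=" << eta_y << '\n'; // 0.333333 and 0.230769
    return 0;
}
```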
@ -16,80 +16,61 @@
|
||||
|
||||
namespace aare {
|
||||
|
||||
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType = int16_t>
|
||||
constexpr bool is_valid_cluster =
|
||||
std::is_arithmetic_v<T> && std::is_integral_v<CoordType> &&
|
||||
(ClusterSizeX > 0) && (ClusterSizeY > 0);
|
||||
|
||||
// requires clause c++20 maybe update
|
||||
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType = int16_t,
|
||||
typename Enable = std::enable_if_t<
|
||||
is_valid_cluster<T, ClusterSizeX, ClusterSizeY, CoordType>>>
|
||||
typename CoordType = int16_t>
|
||||
struct Cluster {
|
||||
|
||||
static_assert(std::is_arithmetic_v<T>, "T needs to be an arithmetic type");
|
||||
static_assert(std::is_integral_v<CoordType>,
|
||||
"CoordType needs to be an integral type");
|
||||
static_assert(ClusterSizeX > 0 && ClusterSizeY > 0,
|
||||
"Cluster sizes must be bigger than zero");
|
||||
|
||||
CoordType x;
|
||||
CoordType y;
|
||||
T data[ClusterSizeX * ClusterSizeY];
|
||||
std::array<T, ClusterSizeX * ClusterSizeY> data;
|
||||
|
||||
T sum() const {
|
||||
return std::accumulate(data, data + ClusterSizeX * ClusterSizeY, 0);
|
||||
}
|
||||
static constexpr uint8_t cluster_size_x = ClusterSizeX;
|
||||
static constexpr uint8_t cluster_size_y = ClusterSizeY;
|
||||
using value_type = T;
|
||||
using coord_type = CoordType;
|
||||
|
||||
T sum() const { return std::accumulate(data.begin(), data.end(), T{}); }
|
||||
|
||||
std::pair<T, int> max_sum_2x2() const {
|
||||
|
||||
constexpr size_t num_2x2_subclusters =
|
||||
(ClusterSizeX - 1) * (ClusterSizeY - 1);
|
||||
if constexpr (cluster_size_x == 3 && cluster_size_y == 3) {
|
||||
std::array<T, 4> sum_2x2_subclusters;
|
||||
sum_2x2_subclusters[0] = data[0] + data[1] + data[3] + data[4];
|
||||
sum_2x2_subclusters[1] = data[1] + data[2] + data[4] + data[5];
|
||||
sum_2x2_subclusters[2] = data[3] + data[4] + data[6] + data[7];
|
||||
sum_2x2_subclusters[3] = data[4] + data[5] + data[7] + data[8];
|
||||
int index = std::max_element(sum_2x2_subclusters.begin(),
|
||||
sum_2x2_subclusters.end()) -
|
||||
sum_2x2_subclusters.begin();
|
||||
return std::make_pair(sum_2x2_subclusters[index], index);
|
||||
} else if constexpr (cluster_size_x == 2 && cluster_size_y == 2) {
|
||||
return std::make_pair(data[0] + data[1] + data[2] + data[3], 0);
|
||||
} else {
|
||||
constexpr size_t num_2x2_subclusters =
|
||||
(ClusterSizeX - 1) * (ClusterSizeY - 1);
|
||||
|
||||
std::array<T, num_2x2_subclusters> sum_2x2_subcluster;
|
||||
for (size_t i = 0; i < ClusterSizeY - 1; ++i) {
|
||||
for (size_t j = 0; j < ClusterSizeX - 1; ++j)
|
||||
sum_2x2_subcluster[i * (ClusterSizeX - 1) + j] =
|
||||
data[i * ClusterSizeX + j] +
|
||||
data[i * ClusterSizeX + j + 1] +
|
||||
data[(i + 1) * ClusterSizeX + j] +
|
||||
data[(i + 1) * ClusterSizeX + j + 1];
|
||||
std::array<T, num_2x2_subclusters> sum_2x2_subcluster;
|
||||
for (size_t i = 0; i < ClusterSizeY - 1; ++i) {
|
||||
for (size_t j = 0; j < ClusterSizeX - 1; ++j)
|
||||
sum_2x2_subcluster[i * (ClusterSizeX - 1) + j] =
|
||||
data[i * ClusterSizeX + j] +
|
||||
data[i * ClusterSizeX + j + 1] +
|
||||
data[(i + 1) * ClusterSizeX + j] +
|
||||
data[(i + 1) * ClusterSizeX + j + 1];
|
||||
}
|
||||
|
||||
int index = std::max_element(sum_2x2_subcluster.begin(),
|
||||
sum_2x2_subcluster.end()) -
|
||||
sum_2x2_subcluster.begin();
|
||||
return std::make_pair(sum_2x2_subcluster[index], index);
|
||||
}
|
||||
|
||||
int index = std::max_element(sum_2x2_subcluster.begin(),
|
||||
sum_2x2_subcluster.end()) -
|
||||
sum_2x2_subcluster.begin();
|
||||
return std::make_pair(sum_2x2_subcluster[index], index);
|
||||
}
|
||||
};
|
||||
|
||||
// Specialization for 2x2 clusters (only one sum exists)
|
||||
template <typename T> struct Cluster<T, 2, 2, int16_t> {
|
||||
int16_t x;
|
||||
int16_t y;
|
||||
T data[4];
|
||||
|
||||
T sum() const { return std::accumulate(data, data + 4, 0); }
|
||||
|
||||
std::pair<T, int> max_sum_2x2() const {
|
||||
return std::make_pair(data[0] + data[1] + data[2] + data[3],
|
||||
0); // Only one possible 2x2 sum
|
||||
}
|
||||
};
|
||||
|
||||
// Specialization for 3x3 clusters
|
||||
template <typename T> struct Cluster<T, 3, 3, int16_t> {
|
||||
int16_t x;
|
||||
int16_t y;
|
||||
T data[9];
|
||||
|
||||
T sum() const { return std::accumulate(data, data + 9, 0); }
|
||||
|
||||
std::pair<T, int> max_sum_2x2() const {
|
||||
std::array<T, 4> sum_2x2_subclusters;
|
||||
sum_2x2_subclusters[0] = data[0] + data[1] + data[3] + data[4];
|
||||
sum_2x2_subclusters[1] = data[1] + data[2] + data[4] + data[5];
|
||||
sum_2x2_subclusters[2] = data[3] + data[4] + data[6] + data[7];
|
||||
sum_2x2_subclusters[3] = data[4] + data[5] + data[7] + data[8];
|
||||
int index = std::max_element(sum_2x2_subclusters.begin(),
|
||||
sum_2x2_subclusters.end()) -
|
||||
sum_2x2_subclusters.begin();
|
||||
return std::make_pair(sum_2x2_subclusters[index], index);
|
||||
}
|
||||
};
|
||||
|
||||
@ -102,20 +83,4 @@ struct is_cluster<Cluster<T, X, Y, CoordType>> : std::true_type {}; // Cluster
|
||||
|
||||
template <typename T> constexpr bool is_cluster_v = is_cluster<T>::value;
|
||||
|
||||
template <typename ClusterType,
|
||||
typename = std::enable_if_t<is_cluster_v<ClusterType>>>
|
||||
struct extract_template_arguments; // Forward declaration
|
||||
|
||||
// helper struct to extract template argument
|
||||
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType>
|
||||
struct extract_template_arguments<
|
||||
Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> {
|
||||
|
||||
using value_type = T;
|
||||
static constexpr int cluster_size_x = ClusterSizeX;
|
||||
static constexpr int cluster_size_y = ClusterSizeY;
|
||||
using coordtype = CoordType;
|
||||
};
|
||||
|
||||
} // namespace aare
|
||||
|
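A short usage sketch for the reworked Cluster template above, using only members declared in this diff (std::array storage, sum(), max_sum_2x2()); the charge values are arbitrary examples.

```cpp
#include <cstdint>
#include <iostream>

#include "aare/Cluster.hpp"

int main() {
    aare::Cluster<int32_t, 3, 3> cl{};
    cl.x = 10;
    cl.y = 20;
    // Row-major 3x3 charges; the brightest 2x2 block sits in one corner.
    cl.data = {1, 2, 3,
               4, 5, 60,
               7, 8, 90};

    std::cout << "sum = " << cl.sum() << '\n';           // 180
    auto [best, idx] = cl.max_sum_2x2();                 // highest of the four 2x2 sums
    std::cout << "max 2x2 sum = " << best
              << " (sub-cluster index " << idx << ")\n"; // 163, index 3
    return 0;
}
```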
@ -37,7 +37,11 @@ class ClusterCollector {
|
||||
public:
|
||||
ClusterCollector(ClusterFinderMT<ClusterType, uint16_t, double> *source) {
|
||||
m_source = source->sink();
|
||||
m_thread = std::thread(&ClusterCollector::process, this);
|
||||
m_thread =
|
||||
std::thread(&ClusterCollector::process,
|
||||
this); // only one process does that so why isnt it
|
||||
// automatically written to m_cluster in collect
|
||||
// - instead of writing first to m_sink?
|
||||
}
|
||||
void stop() {
|
||||
m_stop_requested = true;
|
||||
|
@ -39,14 +39,15 @@ template <typename ClusterType,
|
||||
typename Enable = std::enable_if_t<is_cluster_v<ClusterType>>>
|
||||
class ClusterFile {
|
||||
FILE *fp{};
|
||||
const std::string m_filename{};
|
||||
uint32_t m_num_left{}; /*Number of photons left in frame*/
|
||||
size_t m_chunk_size{}; /*Number of clusters to read at a time*/
|
||||
const std::string m_mode; /*Mode to open the file in*/
|
||||
std::string m_mode; /*Mode to open the file in*/
|
||||
std::optional<ROI> m_roi; /*Region of interest, will be applied if set*/
|
||||
std::optional<NDArray<int32_t, 2>>
|
||||
m_noise_map; /*Noise map to cut photons, will be applied if set*/
|
||||
std::optional<GainMap> m_gain_map; /*Gain map to apply to the clusters, will
|
||||
be applied if set*/
|
||||
std::optional<InvertedGainMap> m_gain_map; /*Gain map to apply to the
|
||||
clusters, will be applied if set*/
|
||||
|
||||
public:
|
||||
/**
|
||||
@ -59,26 +60,81 @@ class ClusterFile {
|
||||
* @throws std::runtime_error if the file could not be opened
|
||||
*/
|
||||
ClusterFile(const std::filesystem::path &fname, size_t chunk_size = 1000,
|
||||
const std::string &mode = "r");
|
||||
const std::string &mode = "r")
|
||||
|
||||
~ClusterFile();
|
||||
: m_filename(fname.string()), m_chunk_size(chunk_size), m_mode(mode) {
|
||||
|
||||
if (mode == "r") {
|
||||
fp = fopen(m_filename.c_str(), "rb");
|
||||
if (!fp) {
|
||||
throw std::runtime_error("Could not open file for reading: " +
|
||||
m_filename);
|
||||
}
|
||||
} else if (mode == "w") {
|
||||
fp = fopen(m_filename.c_str(), "wb");
|
||||
if (!fp) {
|
||||
throw std::runtime_error("Could not open file for writing: " +
|
||||
m_filename);
|
||||
}
|
||||
} else if (mode == "a") {
|
||||
fp = fopen(m_filename.c_str(), "ab");
|
||||
if (!fp) {
|
||||
throw std::runtime_error("Could not open file for appending: " +
|
||||
m_filename);
|
||||
}
|
||||
} else {
|
||||
throw std::runtime_error("Unsupported mode: " + mode);
|
||||
}
|
||||
}
|
||||
|
||||
~ClusterFile() { close(); }
|
||||
|
||||
/**
|
||||
* @brief Read n_clusters clusters from the file discarding frame numbers.
|
||||
* If EOF is reached the returned vector will have less than n_clusters
|
||||
* clusters
|
||||
* @brief Read n_clusters clusters from the file discarding
|
||||
* frame numbers. If EOF is reached the returned vector will
|
||||
* have less than n_clusters clusters
|
||||
*/
|
||||
ClusterVector<ClusterType> read_clusters(size_t n_clusters);
|
||||
ClusterVector<ClusterType> read_clusters(size_t n_clusters) {
|
||||
if (m_mode != "r") {
|
||||
throw std::runtime_error("File not opened for reading");
|
||||
}
|
||||
if (m_noise_map || m_roi) {
|
||||
return read_clusters_with_cut(n_clusters);
|
||||
} else {
|
||||
return read_clusters_without_cut(n_clusters);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Read a single frame from the file and return the clusters. The
|
||||
* cluster vector will have the frame number set.
|
||||
* @throws std::runtime_error if the file is not opened for reading or the
|
||||
* file pointer not at the beginning of a frame
|
||||
* @brief Read a single frame from the file and return the
|
||||
* clusters. The cluster vector will have the frame number
|
||||
* set.
|
||||
* @throws std::runtime_error if the file is not opened for
|
||||
* reading or the file pointer not at the beginning of a
|
||||
* frame
|
||||
*/
|
||||
ClusterVector<ClusterType> read_frame();
|
||||
ClusterVector<ClusterType> read_frame() {
|
||||
if (m_mode != "r") {
|
||||
throw std::runtime_error(LOCATION + "File not opened for reading");
|
||||
}
|
||||
if (m_noise_map || m_roi) {
|
||||
return read_frame_with_cut();
|
||||
} else {
|
||||
return read_frame_without_cut();
|
||||
}
|
||||
}
|
||||
|
||||
void write_frame(const ClusterVector<ClusterType> &clusters);
|
||||
void write_frame(const ClusterVector<ClusterType> &clusters) {
|
||||
if (m_mode != "w" && m_mode != "a") {
|
||||
throw std::runtime_error("File not opened for writing");
|
||||
}
|
||||
|
||||
int32_t frame_number = clusters.frame_number();
|
||||
fwrite(&frame_number, sizeof(frame_number), 1, fp);
|
||||
uint32_t n_clusters = clusters.size();
|
||||
fwrite(&n_clusters, sizeof(n_clusters), 1, fp);
|
||||
fwrite(clusters.data(), clusters.item_size(), clusters.size(), fp);
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Return the chunk size
|
||||
@ -86,34 +142,84 @@ class ClusterFile {
|
||||
size_t chunk_size() const { return m_chunk_size; }
|
||||
|
||||
/**
|
||||
* @brief Set the region of interest to use when reading clusters. If set
|
||||
* only clusters within the ROI will be read.
|
||||
* @brief Set the region of interest to use when reading
|
||||
* clusters. If set only clusters within the ROI will be
|
||||
* read.
|
||||
*/
|
||||
void set_roi(ROI roi);
|
||||
void set_roi(ROI roi) { m_roi = roi; }
|
||||
|
||||
/**
|
||||
* @brief Set the noise map to use when reading clusters. If set clusters
|
||||
* below the noise level will be discarded. Selection criteria one of:
|
||||
* Central pixel above noise, highest 2x2 sum above 2 * noise, total sum
|
||||
* above 3 * noise.
|
||||
* @brief Set the noise map to use when reading clusters. If
|
||||
* set clusters below the noise level will be discarded.
|
||||
* Selection criteria one of: Central pixel above noise,
|
||||
* highest 2x2 sum above 2 * noise, total sum above 3 *
|
||||
* noise.
|
||||
*/
|
||||
void set_noise_map(const NDView<int32_t, 2> noise_map);
|
||||
void set_noise_map(const NDView<int32_t, 2> noise_map) {
|
||||
m_noise_map = NDArray<int32_t, 2>(noise_map);
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Set the gain map to use when reading clusters. If set the gain map
|
||||
* will be applied to the clusters that pass ROI and noise_map selection.
|
||||
* The gain map is expected to be in ADU/energy.
|
||||
*/
|
||||
void set_gain_map(const NDView<double, 2> gain_map);
|
||||
void set_gain_map(const NDView<double, 2> gain_map) {
|
||||
m_gain_map = InvertedGainMap(gain_map);
|
||||
}
|
||||
|
||||
void set_gain_map(const GainMap &gain_map);
|
||||
void set_gain_map(const InvertedGainMap &gain_map) {
|
||||
m_gain_map = gain_map;
|
||||
}
|
||||
|
||||
void set_gain_map(const GainMap &&gain_map);
|
||||
void set_gain_map(const InvertedGainMap &&gain_map) {
|
||||
m_gain_map = gain_map;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Close the file. If not closed the file will be closed in the
|
||||
* destructor
|
||||
* @brief Close the file. If not closed the file will be
|
||||
* closed in the destructor
|
||||
*/
|
||||
void close();
|
||||
void close() {
|
||||
if (fp) {
|
||||
fclose(fp);
|
||||
fp = nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
/** @brief Open the file in specific mode
|
||||
*
|
||||
*/
|
||||
void open(const std::string &mode) {
|
||||
if (fp) {
|
||||
close();
|
||||
}
|
||||
|
||||
if (mode == "r") {
|
||||
fp = fopen(m_filename.c_str(), "rb");
|
||||
if (!fp) {
|
||||
throw std::runtime_error("Could not open file for reading: " +
|
||||
m_filename);
|
||||
}
|
||||
m_mode = "r";
|
||||
} else if (mode == "w") {
|
||||
fp = fopen(m_filename.c_str(), "wb");
|
||||
if (!fp) {
|
||||
throw std::runtime_error("Could not open file for writing: " +
|
||||
m_filename);
|
||||
}
|
||||
m_mode = "w";
|
||||
} else if (mode == "a") {
|
||||
fp = fopen(m_filename.c_str(), "ab");
|
||||
if (!fp) {
|
||||
throw std::runtime_error("Could not open file for appending: " +
|
||||
m_filename);
|
||||
}
|
||||
m_mode = "a";
|
||||
} else {
|
||||
throw std::runtime_error("Unsupported mode: " + mode);
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
ClusterVector<ClusterType> read_clusters_with_cut(size_t n_clusters);
|
||||
@ -124,103 +230,6 @@ class ClusterFile {
|
||||
ClusterType read_one_cluster();
|
||||
};
|
||||
|
||||
template <typename ClusterType, typename Enable>
|
||||
ClusterFile<ClusterType, Enable>::ClusterFile(
|
||||
const std::filesystem::path &fname, size_t chunk_size,
|
||||
const std::string &mode)
|
||||
: m_chunk_size(chunk_size), m_mode(mode) {
|
||||
|
||||
if (mode == "r") {
|
||||
fp = fopen(fname.c_str(), "rb");
|
||||
if (!fp) {
|
||||
throw std::runtime_error("Could not open file for reading: " +
|
||||
fname.string());
|
||||
}
|
||||
} else if (mode == "w") {
|
||||
fp = fopen(fname.c_str(), "wb");
|
||||
if (!fp) {
|
||||
throw std::runtime_error("Could not open file for writing: " +
|
||||
fname.string());
|
||||
}
|
||||
} else if (mode == "a") {
|
||||
fp = fopen(fname.c_str(), "ab");
|
||||
if (!fp) {
|
||||
throw std::runtime_error("Could not open file for appending: " +
|
||||
fname.string());
|
||||
}
|
||||
} else {
|
||||
throw std::runtime_error("Unsupported mode: " + mode);
|
||||
}
|
||||
}
|
||||
|
||||
template <typename ClusterType, typename Enable>
|
||||
ClusterFile<ClusterType, Enable>::~ClusterFile() {
|
||||
close();
|
||||
}
|
||||
|
||||
template <typename ClusterType, typename Enable>
|
||||
void ClusterFile<ClusterType, Enable>::close() {
|
||||
if (fp) {
|
||||
fclose(fp);
|
||||
fp = nullptr;
|
||||
}
|
||||
}
|
||||
template <typename ClusterType, typename Enable>
|
||||
void ClusterFile<ClusterType, Enable>::set_roi(ROI roi) {
|
||||
m_roi = roi;
|
||||
}
|
||||
template <typename ClusterType, typename Enable>
|
||||
void ClusterFile<ClusterType, Enable>::set_noise_map(
|
||||
const NDView<int32_t, 2> noise_map) {
|
||||
m_noise_map = NDArray<int32_t, 2>(noise_map);
|
||||
}
|
||||
template <typename ClusterType, typename Enable>
|
||||
void ClusterFile<ClusterType, Enable>::set_gain_map(
|
||||
const NDView<double, 2> gain_map) {
|
||||
m_gain_map = GainMap(gain_map);
|
||||
}
|
||||
|
||||
template <typename ClusterType, typename Enable>
|
||||
void ClusterFile<ClusterType, Enable>::set_gain_map(const GainMap &gain_map) {
|
||||
m_gain_map = gain_map;
|
||||
}
|
||||
|
||||
template <typename ClusterType, typename Enable>
|
||||
void ClusterFile<ClusterType, Enable>::set_gain_map(const GainMap &&gain_map) {
|
||||
m_gain_map = gain_map;
|
||||
}
|
||||
|
||||
// TODO generally supported for all cluster types
|
||||
template <typename ClusterType, typename Enable>
|
||||
void ClusterFile<ClusterType, Enable>::write_frame(
|
||||
const ClusterVector<ClusterType> &clusters) {
|
||||
if (m_mode != "w" && m_mode != "a") {
|
||||
throw std::runtime_error("File not opened for writing");
|
||||
}
|
||||
if (!(clusters.cluster_size_x() == 3) &&
|
||||
!(clusters.cluster_size_y() == 3)) {
|
||||
throw std::runtime_error("Only 3x3 clusters are supported");
|
||||
}
|
||||
int32_t frame_number = clusters.frame_number();
|
||||
fwrite(&frame_number, sizeof(frame_number), 1, fp);
|
||||
uint32_t n_clusters = clusters.size();
|
||||
fwrite(&n_clusters, sizeof(n_clusters), 1, fp);
|
||||
fwrite(clusters.data(), clusters.item_size(), clusters.size(), fp);
|
||||
}
|
||||
|
||||
template <typename ClusterType, typename Enable>
|
||||
ClusterVector<ClusterType>
|
||||
ClusterFile<ClusterType, Enable>::read_clusters(size_t n_clusters) {
|
||||
if (m_mode != "r") {
|
||||
throw std::runtime_error("File not opened for reading");
|
||||
}
|
||||
if (m_noise_map || m_roi) {
|
||||
return read_clusters_with_cut(n_clusters);
|
||||
} else {
|
||||
return read_clusters_without_cut(n_clusters);
|
||||
}
|
||||
}
|
||||
|
||||
template <typename ClusterType, typename Enable>
|
||||
ClusterVector<ClusterType>
|
||||
ClusterFile<ClusterType, Enable>::read_clusters_without_cut(size_t n_clusters) {
|
||||
@ -240,8 +249,8 @@ ClusterFile<ClusterType, Enable>::read_clusters_without_cut(size_t n_clusters) {
|
||||
// if there are photons left from previous frame read them first
|
||||
if (nph) {
|
||||
if (nph > n_clusters) {
|
||||
// if we have more photons left in the frame then photons to read we
|
||||
// read directly the requested number
|
||||
// if we have more photons left in the frame then photons to
|
||||
// read we read directly the requested number
|
||||
nn = n_clusters;
|
||||
} else {
|
||||
nn = nph;
|
||||
@ -270,7 +279,7 @@ ClusterFile<ClusterType, Enable>::read_clusters_without_cut(size_t n_clusters) {
|
||||
}
|
||||
}
|
||||
|
||||
// Resize the vector to the number of clusters.
|
||||
// Resize the vector to the number of clusters.
|
||||
// No new allocation, only change bounds.
|
||||
clusters.resize(nph_read);
|
||||
if (m_gain_map)
|
||||
@ -282,7 +291,7 @@ template <typename ClusterType, typename Enable>
|
||||
ClusterVector<ClusterType>
|
||||
ClusterFile<ClusterType, Enable>::read_clusters_with_cut(size_t n_clusters) {
|
||||
ClusterVector<ClusterType> clusters;
|
||||
clusters.resize(n_clusters);
|
||||
clusters.reserve(n_clusters);
|
||||
|
||||
// if there are photons left from previous frame read them first
|
||||
if (m_num_left) {
|
||||
@ -307,8 +316,8 @@ ClusterFile<ClusterType, Enable>::read_clusters_with_cut(size_t n_clusters) {
|
||||
while (fread(&frame_number, sizeof(frame_number), 1, fp)) {
|
||||
if (fread(&m_num_left, sizeof(m_num_left), 1, fp)) {
|
||||
clusters.set_frame_number(
|
||||
frame_number); // cluster vector will hold the last frame
|
||||
// number
|
||||
frame_number); // cluster vector will hold the last
|
||||
// frame number
|
||||
while (m_num_left && clusters.size() < n_clusters) {
|
||||
ClusterType c = read_one_cluster();
|
||||
if (is_selected(c)) {
|
||||
@ -339,18 +348,6 @@ ClusterType ClusterFile<ClusterType, Enable>::read_one_cluster() {
|
||||
return c;
|
||||
}
|
||||
|
||||
template <typename ClusterType, typename Enable>
|
||||
ClusterVector<ClusterType> ClusterFile<ClusterType, Enable>::read_frame() {
|
||||
if (m_mode != "r") {
|
||||
throw std::runtime_error(LOCATION + "File not opened for reading");
|
||||
}
|
||||
if (m_noise_map || m_roi) {
|
||||
return read_frame_with_cut();
|
||||
} else {
|
||||
return read_frame_without_cut();
|
||||
}
|
||||
}
|
||||
|
||||
template <typename ClusterType, typename Enable>
|
||||
ClusterVector<ClusterType>
|
||||
ClusterFile<ClusterType, Enable>::read_frame_without_cut() {
|
||||
@ -375,11 +372,13 @@ ClusterFile<ClusterType, Enable>::read_frame_without_cut() {
|
||||
ClusterVector<ClusterType> clusters(n_clusters);
|
||||
clusters.set_frame_number(frame_number);
|
||||
|
||||
clusters.resize(n_clusters);
|
||||
|
||||
if (fread(clusters.data(), clusters.item_size(), n_clusters, fp) !=
|
||||
static_cast<size_t>(n_clusters)) {
|
||||
throw std::runtime_error(LOCATION + "Could not read clusters");
|
||||
}
|
||||
clusters.resize(n_clusters);
|
||||
|
||||
if (m_gain_map)
|
||||
m_gain_map->apply_gain_map(clusters);
|
||||
return clusters;
|
||||
@ -427,13 +426,9 @@ bool ClusterFile<ClusterType, Enable>::is_selected(ClusterType &cl) {
|
||||
}
|
||||
}
|
||||
|
||||
auto cluster_size_x = extract_template_arguments<
|
||||
std::remove_reference_t<decltype(cl)>>::cluster_size_x;
|
||||
auto cluster_size_y = extract_template_arguments<
|
||||
std::remove_reference_t<decltype(cl)>>::cluster_size_y;
|
||||
|
||||
size_t cluster_center_index =
|
||||
(cluster_size_x / 2) + (cluster_size_y / 2) * cluster_size_x;
|
||||
(ClusterType::cluster_size_x / 2) +
|
||||
(ClusterType::cluster_size_y / 2) * ClusterType::cluster_size_x;
|
||||
|
||||
if (m_noise_map) {
|
||||
auto sum_1x1 = cl.data[cluster_center_index]; // central pixel
|
||||
|
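With the ClusterFile methods moved into the header above, a typical read path looks like the sketch below. The file name and cluster geometry are placeholders; the ROI and noise-map cuts are left commented out because constructing those objects is outside this diff.

```cpp
#include <cstdint>
#include <iostream>

#include "aare/Cluster.hpp"
#include "aare/ClusterFile.hpp"

int main() {
    using Cluster3x3 = aare::Cluster<int32_t, 3, 3>;

    // Open for reading; throws std::runtime_error if the file cannot be opened.
    aare::ClusterFile<Cluster3x3> f("clusters.clust", /*chunk_size=*/1000, "r");

    // Optional cuts: only clusters inside the ROI and above the noise level pass.
    // f.set_roi(roi);            // aare::ROI, not constructed here
    // f.set_noise_map(noise);    // NDView<int32_t, 2> over a noise map

    auto clusters = f.read_clusters(1000); // may return fewer clusters near EOF
    std::cout << "read " << clusters.size() << " clusters, last frame "
              << clusters.frame_number() << '\n';

    f.close(); // otherwise closed by the destructor
    return 0;
}
```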
@ -1,154 +0,0 @@
|
||||
#pragma once
|
||||
#include "aare/core/defs.hpp"
|
||||
#include <filesystem>
|
||||
#include <fmt/format.h>
|
||||
#include <string>
|
||||
|
||||
namespace aare {
|
||||
struct ClusterHeader {
|
||||
int32_t frame_number;
|
||||
int32_t n_clusters;
|
||||
std::string to_string() const {
|
||||
return "frame_number: " + std::to_string(frame_number) +
|
||||
", n_clusters: " + std::to_string(n_clusters);
|
||||
}
|
||||
};
|
||||
|
||||
struct ClusterV2_ {
|
||||
int16_t x;
|
||||
int16_t y;
|
||||
std::array<int32_t, 9> data;
|
||||
std::string to_string(bool detailed = false) const {
|
||||
if (detailed) {
|
||||
std::string data_str = "[";
|
||||
for (auto &d : data) {
|
||||
data_str += std::to_string(d) + ", ";
|
||||
}
|
||||
data_str += "]";
|
||||
return "x: " + std::to_string(x) + ", y: " + std::to_string(y) +
|
||||
", data: " + data_str;
|
||||
}
|
||||
return "x: " + std::to_string(x) + ", y: " + std::to_string(y);
|
||||
}
|
||||
};
|
||||
|
||||
struct ClusterV2 {
|
||||
ClusterV2_ cluster;
|
||||
int32_t frame_number;
|
||||
std::string to_string() const {
|
||||
return "frame_number: " + std::to_string(frame_number) + ", " +
|
||||
cluster.to_string();
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief
|
||||
* important not: fp always points to the clusters header and does not point to
|
||||
* individual clusters
|
||||
*
|
||||
*/
|
||||
class ClusterFileV2 {
|
||||
std::filesystem::path m_fpath;
|
||||
std::string m_mode;
|
||||
FILE *fp{nullptr};
|
||||
|
||||
void check_open() {
|
||||
if (!fp)
|
||||
throw std::runtime_error(
|
||||
fmt::format("File: {} not open", m_fpath.string()));
|
||||
}
|
||||
|
||||
public:
|
||||
ClusterFileV2(std::filesystem::path const &fpath, std::string const &mode)
|
||||
: m_fpath(fpath), m_mode(mode) {
|
||||
if (m_mode != "r" && m_mode != "w")
|
||||
throw std::invalid_argument("mode must be 'r' or 'w'");
|
||||
if (m_mode == "r" && !std::filesystem::exists(m_fpath))
|
||||
throw std::invalid_argument("File does not exist");
|
||||
if (mode == "r") {
|
||||
fp = fopen(fpath.string().c_str(), "rb");
|
||||
} else if (mode == "w") {
|
||||
if (std::filesystem::exists(fpath)) {
|
||||
fp = fopen(fpath.string().c_str(), "r+b");
|
||||
} else {
|
||||
fp = fopen(fpath.string().c_str(), "wb");
|
||||
}
|
||||
}
|
||||
if (fp == nullptr) {
|
||||
throw std::runtime_error("Failed to open file");
|
||||
}
|
||||
}
|
||||
~ClusterFileV2() { close(); }
|
||||
std::vector<ClusterV2> read() {
|
||||
check_open();
|
||||
|
||||
ClusterHeader header;
|
||||
fread(&header, sizeof(ClusterHeader), 1, fp);
|
||||
std::vector<ClusterV2_> clusters_(header.n_clusters);
|
||||
fread(clusters_.data(), sizeof(ClusterV2_), header.n_clusters, fp);
|
||||
std::vector<ClusterV2> clusters;
|
||||
for (auto &c : clusters_) {
|
||||
ClusterV2 cluster;
|
||||
cluster.cluster = std::move(c);
|
||||
cluster.frame_number = header.frame_number;
|
||||
clusters.push_back(cluster);
|
||||
}
|
||||
|
||||
return clusters;
|
||||
}
|
||||
std::vector<std::vector<ClusterV2>> read(int n_frames) {
|
||||
std::vector<std::vector<ClusterV2>> clusters;
|
||||
for (int i = 0; i < n_frames; i++) {
|
||||
clusters.push_back(read());
|
||||
}
|
||||
return clusters;
|
||||
}
|
||||
|
||||
size_t write(std::vector<ClusterV2> const &clusters) {
|
||||
check_open();
|
||||
if (m_mode != "w")
|
||||
throw std::runtime_error("File not opened in write mode");
|
||||
if (clusters.empty())
|
||||
return 0;
|
||||
|
||||
ClusterHeader header;
|
||||
header.frame_number = clusters[0].frame_number;
|
||||
header.n_clusters = clusters.size();
|
||||
fwrite(&header, sizeof(ClusterHeader), 1, fp);
|
||||
for (auto &c : clusters) {
|
||||
fwrite(&c.cluster, sizeof(ClusterV2_), 1, fp);
|
||||
}
|
||||
return clusters.size();
|
||||
}
|
||||
|
||||
size_t write(std::vector<std::vector<ClusterV2>> const &clusters) {
|
||||
check_open();
|
||||
if (m_mode != "w")
|
||||
throw std::runtime_error("File not opened in write mode");
|
||||
|
||||
size_t n_clusters = 0;
|
||||
for (auto &c : clusters) {
|
||||
n_clusters += write(c);
|
||||
}
|
||||
return n_clusters;
|
||||
}
|
||||
|
||||
int seek_to_begin() { return fseek(fp, 0, SEEK_SET); }
|
||||
int seek_to_end() { return fseek(fp, 0, SEEK_END); }
|
||||
|
||||
int32_t frame_number() {
|
||||
auto pos = ftell(fp);
|
||||
ClusterHeader header;
|
||||
fread(&header, sizeof(ClusterHeader), 1, fp);
|
||||
fseek(fp, pos, SEEK_SET);
|
||||
return header.frame_number;
|
||||
}
|
||||
|
||||
void close() {
|
||||
if (fp) {
|
||||
fclose(fp);
|
||||
fp = nullptr;
|
||||
}
|
||||
}
|
||||
};
|
||||
} // namespace aare
|
@ -20,11 +20,9 @@ class ClusterFinder {
|
||||
Pedestal<PEDESTAL_TYPE> m_pedestal;
|
||||
ClusterVector<ClusterType> m_clusters;
|
||||
|
||||
static const uint8_t ClusterSizeX =
|
||||
extract_template_arguments<ClusterType>::cluster_size_x;
|
||||
static const uint8_t ClusterSizeY =
|
||||
extract_template_arguments<ClusterType>::cluster_size_x;
|
||||
using CT = typename extract_template_arguments<ClusterType>::value_type;
|
||||
static const uint8_t ClusterSizeX = ClusterType::cluster_size_x;
|
||||
static const uint8_t ClusterSizeY = ClusterType::cluster_size_y;
|
||||
using CT = typename ClusterType::value_type;
|
||||
|
||||
public:
|
||||
/**
|
||||
@ -79,7 +77,6 @@ class ClusterFinder {
|
||||
int has_center_pixel_y = ClusterSizeY % 2;
|
||||
|
||||
m_clusters.set_frame_number(frame_number);
|
||||
std::vector<CT> cluster_data(ClusterSizeX * ClusterSizeY);
|
||||
for (int iy = 0; iy < frame.shape(0); iy++) {
|
||||
for (int ix = 0; ix < frame.shape(1); ix++) {
|
||||
|
||||
@ -126,8 +123,9 @@ class ClusterFinder {
|
||||
|
||||
// Store cluster
|
||||
if (value == max) {
|
||||
// Zero out the cluster data
|
||||
std::fill(cluster_data.begin(), cluster_data.end(), 0);
|
||||
ClusterType cluster{};
|
||||
cluster.x = ix;
|
||||
cluster.y = iy;
|
||||
|
||||
// Fill the cluster data since we have a photon to store
|
||||
// It's worth redoing the look since most of the time we
|
||||
@ -141,20 +139,15 @@ class ClusterFinder {
|
||||
static_cast<CT>(frame(iy + ir, ix + ic)) -
|
||||
static_cast<CT>(
|
||||
m_pedestal.mean(iy + ir, ix + ic));
|
||||
cluster_data[i] =
|
||||
cluster.data[i] =
|
||||
tmp; // Watch for out of bounds access
|
||||
i++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ClusterType new_cluster{};
|
||||
new_cluster.x = ix;
|
||||
new_cluster.y = iy;
|
||||
std::copy(cluster_data.begin(), cluster_data.end(),
|
||||
new_cluster.data);
|
||||
// Add the cluster to the output ClusterVector
|
||||
m_clusters.push_back(new_cluster);
|
||||
m_clusters.push_back(cluster);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -34,7 +34,8 @@ template <typename ClusterType = Cluster<int32_t, 3, 3>,
|
||||
typename FRAME_TYPE = uint16_t, typename PEDESTAL_TYPE = double>
|
||||
class ClusterFinderMT {
|
||||
|
||||
using CT = typename extract_template_arguments<ClusterType>::value_type;
|
||||
protected:
|
||||
using CT = typename ClusterType::value_type;
|
||||
size_t m_current_thread{0};
|
||||
size_t m_n_threads{0};
|
||||
using Finder = ClusterFinder<ClusterType, FRAME_TYPE, PEDESTAL_TYPE>;
|
||||
@ -50,6 +51,7 @@ class ClusterFinderMT {
|
||||
std::thread m_collect_thread;
|
||||
std::chrono::milliseconds m_default_wait{1};
|
||||
|
||||
private:
|
||||
std::atomic<bool> m_stop_requested{false};
|
||||
std::atomic<bool> m_processing_threads_stopped{true};
|
||||
|
||||
@ -120,6 +122,7 @@ class ClusterFinderMT {
|
||||
ClusterFinderMT(Shape<2> image_size, PEDESTAL_TYPE nSigma = 5.0,
|
||||
size_t capacity = 2000, size_t n_threads = 3)
|
||||
: m_n_threads(n_threads) {
|
||||
|
||||
for (size_t i = 0; i < n_threads; i++) {
|
||||
m_cluster_finders.push_back(
|
||||
std::make_unique<
|
||||
|
@ -18,256 +18,6 @@ template <typename ClusterType,
|
||||
typename = std::enable_if_t<is_cluster_v<ClusterType>>>
|
||||
class ClusterVector; // Forward declaration
|
||||
|
||||
/**
|
||||
* @brief ClusterVector is a container for clusters of various sizes. It uses a
|
||||
* contiguous memory buffer to store the clusters. It is templated on the data
|
||||
* type and the coordinate type of the clusters.
|
||||
* @note push_back can invalidate pointers to elements in the container
|
||||
* @warning ClusterVector is currently move only to catch unintended copies, but
|
||||
* this might change since there are probably use cases where copying is needed.
|
||||
* @tparam T data type of the pixels in the cluster
|
||||
* @tparam CoordType data type of the x and y coordinates of the cluster
|
||||
* (normally int16_t)
|
||||
*/
|
||||
#if 0
|
||||
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType>
|
||||
class ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> {
|
||||
|
||||
std::byte *m_data{};
|
||||
size_t m_size{0};
|
||||
size_t m_capacity;
|
||||
uint64_t m_frame_number{0}; // TODO! Check frame number size and type
|
||||
/**
|
||||
Format string used in the python bindings to create a numpy
|
||||
array from the buffer
|
||||
= - native byte order
|
||||
h - short
|
||||
d - double
|
||||
i - int
|
||||
*/
|
||||
constexpr static char m_fmt_base[] = "=h:x:\nh:y:\n({},{}){}:data:";
|
||||
|
||||
public:
|
||||
using value_type = T;
|
||||
using ClusterType = Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>;
|
||||
|
||||
/**
|
||||
* @brief Construct a new ClusterVector object
|
||||
* @param capacity initial capacity of the buffer in number of clusters
|
||||
* @param frame_number frame number of the clusters. Default is 0, which is
|
||||
* also used to indicate that the clusters come from many frames
|
||||
*/
|
||||
ClusterVector(size_t capacity = 1024, uint64_t frame_number = 0)
|
||||
: m_capacity(capacity), m_frame_number(frame_number) {
|
||||
allocate_buffer(m_capacity);
|
||||
}
|
||||
|
||||
~ClusterVector() { delete[] m_data; }
|
||||
|
||||
// Move constructor
|
||||
ClusterVector(ClusterVector &&other) noexcept
|
||||
: m_data(other.m_data), m_size(other.m_size),
|
||||
m_capacity(other.m_capacity), m_frame_number(other.m_frame_number) {
|
||||
other.m_data = nullptr;
|
||||
other.m_size = 0;
|
||||
other.m_capacity = 0;
|
||||
}
|
||||
|
||||
// Move assignment operator
|
||||
ClusterVector &operator=(ClusterVector &&other) noexcept {
|
||||
if (this != &other) {
|
||||
delete[] m_data;
|
||||
m_data = other.m_data;
|
||||
m_size = other.m_size;
|
||||
m_capacity = other.m_capacity;
|
||||
m_frame_number = other.m_frame_number;
|
||||
other.m_data = nullptr;
|
||||
other.m_size = 0;
|
||||
other.m_capacity = 0;
|
||||
other.m_frame_number = 0;
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Reserve space for at least capacity clusters
|
||||
* @param capacity number of clusters to reserve space for
|
||||
* @note If capacity is less than the current capacity, the function does
|
||||
* nothing.
|
||||
*/
|
||||
void reserve(size_t capacity) {
|
||||
if (capacity > m_capacity) {
|
||||
allocate_buffer(capacity);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Add a cluster to the vector
|
||||
*/
|
||||
void push_back(const ClusterType &cluster) {
|
||||
if (m_size == m_capacity) {
|
||||
allocate_buffer(m_capacity * 2);
|
||||
}
|
||||
std::byte *ptr = element_ptr(m_size);
|
||||
*reinterpret_cast<CoordType *>(ptr) = cluster.x;
|
||||
ptr += sizeof(CoordType);
|
||||
*reinterpret_cast<CoordType *>(ptr) = cluster.y;
|
||||
ptr += sizeof(CoordType);
|
||||
|
||||
std::memcpy(ptr, cluster.data, ClusterSizeX * ClusterSizeY * sizeof(T));
|
||||
|
||||
m_size++;
|
||||
}
|
||||
|
||||
ClusterVector &operator+=(const ClusterVector &other) {
|
||||
if (m_size + other.m_size > m_capacity) {
|
||||
allocate_buffer(m_capacity + other.m_size);
|
||||
}
|
||||
std::copy(other.m_data, other.m_data + other.m_size * item_size(),
|
||||
m_data + m_size * item_size());
|
||||
m_size += other.m_size;
|
||||
return *this;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Sum the pixels in each cluster
|
||||
* @return std::vector<T> vector of sums for each cluster
|
||||
*/
|
||||
/*
|
||||
std::vector<T> sum() {
|
||||
std::vector<T> sums(m_size);
|
||||
const size_t stride = item_size();
|
||||
const size_t n_pixels = ClusterSizeX * ClusterSizeY;
|
||||
std::byte *ptr = m_data + 2 * sizeof(CoordType); // skip x and y
|
||||
|
||||
for (size_t i = 0; i < m_size; i++) {
|
||||
sums[i] =
|
||||
std::accumulate(reinterpret_cast<T *>(ptr),
|
||||
reinterpret_cast<T *>(ptr) + n_pixels, T{});
|
||||
ptr += stride;
|
||||
}
|
||||
return sums;
|
||||
}
|
||||
*/
|
||||
|
||||
/**
|
||||
* @brief Sum the pixels in the 2x2 subcluster with the biggest pixel sum in
|
||||
* each cluster
|
||||
* @return std::vector<T> vector of sums for each cluster
|
||||
*/ //TODO if underlying container is a vector use std::for_each
|
||||
/*
|
||||
std::vector<T> sum_2x2() {
|
||||
std::vector<T> sums_2x2(m_size);
|
||||
|
||||
for (size_t i = 0; i < m_size; i++) {
|
||||
sums_2x2[i] = at(i).max_sum_2x2;
|
||||
}
|
||||
return sums_2x2;
|
||||
}
|
||||
*/
|
||||
|
||||
/**
|
||||
* @brief Return the number of clusters in the vector
|
||||
*/
|
||||
size_t size() const { return m_size; }
|
||||
|
||||
uint8_t cluster_size_x() const { return ClusterSizeX; }
|
||||
|
||||
uint8_t cluster_size_y() const { return ClusterSizeY; }
|
||||
|
||||
/**
|
||||
* @brief Return the capacity of the buffer in number of clusters. This is
|
||||
* the number of clusters that can be stored in the current buffer without
|
||||
* reallocation.
|
||||
*/
|
||||
size_t capacity() const { return m_capacity; }
|
||||
|
||||
/**
|
||||
* @brief Return the size in bytes of a single cluster
|
||||
*/
|
||||
size_t item_size() const {
|
||||
return 2 * sizeof(CoordType) + ClusterSizeX * ClusterSizeY * sizeof(T);
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Return the offset in bytes for the i-th cluster
|
||||
*/
|
||||
size_t element_offset(size_t i) const { return item_size() * i; }
|
||||
|
||||
/**
|
||||
* @brief Return a pointer to the i-th cluster
|
||||
*/
|
||||
std::byte *element_ptr(size_t i) { return m_data + element_offset(i); }
|
||||
|
||||
/**
|
||||
* @brief Return a pointer to the i-th cluster
|
||||
*/
|
||||
const std::byte *element_ptr(size_t i) const {
|
||||
return m_data + element_offset(i);
|
||||
}
|
||||
|
||||
std::byte *data() { return m_data; }
|
||||
std::byte const *data() const { return m_data; }
|
||||
|
||||
/**
|
||||
* @brief Return a reference to the i-th cluster casted to type V
|
||||
* @tparam V type of the cluster
|
||||
*/
|
||||
ClusterType &at(size_t i) {
|
||||
return *reinterpret_cast<ClusterType *>(element_ptr(i));
|
||||
}
|
||||
|
||||
const ClusterType &at(size_t i) const {
|
||||
return *reinterpret_cast<const ClusterType *>(element_ptr(i));
|
||||
}
|
||||
|
||||
template <typename V> const V &at(size_t i) const {
|
||||
return *reinterpret_cast<const V *>(element_ptr(i));
|
||||
}
|
||||
|
||||
const std::string_view fmt_base() const {
|
||||
// TODO! how do we match on coord_t?
|
||||
return m_fmt_base;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Return the frame number of the clusters. 0 is used to indicate
|
||||
* that the clusters come from many frames
|
||||
*/
|
||||
uint64_t frame_number() const { return m_frame_number; }
|
||||
|
||||
void set_frame_number(uint64_t frame_number) {
|
||||
m_frame_number = frame_number;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Resize the vector to contain new_size clusters. If new_size is
|
||||
* greater than the current capacity, a new buffer is allocated. If the size
|
||||
* is smaller no memory is freed, size is just updated.
|
||||
* @param new_size new size of the vector
|
||||
* @warning The additional clusters are not initialized
|
||||
*/
|
||||
void resize(size_t new_size) {
|
||||
// TODO! Should we initialize the new clusters?
|
||||
if (new_size > m_capacity) {
|
||||
allocate_buffer(new_size);
|
||||
}
|
||||
m_size = new_size;
|
||||
}
|
||||
|
||||
private:
|
||||
void allocate_buffer(size_t new_capacity) {
|
||||
size_t num_bytes = item_size() * new_capacity;
|
||||
std::byte *new_data = new std::byte[num_bytes]{};
|
||||
std::copy(m_data, m_data + item_size() * m_size, new_data);
|
||||
delete[] m_data;
|
||||
m_data = new_data;
|
||||
m_capacity = new_capacity;
|
||||
}
|
||||
};
|
||||
#endif
|
||||
|
||||
/**
|
||||
* @brief ClusterVector is a container for clusters of various sizes. It
|
||||
* uses a contiguous memory buffer to store the clusters. It is templated on
|
||||
@ -285,7 +35,7 @@ template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
class ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> {
|
||||
|
||||
std::vector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> m_data{};
|
||||
uint64_t m_frame_number{0}; // TODO! Check frame number size and type
|
||||
int32_t m_frame_number{0}; // TODO! Check frame number size and type
|
||||
|
||||
public:
|
||||
using value_type = T;
|
||||
@ -297,7 +47,7 @@ class ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> {
|
||||
* @param frame_number frame number of the clusters. Default is 0, which is
|
||||
* also used to indicate that the clusters come from many frames
|
||||
*/
|
||||
ClusterVector(size_t capacity = 300, int32_t frame_number = 0)
|
||||
ClusterVector(size_t capacity = 1024, uint64_t frame_number = 0)
|
||||
: m_frame_number(frame_number) {
|
||||
m_data.reserve(capacity);
|
||||
}
|
||||
@ -319,6 +69,36 @@ class ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> {
|
||||
return *this;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Sum the pixels in each cluster
|
||||
* @return std::vector<T> vector of sums for each cluster
|
||||
*/
|
||||
std::vector<T> sum() {
|
||||
std::vector<T> sums(m_data.size());
|
||||
|
||||
std::transform(
|
||||
m_data.begin(), m_data.end(), sums.begin(),
|
||||
[](const ClusterType &cluster) { return cluster.sum(); });
|
||||
|
||||
return sums;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Sum the pixels in the 2x2 subcluster with the biggest pixel sum in
|
||||
* each cluster
|
||||
* @return std::vector<T> vector of sums for each cluster
|
||||
*/
|
||||
std::vector<T> sum_2x2() {
|
||||
std::vector<T> sums_2x2(m_data.size());
|
||||
|
||||
std::transform(m_data.begin(), m_data.end(), sums_2x2.begin(),
|
||||
[](const ClusterType &cluster) {
|
||||
return cluster.max_sum_2x2().first;
|
||||
});
|
||||
|
||||
return sums_2x2;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Reserve space for at least capacity clusters
|
||||
* @param capacity number of clusters to reserve space for
|
||||
@ -361,7 +141,8 @@ class ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> {
|
||||
* @brief Return the size in bytes of a single cluster
|
||||
*/
|
||||
size_t item_size() const {
|
||||
return 2 * sizeof(CoordType) + ClusterSizeX * ClusterSizeY * sizeof(T);
|
||||
return sizeof(ClusterType); // 2 * sizeof(CoordType) + ClusterSizeX *
|
||||
// ClusterSizeY * sizeof(T);
|
||||
}
|
||||
|
||||
ClusterType *data() { return m_data.data(); }
|
||||
@ -371,17 +152,17 @@ class ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> {
|
||||
* @brief Return a reference to the i-th cluster casted to type V
|
||||
* @tparam V type of the cluster
|
||||
*/
|
||||
ClusterType &at(size_t i) { return m_data[i]; }
|
||||
ClusterType &operator[](size_t i) { return m_data[i]; }
|
||||
|
||||
const ClusterType &at(size_t i) const { return m_data[i]; }
|
||||
const ClusterType &operator[](size_t i) const { return m_data[i]; }
|
||||
|
||||
/**
|
||||
* @brief Return the frame number of the clusters. 0 is used to indicate
|
||||
* that the clusters come from many frames
|
||||
*/
|
||||
uint64_t frame_number() const { return m_frame_number; }
|
||||
int32_t frame_number() const { return m_frame_number; }
|
||||
|
||||
void set_frame_number(uint64_t frame_number) {
|
||||
void set_frame_number(int32_t frame_number) {
|
||||
m_frame_number = frame_number;
|
||||
}
|
||||
};
|
||||
|
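The hunks above switch ClusterVector to a std::vector-backed container and add per-cluster sum() and sum_2x2() helpers plus operator[] access. A minimal usage sketch follows, assuming Cluster<int32_t, 3, 3> (the default used elsewhere in this diff) and assuming the usual include path; values and the frame number are made up for illustration.

// Sketch only: exercises the std::vector-backed ClusterVector interface shown above.
// The include path and the numeric values are assumptions.
#include "aare/ClusterVector.hpp"

void cluster_vector_example() {
    aare::ClusterVector<aare::Cluster<int32_t, 3, 3>> cv(512, 1); // capacity, frame number

    aare::Cluster<int32_t, 3, 3> c{};
    c.x = 10;
    c.y = 20;
    c.data.fill(1);
    c.data[4] = 50; // central pixel

    cv.push_back(c);

    auto totals = cv.sum();      // per-cluster pixel sums
    auto best2x2 = cv.sum_2x2(); // per-cluster maximum 2x2 sub-sum
    auto &first = cv[0];         // new operator[] access
    cv.set_frame_number(2);
}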
@@ -18,8 +18,8 @@ class FilePtr {
     FilePtr(FilePtr &&other);
     FilePtr &operator=(FilePtr &&other);
     FILE *get();
-    int64_t tell();
-    void seek(int64_t offset, int whence = SEEK_SET) {
+    ssize_t tell();
+    void seek(ssize_t offset, int whence = SEEK_SET) {
         if (fseek(fp_, offset, whence) != 0)
             throw std::runtime_error("Error seeking in file");
     }
@@ -15,6 +15,12 @@ NDArray<double, 1> gaus(NDView<double, 1> x, NDView<double, 1> par);
 double pol1(const double x, const double *par);
 NDArray<double, 1> pol1(NDView<double, 1> x, NDView<double, 1> par);
 
+double scurve(const double x, const double *par);
+NDArray<double, 1> scurve(NDView<double, 1> x, NDView<double, 1> par);
+
+double scurve2(const double x, const double *par);
+NDArray<double, 1> scurve2(NDView<double, 1> x, NDView<double, 1> par);
+
 } // namespace func
 
 
@@ -25,6 +31,9 @@ std::array<double, 3> gaus_init_par(const NDView<double, 1> x, const NDView<doub
 
 std::array<double, 2> pol1_init_par(const NDView<double, 1> x, const NDView<double, 1> y);
 
+std::array<double, 6> scurve_init_par(const NDView<double, 1> x, const NDView<double, 1> y);
+std::array<double, 6> scurve2_init_par(const NDView<double, 1> x, const NDView<double, 1> y);
+
 static constexpr int DEFAULT_NUM_THREADS = 4;
 
 /**
@@ -38,7 +47,7 @@ NDArray<double, 1> fit_gaus(NDView<double, 1> x, NDView<double, 1> y);
 /**
  * @brief Fit a 1D Gaussian to each pixel. Data layout [row, col, values]
  * @param x x values
- * @param y y vales, layout [row, col, values]
+ * @param y y values, layout [row, col, values]
  * @param n_threads number of threads to use
  */
 
@@ -51,7 +60,7 @@ NDArray<double, 3> fit_gaus(NDView<double, 1> x, NDView<double, 3> y,
 /**
  * @brief Fit a 1D Gaussian with error estimates
  * @param x x values
- * @param y y vales, layout [row, col, values]
+ * @param y y values, layout [row, col, values]
  * @param y_err error in y, layout [row, col, values]
  * @param par_out output parameters
  * @param par_err_out output error parameters
@@ -64,7 +73,7 @@ void fit_gaus(NDView<double, 1> x, NDView<double, 1> y, NDView<double, 1> y_err,
  * @brief Fit a 1D Gaussian to each pixel with error estimates. Data layout
  * [row, col, values]
  * @param x x values
- * @param y y vales, layout [row, col, values]
+ * @param y y values, layout [row, col, values]
  * @param y_err error in y, layout [row, col, values]
  * @param par_out output parameters, layout [row, col, values]
  * @param par_err_out output parameter errors, layout [row, col, values]
@@ -88,5 +97,19 @@ void fit_pol1(NDView<double, 1> x, NDView<double, 3> y, NDView<double, 3> y_err,
               NDView<double, 3> par_out, NDView<double, 3> par_err_out, NDView<double, 2> chi2_out,
               int n_threads = DEFAULT_NUM_THREADS);
 
+NDArray<double, 1> fit_scurve(NDView<double, 1> x, NDView<double, 1> y);
+NDArray<double, 3> fit_scurve(NDView<double, 1> x, NDView<double, 3> y, int n_threads);
+void fit_scurve(NDView<double, 1> x, NDView<double, 1> y, NDView<double, 1> y_err,
+                NDView<double, 1> par_out, NDView<double, 1> par_err_out, double& chi2);
+void fit_scurve(NDView<double, 1> x, NDView<double, 3> y, NDView<double, 3> y_err,
+                NDView<double, 3> par_out, NDView<double, 3> par_err_out, NDView<double, 2> chi2_out,
+                int n_threads);
+
+NDArray<double, 1> fit_scurve2(NDView<double, 1> x, NDView<double, 1> y);
+NDArray<double, 3> fit_scurve2(NDView<double, 1> x, NDView<double, 3> y, int n_threads);
+void fit_scurve2(NDView<double, 1> x, NDView<double, 1> y, NDView<double, 1> y_err,
+                 NDView<double, 1> par_out, NDView<double, 1> par_err_out, double& chi2);
+void fit_scurve2(NDView<double, 1> x, NDView<double, 3> y, NDView<double, 3> y_err,
+                 NDView<double, 3> par_out, NDView<double, 3> par_err_out, NDView<double, 2> chi2_out,
+                 int n_threads);
 } // namespace aare
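A minimal sketch of calling the new 1D fit_scurve overload declared above. The x/y data here is fabricated, the header path is assumed, and the ordering of the six returned s-curve parameters is not documented in this diff.

// Sketch only: the values and include path are assumptions.
#include "aare/Fit.hpp"
#include "aare/NDArray.hpp"

void scurve_fit_example() {
    aare::NDArray<double, 1> x(std::array<ssize_t, 1>{50});
    aare::NDArray<double, 1> y(std::array<ssize_t, 1>{50});
    for (ssize_t i = 0; i < x.size(); ++i) {
        x[i] = static_cast<double>(i);
        y[i] = i < 25 ? 10.0 : 1000.0; // crude step-like threshold scan
    }
    // six parameters, matching scurve_init_par's std::array<double, 6>
    aare::NDArray<double, 1> par = aare::fit_scurve(x.view(), y.view());
}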
112  include/aare/FlatField.hpp  Normal file
@@ -0,0 +1,112 @@

/**
 * stores flatfield for angle calibration
 */

#pragma once
#include <cmath>
#include <cstdint>

#include <fstream>
#include <iostream>
#include <memory>
#include <sstream>
#include <string>

#include "MythenDetectorSpecifications.hpp"
#include "NDArray.hpp"

namespace aare {
// TODO maybe template now its uint32
class FlatField {

  public:
    FlatField(std::shared_ptr<MythenDetectorSpecifications> mythen_detector_)
        : mythen_detector(mythen_detector_) {

        flat_field = NDArray<uint32_t, 1>(
            std::array<ssize_t, 1>{mythen_detector->num_strips()}, 0);
    }

    void read_flatfield_from_file(const std::string &filename) {

        std::string word;
        uint32_t strip_number{};

        try {
            std::ifstream file(filename, std::ios_base::in);
            if (!file.good()) {
                throw std::logic_error("file does not exist");
            }

            std::stringstream file_buffer;
            file_buffer << file.rdbuf();

            while (file_buffer >> word) {

                strip_number = std::stoi(word);

                file_buffer >> word;
                if (!mythen_detector->get_bad_channels()[strip_number])
                    flat_field[strip_number] = std::stod(word);
            }

            file.close();
        } catch (const std::exception &e) {
            std::cerr << "Error: " << e.what() << std::endl;
        }
    }

    NDView<uint32_t, 1> get_flatfield() const { return flat_field.view(); }

    double calculate_mean(double tolerance = 0.001) const {
        auto [sum, count] = std::accumulate(
            flat_field.begin(), flat_field.end(),
            std::make_pair<double, ssize_t>(0.0, 0),
            [&tolerance](std::pair<double, ssize_t> acc, const auto &element) {
                return element == 0 ? acc
                                    : std::make_pair(acc.first + element,
                                                     acc.second + 1);
            });

        return sum / count;
    }

    NDArray<double, 1>
    inverse_normalized_flatfield(double tolerance = 0.001) const {
        double mean = calculate_mean(tolerance);

        NDArray<double, 1> inverse_normalized_flatfield(flat_field.shape());

        for (ssize_t i = 0; i < flat_field.size(); ++i) {
            inverse_normalized_flatfield[i] =
                (flat_field[i] <= tolerance ? 0.0 : mean / flat_field[i]);
            if (inverse_normalized_flatfield[i] < tolerance)
                mythen_detector->get_bad_channels()[i] = true;
        }

        return inverse_normalized_flatfield; // TODO: better to have a copy in
                                             // this context but unnecessary
                                             // for angle calibration code
                                             // maybe provide inplace and copy option
                                             // maybe store as member variable access with view
    }

    NDArray<double, 1> normalized_flatfield(double tolerance = 0.001) const {
        double mean = calculate_mean(tolerance);

        NDArray<double, 1> normalized_flatfield(flat_field.shape());

        for (ssize_t i = 0; i < flat_field.size(); ++i) {
            normalized_flatfield[i] = flat_field[i] / mean;
            if (normalized_flatfield[i] < tolerance)
                mythen_detector->get_bad_channels()[i] = true;
        }
        return normalized_flatfield;
    }

  private:
    NDArray<uint32_t, 1> flat_field; // TODO: should be 2d
    std::shared_ptr<MythenDetectorSpecifications> mythen_detector;
};
} // namespace aare
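A short usage sketch for the FlatField class above, wired to the MythenDetectorSpecifications class added in this change set. The file names are placeholders.

// Sketch only: file names are placeholders, not part of the repository.
#include "aare/FlatField.hpp"
#include "aare/MythenDetectorSpecifications.hpp"

void flatfield_example() {
    auto det = std::make_shared<aare::MythenDetectorSpecifications>();
    det->read_bad_channels_from_file("bad_channels.txt");

    aare::FlatField flat_field(det);
    flat_field.read_flatfield_from_file("flatfield.txt");

    // per-strip correction weights; also flags low-response strips as bad
    aare::NDArray<double, 1> weights = flat_field.inverse_normalized_flatfield();
}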
@@ -107,8 +107,8 @@ class Frame {
     * @return NDView<T, 2>
     */
    template <typename T> NDView<T, 2> view() {
-        std::array<int64_t, 2> shape = {static_cast<int64_t>(m_rows),
-                                        static_cast<int64_t>(m_cols)};
+        std::array<ssize_t, 2> shape = {static_cast<ssize_t>(m_rows),
+                                        static_cast<ssize_t>(m_cols)};
        T *data = reinterpret_cast<T *>(m_data);
        return NDView<T, 2>(data, shape);
    }
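A small sketch of obtaining a view after the ssize_t change above. The constructor call mirrors Frame(rows, cols, Dtype(...)) as used in Hdf5FileReader later in this diff; the dimensions are arbitrary.

// Sketch only: dimensions are arbitrary; Dtype is constructed from a std::type_info
// as done in HDF5Dataset::store_as_frame below.
#include "aare/Frame.hpp"

void frame_view_example() {
    aare::Frame frame(512, 1024, aare::Dtype(typeid(uint16_t)));
    auto view = frame.view<uint16_t>(); // NDView<uint16_t, 2>, shape {512, 1024}
    view(0, 0) = 42;                    // writes through to the frame buffer
}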
@@ -1,6 +1,7 @@
 /************************************************
- * @file ApplyGainMap.hpp
- * @short function to apply gain map of image size to a vector of clusters
+ * @file GainMap.hpp
+ * @short function to apply gain map of image size to a vector of clusters -
+ * note: stored gain map is inverted for efficient application to images
 ***********************************************/
 
 #pragma once
@@ -12,14 +13,21 @@
 
 namespace aare {
 
-class GainMap {
+class InvertedGainMap {
 
   public:
-    explicit GainMap(const NDArray<double, 2> &gain_map)
-        : m_gain_map(gain_map) {};
+    explicit InvertedGainMap(const NDArray<double, 2> &gain_map)
+        : m_gain_map(gain_map) {
+        for (auto &item : m_gain_map) {
+            item = 1.0 / item;
+        }
+    };
 
-    explicit GainMap(const NDView<double, 2> gain_map) {
+    explicit InvertedGainMap(const NDView<double, 2> gain_map) {
         m_gain_map = NDArray<double, 2>(gain_map);
+        for (auto &item : m_gain_map) {
+            item = 1.0 / item;
+        }
     }
 
     template <typename ClusterType,
@@ -34,19 +42,21 @@ class GainMap {
         int64_t index_cluster_center_x = ClusterSizeX / 2;
         int64_t index_cluster_center_y = ClusterSizeY / 2;
         for (size_t i = 0; i < clustervec.size(); i++) {
-            auto &cl = clustervec.at(i);
+            auto &cl = clustervec[i];
 
             if (cl.x > 0 && cl.y > 0 && cl.x < m_gain_map.shape(1) - 1 &&
                 cl.y < m_gain_map.shape(0) - 1) {
                 for (size_t j = 0; j < ClusterSizeX * ClusterSizeY; j++) {
                     size_t x = cl.x + j % ClusterSizeX - index_cluster_center_x;
                     size_t y = cl.y + j / ClusterSizeX - index_cluster_center_y;
-                    cl.data[j] = cl.data[j] * static_cast<T>(m_gain_map(y, x));
+                    cl.data[j] = static_cast<T>(
+                        static_cast<double>(cl.data[j]) *
+                        m_gain_map(
+                            y, x)); // cast after conversion to keep precision
                 }
             } else {
-                memset(cl.data, 0,
-                       ClusterSizeX * ClusterSizeY *
-                           sizeof(T)); // clear edge clusters
+                // clear edge clusters
+                cl.data.fill(0);
             }
         }
     }
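A construction sketch for the renamed InvertedGainMap above: the gains are inverted once in the constructor so that application becomes a multiplication. The member that applies the map to a ClusterVector is not visible in this hunk, so apply_gain_map() below is an assumed placeholder name.

// Sketch only: gain values are arbitrary and apply_gain_map() is an assumed name.
#include "aare/GainMap.hpp"
#include "aare/NDArray.hpp"

void gain_map_example(aare::ClusterVector<aare::Cluster<int32_t, 3, 3>> &clusters) {
    aare::NDArray<double, 2> gains(std::array<ssize_t, 2>{512, 1024}, 1.0);
    aare::InvertedGainMap inverted(gains); // stores 1.0 / gain per pixel
    inverted.apply_gain_map(clusters);     // assumed member name (not shown above)
}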
212  include/aare/Hdf5FileReader.hpp  Normal file
@@ -0,0 +1,212 @@
|
||||
/************************************************
|
||||
* @file HD5FFileReader.hpp
|
||||
* @short HDF5FileReader based on H5File object
|
||||
***********************************************/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "Frame.hpp"
|
||||
#include "NDArray.hpp"
|
||||
#include <H5Cpp.h>
|
||||
#include <array>
|
||||
#include <cxxabi.h>
|
||||
#include <optional>
|
||||
|
||||
namespace aare {
|
||||
|
||||
// return std::type_info
|
||||
inline const std::type_info &deduce_cpp_type(const H5::DataType datatype) {
|
||||
if (H5Tequal(datatype.getId(), H5::PredType::NATIVE_UINT8.getId())) {
|
||||
return typeid(uint8_t);
|
||||
} else if (H5Tequal(datatype.getId(),
|
||||
H5::PredType::NATIVE_UINT16.getId())) {
|
||||
return typeid(uint16_t);
|
||||
} else if (H5Tequal(datatype.getId(),
|
||||
H5::PredType::NATIVE_UINT32.getId())) {
|
||||
return typeid(uint32_t);
|
||||
} else if (H5Tequal(datatype.getId(),
|
||||
H5::PredType::NATIVE_UINT64.getId())) {
|
||||
return typeid(uint64_t);
|
||||
} else if (H5Tequal(datatype.getId(), H5::PredType::NATIVE_INT8.getId())) {
|
||||
return typeid(int8_t);
|
||||
} else if (H5Tequal(datatype.getId(), H5::PredType::NATIVE_INT16.getId())) {
|
||||
return typeid(int16_t);
|
||||
} else if (H5Tequal(datatype.getId(), H5::PredType::NATIVE_INT32.getId())) {
|
||||
return typeid(int32_t);
|
||||
} else if (H5Tequal(datatype.getId(), H5::PredType::NATIVE_INT64.getId())) {
|
||||
return typeid(int64_t);
|
||||
} else if (H5Tequal(datatype.getId(), H5::PredType::NATIVE_INT.getId())) {
|
||||
return typeid(int);
|
||||
} else if (H5Tequal(datatype.getId(), H5::PredType::IEEE_F64LE.getId())) {
|
||||
return typeid(double);
|
||||
} else if (H5Tequal(datatype.getId(), H5::PredType::IEEE_F32LE.getId())) {
|
||||
return typeid(float);
|
||||
} else if (H5Tequal(datatype.getId(), H5::PredType::NATIVE_FLOAT.getId())) {
|
||||
return typeid(float);
|
||||
} else if (H5Tequal(datatype.getId(),
|
||||
H5::PredType::NATIVE_DOUBLE.getId())) {
|
||||
return typeid(float);
|
||||
} else if (H5Tequal(datatype.getId(), H5::PredType::NATIVE_CHAR.getId()) &&
|
||||
datatype.getId() == H5::PredType::NATIVE_CHAR.getId()) {
|
||||
return typeid(char);
|
||||
} else {
|
||||
throw std::runtime_error("c++ type cannot be deduced");
|
||||
}
|
||||
}
|
||||
|
||||
struct Subset {
|
||||
std::vector<hsize_t> shape;
|
||||
std::vector<hsize_t> offset; // index where data subset should start
|
||||
};
|
||||
|
||||
class HDF5Dataset {
|
||||
|
||||
public:
|
||||
HDF5Dataset(const std::string &datasetname_, const H5::DataSet dataset_)
|
||||
: datasetname(datasetname_), dataset(dataset_) {
|
||||
datatype = dataset.getDataType();
|
||||
|
||||
cpp_type = &deduce_cpp_type(datatype);
|
||||
|
||||
dataspace = dataset.getSpace();
|
||||
rank = dataspace.getSimpleExtentNdims(); // number of dimensions
|
||||
|
||||
shape.resize(rank);
|
||||
dataspace.getSimpleExtentDims(shape.data(), nullptr);
|
||||
}
|
||||
|
||||
hsize_t get_shape(ssize_t index) const { return shape[index]; }
|
||||
|
||||
std::vector<hsize_t> get_shape() const { return shape; }
|
||||
|
||||
H5::DataType get_datatype() const { return datatype; }
|
||||
|
||||
const std::type_info *get_cpp_type() const { return cpp_type; }
|
||||
|
||||
/**
|
||||
* Reads subset of dataset into the buffer
|
||||
* e.g. to read one 2d frame pass Subset({shape[1], shape[2]}, {frame_index,
|
||||
* 0,0})
|
||||
*/
|
||||
void
|
||||
read_into_buffer(std::byte *buffer,
|
||||
std::optional<const Subset> subset = std::nullopt) const {
|
||||
|
||||
if (subset) {
|
||||
// TODO treat scalar cases
|
||||
if (static_cast<ssize_t>(subset->offset.size()) != rank) {
|
||||
throw std::runtime_error("provide an offset for" +
|
||||
std::to_string(rank) + "dimensions");
|
||||
}
|
||||
for (ssize_t i = 0; i < rank; ++i) {
|
||||
hsize_t size =
|
||||
i < rank - static_cast<ssize_t>(subset->shape.size())
|
||||
? 0
|
||||
: subset->shape[i - (rank - subset->shape.size())];
|
||||
if ((size + subset->offset[i]) > shape[i]) {
|
||||
throw std::runtime_error(
|
||||
"subset is too large or offset is out of bounds");
|
||||
}
|
||||
}
|
||||
|
||||
H5::DataSpace memspace(static_cast<int>(subset->shape.size()),
|
||||
subset->shape.data());
|
||||
|
||||
dataspace.selectHyperslab(H5S_SELECT_SET, subset->shape.data(),
|
||||
subset->offset.data());
|
||||
dataset.read(buffer, datatype, memspace, dataspace);
|
||||
} else {
|
||||
dataset.read(buffer, datatype);
|
||||
}
|
||||
}
|
||||
|
||||
Frame store_as_frame() const {
|
||||
uint32_t rows{}, cols{};
|
||||
if (rank == 1) {
|
||||
rows = 1;
|
||||
// TODO overflow
|
||||
cols = static_cast<uint32_t>(shape[0]);
|
||||
} else if (rank == 2) {
|
||||
rows = static_cast<uint32_t>(shape[0]);
|
||||
cols = static_cast<uint32_t>(shape[1]);
|
||||
} else {
|
||||
throw std::runtime_error("Frame only supports 2d images");
|
||||
}
|
||||
|
||||
Frame frame(rows, cols, Dtype(*cpp_type));
|
||||
|
||||
read_into_buffer(frame.data());
|
||||
|
||||
return frame;
|
||||
}
|
||||
|
||||
template <typename T, ssize_t NDim>
|
||||
NDArray<T, NDim> store_as_ndarray() const {
|
||||
if (NDim != rank) {
|
||||
std::cout
|
||||
<< "Warning: dataset dimension and array dimension mismatch"
|
||||
<< std::endl; // TODO: replace with log - still valid if we
|
||||
// want subset
|
||||
}
|
||||
if (typeid(T) != *cpp_type) {
|
||||
std::cout << "Warning: dataset and array type mismatch"
|
||||
<< std::endl;
|
||||
}
|
||||
std::array<ssize_t, NDim> array_shape{};
|
||||
std::transform(
|
||||
shape.begin(), shape.end(), array_shape.begin(),
|
||||
[](const auto dim) { return static_cast<ssize_t>(dim); });
|
||||
|
||||
aare::NDArray<T, NDim> dataset_array(array_shape);
|
||||
|
||||
read_into_buffer(reinterpret_cast<std::byte *>(dataset_array.data()));
|
||||
|
||||
return dataset_array;
|
||||
}
|
||||
|
||||
// getMemDataSize()
|
||||
|
||||
private:
|
||||
std::string datasetname{};
|
||||
H5::DataSet dataset;
|
||||
H5::DataSpace dataspace;
|
||||
H5::DataType datatype;
|
||||
const std::type_info *cpp_type;
|
||||
ssize_t rank{};
|
||||
std::vector<hsize_t> shape{};
|
||||
};
|
||||
|
||||
class HDF5FileReader {
|
||||
|
||||
public:
|
||||
HDF5FileReader() = default;
|
||||
|
||||
void open_file(const std::string &filename_) {
|
||||
filename = filename_;
|
||||
try {
|
||||
file = H5::H5File(filename, H5F_ACC_RDONLY);
|
||||
} catch (H5::Exception &e) {
|
||||
std::cerr << "Error: " << e.getDetailMsg() << std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
void close_file() { file.close(); }
|
||||
|
||||
HDF5Dataset get_dataset(const std::string &dataset_name) const {
|
||||
H5::DataSet dataset;
|
||||
try {
|
||||
dataset = file.openDataSet(dataset_name);
|
||||
} catch (H5::Exception &e) {
|
||||
std::cerr << "Error: " << e.getDetailMsg() << std::endl;
|
||||
}
|
||||
|
||||
// TODO use optional to handle error
|
||||
return HDF5Dataset(dataset_name, dataset);
|
||||
}
|
||||
|
||||
private:
|
||||
std::string filename{};
|
||||
H5::H5File file;
|
||||
};
|
||||
|
||||
} // namespace aare
|
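A usage sketch for the HDF5FileReader/HDF5Dataset pair defined above: open a file, fetch a dataset, and materialize it as an NDArray. The file name and dataset path are placeholders.

// Sketch only: "data.h5" and the dataset path are placeholders.
#include "aare/Hdf5FileReader.hpp"

void hdf5_example() {
    aare::HDF5FileReader reader;
    reader.open_file("data.h5");

    aare::HDF5Dataset dataset = reader.get_dataset("/entry/instrument/detector/data");
    auto counts = dataset.store_as_ndarray<uint32_t, 1>(); // NDArray<uint32_t, 1>

    reader.close_file();
}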
@@ -44,9 +44,8 @@ Interpolator::interpolate(const ClusterVector<ClusterType> &clusters) {
     photons.reserve(clusters.size());
 
     if (clusters.cluster_size_x() == 3 || clusters.cluster_size_y() == 3) {
-        for (size_t i = 0; i < clusters.size(); i++) {
+        for (const ClusterType &cluster : clusters) {
 
-            auto cluster = clusters.at(i);
             auto eta = calculate_eta2(cluster);
 
             Photon photon;
@@ -70,20 +69,20 @@ Interpolator::interpolate(const ClusterVector<ClusterType> &clusters) {
             // cBottomRight = 1,
             // cTopLeft = 2,
             // cTopRight = 3
-            switch (eta.c) {
-            case cTopLeft:
+            switch (static_cast<corner>(eta.c)) {
+            case corner::cTopLeft:
                 dX = -1.;
                 dY = 0;
                 break;
-            case cTopRight:;
+            case corner::cTopRight:;
                 dX = 0;
                 dY = 0;
                 break;
-            case cBottomLeft:
+            case corner::cBottomLeft:
                 dX = -1.;
                 dY = -1.;
                 break;
-            case cBottomRight:
+            case corner::cBottomRight:
                 dX = 0.;
                 dY = -1.;
                 break;
@@ -94,8 +93,7 @@ Interpolator::interpolate(const ClusterVector<ClusterType> &clusters) {
         }
     } else if (clusters.cluster_size_x() == 2 ||
               clusters.cluster_size_y() == 2) {
-        for (size_t i = 0; i < clusters.size(); i++) {
-            auto cluster = clusters.at(i);
+        for (const ClusterType &cluster : clusters) {
             auto eta = calculate_eta2(cluster);
 
             Photon photon;
150  include/aare/MythenDetectorSpecifications.hpp  Normal file
@@ -0,0 +1,150 @@
|
||||
#pragma once
|
||||
#include <cmath>
|
||||
#include <cstdint>
|
||||
|
||||
#include <fstream>
|
||||
#include <iostream>
|
||||
#include <sstream>
|
||||
#include <string>
|
||||
|
||||
#include "NDArray.hpp"
|
||||
|
||||
namespace aare {
|
||||
|
||||
class MythenDetectorSpecifications {
|
||||
|
||||
public:
|
||||
// TODO: constructor that reads from a config file
|
||||
|
||||
MythenDetectorSpecifications() {
|
||||
num_strips_ = max_modules_ * strips_per_module_;
|
||||
|
||||
num_connected_modules_ = max_modules_;
|
||||
|
||||
bad_channels =
|
||||
NDArray<bool, 1>(std::array<ssize_t, 1>{num_strips_}, false);
|
||||
|
||||
connected_modules = NDArray<bool, 1>(
|
||||
std::array<ssize_t, 1>{static_cast<ssize_t>(max_modules_)}, true);
|
||||
}
|
||||
|
||||
MythenDetectorSpecifications(const size_t max_modules,
|
||||
const double exposure_time,
|
||||
const double bloffset)
|
||||
: max_modules_(max_modules), exposure_time_(exposure_time),
|
||||
bloffset_(bloffset) {
|
||||
num_strips_ = max_modules_ * strips_per_module_;
|
||||
|
||||
num_connected_modules_ = max_modules_;
|
||||
|
||||
bad_channels =
|
||||
NDArray<bool, 1>(std::array<ssize_t, 1>{num_strips_}, false);
|
||||
|
||||
connected_modules = NDArray<bool, 1>(
|
||||
std::array<ssize_t, 1>{static_cast<ssize_t>(max_modules_)}, true);
|
||||
}
|
||||
|
||||
void read_bad_channels_from_file(const std::string &filename) {
|
||||
std::string line;
|
||||
|
||||
try {
|
||||
std::ifstream file(filename, std::ios_base::in);
|
||||
if (!file.good()) {
|
||||
throw std::logic_error("file does not exist");
|
||||
}
|
||||
|
||||
while (std::getline(file, line)) {
|
||||
std::size_t pos = line.find("-");
|
||||
|
||||
if (pos == std::string::npos) {
|
||||
bad_channels(std::stoi(line)) = true;
|
||||
} else {
|
||||
size_t line_size = line.size();
|
||||
for (int i = std::stoi(line.substr(0, pos));
|
||||
i <= std::stoi(line.substr(pos + 1, line_size - pos));
|
||||
++i)
|
||||
bad_channels(i) = true;
|
||||
}
|
||||
}
|
||||
|
||||
file.close();
|
||||
} catch (const std::exception &e) {
|
||||
std::cerr << "Error: " << e.what() << std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
void read_unconnected_modules_from_file(const std::string &filename) {
|
||||
std::string line;
|
||||
|
||||
try {
|
||||
std::ifstream file(filename, std::ios_base::in);
|
||||
if (!file.good()) {
|
||||
throw std::logic_error("file does not exist");
|
||||
}
|
||||
|
||||
std::stringstream file_buffer;
|
||||
file_buffer << file.rdbuf();
|
||||
|
||||
file_buffer >> line;
|
||||
num_connected_modules_ -= std::stoi(line);
|
||||
|
||||
while (file_buffer >> line) {
|
||||
size_t module = std::stoi(line);
|
||||
connected_modules[module] = false;
|
||||
for (size_t i = module * strips_per_module_;
|
||||
i < (module + 1) * strips_per_module_; ++i)
|
||||
bad_channels[i] = true;
|
||||
}
|
||||
} catch (const std::exception &e) {
|
||||
std::cerr << "Error: " << e.what() << std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
NDView<bool, 1> get_bad_channels() const { return bad_channels.view(); }
|
||||
|
||||
NDView<bool, 1> get_connected_modules() const {
|
||||
return connected_modules.view();
|
||||
}
|
||||
|
||||
static constexpr double pitch() { return pitch_; }
|
||||
|
||||
static constexpr size_t strips_per_module() { return strips_per_module_; }
|
||||
|
||||
size_t max_modules() const { return max_modules_; }
|
||||
|
||||
double exposure_time() const { return exposure_time_; }
|
||||
|
||||
double bloffset() const { return bloffset_; }
|
||||
|
||||
double dtt0() const { return dtt0_; }
|
||||
|
||||
static constexpr double min_angle() { return min_angle_; }
|
||||
|
||||
static constexpr double max_angle() { return max_angle_; }
|
||||
|
||||
ssize_t num_strips() const { return num_strips_; }
|
||||
|
||||
private:
|
||||
static constexpr size_t strips_per_module_ = 1280;
|
||||
static constexpr double pitch_ = 0.05; // strip width [mm]
|
||||
static constexpr double min_angle_ =
|
||||
-180.0; // maybe shoudnt be static but configurable
|
||||
static constexpr double max_angle_ = 180.0;
|
||||
static constexpr double dtt0_ =
|
||||
0.0; // No idea what this is - probably configurable
|
||||
|
||||
size_t max_modules_ = 48;
|
||||
|
||||
double exposure_time_ = 5.0; // TODO: could read from acquired file but
|
||||
// maybe should be configurable
|
||||
double bloffset_ = 1.532; // what is this? detector offset relative to what?
|
||||
|
||||
size_t num_connected_modules_{};
|
||||
|
||||
ssize_t num_strips_{};
|
||||
|
||||
NDArray<bool, 1> bad_channels;
|
||||
NDArray<bool, 1> connected_modules; // connected modules
|
||||
};
|
||||
|
||||
} // namespace aare
|
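A configuration sketch for the MythenDetectorSpecifications class above, using its three-argument constructor and the two file readers it exposes. The file names are placeholders.

// Sketch only: file names are placeholders.
#include "aare/MythenDetectorSpecifications.hpp"

void mythen_specs_example() {
    // max_modules, exposure_time, bloffset
    aare::MythenDetectorSpecifications specs(48, 5.0, 1.532);
    specs.read_bad_channels_from_file("bad_channels.txt");
    specs.read_unconnected_modules_from_file("unconnected_modules.txt");

    auto bad = specs.get_bad_channels(); // NDView<bool, 1> over all strips
    ssize_t n = specs.num_strips();      // 48 * 1280 strips
}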
82  include/aare/MythenFileReader.hpp  Normal file
@@ -0,0 +1,82 @@
/************************************************
 * @file MythenFileReader.hpp
 * @short minimal file reader to read mythen files
 ***********************************************/

#include <bitset>
#include <filesystem>
#include <string>

#include "Hdf5FileReader.hpp"
#include "NDArray.hpp"

namespace aare {

struct MythenFrame {
    NDArray<uint32_t, 1> photon_counts;
    double detector_angle{};
    // double reference_intensity{}; not needed
    std::array<uint8_t, 3> channel_mask{};
};

/** minimal version for a mythen file reader */
class MythenFileReader : public HDF5FileReader {

  public:
    MythenFileReader(const std::filesystem::path &file_path_,
                     const std::string &file_prefix_)
        : m_base_path(file_path_), file_prefix(file_prefix_) {};

    MythenFrame read_frame(ssize_t frame_index) {
        // TODO not a good design fixed number of digits in file name for frame
        // number -> pad with zeros
        // not even sure if files have the same name
        std::string current_file_name =
            m_base_path / (file_prefix + std::to_string(frame_index) + ".h5");

        MythenFrame frame;
        open_file(current_file_name);

        auto dataset_photon_count =
            get_dataset("/entry/instrument/detector/data");

        frame.photon_counts =
            dataset_photon_count.store_as_ndarray<uint32_t, 1>();

        ++frame.photon_counts; // Why though?

        auto dataset_detector_angle =
            get_dataset("/entry/instrument/NDAttributes/DetectorAngle");

        dataset_detector_angle.read_into_buffer(
            reinterpret_cast<std::byte *>(&frame.detector_angle));

        auto dataset_channel_number =
            get_dataset("/entry/instrument/NDAttributes/CounterMask");

        uint8_t channel_number;

        dataset_channel_number.read_into_buffer(
            reinterpret_cast<std::byte *>(&channel_number));

        std::bitset<3> binary_channel_numbers(channel_number); // 1 0 0

        // binary_channel_numbers.flip(); // TODO not sure where most
        // significant
        // bit is ask Anna again

        frame.channel_mask = std::array<uint8_t, 3>{binary_channel_numbers[0],
                                                    binary_channel_numbers[1],
                                                    binary_channel_numbers[2]};

        close_file();

        return frame;
    }

  private:
    std::filesystem::path m_base_path{};
    std::string file_prefix{};
};

} // namespace aare
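A usage sketch for the MythenFileReader above: it composes the file name from the prefix, the frame index, and ".h5", then returns counts, detector angle, and counter mask. The directory and prefix below are placeholders.

// Sketch only: the directory and file prefix are placeholders.
#include "aare/MythenFileReader.hpp"

void mythen_read_example() {
    aare::MythenFileReader reader("/path/to/acquisition", "run_");
    aare::MythenFrame frame = reader.read_frame(21); // opens .../run_21.h5

    // frame.photon_counts : NDArray<uint32_t, 1>, one value per strip
    // frame.detector_angle: detector angle stored with this acquisition
    // frame.channel_mask  : which of the three counters were enabled
}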
@ -21,11 +21,10 @@ TODO! Add expression templates for operators
|
||||
|
||||
namespace aare {
|
||||
|
||||
|
||||
template <typename T, int64_t Ndim = 2>
|
||||
template <typename T, ssize_t Ndim = 2>
|
||||
class NDArray : public ArrayExpr<NDArray<T, Ndim>, Ndim> {
|
||||
std::array<int64_t, Ndim> shape_;
|
||||
std::array<int64_t, Ndim> strides_;
|
||||
std::array<ssize_t, Ndim> shape_;
|
||||
std::array<ssize_t, Ndim> strides_;
|
||||
size_t size_{};
|
||||
T *data_;
|
||||
|
||||
@ -42,20 +41,19 @@ class NDArray : public ArrayExpr<NDArray<T, Ndim>, Ndim> {
|
||||
*
|
||||
* @param shape shape of the new NDArray
|
||||
*/
|
||||
explicit NDArray(std::array<int64_t, Ndim> shape)
|
||||
explicit NDArray(std::array<ssize_t, Ndim> shape)
|
||||
: shape_(shape), strides_(c_strides<Ndim>(shape_)),
|
||||
size_(std::accumulate(shape_.begin(), shape_.end(), 1,
|
||||
std::multiplies<>())),
|
||||
data_(new T[size_]) {}
|
||||
|
||||
|
||||
/**
|
||||
* @brief Construct a new NDArray object with a shape and value.
|
||||
*
|
||||
* @param shape shape of the new array
|
||||
* @param value value to initialize the array with
|
||||
*/
|
||||
NDArray(std::array<int64_t, Ndim> shape, T value) : NDArray(shape) {
|
||||
NDArray(std::array<ssize_t, Ndim> shape, T value) : NDArray(shape) {
|
||||
this->operator=(value);
|
||||
}
|
||||
|
||||
@ -69,8 +67,8 @@ class NDArray : public ArrayExpr<NDArray<T, Ndim>, Ndim> {
|
||||
std::copy(v.begin(), v.end(), begin());
|
||||
}
|
||||
|
||||
template<size_t Size>
|
||||
NDArray(const std::array<T, Size>& arr) : NDArray<T,1>({Size}) {
|
||||
template <size_t Size>
|
||||
NDArray(const std::array<T, Size> &arr) : NDArray<T, 1>({Size}) {
|
||||
std::copy(arr.begin(), arr.end(), begin());
|
||||
}
|
||||
|
||||
@ -79,7 +77,6 @@ class NDArray : public ArrayExpr<NDArray<T, Ndim>, Ndim> {
|
||||
: shape_(other.shape_), strides_(c_strides<Ndim>(shape_)),
|
||||
size_(other.size_), data_(other.data_) {
|
||||
other.reset(); // TODO! is this necessary?
|
||||
|
||||
}
|
||||
|
||||
// Copy constructor
|
||||
@ -113,10 +110,10 @@ class NDArray : public ArrayExpr<NDArray<T, Ndim>, Ndim> {
|
||||
NDArray &operator-=(const NDArray &other);
|
||||
NDArray &operator*=(const NDArray &other);
|
||||
|
||||
//Write directly to the data array, or create a new one
|
||||
template<size_t Size>
|
||||
NDArray<T,1>& operator=(const std::array<T,Size> &other){
|
||||
if(Size != size_){
|
||||
// Write directly to the data array, or create a new one
|
||||
template <size_t Size>
|
||||
NDArray<T, 1> &operator=(const std::array<T, Size> &other) {
|
||||
if (Size != size_) {
|
||||
delete[] data_;
|
||||
size_ = Size;
|
||||
data_ = new T[size_];
|
||||
@ -142,6 +139,9 @@ class NDArray : public ArrayExpr<NDArray<T, Ndim>, Ndim> {
|
||||
|
||||
NDArray<bool, Ndim> operator>(const NDArray &other);
|
||||
|
||||
bool equals(const NDArray<T, Ndim> &other,
|
||||
const T tolerance = std::numeric_limits<T>::epsilon()) const;
|
||||
|
||||
bool operator==(const NDArray &other) const;
|
||||
bool operator!=(const NDArray &other) const;
|
||||
|
||||
@ -157,11 +157,6 @@ class NDArray : public ArrayExpr<NDArray<T, Ndim>, Ndim> {
|
||||
|
||||
NDArray &operator&=(const T & /*mask*/);
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
void sqrt() {
|
||||
for (int i = 0; i < size_; ++i) {
|
||||
data_[i] = std::sqrt(data_[i]);
|
||||
@ -186,22 +181,22 @@ class NDArray : public ArrayExpr<NDArray<T, Ndim>, Ndim> {
|
||||
}
|
||||
|
||||
// TODO! is int the right type for index?
|
||||
T &operator()(int64_t i) { return data_[i]; }
|
||||
const T &operator()(int64_t i) const { return data_[i]; }
|
||||
T &operator()(ssize_t i) { return data_[i]; }
|
||||
const T &operator()(ssize_t i) const { return data_[i]; }
|
||||
|
||||
T &operator[](int64_t i) { return data_[i]; }
|
||||
const T &operator[](int64_t i) const { return data_[i]; }
|
||||
T &operator[](ssize_t i) { return data_[i]; }
|
||||
const T &operator[](ssize_t i) const { return data_[i]; }
|
||||
|
||||
T *data() { return data_; }
|
||||
std::byte *buffer() { return reinterpret_cast<std::byte *>(data_); }
|
||||
ssize_t size() const { return static_cast<ssize_t>(size_); }
|
||||
size_t total_bytes() const { return size_ * sizeof(T); }
|
||||
std::array<int64_t, Ndim> shape() const noexcept { return shape_; }
|
||||
int64_t shape(int64_t i) const noexcept { return shape_[i]; }
|
||||
std::array<int64_t, Ndim> strides() const noexcept { return strides_; }
|
||||
std::array<ssize_t, Ndim> shape() const noexcept { return shape_; }
|
||||
ssize_t shape(ssize_t i) const noexcept { return shape_[i]; }
|
||||
std::array<ssize_t, Ndim> strides() const noexcept { return strides_; }
|
||||
size_t bitdepth() const noexcept { return sizeof(T) * 8; }
|
||||
|
||||
std::array<int64_t, Ndim> byte_strides() const noexcept {
|
||||
std::array<ssize_t, Ndim> byte_strides() const noexcept {
|
||||
auto byte_strides = strides_;
|
||||
for (auto &val : byte_strides)
|
||||
val *= sizeof(T);
|
||||
@ -228,7 +223,7 @@ class NDArray : public ArrayExpr<NDArray<T, Ndim>, Ndim> {
|
||||
};
|
||||
|
||||
// Move assign
|
||||
template <typename T, int64_t Ndim>
|
||||
template <typename T, ssize_t Ndim>
|
||||
NDArray<T, Ndim> &
|
||||
NDArray<T, Ndim>::operator=(NDArray<T, Ndim> &&other) noexcept {
|
||||
if (this != &other) {
|
||||
@ -242,7 +237,7 @@ NDArray<T, Ndim>::operator=(NDArray<T, Ndim> &&other) noexcept {
|
||||
return *this;
|
||||
}
|
||||
|
||||
template <typename T, int64_t Ndim>
|
||||
template <typename T, ssize_t Ndim>
|
||||
NDArray<T, Ndim> &NDArray<T, Ndim>::operator+=(const NDArray<T, Ndim> &other) {
|
||||
// check shape
|
||||
if (shape_ == other.shape_) {
|
||||
@ -254,7 +249,7 @@ NDArray<T, Ndim> &NDArray<T, Ndim>::operator+=(const NDArray<T, Ndim> &other) {
|
||||
throw(std::runtime_error("Shape of ImageDatas must match"));
|
||||
}
|
||||
|
||||
template <typename T, int64_t Ndim>
|
||||
template <typename T, ssize_t Ndim>
|
||||
NDArray<T, Ndim> &NDArray<T, Ndim>::operator-=(const NDArray<T, Ndim> &other) {
|
||||
// check shape
|
||||
if (shape_ == other.shape_) {
|
||||
@ -266,7 +261,7 @@ NDArray<T, Ndim> &NDArray<T, Ndim>::operator-=(const NDArray<T, Ndim> &other) {
|
||||
throw(std::runtime_error("Shape of ImageDatas must match"));
|
||||
}
|
||||
|
||||
template <typename T, int64_t Ndim>
|
||||
template <typename T, ssize_t Ndim>
|
||||
NDArray<T, Ndim> &NDArray<T, Ndim>::operator*=(const NDArray<T, Ndim> &other) {
|
||||
// check shape
|
||||
if (shape_ == other.shape_) {
|
||||
@ -278,14 +273,14 @@ NDArray<T, Ndim> &NDArray<T, Ndim>::operator*=(const NDArray<T, Ndim> &other) {
|
||||
throw(std::runtime_error("Shape of ImageDatas must match"));
|
||||
}
|
||||
|
||||
template <typename T, int64_t Ndim>
|
||||
template <typename T, ssize_t Ndim>
|
||||
NDArray<T, Ndim> &NDArray<T, Ndim>::operator&=(const T &mask) {
|
||||
for (auto it = begin(); it != end(); ++it)
|
||||
*it &= mask;
|
||||
return *this;
|
||||
}
|
||||
|
||||
template <typename T, int64_t Ndim>
|
||||
template <typename T, ssize_t Ndim>
|
||||
NDArray<bool, Ndim> NDArray<T, Ndim>::operator>(const NDArray &other) {
|
||||
if (shape_ == other.shape_) {
|
||||
NDArray<bool, Ndim> result{shape_};
|
||||
@ -297,7 +292,7 @@ NDArray<bool, Ndim> NDArray<T, Ndim>::operator>(const NDArray &other) {
|
||||
throw(std::runtime_error("Shape of ImageDatas must match"));
|
||||
}
|
||||
|
||||
template <typename T, int64_t Ndim>
|
||||
template <typename T, ssize_t Ndim>
|
||||
NDArray<T, Ndim> &NDArray<T, Ndim>::operator=(const NDArray<T, Ndim> &other) {
|
||||
if (this != &other) {
|
||||
delete[] data_;
|
||||
@ -310,7 +305,7 @@ NDArray<T, Ndim> &NDArray<T, Ndim>::operator=(const NDArray<T, Ndim> &other) {
|
||||
return *this;
|
||||
}
|
||||
|
||||
template <typename T, int64_t Ndim>
|
||||
template <typename T, ssize_t Ndim>
|
||||
bool NDArray<T, Ndim>::operator==(const NDArray<T, Ndim> &other) const {
|
||||
if (shape_ != other.shape_)
|
||||
return false;
|
||||
@ -322,83 +317,81 @@ bool NDArray<T, Ndim>::operator==(const NDArray<T, Ndim> &other) const {
|
||||
return true;
|
||||
}
|
||||
|
||||
template <typename T, int64_t Ndim>
|
||||
template <typename T, ssize_t Ndim>
|
||||
bool NDArray<T, Ndim>::operator!=(const NDArray<T, Ndim> &other) const {
|
||||
return !((*this) == other);
|
||||
}
|
||||
template <typename T, int64_t Ndim>
|
||||
template <typename T, ssize_t Ndim>
|
||||
NDArray<T, Ndim> &NDArray<T, Ndim>::operator++() {
|
||||
for (uint32_t i = 0; i < size_; ++i)
|
||||
data_[i] += 1;
|
||||
return *this;
|
||||
}
|
||||
template <typename T, int64_t Ndim>
|
||||
template <typename T, ssize_t Ndim>
|
||||
NDArray<T, Ndim> &NDArray<T, Ndim>::operator=(const T &value) {
|
||||
std::fill_n(data_, size_, value);
|
||||
return *this;
|
||||
}
|
||||
|
||||
template <typename T, int64_t Ndim>
|
||||
template <typename T, ssize_t Ndim>
|
||||
NDArray<T, Ndim> &NDArray<T, Ndim>::operator+=(const T &value) {
|
||||
for (uint32_t i = 0; i < size_; ++i)
|
||||
data_[i] += value;
|
||||
return *this;
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
template <typename T, int64_t Ndim>
|
||||
template <typename T, ssize_t Ndim>
|
||||
NDArray<T, Ndim> NDArray<T, Ndim>::operator+(const T &value) {
|
||||
NDArray result = *this;
|
||||
result += value;
|
||||
return result;
|
||||
}
|
||||
template <typename T, int64_t Ndim>
|
||||
template <typename T, ssize_t Ndim>
|
||||
NDArray<T, Ndim> &NDArray<T, Ndim>::operator-=(const T &value) {
|
||||
for (uint32_t i = 0; i < size_; ++i)
|
||||
data_[i] -= value;
|
||||
return *this;
|
||||
}
|
||||
template <typename T, int64_t Ndim>
|
||||
template <typename T, ssize_t Ndim>
|
||||
NDArray<T, Ndim> NDArray<T, Ndim>::operator-(const T &value) {
|
||||
NDArray result = *this;
|
||||
result -= value;
|
||||
return result;
|
||||
}
|
||||
|
||||
template <typename T, int64_t Ndim>
|
||||
template <typename T, ssize_t Ndim>
|
||||
NDArray<T, Ndim> &NDArray<T, Ndim>::operator/=(const T &value) {
|
||||
for (uint32_t i = 0; i < size_; ++i)
|
||||
data_[i] /= value;
|
||||
return *this;
|
||||
}
|
||||
template <typename T, int64_t Ndim>
|
||||
template <typename T, ssize_t Ndim>
|
||||
NDArray<T, Ndim> NDArray<T, Ndim>::operator/(const T &value) {
|
||||
NDArray result = *this;
|
||||
result /= value;
|
||||
return result;
|
||||
}
|
||||
template <typename T, int64_t Ndim>
|
||||
template <typename T, ssize_t Ndim>
|
||||
NDArray<T, Ndim> &NDArray<T, Ndim>::operator*=(const T &value) {
|
||||
for (uint32_t i = 0; i < size_; ++i)
|
||||
data_[i] *= value;
|
||||
return *this;
|
||||
}
|
||||
template <typename T, int64_t Ndim>
|
||||
template <typename T, ssize_t Ndim>
|
||||
NDArray<T, Ndim> NDArray<T, Ndim>::operator*(const T &value) {
|
||||
NDArray result = *this;
|
||||
result *= value;
|
||||
return result;
|
||||
}
|
||||
// template <typename T, int64_t Ndim> void NDArray<T, Ndim>::Print() {
|
||||
|
||||
// template <typename T, ssize_t Ndim> void NDArray<T, Ndim>::Print() {
|
||||
// if (shape_[0] < 20 && shape_[1] < 20)
|
||||
// Print_all();
|
||||
// else
|
||||
// Print_some();
|
||||
// }
|
||||
|
||||
template <typename T, int64_t Ndim>
|
||||
template <typename T, ssize_t Ndim>
|
||||
std::ostream &operator<<(std::ostream &os, const NDArray<T, Ndim> &arr) {
|
||||
for (auto row = 0; row < arr.shape(0); ++row) {
|
||||
for (auto col = 0; col < arr.shape(1); ++col) {
|
||||
@ -410,7 +403,7 @@ std::ostream &operator<<(std::ostream &os, const NDArray<T, Ndim> &arr) {
|
||||
return os;
|
||||
}
|
||||
|
||||
template <typename T, int64_t Ndim> void NDArray<T, Ndim>::Print_all() {
|
||||
template <typename T, ssize_t Ndim> void NDArray<T, Ndim>::Print_all() {
|
||||
for (auto row = 0; row < shape_[0]; ++row) {
|
||||
for (auto col = 0; col < shape_[1]; ++col) {
|
||||
std::cout << std::setw(3);
|
||||
@ -419,7 +412,7 @@ template <typename T, int64_t Ndim> void NDArray<T, Ndim>::Print_all() {
|
||||
std::cout << "\n";
|
||||
}
|
||||
}
|
||||
template <typename T, int64_t Ndim> void NDArray<T, Ndim>::Print_some() {
|
||||
template <typename T, ssize_t Ndim> void NDArray<T, Ndim>::Print_some() {
|
||||
for (auto row = 0; row < 5; ++row) {
|
||||
for (auto col = 0; col < 5; ++col) {
|
||||
std::cout << std::setw(7);
|
||||
@ -429,25 +422,52 @@ template <typename T, int64_t Ndim> void NDArray<T, Ndim>::Print_some() {
|
||||
}
|
||||
}
|
||||
|
||||
template <typename T, int64_t Ndim>
|
||||
template <typename T, ssize_t Ndim>
|
||||
void save(NDArray<T, Ndim> &img, std::string &pathname) {
|
||||
std::ofstream f;
|
||||
f.open(pathname, std::ios::binary);
|
||||
f.write(img.buffer(), img.size() * sizeof(T));
|
||||
f.write(reinterpret_cast<char *>(img.buffer()), img.size() * sizeof(T));
|
||||
f.close();
|
||||
}
|
||||
|
||||
template <typename T, int64_t Ndim>
|
||||
template <typename T, ssize_t Ndim>
|
||||
NDArray<T, Ndim> load(const std::string &pathname,
|
||||
std::array<int64_t, Ndim> shape) {
|
||||
std::array<ssize_t, Ndim> shape) {
|
||||
NDArray<T, Ndim> img{shape};
|
||||
std::ifstream f;
|
||||
f.open(pathname, std::ios::binary);
|
||||
f.read(img.buffer(), img.size() * sizeof(T));
|
||||
f.read(reinterpret_cast<char *>(img.buffer()), img.size() * sizeof(T));
|
||||
f.close();
|
||||
return img;
|
||||
}
|
||||
|
||||
template <typename T, ssize_t Ndim = 1>
NDArray<T, Ndim> load_non_binary_file(const std::string &filename,
                                      const std::array<ssize_t, Ndim> shape) {
    std::string word;
    NDArray<T, Ndim> array(shape);
    try {
        std::ifstream file(filename, std::ios_base::in);
        if (!file.good()) {
            throw std::logic_error("file does not exist");
        }

        std::stringstream file_buffer;
        file_buffer << file.rdbuf();

        ssize_t counter = 0;
        while (file_buffer >> word && counter < array.size()) {
            array[counter] = static_cast<T>(
                std::stod(word)); // TODO change for different Types
            ++counter;
        }

        file.close();
    } catch (const std::exception &e) {
        std::cerr << "Error: " << e.what() << std::endl;
    }

    return array;
}

} // namespace aare
@ -1,11 +1,12 @@
|
||||
#pragma once
|
||||
#include "aare/defs.hpp"
|
||||
#include "aare/ArrayExpr.hpp"
|
||||
#include "aare/defs.hpp"
|
||||
|
||||
#include <algorithm>
|
||||
#include <array>
|
||||
#include <cassert>
|
||||
#include <cstdint>
|
||||
#include <fstream>
|
||||
#include <functional>
|
||||
#include <iomanip>
|
||||
#include <iostream>
|
||||
@ -14,10 +15,11 @@
|
||||
#include <vector>
|
||||
namespace aare {
|
||||
|
||||
template <int64_t Ndim> using Shape = std::array<int64_t, Ndim>;
|
||||
template <ssize_t Ndim> using Shape = std::array<ssize_t, Ndim>;
|
||||
|
||||
// TODO! fix mismatch between signed and unsigned
|
||||
template <int64_t Ndim> Shape<Ndim> make_shape(const std::vector<size_t> &shape) {
|
||||
template <ssize_t Ndim>
|
||||
Shape<Ndim> make_shape(const std::vector<size_t> &shape) {
|
||||
if (shape.size() != Ndim)
|
||||
throw std::runtime_error("Shape size mismatch");
|
||||
Shape<Ndim> arr;
|
||||
@ -25,62 +27,74 @@ template <int64_t Ndim> Shape<Ndim> make_shape(const std::vector<size_t> &shape)
|
||||
return arr;
|
||||
}
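For reference, a small sketch of how make_shape converts a runtime vector into a fixed-size Shape (the dimension values are illustrative):

#include <vector>
#include "aare/NDView.hpp"

int main() {
    std::vector<size_t> dims{512, 1024};
    auto s = aare::make_shape<2>(dims); // Shape<2> == std::array<ssize_t, 2>{512, 1024}
    // aare::make_shape<3>(dims) would throw std::runtime_error("Shape size mismatch")
    return 0;
}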
|
||||
|
||||
template <int64_t Dim = 0, typename Strides> int64_t element_offset(const Strides & /*unused*/) { return 0; }
|
||||
template <ssize_t Dim = 0, typename Strides>
|
||||
ssize_t element_offset(const Strides & /*unused*/) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
template <int64_t Dim = 0, typename Strides, typename... Ix>
|
||||
int64_t element_offset(const Strides &strides, int64_t i, Ix... index) {
|
||||
template <ssize_t Dim = 0, typename Strides, typename... Ix>
|
||||
ssize_t element_offset(const Strides &strides, ssize_t i, Ix... index) {
|
||||
return i * strides[Dim] + element_offset<Dim + 1>(strides, index...);
|
||||
}
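With C-order strides, element_offset folds the indices as shown below; a tiny illustration, not part of the diff.

#include <array>
#include "aare/NDView.hpp"

int main() {
    std::array<ssize_t, 3> strides{12, 4, 1};          // strides of a {2,3,4} array
    auto off = aare::element_offset(strides, 1, 2, 3); // 1*12 + 2*4 + 3*1 == 23
    return off == 23 ? 0 : 1;
}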
|
||||
|
||||
template <int64_t Ndim> std::array<int64_t, Ndim> c_strides(const std::array<int64_t, Ndim> &shape) {
|
||||
std::array<int64_t, Ndim> strides{};
|
||||
template <ssize_t Ndim>
|
||||
std::array<ssize_t, Ndim> c_strides(const std::array<ssize_t, Ndim> &shape) {
|
||||
std::array<ssize_t, Ndim> strides{};
|
||||
std::fill(strides.begin(), strides.end(), 1);
|
||||
for (int64_t i = Ndim - 1; i > 0; --i) {
|
||||
for (ssize_t i = Ndim - 1; i > 0; --i) {
|
||||
strides[i - 1] = strides[i] * shape[i];
|
||||
}
|
||||
return strides;
|
||||
}
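A quick check of the stride computation above, assuming row-major (C) layout:

#include <array>
#include "aare/NDView.hpp"

int main() {
    std::array<ssize_t, 3> shape{2, 3, 4};
    auto strides = aare::c_strides<3>(shape); // {3*4, 4, 1} == {12, 4, 1}
    return strides[0] == 12 ? 0 : 1;
}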
|
||||
|
||||
template <int64_t Ndim> std::array<int64_t, Ndim> make_array(const std::vector<int64_t> &vec) {
|
||||
template <ssize_t Ndim>
|
||||
std::array<ssize_t, Ndim> make_array(const std::vector<ssize_t> &vec) {
|
||||
assert(vec.size() == Ndim);
|
||||
std::array<int64_t, Ndim> arr{};
|
||||
std::array<ssize_t, Ndim> arr{};
|
||||
std::copy_n(vec.begin(), Ndim, arr.begin());
|
||||
return arr;
|
||||
}
|
||||
|
||||
template <typename T, int64_t Ndim = 2> class NDView : public ArrayExpr<NDView<T, Ndim>, Ndim> {
|
||||
template <typename T, ssize_t Ndim = 2>
|
||||
class NDView : public ArrayExpr<NDView<T, Ndim>, Ndim> {
|
||||
public:
|
||||
NDView() = default;
|
||||
~NDView() = default;
|
||||
NDView(const NDView &) = default;
|
||||
NDView(NDView &&) = default;
|
||||
|
||||
NDView(T *buffer, std::array<int64_t, Ndim> shape)
|
||||
NDView(T *buffer, std::array<ssize_t, Ndim> shape)
|
||||
: buffer_(buffer), strides_(c_strides<Ndim>(shape)), shape_(shape),
|
||||
size_(std::accumulate(std::begin(shape), std::end(shape), 1, std::multiplies<>())) {}
|
||||
size_(std::accumulate(std::begin(shape), std::end(shape), 1,
|
||||
std::multiplies<>())) {}
|
||||
|
||||
// NDView(T *buffer, const std::vector<int64_t> &shape)
|
||||
// : buffer_(buffer), strides_(c_strides<Ndim>(make_array<Ndim>(shape))), shape_(make_array<Ndim>(shape)),
|
||||
// size_(std::accumulate(std::begin(shape), std::end(shape), 1, std::multiplies<>())) {}
|
||||
// NDView(T *buffer, const std::vector<ssize_t> &shape)
|
||||
// : buffer_(buffer),
|
||||
// strides_(c_strides<Ndim>(make_array<Ndim>(shape))),
|
||||
// shape_(make_array<Ndim>(shape)),
|
||||
// size_(std::accumulate(std::begin(shape), std::end(shape), 1,
|
||||
// std::multiplies<>())) {}
|
||||
|
||||
template <typename... Ix> std::enable_if_t<sizeof...(Ix) == Ndim, T &> operator()(Ix... index) {
|
||||
template <typename... Ix>
|
||||
std::enable_if_t<sizeof...(Ix) == Ndim, T &> operator()(Ix... index) {
|
||||
return buffer_[element_offset(strides_, index...)];
|
||||
}
|
||||
|
||||
template <typename... Ix> std::enable_if_t<sizeof...(Ix) == Ndim, T &> operator()(Ix... index) const {
|
||||
template <typename... Ix>
|
||||
std::enable_if_t<sizeof...(Ix) == Ndim, T &> operator()(Ix... index) const {
|
||||
return buffer_[element_offset(strides_, index...)];
|
||||
}
|
||||
|
||||
ssize_t size() const { return static_cast<ssize_t>(size_); }
|
||||
size_t total_bytes() const { return size_ * sizeof(T); }
|
||||
std::array<int64_t, Ndim> strides() const noexcept { return strides_; }
|
||||
std::array<ssize_t, Ndim> strides() const noexcept { return strides_; }
|
||||
|
||||
T *begin() { return buffer_; }
|
||||
T *end() { return buffer_ + size_; }
|
||||
T const *begin() const { return buffer_; }
|
||||
T const *end() const { return buffer_ + size_; }
|
||||
T &operator()(int64_t i) const { return buffer_[i]; }
|
||||
T &operator[](int64_t i) const { return buffer_[i]; }
|
||||
T &operator()(ssize_t i) const { return buffer_[i]; }
|
||||
T &operator[](ssize_t i) const { return buffer_[i]; }
|
||||
|
||||
bool operator==(const NDView &other) const {
|
||||
if (size_ != other.size_)
|
||||
@ -92,18 +106,37 @@ template <typename T, int64_t Ndim = 2> class NDView : public ArrayExpr<NDView<T
|
||||
return true;
|
||||
}
|
||||
|
||||
bool equals(const NDView<T, Ndim> &other, const T tolerance) const {
|
||||
if (shape_ != other.shape_)
|
||||
return false;
|
||||
|
||||
using SignedT = typename make_signed<T>::type;
|
||||
|
||||
for (uint32_t i = 0; i != size_; ++i)
|
||||
if (std::abs(static_cast<SignedT>(buffer_[i]) -
|
||||
static_cast<SignedT>(other.buffer_[i])) > tolerance)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
NDView &operator+=(const T val) { return elemenwise(val, std::plus<T>()); }
|
||||
NDView &operator-=(const T val) { return elemenwise(val, std::minus<T>()); }
|
||||
NDView &operator*=(const T val) { return elemenwise(val, std::multiplies<T>()); }
|
||||
NDView &operator/=(const T val) { return elemenwise(val, std::divides<T>()); }
|
||||
NDView &operator*=(const T val) {
|
||||
return elemenwise(val, std::multiplies<T>());
|
||||
}
|
||||
NDView &operator/=(const T val) {
|
||||
return elemenwise(val, std::divides<T>());
|
||||
}
|
||||
|
||||
NDView &operator/=(const NDView &other) { return elemenwise(other, std::divides<T>()); }
|
||||
NDView &operator/=(const NDView &other) {
|
||||
return elemenwise(other, std::divides<T>());
|
||||
}
|
||||
|
||||
|
||||
template<size_t Size>
|
||||
NDView& operator=(const std::array<T, Size> &arr) {
|
||||
if(size() != static_cast<ssize_t>(arr.size()))
|
||||
throw std::runtime_error(LOCATION + "Array and NDView size mismatch");
|
||||
template <size_t Size> NDView &operator=(const std::array<T, Size> &arr) {
|
||||
if (size() != static_cast<ssize_t>(arr.size()))
|
||||
throw std::runtime_error(LOCATION +
|
||||
"Array and NDView size mismatch");
|
||||
std::copy(arr.begin(), arr.end(), begin());
|
||||
return *this;
|
||||
}
|
||||
@ -136,31 +169,33 @@ template <typename T, int64_t Ndim = 2> class NDView : public ArrayExpr<NDView<T
|
||||
}
|
||||
|
||||
auto &shape() const { return shape_; }
|
||||
auto shape(int64_t i) const { return shape_[i]; }
|
||||
auto shape(ssize_t i) const { return shape_[i]; }
|
||||
|
||||
T *data() { return buffer_; }
|
||||
void print_all() const;
|
||||
|
||||
private:
|
||||
T *buffer_{nullptr};
|
||||
std::array<int64_t, Ndim> strides_{};
|
||||
std::array<int64_t, Ndim> shape_{};
|
||||
std::array<ssize_t, Ndim> strides_{};
|
||||
std::array<ssize_t, Ndim> shape_{};
|
||||
uint64_t size_{};
|
||||
|
||||
template <class BinaryOperation> NDView &elemenwise(T val, BinaryOperation op) {
|
||||
template <class BinaryOperation>
|
||||
NDView &elemenwise(T val, BinaryOperation op) {
|
||||
for (uint64_t i = 0; i != size_; ++i) {
|
||||
buffer_[i] = op(buffer_[i], val);
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
template <class BinaryOperation> NDView &elemenwise(const NDView &other, BinaryOperation op) {
|
||||
template <class BinaryOperation>
|
||||
NDView &elemenwise(const NDView &other, BinaryOperation op) {
|
||||
for (uint64_t i = 0; i != size_; ++i) {
|
||||
buffer_[i] = op(buffer_[i], other.buffer_[i]);
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
};
|
||||
template <typename T, int64_t Ndim> void NDView<T, Ndim>::print_all() const {
|
||||
template <typename T, ssize_t Ndim> void NDView<T, Ndim>::print_all() const {
|
||||
for (auto row = 0; row < shape_[0]; ++row) {
|
||||
for (auto col = 0; col < shape_[1]; ++col) {
|
||||
std::cout << std::setw(3);
|
||||
@ -170,9 +205,8 @@ template <typename T, int64_t Ndim> void NDView<T, Ndim>::print_all() const {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
template <typename T, int64_t Ndim>
|
||||
std::ostream& operator <<(std::ostream& os, const NDView<T, Ndim>& arr){
|
||||
template <typename T, ssize_t Ndim>
|
||||
std::ostream &operator<<(std::ostream &os, const NDView<T, Ndim> &arr) {
|
||||
for (auto row = 0; row < arr.shape(0); ++row) {
|
||||
for (auto col = 0; col < arr.shape(1); ++col) {
|
||||
os << std::setw(3);
|
||||
@ -183,5 +217,16 @@ std::ostream& operator <<(std::ostream& os, const NDView<T, Ndim>& arr){
|
||||
return os;
|
||||
}
|
||||
|
||||
template <typename T> NDView<T, 1> make_view(std::vector<T> &vec) {
|
||||
return NDView<T, 1>(vec.data(), {static_cast<ssize_t>(vec.size())});
|
||||
}
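A short usage sketch of NDView and make_view on top of an existing std::vector (sizes and values are illustrative):

#include <vector>
#include "aare/NDView.hpp"

int main() {
    std::vector<int> data(6, 1);

    // 1D view over the vector
    auto v1 = aare::make_view(data); // NDView<int, 1>
    v1 *= 2;                         // elementwise scaling, data now holds 2s

    // 2D view over the same buffer
    aare::NDView<int, 2> v2(data.data(), {2, 3});
    int corner = v2(1, 2);           // element at row 1, col 2
    return corner == 2 ? 0 : 1;
}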
|
||||
|
||||
template <typename T, ssize_t Ndim>
|
||||
void save(NDView<T, Ndim> img, const std::string &pathname) {
|
||||
std::ofstream f;
|
||||
f.open(pathname, std::ios::binary);
|
||||
f.write(reinterpret_cast<char *>(img.data()), img.size() * sizeof(T));
|
||||
f.close();
|
||||
}
|
||||
|
||||
} // namespace aare
|
@ -69,7 +69,7 @@ class NumpyFile : public FileInterface {
|
||||
*/
|
||||
template <typename T, size_t NDim> NDArray<T, NDim> load() {
|
||||
NDArray<T, NDim> arr(make_shape<NDim>(m_header.shape));
|
||||
if (fseek(fp, static_cast<int64_t>(header_size), SEEK_SET)) {
|
||||
if (fseek(fp, static_cast<long>(header_size), SEEK_SET)) {
|
||||
throw std::runtime_error(LOCATION + "Error seeking to the start of the data");
|
||||
}
|
||||
size_t rc = fread(arr.data(), sizeof(T), arr.size(), fp);
|
||||
|
@ -107,7 +107,7 @@ template <typename SUM_TYPE = double> class Pedestal {
|
||||
assert(frame.size() == m_rows * m_cols);
|
||||
|
||||
// TODO! move away from m_rows, m_cols
|
||||
if (frame.shape() != std::array<int64_t, 2>{m_rows, m_cols}) {
|
||||
if (frame.shape() != std::array<ssize_t, 2>{m_rows, m_cols}) {
|
||||
throw std::runtime_error(
|
||||
"Frame shape does not match pedestal shape");
|
||||
}
|
||||
@ -128,7 +128,7 @@ template <typename SUM_TYPE = double> class Pedestal {
|
||||
assert(frame.size() == m_rows * m_cols);
|
||||
|
||||
// TODO! move away from m_rows, m_cols
|
||||
if (frame.shape() != std::array<int64_t, 2>{m_rows, m_cols}) {
|
||||
if (frame.shape() != std::array<ssize_t, 2>{m_rows, m_cols}) {
|
||||
throw std::runtime_error(
|
||||
"Frame shape does not match pedestal shape");
|
||||
}
|
||||
|
@ -22,7 +22,7 @@ class RawSubFile {
|
||||
size_t m_rows{};
|
||||
size_t m_cols{};
|
||||
size_t m_bytes_per_frame{};
|
||||
size_t n_frames{};
|
||||
size_t m_num_frames{};
|
||||
uint32_t m_pos_row{};
|
||||
uint32_t m_pos_col{};
|
||||
|
||||
@ -53,6 +53,7 @@ class RawSubFile {
|
||||
size_t tell();
|
||||
|
||||
void read_into(std::byte *image_buf, DetectorHeader *header = nullptr);
|
||||
void read_into(std::byte *image_buf, size_t n_frames, DetectorHeader *header= nullptr);
|
||||
void get_part(std::byte *buffer, size_t frame_index);
|
||||
|
||||
void read_header(DetectorHeader *header);
|
||||
@ -66,6 +67,8 @@ class RawSubFile {
|
||||
size_t pixels_per_frame() const { return m_rows * m_cols; }
|
||||
size_t bytes_per_pixel() const { return m_bitdepth / bits_per_byte; }
|
||||
|
||||
size_t frames_in_file() const { return m_num_frames; }
|
||||
|
||||
private:
|
||||
template <typename T>
|
||||
void read_with_map(std::byte *image_buf);
|
||||
|
@ -28,7 +28,7 @@ template <typename T> class VarClusterFinder {
|
||||
};
|
||||
|
||||
private:
|
||||
const std::array<int64_t, 2> shape_;
|
||||
const std::array<ssize_t, 2> shape_;
|
||||
NDView<T, 2> original_;
|
||||
NDArray<int, 2> labeled_;
|
||||
NDArray<int, 2> peripheral_labeled_;
|
||||
|
@ -1,6 +1,7 @@
|
||||
#pragma once
|
||||
|
||||
#include <cstdint>
|
||||
#include <vector>
|
||||
#include <aare/NDView.hpp>
|
||||
namespace aare {
|
||||
|
||||
@ -10,4 +11,16 @@ uint16_t adc_sar_04_decode64to16(uint64_t input);
|
||||
void adc_sar_05_decode64to16(NDView<uint64_t, 2> input, NDView<uint16_t,2> output);
|
||||
void adc_sar_04_decode64to16(NDView<uint64_t, 2> input, NDView<uint16_t,2> output);
|
||||
|
||||
} // namespace aare
|
||||
|
||||
/**
|
||||
* @brief Apply custom weights to a 16-bit input value. Will sum up weights[i]**i
|
||||
* for each bit i that is set in the input value.
|
||||
* @throws std::out_of_range if weights.size() < 16
|
||||
* @param input 16-bit input value
|
||||
* @param weights vector of weights, must contain at least 16 elements
|
||||
*/
|
||||
double apply_custom_weights(uint16_t input, const NDView<double, 1> weights);
|
||||
|
||||
void apply_custom_weights(NDView<uint16_t, 1> input, NDView<double, 1> output, const NDView<double, 1> weights);
|
||||
|
||||
} // namespace aare
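A minimal sketch of calling the scalar overload declared above; the weight values are illustrative, and per the doc comment at least 16 weights are required.

#include <vector>
#include "aare/NDView.hpp"
#include "aare/decode.hpp"

int main() {
    std::vector<double> w(16, 2.0); // weights[i] == 2.0 reproduces plain binary weighting
    aare::NDView<double, 1> weights(w.data(), {static_cast<ssize_t>(w.size())});

    // bits 0 and 2 set -> 2**0 + 2**2 == 5 with these weights (per the doc comment above)
    double value = aare::apply_custom_weights(0b0000000000000101, weights);
    return value == 5.0 ? 0 : 1;
}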
|
||||
|
@ -3,16 +3,16 @@
|
||||
#include "aare/Dtype.hpp"
|
||||
|
||||
#include <array>
|
||||
#include <stdexcept>
|
||||
#include <cassert>
|
||||
#include <cstdint>
|
||||
#include <cstring>
|
||||
#include <stdexcept>
|
||||
#include <string>
|
||||
#include <string_view>
|
||||
#include <type_traits>
|
||||
#include <variant>
|
||||
#include <vector>
|
||||
|
||||
|
||||
/**
|
||||
* @brief LOCATION macro to get the current location in the code
|
||||
*/
|
||||
@ -20,28 +20,24 @@
|
||||
std::string(__FILE__) + std::string(":") + std::to_string(__LINE__) + \
|
||||
":" + std::string(__func__) + ":"
|
||||
|
||||
|
||||
|
||||
#ifdef AARE_CUSTOM_ASSERT
|
||||
#define AARE_ASSERT(expr)\
|
||||
if (expr)\
|
||||
{}\
|
||||
else\
|
||||
#define AARE_ASSERT(expr) \
|
||||
if (expr) { \
|
||||
} else \
|
||||
aare::assert_failed(LOCATION + " Assertion failed: " + #expr + "\n");
|
||||
#else
|
||||
#define AARE_ASSERT(cond)\
|
||||
do { (void)sizeof(cond); } while(0)
|
||||
#define AARE_ASSERT(cond) \
|
||||
do { \
|
||||
(void)sizeof(cond); \
|
||||
} while (0)
|
||||
#endif
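A small sketch of how the assert macro above is meant to be used; with AARE_CUSTOM_ASSERT defined (normally as a compile definition, defined inline here only for illustration) a failing expression is reported through aare::assert_failed, otherwise the check compiles to nothing.

#define AARE_CUSTOM_ASSERT
#include "aare/defs.hpp"

void set_frame_count(int n) {
    // on failure this expands to:
    // aare::assert_failed(LOCATION + " Assertion failed: n > 0\n")
    AARE_ASSERT(n > 0);
}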
|
||||
|
||||
|
||||
namespace aare {
|
||||
|
||||
inline constexpr size_t bits_per_byte = 8;
|
||||
|
||||
void assert_failed(const std::string &msg);
|
||||
|
||||
|
||||
|
||||
class DynamicCluster {
|
||||
public:
|
||||
int cluster_sizeX;
|
||||
@ -55,7 +51,7 @@ class DynamicCluster {
|
||||
|
||||
public:
|
||||
DynamicCluster(int cluster_sizeX_, int cluster_sizeY_,
|
||||
Dtype dt_ = Dtype(typeid(int32_t)))
|
||||
Dtype dt_ = Dtype(typeid(int32_t)))
|
||||
: cluster_sizeX(cluster_sizeX_), cluster_sizeY(cluster_sizeY_),
|
||||
dt(dt_) {
|
||||
m_data = new std::byte[cluster_sizeX * cluster_sizeY * dt.bytes()]{};
|
||||
@ -179,24 +175,24 @@ template <typename T> struct t_xy {
|
||||
};
|
||||
using xy = t_xy<uint32_t>;
|
||||
|
||||
|
||||
/**
|
||||
* @brief Class to hold the geometry of a module. Where pixel 0 is located and the size of the module
|
||||
* @brief Class to hold the geometry of a module. Where pixel 0 is located and
|
||||
* the size of the module
|
||||
*/
|
||||
struct ModuleGeometry{
|
||||
struct ModuleGeometry {
|
||||
int origin_x{};
|
||||
int origin_y{};
|
||||
int height{};
|
||||
int width{};
|
||||
int row_index{};
|
||||
int col_index{};
|
||||
int col_index{};
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Class to hold the geometry of a detector. Number of modules, their size and where pixel 0
|
||||
* for each module is located
|
||||
* @brief Class to hold the geometry of a detector. Number of modules, their
|
||||
* size and where pixel 0 for each module is located
|
||||
*/
|
||||
struct DetectorGeometry{
|
||||
struct DetectorGeometry {
|
||||
int modules_x{};
|
||||
int modules_y{};
|
||||
int pixels_x{};
|
||||
@ -206,31 +202,30 @@ struct DetectorGeometry{
|
||||
std::vector<ModuleGeometry> module_pixel_0;
|
||||
};
|
||||
|
||||
struct ROI{
|
||||
int64_t xmin{};
|
||||
int64_t xmax{};
|
||||
int64_t ymin{};
|
||||
int64_t ymax{};
|
||||
|
||||
int64_t height() const { return ymax - ymin; }
|
||||
int64_t width() const { return xmax - xmin; }
|
||||
bool contains(int64_t x, int64_t y) const {
|
||||
struct ROI {
|
||||
ssize_t xmin{};
|
||||
ssize_t xmax{};
|
||||
ssize_t ymin{};
|
||||
ssize_t ymax{};
|
||||
|
||||
ssize_t height() const { return ymax - ymin; }
|
||||
ssize_t width() const { return xmax - xmin; }
|
||||
bool contains(ssize_t x, ssize_t y) const {
|
||||
return x >= xmin && x < xmax && y >= ymin && y < ymax;
|
||||
}
|
||||
};
|
||||
};
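For illustration, the half-open convention of ROI::contains (xmax and ymax are exclusive) in a short snippet:

#include "aare/defs.hpp"

int main() {
    aare::ROI roi{0, 10, 0, 5};         // xmin, xmax, ymin, ymax
    bool inside = roi.contains(9, 4);   // true: upper edges are exclusive...
    bool outside = roi.contains(10, 4); // ...so x == xmax is outside
    return (inside && !outside) ? 0 : 1;
}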
|
||||
|
||||
using dynamic_shape = std::vector<ssize_t>;
|
||||
|
||||
using dynamic_shape = std::vector<int64_t>;
|
||||
|
||||
//TODO! Can we uniform enums between the libraries?
|
||||
// TODO! Can we uniform enums between the libraries?
|
||||
|
||||
/**
|
||||
* @brief Enum class to identify different detectors.
|
||||
* @brief Enum class to identify different detectors.
|
||||
* The values are the same as in slsDetectorPackage
|
||||
* Different spelling to avoid confusion with the slsDetectorPackage
|
||||
*/
|
||||
enum class DetectorType {
|
||||
//Standard detectors match the enum values from slsDetectorPackage
|
||||
// Standard detectors match the enum values from slsDetectorPackage
|
||||
Generic,
|
||||
Eiger,
|
||||
Gotthard,
|
||||
@ -241,8 +236,9 @@ enum class DetectorType {
|
||||
Gotthard2,
|
||||
Xilinx_ChipTestBoard,
|
||||
|
||||
//Additional detectors used for defining processing. Variants of the standard ones.
|
||||
Moench03=100,
|
||||
// Additional detectors used for defining processing. Variants of the
|
||||
// standard ones.
|
||||
Moench03 = 100,
|
||||
Moench03_old,
|
||||
Unknown
|
||||
};
|
||||
@ -263,4 +259,12 @@ template <> FrameDiscardPolicy StringTo(const std::string & /*mode*/);
|
||||
|
||||
using DataTypeVariants = std::variant<uint16_t, uint32_t>;
|
||||
|
||||
template <typename T, bool = std::is_integral_v<T>> struct make_signed {
|
||||
using type = T;
|
||||
};
|
||||
|
||||
template <typename T> struct make_signed<T, true> {
|
||||
using type = std::make_signed_t<T>;
|
||||
};
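The trait above maps integral types to their signed counterparts and leaves everything else (e.g. floating point) untouched; a compile-time check, for illustration:

#include <cstdint>
#include <type_traits>
#include "aare/defs.hpp"

static_assert(std::is_same_v<aare::make_signed<uint16_t>::type, int16_t>);
static_assert(std::is_same_v<aare::make_signed<double>::type, double>);

int main() { return 0; }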
|
||||
|
||||
} // namespace aare
|
include/aare/utils/ifstream_helpers.hpp (new file, 12 lines)
@ -0,0 +1,12 @@
|
||||
#pragma once
|
||||
|
||||
#include <fstream>
|
||||
#include <string>
|
||||
namespace aare {
|
||||
|
||||
/**
|
||||
* @brief Get the error message from an ifstream object
|
||||
*/
|
||||
std::string ifstream_error_msg(std::ifstream &ifs);
|
||||
|
||||
} // namespace aare
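A hedged usage sketch of the helper declared above; the exact message text depends on the implementation in the corresponding .cpp, which is not part of this diff.

#include <fstream>
#include <stdexcept>
#include "aare/utils/ifstream_helpers.hpp"

void read_block(std::ifstream &ifs, char *dst, std::streamsize n) {
    ifs.read(dst, n);
    if (!ifs) {
        // translate the stream state into a human readable message
        throw std::runtime_error("read failed: " + aare::ifstream_error_msg(ifs));
    }
}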
|
@ -1,22 +1,40 @@
|
||||
[tool.scikit-build.metadata.version]
|
||||
provider = "scikit_build_core.metadata.regex"
|
||||
input = "VERSION"
|
||||
regex = '^(?P<version>\d+(?:\.\d+)*(?:[\.\+\w]+)?)$'
|
||||
result = "{version}"
|
||||
|
||||
[build-system]
|
||||
requires = ["scikit-build-core>=0.10", "pybind11", "numpy"]
|
||||
build-backend = "scikit_build_core.build"
|
||||
|
||||
[project]
|
||||
name = "aare"
|
||||
version = "2025.4.1"
|
||||
dynamic = ["version"]
|
||||
requires-python = ">=3.11"
|
||||
dependencies = [
|
||||
"numpy",
|
||||
"matplotlib",
|
||||
]
|
||||
|
||||
|
||||
[tool.cibuildwheel]
|
||||
|
||||
build = "cp{311,312,313}-manylinux_x86_64"
|
||||
|
||||
|
||||
|
||||
|
||||
[tool.scikit-build]
|
||||
cmake.verbose = true
|
||||
build.verbose = true
|
||||
cmake.build-type = "Release"
|
||||
install.components = ["python"]
|
||||
|
||||
[tool.scikit-build.cmake.define]
|
||||
AARE_PYTHON_BINDINGS = "ON"
|
||||
AARE_SYSTEM_LIBRARIES = "ON"
|
||||
AARE_INSTALL_PYTHONEXT = "ON"
|
||||
|
||||
|
||||
[tool.pytest.ini_options]
|
||||
markers = [
|
||||
"files: marks tests that need additional data (deselect with '-m \"not files\"')",
|
||||
|
@ -1,12 +1,13 @@
|
||||
|
||||
find_package (Python 3.10 COMPONENTS Interpreter Development REQUIRED)
|
||||
find_package (Python 3.10 COMPONENTS Interpreter Development.Module REQUIRED)
|
||||
set(PYBIND11_FINDPYTHON ON) # Needed for RH8
|
||||
|
||||
# Download or find pybind11 depending on configuration
|
||||
if(AARE_FETCH_PYBIND11)
|
||||
FetchContent_Declare(
|
||||
pybind11
|
||||
GIT_REPOSITORY https://github.com/pybind/pybind11
|
||||
GIT_TAG v2.13.0
|
||||
GIT_TAG v2.13.6
|
||||
)
|
||||
FetchContent_MakeAvailable(pybind11)
|
||||
else()
|
||||
@ -28,6 +29,9 @@ target_link_libraries(_aare PRIVATE aare_core aare_compiler_flags)
|
||||
set( PYTHON_FILES
|
||||
aare/__init__.py
|
||||
aare/CtbRawFile.py
|
||||
aare/ClusterFinder.py
|
||||
aare/ClusterVector.py
|
||||
|
||||
aare/func.py
|
||||
aare/RawFile.py
|
||||
aare/transform.py
|
||||
@ -35,6 +39,7 @@ set( PYTHON_FILES
|
||||
aare/utils.py
|
||||
)
|
||||
|
||||
|
||||
# Copy the python files to the build directory
|
||||
foreach(FILE ${PYTHON_FILES})
|
||||
configure_file(${FILE} ${CMAKE_BINARY_DIR}/${FILE} )
|
||||
@ -58,10 +63,16 @@ endforeach(FILE ${PYTHON_EXAMPLES})
|
||||
|
||||
|
||||
if(AARE_INSTALL_PYTHONEXT)
|
||||
install(TARGETS _aare
|
||||
install(
|
||||
TARGETS _aare
|
||||
EXPORT "${TARGETS_EXPORT_NAME}"
|
||||
LIBRARY DESTINATION aare
|
||||
COMPONENT python
|
||||
)
|
||||
|
||||
install(FILES ${PYTHON_FILES} DESTINATION aare)
|
||||
install(
|
||||
FILES ${PYTHON_FILES}
|
||||
DESTINATION aare
|
||||
COMPONENT python
|
||||
)
|
||||
endif()
|
python/aare/ClusterFinder.py (new file, 67 lines)
@ -0,0 +1,67 @@
|
||||
|
||||
from ._aare import ClusterFinder_Cluster3x3i, ClusterFinder_Cluster2x2i, ClusterFinderMT_Cluster3x3i, ClusterFinderMT_Cluster2x2i, ClusterCollector_Cluster3x3i, ClusterCollector_Cluster2x2i
|
||||
|
||||
|
||||
from ._aare import ClusterFileSink_Cluster3x3i, ClusterFileSink_Cluster2x2i
|
||||
import numpy as np
|
||||
|
||||
def ClusterFinder(image_size, cluster_size, n_sigma=5, dtype = np.int32, capacity = 1024):
|
||||
"""
|
||||
Factory function to create a ClusterFinder object. Provides a cleaner syntax for
|
||||
the templated ClusterFinder in C++.
|
||||
"""
|
||||
if dtype == np.int32 and cluster_size == (3,3):
|
||||
return ClusterFinder_Cluster3x3i(image_size, n_sigma = n_sigma, capacity=capacity)
|
||||
elif dtype == np.int32 and cluster_size == (2,2):
|
||||
return ClusterFinder_Cluster2x2i(image_size, n_sigma = n_sigma, capacity=capacity)
|
||||
else:
|
||||
#TODO! add the other formats
|
||||
raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32 is supported.")
|
||||
|
||||
|
||||
def ClusterFinderMT(image_size, cluster_size = (3,3), dtype=np.int32, n_sigma=5, capacity = 1024, n_threads = 3):
|
||||
"""
|
||||
Factory function to create a ClusterFinderMT object. Provides a cleaner syntax for
|
||||
the templated ClusterFinderMT in C++.
|
||||
"""
|
||||
|
||||
if dtype == np.int32 and cluster_size == (3,3):
|
||||
return ClusterFinderMT_Cluster3x3i(image_size, n_sigma = n_sigma,
|
||||
capacity = capacity, n_threads = n_threads)
|
||||
elif dtype == np.int32 and cluster_size == (2,2):
|
||||
return ClusterFinderMT_Cluster2x2i(image_size, n_sigma = n_sigma,
|
||||
capacity = capacity, n_threads = n_threads)
|
||||
else:
|
||||
#TODO! add the other formats
|
||||
raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32 is supported.")
|
||||
|
||||
|
||||
def ClusterCollector(clusterfindermt, cluster_size = (3,3), dtype=np.int32):
|
||||
"""
|
||||
Factory function to create a ClusterCollector object. Provides a cleaner syntax for
|
||||
the templated ClusterCollector in C++.
|
||||
"""
|
||||
|
||||
if dtype == np.int32 and cluster_size == (3,3):
|
||||
return ClusterCollector_Cluster3x3i(clusterfindermt)
|
||||
elif dtype == np.int32 and cluster_size == (2,2):
|
||||
return ClusterCollector_Cluster2x2i(clusterfindermt)
|
||||
|
||||
else:
|
||||
#TODO! add the other formats
|
||||
raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32 is supported.")
|
||||
|
||||
def ClusterFileSink(clusterfindermt, cluster_file, dtype=np.int32):
|
||||
"""
|
||||
Factory function to create a ClusterCollector object. Provides a cleaner syntax for
|
||||
the templated ClusterCollector in C++.
|
||||
"""
|
||||
|
||||
if dtype == np.int32 and clusterfindermt.cluster_size == (3,3):
|
||||
return ClusterFileSink_Cluster3x3i(clusterfindermt, cluster_file)
|
||||
elif dtype == np.int32 and clusterfindermt.cluster_size == (2,2):
|
||||
return ClusterFileSink_Cluster2x2i(clusterfindermt, cluster_file)
|
||||
|
||||
else:
|
||||
#TODO! add the other formats
|
||||
raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32 is supported.")
|
python/aare/ClusterVector.py (new file, 11 lines)
@ -0,0 +1,11 @@
|
||||
|
||||
|
||||
from ._aare import ClusterVector_Cluster3x3i
|
||||
import numpy as np
|
||||
|
||||
def ClusterVector(cluster_size, dtype = np.int32):
|
||||
|
||||
if dtype == np.int32 and cluster_size == (3,3):
|
||||
return ClusterVector_Cluster3x3i()
|
||||
else:
|
||||
raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32 is supported.")
|
@ -11,8 +11,17 @@ from ._aare import ROI
|
||||
|
||||
# from ._aare import ClusterFinderMT, ClusterCollector, ClusterFileSink, ClusterVector_i
|
||||
|
||||
from ._aare import fit_gaus, fit_pol1
|
||||
from .ClusterFinder import ClusterFinder, ClusterCollector, ClusterFinderMT, ClusterFileSink
|
||||
from .ClusterVector import ClusterVector
|
||||
|
||||
|
||||
from ._aare import fit_gaus, fit_pol1, fit_scurve, fit_scurve2
|
||||
from ._aare import Interpolator
|
||||
from ._aare import calculate_eta2
|
||||
|
||||
|
||||
from ._aare import apply_custom_weights
|
||||
|
||||
from .CtbRawFile import CtbRawFile
|
||||
from .RawFile import RawFile
|
||||
from .ScanParameters import ScanParameters
|
||||
|
@ -1 +1 @@
|
||||
from ._aare import gaus, pol1
|
||||
from ._aare import gaus, pol1, scurve, scurve2
|
python/src/bind_ClusterVector.hpp (new file, 104 lines)
@ -0,0 +1,104 @@
|
||||
#include "aare/ClusterCollector.hpp"
|
||||
#include "aare/ClusterFileSink.hpp"
|
||||
#include "aare/ClusterFinder.hpp"
|
||||
#include "aare/ClusterFinderMT.hpp"
|
||||
#include "aare/ClusterVector.hpp"
|
||||
#include "aare/NDView.hpp"
|
||||
#include "aare/Pedestal.hpp"
|
||||
#include "np_helper.hpp"
|
||||
|
||||
#include <cstdint>
|
||||
#include <filesystem>
|
||||
#include <pybind11/pybind11.h>
|
||||
#include <pybind11/stl.h>
|
||||
#include <pybind11/stl_bind.h>
|
||||
|
||||
namespace py = pybind11;
|
||||
using pd_type = double;
|
||||
|
||||
using namespace aare;
|
||||
|
||||
#pragma GCC diagnostic push
|
||||
#pragma GCC diagnostic ignored "-Wunused-parameter"
|
||||
|
||||
template <typename Type, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType = uint16_t>
|
||||
void define_ClusterVector(py::module &m, const std::string &typestr) {
|
||||
using ClusterType = Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>;
|
||||
auto class_name = fmt::format("ClusterVector_{}", typestr);
|
||||
|
||||
py::class_<ClusterVector<
|
||||
Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>, void>>(
|
||||
m, class_name.c_str(),
|
||||
py::buffer_protocol())
|
||||
|
||||
.def(py::init()) // TODO change!!!
|
||||
|
||||
.def("push_back",
|
||||
[](ClusterVector<ClusterType> &self, const ClusterType &cluster) {
|
||||
self.push_back(cluster);
|
||||
})
|
||||
|
||||
.def("sum",
|
||||
[](ClusterVector<ClusterType> &self) {
|
||||
auto *vec = new std::vector<Type>(self.sum());
|
||||
return return_vector(vec);
|
||||
})
|
||||
.def("sum_2x2", [](ClusterVector<ClusterType> &self){
|
||||
auto *vec = new std::vector<Type>(self.sum_2x2());
|
||||
return return_vector(vec);
|
||||
})
|
||||
.def_property_readonly("size", &ClusterVector<ClusterType>::size)
|
||||
.def("item_size", &ClusterVector<ClusterType>::item_size)
|
||||
.def_property_readonly("fmt",
|
||||
[typestr](ClusterVector<ClusterType> &self) {
|
||||
return fmt_format<ClusterType>;
|
||||
})
|
||||
|
||||
.def_property_readonly("cluster_size_x",
|
||||
&ClusterVector<ClusterType>::cluster_size_x)
|
||||
.def_property_readonly("cluster_size_y",
|
||||
&ClusterVector<ClusterType>::cluster_size_y)
|
||||
.def_property_readonly("capacity",
|
||||
&ClusterVector<ClusterType>::capacity)
|
||||
.def_property("frame_number", &ClusterVector<ClusterType>::frame_number,
|
||||
&ClusterVector<ClusterType>::set_frame_number)
|
||||
.def_buffer(
|
||||
[typestr](ClusterVector<ClusterType> &self) -> py::buffer_info {
|
||||
return py::buffer_info(
|
||||
self.data(), /* Pointer to buffer */
|
||||
self.item_size(), /* Size of one scalar */
|
||||
fmt_format<ClusterType>, /* Format descriptor */
|
||||
1, /* Number of dimensions */
|
||||
{self.size()}, /* Buffer dimensions */
|
||||
{self.item_size()} /* Strides (in bytes) for each index */
|
||||
);
|
||||
});
|
||||
|
||||
// Free functions using ClusterVector
|
||||
m.def("hitmap",
|
||||
[](std::array<size_t, 2> image_size, ClusterVector<ClusterType> &cv) {
|
||||
// Create a numpy array to hold the hitmap
|
||||
// The shape of the array is (image_size[0], image_size[1])
|
||||
// note that the python array is passed as [row, col] which
|
||||
// is the opposite of the clusters [x,y]
|
||||
py::array_t<int32_t> hitmap(image_size);
|
||||
auto r = hitmap.mutable_unchecked<2>();
|
||||
|
||||
// Initialize hitmap to 0
|
||||
for (py::ssize_t i = 0; i < r.shape(0); i++)
|
||||
for (py::ssize_t j = 0; j < r.shape(1); j++)
|
||||
r(i, j) = 0;
|
||||
|
||||
// Loop over the clusters and increment the hitmap
|
||||
// Skip out of bound clusters
|
||||
for (const auto &cluster : cv) {
|
||||
auto x = cluster.x;
|
||||
auto y = cluster.y;
|
||||
if (x < image_size[1] && y < image_size[0])
|
||||
r(cluster.y, cluster.x) += 1;
|
||||
}
|
||||
|
||||
return hitmap;
|
||||
});
|
||||
}
|
@ -26,17 +26,18 @@ template <typename Type, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
void define_cluster(py::module &m, const std::string &typestr) {
|
||||
auto class_name = fmt::format("Cluster{}", typestr);
|
||||
|
||||
py::class_<Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType, void>>(
|
||||
py::class_<Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>>(
|
||||
m, class_name.c_str(), py::buffer_protocol())
|
||||
|
||||
.def(py::init([](uint8_t x, uint8_t y, py::array_t<Type> data) {
|
||||
py::buffer_info buf_info = data.request();
|
||||
Type *ptr = static_cast<Type *>(buf_info.ptr);
|
||||
Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType, void> cluster;
|
||||
Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType> cluster;
|
||||
cluster.x = x;
|
||||
cluster.y = y;
|
||||
std::copy(ptr, ptr + ClusterSizeX * ClusterSizeY,
|
||||
cluster.data); // Copy array contents
|
||||
auto r = data.template unchecked<1>(); // no bounds checks
|
||||
for (py::ssize_t i = 0; i < data.size(); ++i) {
|
||||
cluster.data[i] = r(i);
|
||||
}
|
||||
return cluster;
|
||||
}));
|
||||
|
||||
@ -64,54 +65,6 @@ void define_cluster(py::module &m, const std::string &typestr) {
|
||||
*/
|
||||
}
|
||||
|
||||
template <typename Type, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType = uint16_t>
|
||||
void define_cluster_vector(py::module &m, const std::string &typestr) {
|
||||
using ClusterType =
|
||||
Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType, void>;
|
||||
auto class_name = fmt::format("ClusterVector_{}", typestr);
|
||||
|
||||
py::class_<ClusterVector<
|
||||
Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType, void>, void>>(
|
||||
m, class_name.c_str(),
|
||||
py::buffer_protocol())
|
||||
|
||||
.def(py::init()) // TODO change!!!
|
||||
|
||||
.def("push_back",
|
||||
[](ClusterVector<ClusterType> &self, const ClusterType &cluster) {
|
||||
self.push_back(cluster);
|
||||
})
|
||||
|
||||
// implement push_back
|
||||
.def_property_readonly("size", &ClusterVector<ClusterType>::size)
|
||||
.def("item_size", &ClusterVector<ClusterType>::item_size)
|
||||
.def_property_readonly("fmt",
|
||||
[typestr](ClusterVector<ClusterType> &self) {
|
||||
return fmt_format<ClusterType>;
|
||||
})
|
||||
|
||||
.def_property_readonly("cluster_size_x",
|
||||
&ClusterVector<ClusterType>::cluster_size_x)
|
||||
.def_property_readonly("cluster_size_y",
|
||||
&ClusterVector<ClusterType>::cluster_size_y)
|
||||
.def_property_readonly("capacity",
|
||||
&ClusterVector<ClusterType>::capacity)
|
||||
.def_property("frame_number", &ClusterVector<ClusterType>::frame_number,
|
||||
&ClusterVector<ClusterType>::set_frame_number)
|
||||
.def_buffer(
|
||||
[typestr](ClusterVector<ClusterType> &self) -> py::buffer_info {
|
||||
return py::buffer_info(
|
||||
self.data(), /* Pointer to buffer */
|
||||
self.item_size(), /* Size of one scalar */
|
||||
fmt_format<ClusterType>, /* Format descriptor */
|
||||
1, /* Number of dimensions */
|
||||
{self.size()}, /* Buffer dimensions */
|
||||
{self.item_size()} /* Strides (in bytes) for each index */
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType = uint16_t>
|
||||
void define_cluster_finder_mt_bindings(py::module &m,
|
||||
@ -140,6 +93,9 @@ void define_cluster_finder_mt_bindings(py::module &m,
|
||||
return;
|
||||
},
|
||||
py::arg(), py::arg("frame_number") = 0)
|
||||
.def_property_readonly("cluster_size", [](ClusterFinderMT<ClusterType, uint16_t, pd_type> &self){
|
||||
return py::make_tuple(ClusterSizeX, ClusterSizeY);
|
||||
})
|
||||
.def("clear_pedestal",
|
||||
&ClusterFinderMT<ClusterType, uint16_t, pd_type>::clear_pedestal)
|
||||
.def("sync", &ClusterFinderMT<ClusterType, uint16_t, pd_type>::sync)
|
||||
@ -251,26 +207,5 @@ void define_cluster_finder_bindings(py::module &m, const std::string &typestr) {
|
||||
return;
|
||||
},
|
||||
py::arg(), py::arg("frame_number") = 0);
|
||||
|
||||
m.def("hitmap",
|
||||
[](std::array<size_t, 2> image_size, ClusterVector<ClusterType> &cv) {
|
||||
py::array_t<int32_t> hitmap(image_size);
|
||||
auto r = hitmap.mutable_unchecked<2>();
|
||||
|
||||
// Initialize hitmap to 0
|
||||
for (py::ssize_t i = 0; i < r.shape(0); i++)
|
||||
for (py::ssize_t j = 0; j < r.shape(1); j++)
|
||||
r(i, j) = 0;
|
||||
|
||||
size_t stride = cv.item_size();
|
||||
auto ptr = cv.data();
|
||||
for (size_t i = 0; i < cv.size(); i++) {
|
||||
auto x = *reinterpret_cast<int16_t *>(ptr);
|
||||
auto y = *reinterpret_cast<int16_t *>(ptr + sizeof(int16_t));
|
||||
r(y, x) += 1;
|
||||
ptr += stride;
|
||||
}
|
||||
return hitmap;
|
||||
});
|
||||
}
|
||||
#pragma GCC diagnostic pop
|
||||
|
@ -59,9 +59,6 @@ void define_cluster_file_io_bindings(py::module &m,
|
||||
self.set_gain_map(view);
|
||||
})
|
||||
|
||||
// void set_gain_map(const GainMap &gain_map); //TODO do i need a
|
||||
// gainmap constructor?
|
||||
|
||||
.def("close", &ClusterFile<ClusterType>::close)
|
||||
.def("write_frame", &ClusterFile<ClusterType>::write_frame)
|
||||
.def("__enter__", [](ClusterFile<ClusterType> &self) { return &self; })
|
||||
|
@ -10,6 +10,8 @@
|
||||
#include "aare/decode.hpp"
|
||||
// #include "aare/fClusterFileV2.hpp"
|
||||
|
||||
#include "np_helper.hpp"
|
||||
|
||||
#include <cstdint>
|
||||
#include <filesystem>
|
||||
#include <pybind11/iostream.h>
|
||||
@ -32,7 +34,7 @@ m.def("adc_sar_05_decode64to16", [](py::array_t<uint8_t> input) {
|
||||
}
|
||||
|
||||
//Create a 2D output array with the same shape as the input
|
||||
std::vector<ssize_t> shape{input.shape(0), input.shape(1)/static_cast<int64_t>(bits_per_byte)};
|
||||
std::vector<ssize_t> shape{input.shape(0), input.shape(1)/static_cast<ssize_t>(bits_per_byte)};
|
||||
py::array_t<uint16_t> output(shape);
|
||||
|
||||
//Create a view of the input and output arrays
|
||||
@ -53,7 +55,7 @@ m.def("adc_sar_04_decode64to16", [](py::array_t<uint8_t> input) {
|
||||
}
|
||||
|
||||
//Create a 2D output array with the same shape as the input
|
||||
std::vector<ssize_t> shape{input.shape(0), input.shape(1)/static_cast<int64_t>(bits_per_byte)};
|
||||
std::vector<ssize_t> shape{input.shape(0), input.shape(1)/static_cast<ssize_t>(bits_per_byte)};
|
||||
py::array_t<uint16_t> output(shape);
|
||||
|
||||
//Create a view of the input and output arrays
|
||||
@ -65,35 +67,54 @@ m.def("adc_sar_04_decode64to16", [](py::array_t<uint8_t> input) {
|
||||
return output;
|
||||
});
|
||||
|
||||
py::class_<CtbRawFile>(m, "CtbRawFile")
|
||||
.def(py::init<const std::filesystem::path &>())
|
||||
.def("read_frame",
|
||||
[](CtbRawFile &self) {
|
||||
size_t image_size = self.image_size_in_bytes();
|
||||
py::array image;
|
||||
std::vector<ssize_t> shape;
|
||||
shape.reserve(2);
|
||||
shape.push_back(1);
|
||||
shape.push_back(image_size);
|
||||
m.def(
|
||||
"apply_custom_weights",
|
||||
[](py::array_t<uint16_t, py::array::c_style | py::array::forcecast> &input,
|
||||
py::array_t<double, py::array::c_style | py::array::forcecast>
|
||||
&weights) {
|
||||
|
||||
|
||||
py::array_t<DetectorHeader> header(1);
|
||||
// Create new array with same shape as the input array (uninitialized values)
|
||||
py::buffer_info buf = input.request();
|
||||
py::array_t<double> output(buf.shape);
|
||||
|
||||
// always read bytes
|
||||
image = py::array_t<uint8_t>(shape);
|
||||
// Use NDViews to call into the C++ library
|
||||
auto weights_view = make_view_1d(weights);
|
||||
NDView<uint16_t, 1> input_view(input.mutable_data(), {input.size()});
|
||||
NDView<double, 1> output_view(output.mutable_data(), {output.size()});
|
||||
|
||||
self.read_into(
|
||||
reinterpret_cast<std::byte *>(image.mutable_data()),
|
||||
header.mutable_data());
|
||||
apply_custom_weights(input_view, output_view, weights_view);
|
||||
return output;
|
||||
});
|
||||
|
||||
return py::make_tuple(header, image);
|
||||
})
|
||||
.def("seek", &CtbRawFile::seek)
|
||||
.def("tell", &CtbRawFile::tell)
|
||||
.def("master", &CtbRawFile::master)
|
||||
py::class_<CtbRawFile>(m, "CtbRawFile")
|
||||
.def(py::init<const std::filesystem::path &>())
|
||||
.def("read_frame",
|
||||
[](CtbRawFile &self) {
|
||||
size_t image_size = self.image_size_in_bytes();
|
||||
py::array image;
|
||||
std::vector<ssize_t> shape;
|
||||
shape.reserve(2);
|
||||
shape.push_back(1);
|
||||
shape.push_back(image_size);
|
||||
|
||||
.def_property_readonly("image_size_in_bytes",
|
||||
&CtbRawFile::image_size_in_bytes)
|
||||
py::array_t<DetectorHeader> header(1);
|
||||
|
||||
.def_property_readonly("frames_in_file", &CtbRawFile::frames_in_file);
|
||||
// always read bytes
|
||||
image = py::array_t<uint8_t>(shape);
|
||||
|
||||
}
|
||||
self.read_into(reinterpret_cast<std::byte *>(image.mutable_data()),
|
||||
header.mutable_data());
|
||||
|
||||
return py::make_tuple(header, image);
|
||||
})
|
||||
.def("seek", &CtbRawFile::seek)
|
||||
.def("tell", &CtbRawFile::tell)
|
||||
.def("master", &CtbRawFile::master)
|
||||
|
||||
.def_property_readonly("image_size_in_bytes",
|
||||
&CtbRawFile::image_size_in_bytes)
|
||||
|
||||
.def_property_readonly("frames_in_file", &CtbRawFile::frames_in_file);
|
||||
|
||||
}
|
||||
|
@ -20,6 +20,9 @@
|
||||
namespace py = pybind11;
|
||||
using namespace ::aare;
|
||||
|
||||
|
||||
|
||||
|
||||
//Disable warnings for unused parameters, as we ignore some
|
||||
//in the __exit__ method
|
||||
#pragma GCC diagnostic push
|
||||
@ -195,7 +198,7 @@ void define_file_io_bindings(py::module &m) {
|
||||
|
||||
py::class_<ROI>(m, "ROI")
|
||||
.def(py::init<>())
|
||||
.def(py::init<int64_t, int64_t, int64_t, int64_t>(), py::arg("xmin"),
|
||||
.def(py::init<ssize_t, ssize_t, ssize_t, ssize_t>(), py::arg("xmin"),
|
||||
py::arg("xmax"), py::arg("ymin"), py::arg("ymax"))
|
||||
.def_readwrite("xmin", &ROI::xmin)
|
||||
.def_readwrite("xmax", &ROI::xmax)
|
||||
@ -214,36 +217,9 @@ void define_file_io_bindings(py::module &m) {
|
||||
|
||||
|
||||
|
||||
py::class_<RawSubFile>(m, "RawSubFile")
|
||||
.def(py::init<const std::filesystem::path &, DetectorType, size_t,
|
||||
size_t, size_t>())
|
||||
.def_property_readonly("bytes_per_frame", &RawSubFile::bytes_per_frame)
|
||||
.def_property_readonly("pixels_per_frame",
|
||||
&RawSubFile::pixels_per_frame)
|
||||
.def("seek", &RawSubFile::seek)
|
||||
.def("tell", &RawSubFile::tell)
|
||||
.def_property_readonly("rows", &RawSubFile::rows)
|
||||
.def_property_readonly("cols", &RawSubFile::cols)
|
||||
.def("read_frame",
|
||||
[](RawSubFile &self) {
|
||||
const uint8_t item_size = self.bytes_per_pixel();
|
||||
py::array image;
|
||||
std::vector<ssize_t> shape;
|
||||
shape.reserve(2);
|
||||
shape.push_back(self.rows());
|
||||
shape.push_back(self.cols());
|
||||
if (item_size == 1) {
|
||||
image = py::array_t<uint8_t>(shape);
|
||||
} else if (item_size == 2) {
|
||||
image = py::array_t<uint16_t>(shape);
|
||||
} else if (item_size == 4) {
|
||||
image = py::array_t<uint32_t>(shape);
|
||||
}
|
||||
fmt::print("item_size: {} rows: {} cols: {}\n", item_size, self.rows(), self.cols());
|
||||
self.read_into(
|
||||
reinterpret_cast<std::byte *>(image.mutable_data()));
|
||||
return image;
|
||||
});
|
||||
|
||||
|
||||
|
||||
|
||||
#pragma GCC diagnostic pop
|
||||
// py::class_<ClusterHeader>(m, "ClusterHeader")
|
||||
|
@ -55,6 +55,47 @@ void define_fit_bindings(py::module &m) {
|
||||
)",
|
||||
py::arg("x"), py::arg("par"));
|
||||
|
||||
m.def(
|
||||
"scurve",
|
||||
[](py::array_t<double, py::array::c_style | py::array::forcecast> x,
|
||||
py::array_t<double, py::array::c_style | py::array::forcecast> par) {
|
||||
auto x_view = make_view_1d(x);
|
||||
auto par_view = make_view_1d(par);
|
||||
auto y = new NDArray<double, 1>{aare::func::scurve(x_view, par_view)};
|
||||
return return_image_data(y);
|
||||
},
|
||||
R"(
|
||||
Evaluate a 1D scurve function for all points in x using parameters par.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x : array_like
|
||||
The points at which to evaluate the scurve function.
|
||||
par : array_like
|
||||
The parameters of the scurve function: background slope, background intercept, mean, standard deviation, the number of counts at the inflexion point, and C.
|
||||
)",
|
||||
py::arg("x"), py::arg("par"));
|
||||
|
||||
m.def(
|
||||
"scurve2",
|
||||
[](py::array_t<double, py::array::c_style | py::array::forcecast> x,
|
||||
py::array_t<double, py::array::c_style | py::array::forcecast> par) {
|
||||
auto x_view = make_view_1d(x);
|
||||
auto par_view = make_view_1d(par);
|
||||
auto y = new NDArray<double, 1>{aare::func::scurve2(x_view, par_view)};
|
||||
return return_image_data(y);
|
||||
},
|
||||
R"(
|
||||
Evaluate a 1D scurve2 function for all points in x using parameters par.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x : array_like
|
||||
The points at which to evaluate the scurve function.
|
||||
par : array_like
|
||||
The parameters of the scurve2 function: background slope, background intercept, mean, standard deviation, the number of counts at the inflexion point, and C.
|
||||
)",
|
||||
py::arg("x"), py::arg("par"));
|
||||
|
||||
m.def(
|
||||
"fit_gaus",
|
||||
@ -235,6 +276,180 @@ n_threads : int, optional
|
||||
R"(
|
||||
Fit a 1D polynomial to data with error estimates.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x : array_like
|
||||
The x values.
|
||||
y : array_like
|
||||
The y values.
|
||||
y_err : array_like
|
||||
The error in the y values.
|
||||
n_threads : int, optional
|
||||
The number of threads to use. Default is 4.
|
||||
)",
|
||||
py::arg("x"), py::arg("y"), py::arg("y_err"), py::arg("n_threads") = 4);
|
||||
|
||||
//=========
|
||||
m.def(
|
||||
"fit_scurve",
|
||||
[](py::array_t<double, py::array::c_style | py::array::forcecast> x,
|
||||
py::array_t<double, py::array::c_style | py::array::forcecast> y,
|
||||
int n_threads) {
|
||||
if (y.ndim() == 3) {
|
||||
auto par = new NDArray<double, 3>{};
|
||||
|
||||
auto x_view = make_view_1d(x);
|
||||
auto y_view = make_view_3d(y);
|
||||
*par = aare::fit_scurve(x_view, y_view, n_threads);
|
||||
return return_image_data(par);
|
||||
} else if (y.ndim() == 1) {
|
||||
auto par = new NDArray<double, 1>{};
|
||||
auto x_view = make_view_1d(x);
|
||||
auto y_view = make_view_1d(y);
|
||||
*par = aare::fit_scurve(x_view, y_view);
|
||||
return return_image_data(par);
|
||||
} else {
|
||||
throw std::runtime_error("Data must be 1D or 3D");
|
||||
}
|
||||
},
|
||||
py::arg("x"), py::arg("y"), py::arg("n_threads") = 4);
|
||||
|
||||
m.def(
|
||||
"fit_scurve",
|
||||
[](py::array_t<double, py::array::c_style | py::array::forcecast> x,
|
||||
py::array_t<double, py::array::c_style | py::array::forcecast> y,
|
||||
py::array_t<double, py::array::c_style | py::array::forcecast> y_err,
|
||||
int n_threads) {
|
||||
if (y.ndim() == 3) {
|
||||
auto par = new NDArray<double, 3>({y.shape(0), y.shape(1), 6});
|
||||
|
||||
auto par_err =
|
||||
new NDArray<double, 3>({y.shape(0), y.shape(1), 6});
|
||||
|
||||
auto y_view = make_view_3d(y);
|
||||
auto y_view_err = make_view_3d(y_err);
|
||||
auto x_view = make_view_1d(x);
|
||||
|
||||
auto chi2 = new NDArray<double, 2>({y.shape(0), y.shape(1)});
|
||||
|
||||
aare::fit_scurve(x_view, y_view, y_view_err, par->view(),
|
||||
par_err->view(), chi2->view(), n_threads);
|
||||
return py::dict("par"_a = return_image_data(par),
|
||||
"par_err"_a = return_image_data(par_err),
|
||||
"chi2"_a = return_image_data(chi2),
|
||||
"Ndf"_a = y.shape(2) - 2);
|
||||
|
||||
|
||||
} else if (y.ndim() == 1) {
|
||||
auto par = new NDArray<double, 1>({2});
|
||||
auto par_err = new NDArray<double, 1>({2});
|
||||
|
||||
auto y_view = make_view_1d(y);
|
||||
auto y_view_err = make_view_1d(y_err);
|
||||
auto x_view = make_view_1d(x);
|
||||
|
||||
double chi2 = 0;
|
||||
|
||||
aare::fit_scurve(x_view, y_view, y_view_err, par->view(),
|
||||
par_err->view(), chi2);
|
||||
return py::dict("par"_a = return_image_data(par),
|
||||
"par_err"_a = return_image_data(par_err),
|
||||
"chi2"_a = chi2, "Ndf"_a = y.size() - 2);
|
||||
|
||||
} else {
|
||||
throw std::runtime_error("Data must be 1D or 3D");
|
||||
}
|
||||
},
|
||||
R"(
|
||||
Fit an scurve to data with error estimates.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x : array_like
|
||||
The x values.
|
||||
y : array_like
|
||||
The y values.
|
||||
y_err : array_like
|
||||
The error in the y values.
|
||||
n_threads : int, optional
|
||||
The number of threads to use. Default is 4.
|
||||
)",
|
||||
py::arg("x"), py::arg("y"), py::arg("y_err"), py::arg("n_threads") = 4);
|
||||
|
||||
|
||||
m.def(
|
||||
"fit_scurve2",
|
||||
[](py::array_t<double, py::array::c_style | py::array::forcecast> x,
|
||||
py::array_t<double, py::array::c_style | py::array::forcecast> y,
|
||||
int n_threads) {
|
||||
if (y.ndim() == 3) {
|
||||
auto par = new NDArray<double, 3>{};
|
||||
|
||||
auto x_view = make_view_1d(x);
|
||||
auto y_view = make_view_3d(y);
|
||||
*par = aare::fit_scurve2(x_view, y_view, n_threads);
|
||||
return return_image_data(par);
|
||||
} else if (y.ndim() == 1) {
|
||||
auto par = new NDArray<double, 1>{};
|
||||
auto x_view = make_view_1d(x);
|
||||
auto y_view = make_view_1d(y);
|
||||
*par = aare::fit_scurve2(x_view, y_view);
|
||||
return return_image_data(par);
|
||||
} else {
|
||||
throw std::runtime_error("Data must be 1D or 3D");
|
||||
}
|
||||
},
|
||||
py::arg("x"), py::arg("y"), py::arg("n_threads") = 4);
|
||||
|
||||
m.def(
|
||||
"fit_scurve2",
|
||||
[](py::array_t<double, py::array::c_style | py::array::forcecast> x,
|
||||
py::array_t<double, py::array::c_style | py::array::forcecast> y,
|
||||
py::array_t<double, py::array::c_style | py::array::forcecast> y_err,
|
||||
int n_threads) {
|
||||
if (y.ndim() == 3) {
|
||||
auto par = new NDArray<double, 3>({y.shape(0), y.shape(1), 6});
|
||||
|
||||
auto par_err =
|
||||
new NDArray<double, 3>({y.shape(0), y.shape(1), 6});
|
||||
|
||||
auto y_view = make_view_3d(y);
|
||||
auto y_view_err = make_view_3d(y_err);
|
||||
auto x_view = make_view_1d(x);
|
||||
|
||||
auto chi2 = new NDArray<double, 2>({y.shape(0), y.shape(1)});
|
||||
|
||||
aare::fit_scurve2(x_view, y_view, y_view_err, par->view(),
|
||||
par_err->view(), chi2->view(), n_threads);
|
||||
return py::dict("par"_a = return_image_data(par),
|
||||
"par_err"_a = return_image_data(par_err),
|
||||
"chi2"_a = return_image_data(chi2),
|
||||
"Ndf"_a = y.shape(2) - 2);
|
||||
|
||||
|
||||
} else if (y.ndim() == 1) {
|
||||
auto par = new NDArray<double, 1>({6});
|
||||
auto par_err = new NDArray<double, 1>({6});
|
||||
|
||||
auto y_view = make_view_1d(y);
|
||||
auto y_view_err = make_view_1d(y_err);
|
||||
auto x_view = make_view_1d(x);
|
||||
|
||||
double chi2 = 0;
|
||||
|
||||
aare::fit_scurve2(x_view, y_view, y_view_err, par->view(),
|
||||
par_err->view(), chi2);
|
||||
return py::dict("par"_a = return_image_data(par),
|
||||
"par_err"_a = return_image_data(par_err),
|
||||
"chi2"_a = chi2, "Ndf"_a = y.size() - 2);
|
||||
|
||||
} else {
|
||||
throw std::runtime_error("Data must be 1D or 3D");
|
||||
}
|
||||
},
|
||||
R"(
|
||||
Fit an scurve2 to data with error estimates.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x : array_like
|
||||
|
@ -1,16 +1,21 @@
|
||||
// Files with bindings to the different classes
|
||||
|
||||
//New style file naming
|
||||
#include "bind_ClusterVector.hpp"
|
||||
|
||||
//TODO! migrate the other names
|
||||
#include "cluster.hpp"
|
||||
#include "cluster_file.hpp"
|
||||
#include "ctb_raw_file.hpp"
|
||||
#include "file.hpp"
|
||||
#include "fit.hpp"
|
||||
#include "interpolation.hpp"
|
||||
#include "pedestal.hpp"
|
||||
#include "pixel_map.hpp"
|
||||
#include "raw_file.hpp"
|
||||
#include "raw_sub_file.hpp"
|
||||
#include "raw_master_file.hpp"
|
||||
#include "raw_file.hpp"
|
||||
#include "pixel_map.hpp"
|
||||
#include "var_cluster.hpp"
|
||||
|
||||
#include "pedestal.hpp"
|
||||
#include "jungfrau_data_file.hpp"
|
||||
|
||||
// Pybind stuff
|
||||
@ -22,6 +27,7 @@ namespace py = pybind11;
|
||||
PYBIND11_MODULE(_aare, m) {
|
||||
define_file_io_bindings(m);
|
||||
define_raw_file_io_bindings(m);
|
||||
define_raw_sub_file_io_bindings(m);
|
||||
define_ctb_raw_file_io_bindings(m);
|
||||
define_raw_master_file_bindings(m);
|
||||
define_var_cluster_finder_bindings(m);
|
||||
@ -39,12 +45,12 @@ PYBIND11_MODULE(_aare, m) {
|
||||
define_cluster_file_io_bindings<float, 2, 2, uint16_t>(m, "Cluster2x2f");
|
||||
define_cluster_file_io_bindings<double, 2, 2, uint16_t>(m, "Cluster2x2d");
|
||||
|
||||
define_cluster_vector<int, 3, 3, uint16_t>(m, "Cluster3x3i");
|
||||
define_cluster_vector<double, 3, 3, uint16_t>(m, "Cluster3x3d");
|
||||
define_cluster_vector<float, 3, 3, uint16_t>(m, "Cluster3x3f");
|
||||
define_cluster_vector<int, 2, 2, uint16_t>(m, "Cluster2x2i");
|
||||
define_cluster_vector<double, 2, 2, uint16_t>(m, "Cluster2x2d");
|
||||
define_cluster_vector<float, 2, 2, uint16_t>(m, "Cluster2x2f");
|
||||
define_ClusterVector<int, 3, 3, uint16_t>(m, "Cluster3x3i");
|
||||
define_ClusterVector<double, 3, 3, uint16_t>(m, "Cluster3x3d");
|
||||
define_ClusterVector<float, 3, 3, uint16_t>(m, "Cluster3x3f");
|
||||
define_ClusterVector<int, 2, 2, uint16_t>(m, "Cluster2x2i");
|
||||
define_ClusterVector<double, 2, 2, uint16_t>(m, "Cluster2x2d");
|
||||
define_ClusterVector<float, 2, 2, uint16_t>(m, "Cluster2x2f");
|
||||
|
||||
define_cluster_finder_bindings<int, 3, 3, uint16_t>(m, "Cluster3x3i");
|
||||
define_cluster_finder_bindings<double, 3, 3, uint16_t>(m, "Cluster3x3d");
|
||||
|
@ -13,7 +13,7 @@ namespace py = pybind11;
|
||||
using namespace aare;
|
||||
|
||||
// Pass image data back to python as a numpy array
|
||||
template <typename T, int64_t Ndim>
|
||||
template <typename T, ssize_t Ndim>
|
||||
py::array return_image_data(aare::NDArray<T, Ndim> *image) {
|
||||
|
||||
py::capsule free_when_done(image, [](void *f) {
|
||||
|
python/src/raw_sub_file.hpp (new file, 110 lines)
@ -0,0 +1,110 @@
|
||||
#include "aare/CtbRawFile.hpp"
|
||||
#include "aare/File.hpp"
|
||||
#include "aare/Frame.hpp"
|
||||
#include "aare/RawFile.hpp"
|
||||
#include "aare/RawMasterFile.hpp"
|
||||
#include "aare/RawSubFile.hpp"
|
||||
|
||||
#include "aare/defs.hpp"
|
||||
// #include "aare/fClusterFileV2.hpp"
|
||||
|
||||
#include <cstdint>
|
||||
#include <filesystem>
|
||||
#include <pybind11/iostream.h>
|
||||
#include <pybind11/numpy.h>
|
||||
#include <pybind11/pybind11.h>
|
||||
#include <pybind11/stl.h>
|
||||
#include <pybind11/stl/filesystem.h>
|
||||
#include <string>
|
||||
|
||||
namespace py = pybind11;
|
||||
using namespace ::aare;
|
||||
|
||||
auto read_frame_from_RawSubFile(RawSubFile &self) {
|
||||
py::array_t<DetectorHeader> header(1);
|
||||
const uint8_t item_size = self.bytes_per_pixel();
|
||||
std::vector<ssize_t> shape{static_cast<ssize_t>(self.rows()),
|
||||
static_cast<ssize_t>(self.cols())};
|
||||
|
||||
py::array image;
|
||||
if (item_size == 1) {
|
||||
image = py::array_t<uint8_t>(shape);
|
||||
} else if (item_size == 2) {
|
||||
image = py::array_t<uint16_t>(shape);
|
||||
} else if (item_size == 4) {
|
||||
image = py::array_t<uint32_t>(shape);
|
||||
}
|
||||
self.read_into(reinterpret_cast<std::byte *>(image.mutable_data()),
|
||||
header.mutable_data());
|
||||
|
||||
return py::make_tuple(header, image);
|
||||
}
|
||||
|
||||
auto read_n_frames_from_RawSubFile(RawSubFile &self, size_t n_frames) {
|
||||
py::array_t<DetectorHeader> header(n_frames);
|
||||
const uint8_t item_size = self.bytes_per_pixel();
|
||||
std::vector<ssize_t> shape{
|
||||
static_cast<ssize_t>(n_frames),
|
||||
static_cast<ssize_t>(self.rows()),
|
||||
static_cast<ssize_t>(self.cols())
|
||||
};
|
||||
|
||||
py::array image;
|
||||
if (item_size == 1) {
|
||||
image = py::array_t<uint8_t>(shape);
|
||||
} else if (item_size == 2) {
|
||||
image = py::array_t<uint16_t>(shape);
|
||||
} else if (item_size == 4) {
|
||||
image = py::array_t<uint32_t>(shape);
|
||||
}
|
||||
self.read_into(reinterpret_cast<std::byte *>(image.mutable_data()), n_frames,
|
||||
header.mutable_data());
|
||||
|
||||
return py::make_tuple(header, image);
|
||||
}
|
||||
|
||||
|
||||
//Disable warnings for unused parameters, as we ignore some
|
||||
//in the __exit__ method
|
||||
#pragma GCC diagnostic push
|
||||
#pragma GCC diagnostic ignored "-Wunused-parameter"
|
||||
|
||||
void define_raw_sub_file_io_bindings(py::module &m) {
|
||||
py::class_<RawSubFile>(m, "RawSubFile")
|
||||
.def(py::init<const std::filesystem::path &, DetectorType, size_t,
|
||||
size_t, size_t>())
|
||||
.def_property_readonly("bytes_per_frame", &RawSubFile::bytes_per_frame)
|
||||
.def_property_readonly("pixels_per_frame",
|
||||
&RawSubFile::pixels_per_frame)
|
||||
.def_property_readonly("bytes_per_pixel", &RawSubFile::bytes_per_pixel)
|
||||
.def("seek", &RawSubFile::seek)
|
||||
.def("tell", &RawSubFile::tell)
|
||||
.def_property_readonly("rows", &RawSubFile::rows)
|
||||
.def_property_readonly("cols", &RawSubFile::cols)
|
||||
.def_property_readonly("frames_in_file", &RawSubFile::frames_in_file)
|
||||
.def("read_frame", &read_frame_from_RawSubFile)
|
||||
.def("read_n", &read_n_frames_from_RawSubFile)
|
||||
.def("read", [](RawSubFile &self){
|
||||
self.seek(0);
|
||||
auto n_frames = self.frames_in_file();
|
||||
return read_n_frames_from_RawSubFile(self, n_frames);
|
||||
})
|
||||
.def("__enter__", [](RawSubFile &self) { return &self; })
|
||||
.def("__exit__",
|
||||
[](RawSubFile &self,
|
||||
const std::optional<pybind11::type> &exc_type,
|
||||
const std::optional<pybind11::object> &exc_value,
|
||||
const std::optional<pybind11::object> &traceback) {
|
||||
})
|
||||
.def("__iter__", [](RawSubFile &self) { return &self; })
|
||||
.def("__next__", [](RawSubFile &self) {
|
||||
try {
|
||||
return read_frame_from_RawSubFile(self);
|
||||
} catch (std::runtime_error &e) {
|
||||
throw py::stop_iteration();
|
||||
}
|
||||
});
|
||||
|
||||
}
|
||||
|
||||
#pragma GCC diagnostic pop
|
@ -1,12 +1,12 @@
|
||||
import pytest
|
||||
import numpy as np
|
||||
|
||||
import aare._aare as aare
|
||||
from aare import _aare #import the C++ module
|
||||
from conftest import test_data_path
|
||||
|
||||
|
||||
def test_cluster_vector_can_be_converted_to_numpy():
|
||||
cv = aare.ClusterVector_Cluster3x3i()
|
||||
cv = _aare.ClusterVector_Cluster3x3i()
|
||||
arr = np.array(cv, copy=False)
|
||||
assert arr.shape == (0,) # empty vector gives a zero-length structured array (x, y and cluster data fields)
|
||||
|
||||
@ -14,24 +14,23 @@ def test_cluster_vector_can_be_converted_to_numpy():
|
||||
def test_ClusterVector():
|
||||
"""Test ClusterVector"""
|
||||
|
||||
clustervector = aare.ClusterVector_Cluster3x3i()
|
||||
clustervector = _aare.ClusterVector_Cluster3x3i()
|
||||
assert clustervector.cluster_size_x == 3
|
||||
assert clustervector.cluster_size_y == 3
|
||||
assert clustervector.item_size() == 4+9*4
|
||||
assert clustervector.frame_number == 0
|
||||
assert clustervector.capacity == 1024
|
||||
assert clustervector.size == 0
|
||||
|
||||
cluster = aare.Cluster3x3i(0,0,np.ones(9, dtype=np.int32))
|
||||
cluster = _aare.Cluster3x3i(0,0,np.ones(9, dtype=np.int32))
|
||||
|
||||
clustervector.push_back(cluster)
|
||||
assert clustervector.size == 1
|
||||
|
||||
with pytest.raises(TypeError): # Or use the appropriate exception type
|
||||
clustervector.push_back(aare.Cluster2x2i(0,0,np.ones(4, dtype=np.int32)))
|
||||
clustervector.push_back(_aare.Cluster2x2i(0,0,np.ones(4, dtype=np.int32)))
|
||||
|
||||
with pytest.raises(TypeError):
|
||||
clustervector.push_back(aare.Cluster3x3f(0,0,np.ones(9, dtype=np.float32)))
|
||||
clustervector.push_back(_aare.Cluster3x3f(0,0,np.ones(9, dtype=np.float32)))
|
||||
|
||||
def test_Interpolator():
|
||||
"""Test Interpolator"""
|
||||
@ -41,13 +40,13 @@ def test_Interpolator():
|
||||
ybins = np.linspace(0, 5, 30, dtype=np.float64)
|
||||
|
||||
etacube = np.zeros(shape=[30, 30, 20], dtype=np.float64)
|
||||
interpolator = aare.Interpolator(etacube, xbins, ybins, ebins)
|
||||
interpolator = _aare.Interpolator(etacube, xbins, ybins, ebins)
|
||||
|
||||
assert interpolator.get_ietax().shape == (30,30,20)
|
||||
assert interpolator.get_ietay().shape == (30,30,20)
|
||||
clustervector = aare.ClusterVector_Cluster3x3i()
|
||||
clustervector = _aare.ClusterVector_Cluster3x3i()
|
||||
|
||||
cluster = aare.Cluster3x3i(0,0, np.ones(9, dtype=np.int32))
|
||||
cluster = _aare.Cluster3x3i(0,0, np.ones(9, dtype=np.int32))
|
||||
clustervector.push_back(cluster)
|
||||
|
||||
interpolated_photons = interpolator.interpolate(clustervector)
|
||||
@ -58,9 +57,9 @@ def test_Interpolator():
|
||||
assert interpolated_photons[0]["y"] == -1
|
||||
assert interpolated_photons[0]["energy"] == 4 #eta_sum = 4, dx, dy = -1,-1 m_ietax = 0, m_ietay = 0
|
||||
|
||||
clustervector = aare.ClusterVector_Cluster2x2i()
|
||||
clustervector = _aare.ClusterVector_Cluster2x2i()
|
||||
|
||||
cluster = aare.Cluster2x2i(0,0, np.ones(4, dtype=np.int32))
|
||||
cluster = _aare.Cluster2x2i(0,0, np.ones(4, dtype=np.int32))
|
||||
clustervector.push_back(cluster)
|
||||
|
||||
interpolated_photons = interpolator.interpolate(clustervector)
|
||||
@ -71,28 +70,15 @@ def test_Interpolator():
|
||||
assert interpolated_photons[0]["y"] == 0
|
||||
assert interpolated_photons[0]["energy"] == 4
|
||||
|
||||
@pytest.mark.files
|
||||
def test_cluster_file(test_data_path):
|
||||
"""Test ClusterFile"""
|
||||
cluster_file = aare.ClusterFile_Cluster3x3i(test_data_path / "clust/single_frame_97_clustrers.clust")
|
||||
clustervector = cluster_file.read_clusters(10) #conversion does not work
|
||||
|
||||
cluster_file.close()
|
||||
|
||||
assert clustervector.size == 10
|
||||
|
||||
###reading with wrong file
|
||||
with pytest.raises(TypeError):
|
||||
cluster_file = aare.ClusterFile_Cluster2x2i(test_data_path / "clust/single_frame_97_clustrers.clust")
|
||||
cluster_file.close()
|
||||
|
||||
def test_calculate_eta():
|
||||
"""Calculate Eta"""
|
||||
clusters = aare.ClusterVector_Cluster3x3i()
|
||||
clusters.push_back(aare.Cluster3x3i(0,0, np.ones(9, dtype=np.int32)))
|
||||
clusters.push_back(aare.Cluster3x3i(0,0, np.array([1,1,1,2,2,2,3,3,3])))
|
||||
clusters = _aare.ClusterVector_Cluster3x3i()
|
||||
clusters.push_back(_aare.Cluster3x3i(0,0, np.ones(9, dtype=np.int32)))
|
||||
clusters.push_back(_aare.Cluster3x3i(0,0, np.array([1,1,1,2,2,2,3,3,3])))
|
||||
|
||||
eta2 = aare.calculate_eta2(clusters)
|
||||
eta2 = _aare.calculate_eta2(clusters)
|
||||
|
||||
assert eta2.shape == (2,2)
|
||||
assert eta2[0,0] == 0.5
|
||||
@ -103,7 +89,7 @@ def test_calculate_eta():
|
||||
def test_cluster_finder():
|
||||
"""Test ClusterFinder"""
|
||||
|
||||
clusterfinder = aare.ClusterFinder_Cluster3x3i([100,100])
|
||||
clusterfinder = _aare.ClusterFinder_Cluster3x3i([100,100])
|
||||
|
||||
#frame = np.random.rand(100,100)
|
||||
frame = np.zeros(shape=[100,100])
|
||||
@ -115,18 +101,7 @@ def test_cluster_finder():
|
||||
assert clusters.size == 0
|
||||
|
||||
|
||||
#TODO dont understand behavior
|
||||
def test_cluster_collector():
|
||||
"""Test ClusterCollector"""
|
||||
|
||||
clusterfinder = aare.ClusterFinderMT_Cluster3x3i([100,100]) #TODO: no idea what the data is in InputQueue not zero
|
||||
|
||||
clustercollector = aare.ClusterCollector_Cluster3x3i(clusterfinder)
|
||||
|
||||
cluster_vectors = clustercollector.steal_clusters()
|
||||
|
||||
assert len(cluster_vectors) == 1 #single thread execution
|
||||
assert cluster_vectors[0].size == 0 #
|
||||
|
||||
|
||||
|
||||
|
||||
|
64 python/tests/test_ClusterFile.py Normal file
@ -0,0 +1,64 @@
|
||||
|
||||
import pytest
|
||||
import numpy as np
|
||||
import boost_histogram as bh
|
||||
import time
|
||||
from pathlib import Path
|
||||
import pickle
|
||||
|
||||
from aare import ClusterFile
|
||||
from conftest import test_data_path
|
||||
|
||||
@pytest.mark.files
|
||||
def test_cluster_file(test_data_path):
|
||||
"""Test ClusterFile"""
|
||||
f = ClusterFile(test_data_path / "clust/single_frame_97_clustrers.clust")
|
||||
cv = f.read_clusters(10) #conversion does not work
|
||||
|
||||
|
||||
assert cv.frame_number == 135
|
||||
assert cv.size == 10
|
||||
|
||||
#Known data
|
||||
#frame_number, num_clusters [135] 97
|
||||
#[ 1 200] [0 1 2 3 4 5 6 7 8]
|
||||
#[ 2 201] [ 9 10 11 12 13 14 15 16 17]
|
||||
#[ 3 202] [18 19 20 21 22 23 24 25 26]
|
||||
#[ 4 203] [27 28 29 30 31 32 33 34 35]
|
||||
#[ 5 204] [36 37 38 39 40 41 42 43 44]
|
||||
#[ 6 205] [45 46 47 48 49 50 51 52 53]
|
||||
#[ 7 206] [54 55 56 57 58 59 60 61 62]
|
||||
#[ 8 207] [63 64 65 66 67 68 69 70 71]
|
||||
#[ 9 208] [72 73 74 75 76 77 78 79 80]
|
||||
#[ 10 209] [81 82 83 84 85 86 87 88 89]
|
||||
|
||||
#conversion to numpy array
|
||||
arr = np.array(cv, copy = False)
|
||||
|
||||
assert arr.size == 10
|
||||
for i in range(10):
|
||||
assert arr[i]['x'] == i+1
|
||||
|
||||
@pytest.mark.files
|
||||
def test_read_clusters_and_fill_histogram(test_data_path):
|
||||
# Create the histogram
|
||||
n_bins = 100
|
||||
xmin = -100
|
||||
xmax = 1e4
|
||||
hist_aare = bh.Histogram(bh.axis.Regular(n_bins, xmin, xmax))
|
||||
|
||||
fname = test_data_path / "clust/beam_En700eV_-40deg_300V_10us_d0_f0_100.clust"
|
||||
|
||||
#Read clusters and fill the histogram with pixel values
|
||||
with ClusterFile(fname, chunk_size = 10000) as f:
|
||||
for clusters in f:
|
||||
arr = np.array(clusters, copy = False)
|
||||
hist_aare.fill(arr['data'].flat)
|
||||
|
||||
|
||||
#Load the histogram from the pickle file
|
||||
with open(fname.with_suffix('.pkl'), 'rb') as f:
|
||||
hist_py = pickle.load(f)
|
||||
|
||||
#Compare the two histograms
|
||||
assert hist_aare == hist_py
|
54 python/tests/test_ClusterVector.py Normal file
@ -0,0 +1,54 @@
|
||||
import pytest
|
||||
import numpy as np
|
||||
import boost_histogram as bh
|
||||
import time
|
||||
from pathlib import Path
|
||||
import pickle
|
||||
|
||||
from aare import ClusterFile
|
||||
from aare import _aare
|
||||
from conftest import test_data_path
|
||||
|
||||
|
||||
def test_create_cluster_vector():
|
||||
cv = _aare.ClusterVector_Cluster3x3i()
|
||||
assert cv.cluster_size_x == 3
|
||||
assert cv.cluster_size_y == 3
|
||||
assert cv.size == 0
|
||||
|
||||
|
||||
def test_push_back_on_cluster_vector():
|
||||
cv = _aare.ClusterVector_Cluster2x2i()
|
||||
assert cv.cluster_size_x == 2
|
||||
assert cv.cluster_size_y == 2
|
||||
assert cv.size == 0
|
||||
|
||||
cluster = _aare.Cluster2x2i(19, 22, np.ones(4, dtype=np.int32))
|
||||
cv.push_back(cluster)
|
||||
assert cv.size == 1
|
||||
|
||||
arr = np.array(cv, copy=False)
|
||||
assert arr[0]['x'] == 19
|
||||
assert arr[0]['y'] == 22
|
||||
|
||||
|
||||
def test_make_a_hitmap_from_cluster_vector():
|
||||
cv = _aare.ClusterVector_Cluster3x3i()
|
||||
|
||||
# Push back 4 clusters with different positions
|
||||
cv.push_back(_aare.Cluster3x3i(0, 0, np.ones(9, dtype=np.int32)))
|
||||
cv.push_back(_aare.Cluster3x3i(1, 1, np.ones(9, dtype=np.int32)))
|
||||
cv.push_back(_aare.Cluster3x3i(1, 1, np.ones(9, dtype=np.int32)))
|
||||
cv.push_back(_aare.Cluster3x3i(2, 2, np.ones(9, dtype=np.int32)))
|
||||
|
||||
ref = np.zeros((5, 5), dtype=np.int32)
|
||||
ref[0,0] = 1
|
||||
ref[1,1] = 2
|
||||
ref[2,2] = 1
|
||||
|
||||
|
||||
img = _aare.hitmap((5,5), cv)
|
||||
# print(img)
|
||||
# print(ref)
|
||||
assert (img == ref).all()
|
||||
|
36 python/tests/test_RawSubFile.py Normal file
@ -0,0 +1,36 @@
|
||||
import pytest
|
||||
import numpy as np
|
||||
from aare import RawSubFile, DetectorType
|
||||
|
||||
|
||||
@pytest.mark.files
|
||||
def test_read_a_jungfrau_RawSubFile(test_data_path):
|
||||
with RawSubFile(test_data_path / "raw/jungfrau/jungfrau_single_d0_f1_0.raw", DetectorType.Jungfrau, 512, 1024, 16) as f:
|
||||
assert f.frames_in_file == 3
|
||||
|
||||
headers, frames = f.read()
|
||||
|
||||
assert headers.size == 3
|
||||
assert frames.shape == (3, 512, 1024)
|
||||
|
||||
# Frame numbers in this file should be 4, 5, 6
|
||||
for i,h in zip(range(4,7,1), headers):
|
||||
assert h["frameNumber"] == i
|
||||
|
||||
# Compare to canned data using numpy
|
||||
data = np.load(test_data_path / "raw/jungfrau/jungfrau_single_0.npy")
|
||||
assert np.all(data[3:6] == frames)
|
||||
|
||||
@pytest.mark.files
|
||||
def test_iterate_over_a_jungfrau_RawSubFile(test_data_path):
|
||||
|
||||
data = np.load(test_data_path / "raw/jungfrau/jungfrau_single_0.npy")
|
||||
|
||||
with RawSubFile(test_data_path / "raw/jungfrau/jungfrau_single_d0_f0_0.raw", DetectorType.Jungfrau, 512, 1024, 16) as f:
|
||||
i = 0
|
||||
for header, frame in f:
|
||||
assert header["frameNumber"] == i+1
|
||||
assert np.all(frame == data[i])
|
||||
i += 1
|
||||
assert i == 3
|
||||
assert header["frameNumber"] == 3
|
377 src/AngleCalibration.cpp Normal file
@ -0,0 +1,377 @@
|
||||
#include "aare/AngleCalibration.hpp"
|
||||
|
||||
namespace aare {
|
||||
|
||||
AngleCalibration::AngleCalibration(
|
||||
std::shared_ptr<MythenDetectorSpecifications> mythen_detector_,
|
||||
std::shared_ptr<FlatField> flat_field_,
|
||||
std::shared_ptr<MythenFileReader> mythen_file_reader_)
|
||||
: mythen_detector(mythen_detector_), flat_field(flat_field_),
|
||||
mythen_file_reader(mythen_file_reader_) {
|
||||
centers.reserve(mythen_detector->max_modules());
|
||||
conversions.reserve(mythen_detector->max_modules());
|
||||
offsets.reserve(mythen_detector->max_modules());
|
||||
|
||||
num_bins = std::floor(mythen_detector->max_angle() / histogram_bin_width) -
|
||||
std::floor(mythen_detector->min_angle() / histogram_bin_width) +
|
||||
1; // TODO: only correct if min_angle is negative and max_angle is positive
|
||||
}
|
||||
|
||||
void AngleCalibration::set_histogram_bin_width(double bin_width) {
|
||||
histogram_bin_width = bin_width;
|
||||
|
||||
num_bins = std::floor(mythen_detector->max_angle() / histogram_bin_width) -
|
||||
std::floor(mythen_detector->min_angle() / histogram_bin_width) +
|
||||
1; // TODO: only correct if min_angle is negative and max_angle is positive
|
||||
}
|
||||
|
||||
double AngleCalibration::get_histogram_bin_width() const {
|
||||
return histogram_bin_width;
|
||||
}
|
||||
|
||||
ssize_t AngleCalibration::get_new_num_bins() const { return num_bins; }
|
||||
|
||||
std::vector<double> AngleCalibration::get_centers() const { return centers; }
|
||||
|
||||
std::vector<double> AngleCalibration::get_conversions() const {
|
||||
return conversions;
|
||||
}
|
||||
|
||||
std::vector<double> AngleCalibration::get_offsets() const { return offsets; }
|
||||
|
||||
NDView<double, 1> AngleCalibration::get_new_photon_counts() const {
|
||||
return new_photon_counts.view();
|
||||
}
|
||||
|
||||
NDView<double, 1> AngleCalibration::get_new_statistical_errors() const {
|
||||
return new_photon_count_errors.view();
|
||||
}
|
||||
|
||||
void AngleCalibration::read_initial_calibration_from_file(
|
||||
const std::string &filename) {
|
||||
|
||||
std::string line;
|
||||
uint32_t module_number{};
|
||||
|
||||
try {
|
||||
std::ifstream file(filename, std::ios_base::in);
|
||||
if (!file.good()) {
|
||||
throw std::logic_error("file does not exist");
|
||||
}
|
||||
|
||||
std::stringstream file_buffer;
|
||||
file_buffer << file.rdbuf();
|
||||
|
||||
while (file_buffer >> line) {
|
||||
if (line == "module") {
|
||||
file_buffer >> line;
|
||||
module_number = std::stoi(line);
|
||||
}
|
||||
if (line == "center") {
|
||||
file_buffer >> line;
|
||||
centers.insert(centers.begin() + module_number,
|
||||
std::stod(line));
|
||||
}
|
||||
if (line == "conversion") {
|
||||
file_buffer >> line;
|
||||
conversions.insert(conversions.begin() + module_number,
|
||||
std::stod(line));
|
||||
}
|
||||
if (line == "offset") {
|
||||
file_buffer >> line;
|
||||
offsets.insert(offsets.begin() + module_number,
|
||||
std::stod(line));
|
||||
}
|
||||
}
|
||||
|
||||
file.close();
|
||||
} catch (const std::exception &e) {
|
||||
std::cerr << "Error: " << e.what() << std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
parameters AngleCalibration::convert_to_EE_parameters() const {
|
||||
|
||||
// normal distance between sample and detector (R)
|
||||
std::vector<double> normal_distances(centers.size());
|
||||
// distances between intersection point of sample normal and module origin
|
||||
// (D)
|
||||
std::vector<double> module_center_distances(centers.size());
|
||||
// angles between undiffracted beam and orthogonal sample projection on
|
||||
// detector (phi)
|
||||
std::vector<double> angles(centers.size());
|
||||
|
||||
for (size_t i = 0; i < centers.size(); ++i) {
|
||||
auto [module_center_distance, normal_distance, angle] =
|
||||
convert_to_EE_parameters(i);
|
||||
normal_distances[i] = normal_distance;
|
||||
module_center_distances[i] = module_center_distance;
|
||||
angles[i] = angle;
|
||||
}
|
||||
|
||||
return std::make_tuple(module_center_distances, normal_distances, angles);
|
||||
}
|
||||
|
||||
std::tuple<double, double, double>
|
||||
AngleCalibration::convert_to_EE_parameters(const size_t module_index) const {
|
||||
return convert_to_EE_parameters(centers[module_index],
|
||||
conversions[module_index],
|
||||
offsets[module_index]);
|
||||
}
|
||||
|
||||
std::tuple<double, double, double> AngleCalibration::convert_to_EE_parameters(
|
||||
const double center, const double conversion, const double offset) const {
|
||||
const double module_center_distance =
|
||||
center * MythenDetectorSpecifications::pitch();
|
||||
const double normal_distance =
|
||||
MythenDetectorSpecifications::pitch() / std::abs(conversion);
|
||||
const double angle = offset + 180.0 / M_PI * center * std::abs(conversion);
|
||||
|
||||
return std::make_tuple(module_center_distance, normal_distance, angle);
|
||||
}
|
||||
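For reference, the DG-to-EE conversion implemented above can be summarised as follows (notation introduced only for this summary: p = MythenDetectorSpecifications::pitch(), c = conversion; this is a restatement of the code, not an independent derivation):

    D = \text{center} \cdot p, \qquad R = \frac{p}{|c|}, \qquad \varphi = \text{offset} + \frac{180}{\pi}\,\text{center}\,|c|

with D the distance between the intersection of the sample normal and the module origin, R the normal sample-detector distance and \varphi the angle between the undiffracted beam and the orthogonal sample projection on the detector, matching the comments in the vector overload above.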
|
||||
size_t AngleCalibration::global_to_local_strip_index_conversion(
|
||||
const size_t global_strip_index) const {
|
||||
const size_t module_index =
|
||||
global_strip_index / MythenDetectorSpecifications::strips_per_module();
|
||||
// local strip index in module
|
||||
size_t local_strip_index =
|
||||
global_strip_index -
|
||||
module_index * MythenDetectorSpecifications::strips_per_module();
|
||||
// switch if indexing is in clock-wise direction
|
||||
local_strip_index =
|
||||
std::signbit(conversions[module_index])
|
||||
? MythenDetectorSpecifications::strips_per_module() - 1 -
|
||||
local_strip_index
|
||||
: local_strip_index;
|
||||
|
||||
return local_strip_index;
|
||||
}
|
||||
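Put differently (with N = MythenDetectorSpecifications::strips_per_module() and g the global strip index), the mapping above is

    m = \lfloor g / N \rfloor, \qquad l = g - mN, \qquad l \mapsto N - 1 - l \ \text{if } \text{conversion}_m < 0,

i.e. the local index is mirrored within the module when that module's conversion parameter is negative (the clockwise indexing case noted in the comment).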
|
||||
/*
|
||||
parameters
|
||||
AngleCalibration::convert_to_BC_parameters() {}
|
||||
*/
|
||||
|
||||
double AngleCalibration::diffraction_angle_from_DG_parameters(
|
||||
const double center, const double conversion, const double offset,
|
||||
const size_t strip_index, const double distance_to_strip) const {
|
||||
|
||||
return offset + 180.0 / M_PI *
|
||||
(center * std::abs(conversion) -
|
||||
atan((center - (strip_index + distance_to_strip)) *
|
||||
std::abs(conversion)));
|
||||
}
|
||||
|
||||
double AngleCalibration::diffraction_angle_from_EE_parameters(
|
||||
const double module_center_distance, const double normal_distance,
|
||||
const double angle, const size_t strip_index,
|
||||
const double distance_to_strip) const {
|
||||
|
||||
return angle - 180.0 / M_PI *
|
||||
atan((module_center_distance -
|
||||
MythenDetectorSpecifications::pitch() *
|
||||
(strip_index + distance_to_strip)) /
|
||||
normal_distance); // TODO: why the minus sign? if the angle is
                  // defined counter-clockwise the sign should
                  // be flipped
|
||||
}
|
||||
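Restating the two parameterisations used above (i = local strip index, d = distance_to_strip in strip units, other symbols as in the conversion summary earlier):

    \text{DG:}\quad \theta(i, d) = \text{offset} + \frac{180}{\pi}\Big[\text{center}\,|c| - \arctan\big((\text{center} - (i + d))\,|c|\big)\Big]

    \text{EE:}\quad \theta(i, d) = \varphi - \frac{180}{\pi}\arctan\!\Big(\frac{D - p\,(i + d)}{R}\Big)

Both forms are exercised by the "check conversion from DG to EE parameters" test case further down, which requires them to agree.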
|
||||
double AngleCalibration::angular_strip_width_from_DG_parameters(
|
||||
const double center, const double conversion, const double offset,
|
||||
const size_t local_strip_index) const {
|
||||
|
||||
return std::abs(diffraction_angle_from_DG_parameters(
|
||||
center, conversion, offset, local_strip_index, -0.5) -
|
||||
diffraction_angle_from_DG_parameters(
|
||||
center, conversion, offset, local_strip_index, 0.5));
|
||||
}
|
||||
|
||||
double AngleCalibration::angular_strip_width_from_EE_parameters(
|
||||
const double module_center_distance, const double normal_distance,
|
||||
const double angle, const size_t local_strip_index) const {
|
||||
|
||||
return std::abs(diffraction_angle_from_EE_parameters(
|
||||
module_center_distance, normal_distance, angle,
|
||||
local_strip_index, -0.5) -
|
||||
diffraction_angle_from_EE_parameters(
|
||||
module_center_distance, normal_distance, angle,
|
||||
local_strip_index, 0.5));
|
||||
|
||||
// TODO: again not sure about division order - taking abs anyway
|
||||
}
|
||||
|
||||
void AngleCalibration::calculate_fixed_bin_angle_width_histogram(
|
||||
const size_t start_frame_index, const size_t end_frame_index) {
|
||||
|
||||
new_photon_counts = NDArray<double, 1>(std::array<ssize_t, 1>{num_bins});
|
||||
|
||||
new_photon_count_errors =
|
||||
NDArray<double, 1>(std::array<ssize_t, 1>{num_bins});
|
||||
|
||||
// TODO: maybe group these into a 2d array - better cache usage
|
||||
NDArray<double, 1> bin_counts(std::array<ssize_t, 1>{num_bins}, 0.0);
|
||||
NDArray<double, 1> new_statistical_weights(std::array<ssize_t, 1>{num_bins},
|
||||
0.0);
|
||||
NDArray<double, 1> new_errors(std::array<ssize_t, 1>{num_bins}, 0.0);
|
||||
|
||||
NDArray<double, 1> inverse_normalized_flatfield =
|
||||
flat_field->inverse_normalized_flatfield();
|
||||
|
||||
for (size_t frame_index = start_frame_index; frame_index < end_frame_index;
|
||||
++frame_index) {
|
||||
MythenFrame frame = mythen_file_reader->read_frame(frame_index);
|
||||
redistribute_photon_counts_to_fixed_angle_bins(
|
||||
frame, bin_counts.view(), new_statistical_weights.view(),
|
||||
new_errors.view(), inverse_normalized_flatfield.view());
|
||||
}
|
||||
|
||||
for (ssize_t i = 0; i < new_photon_counts.size(); ++i) {
|
||||
new_photon_counts[i] = (new_statistical_weights[i] <=
|
||||
std::numeric_limits<double>::epsilon())
|
||||
? 0.
|
||||
: bin_counts[i] / new_statistical_weights[i];
|
||||
new_photon_count_errors[i] =
|
||||
(bin_counts[i] <= std::numeric_limits<double>::epsilon())
|
||||
? 0.
|
||||
: 1.0 / std::sqrt(bin_counts[i]);
|
||||
}
|
||||
}
|
||||
|
||||
void AngleCalibration::redistribute_photon_counts_to_fixed_angle_bins(
|
||||
const MythenFrame &frame, NDView<double, 1> bin_counts,
|
||||
NDView<double, 1> new_statistical_weights, NDView<double, 1> new_errors,
|
||||
NDView<double, 1> inverse_normalized_flatfield) const {
|
||||
|
||||
ssize_t channel = 0; // TODO handle mask - FlatField still 1d
|
||||
|
||||
if (frame.photon_counts.shape()[0] != mythen_detector->num_strips()) {
|
||||
throw std::runtime_error("wrong number of strips read");
|
||||
}
|
||||
|
||||
ssize_t num_bins1 = mythen_detector->min_angle() / histogram_bin_width;
|
||||
ssize_t num_bins2 = mythen_detector->max_angle() / histogram_bin_width;
|
||||
|
||||
std::cout << "position: " << frame.detector_angle
|
||||
<< std::endl; // replace with log
|
||||
|
||||
double exposure_rate = 1. / mythen_detector->exposure_time();
|
||||
|
||||
for (ssize_t strip_index = 0; strip_index < mythen_detector->num_strips();
|
||||
++strip_index) {
|
||||
|
||||
size_t module_index =
|
||||
strip_index / MythenDetectorSpecifications::strips_per_module();
|
||||
|
||||
if (mythen_detector->get_bad_channels()[strip_index] ||
|
||||
!mythen_detector->get_connected_modules()[module_index])
|
||||
continue;
|
||||
|
||||
double poisson_error = std::sqrt(frame.photon_counts(strip_index)) *
|
||||
inverse_normalized_flatfield(strip_index) *
|
||||
exposure_rate;
|
||||
|
||||
double corrected_photon_count =
|
||||
frame.photon_counts(strip_index) *
|
||||
inverse_normalized_flatfield(strip_index) * exposure_rate;
|
||||
|
||||
size_t local_strip_index =
|
||||
global_to_local_strip_index_conversion(strip_index);
|
||||
|
||||
double diffraction_angle = diffraction_angle_from_DG_parameters(
|
||||
centers[module_index], conversions[module_index],
|
||||
offsets[module_index], local_strip_index);
|
||||
|
||||
diffraction_angle += (frame.detector_angle + mythen_detector->dtt0() +
|
||||
mythen_detector->bloffset());
|
||||
|
||||
if (diffraction_angle < mythen_detector->min_angle() ||
|
||||
diffraction_angle > mythen_detector->max_angle())
|
||||
continue;
|
||||
|
||||
double angle_covered_by_strip = angular_strip_width_from_DG_parameters(
|
||||
centers[module_index], conversions[module_index],
|
||||
offsets[module_index], local_strip_index);
|
||||
|
||||
double photon_count_per_bin = histogram_bin_width *
|
||||
corrected_photon_count /
|
||||
angle_covered_by_strip;
|
||||
double error_photon_count_per_bin =
|
||||
histogram_bin_width * poisson_error / angle_covered_by_strip;
|
||||
|
||||
double statistical_weights =
|
||||
1.0 / std::pow(error_photon_count_per_bin, 2); // 1./sigma²
|
||||
|
||||
double strip_boundary_left =
|
||||
diffraction_angle - 0.5 * angle_covered_by_strip;
|
||||
double strip_boundary_right =
|
||||
diffraction_angle + 0.5 * angle_covered_by_strip;
|
||||
|
||||
ssize_t left_bin_index = std::max(
|
||||
num_bins1,
|
||||
static_cast<ssize_t>(
|
||||
std::floor(strip_boundary_left / histogram_bin_width) - 1));
|
||||
ssize_t right_bin_index = std::min(
|
||||
num_bins2,
|
||||
static_cast<ssize_t>(
|
||||
std::ceil(strip_boundary_right / histogram_bin_width) + 1));
|
||||
|
||||
// TODO should it be < or <=
|
||||
for (ssize_t bin = left_bin_index; bin <= right_bin_index; ++bin) {
|
||||
double bin_coverage = std::min(strip_boundary_right,
|
||||
(bin + 0.5) * histogram_bin_width) -
|
||||
std::max(strip_boundary_left,
|
||||
(bin - 0.5) * histogram_bin_width);
|
||||
|
||||
double bin_coverage_factor = bin_coverage / histogram_bin_width;
|
||||
|
||||
ssize_t bin_index = bin - num_bins1;
|
||||
// TODO: maybe have this threshold configurable
|
||||
if (bin_coverage >= 0.0001) {
|
||||
new_statistical_weights(bin_index) +=
|
||||
statistical_weights * bin_coverage_factor;
|
||||
bin_counts(bin_index) += statistical_weights *
|
||||
bin_coverage_factor *
|
||||
photon_count_per_bin;
|
||||
new_errors(bin_index) += statistical_weights *
|
||||
bin_coverage_factor *
|
||||
std::pow(photon_count_per_bin, 2);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
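To summarise the redistribution step above (notation introduced only for this summary): for every good strip the flat-field- and exposure-corrected count n with Poisson error \sigma is spread over the fixed-width bins the strip overlaps. With bin width w, angular strip width \Delta, per-bin count n_b = w\,n/\Delta, per-bin error \sigma_b = w\,\sigma/\Delta, weight W = 1/\sigma_b^2 and coverage fraction f_b = (\text{overlap of strip and bin } b)/w, the loop accumulates

    \text{weights}_b \mathrel{+}= W f_b, \qquad \text{counts}_b \mathrel{+}= W f_b\, n_b, \qquad \text{errors}_b \mathrel{+}= W f_b\, n_b^2,

and calculate_fixed_bin_angle_width_histogram() then reports counts_b / weights_b per bin.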
|
||||
void AngleCalibration::write_to_file(
|
||||
const std::string &filename, const bool store_nonzero_bins,
|
||||
const std::filesystem::path &filepath) const {
|
||||
|
||||
std::ofstream output_file(filepath / filename);
|
||||
|
||||
if (!output_file) {
|
||||
std::cerr << "Error opening file!"
|
||||
<< std::endl; // TODO: replace with log
|
||||
}
|
||||
|
||||
output_file << std::fixed << std::setprecision(15);
|
||||
|
||||
for (ssize_t i = 0; i < num_bins; ++i) {
|
||||
if (new_photon_counts[i] <= std::numeric_limits<double>::epsilon() &&
|
||||
store_nonzero_bins) {
|
||||
continue;
|
||||
}
|
||||
|
||||
output_file << std::floor(mythen_detector->min_angle() /
|
||||
histogram_bin_width) *
|
||||
histogram_bin_width +
|
||||
i * histogram_bin_width
|
||||
<< " " << new_photon_counts[i] << " "
|
||||
<< new_photon_count_errors[i] << std::endl;
|
||||
}
|
||||
output_file.close();
|
||||
}
|
||||
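Each emitted line is therefore a three-column record (the test below contains a commented-out call writing such a file as cpp_new_photon_counts.xye):

    \theta_i = \Big\lfloor \frac{\text{min\_angle}}{w} \Big\rfloor w + i\,w, \qquad \text{counts}_i, \qquad \text{error}_i

with near-zero bins skipped when store_nonzero_bins is set (w = histogram bin width).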
|
||||
} // namespace aare
|
234 src/AngleCalibration.test.cpp Normal file
@ -0,0 +1,234 @@
|
||||
/************************************************
|
||||
* @file AngleCalibration.test.cpp
|
||||
* @short test case for angle calibration class
|
||||
***********************************************/
|
||||
|
||||
#include "aare/AngleCalibration.hpp"
|
||||
|
||||
#include <filesystem>
|
||||
|
||||
#include "test_config.hpp"
|
||||
|
||||
#include <iomanip>
|
||||
#include <type_traits>
|
||||
|
||||
#include <catch2/catch_all.hpp>
|
||||
#include <catch2/catch_test_macros.hpp>
|
||||
#include <catch2/matchers/catch_matchers_floating_point.hpp>
|
||||
|
||||
using namespace aare;
|
||||
|
||||
TEST_CASE("read initial angle calibration file",
|
||||
"[.anglecalibration] [.files]") {
|
||||
|
||||
std::shared_ptr<MythenDetectorSpecifications> mythen_detector_ptr =
|
||||
std::make_shared<MythenDetectorSpecifications>();
|
||||
|
||||
AngleCalibration anglecalibration(mythen_detector_ptr,
|
||||
std::shared_ptr<FlatField>{},
|
||||
std::shared_ptr<MythenFileReader>{});
|
||||
|
||||
std::string filename = test_data_path() / "AngleCalibration_Test_Data" /
|
||||
"Angcal_2E_Feb2023_P29.off";
|
||||
|
||||
REQUIRE(std::filesystem::exists(filename));
|
||||
|
||||
anglecalibration.read_initial_calibration_from_file(filename);
|
||||
|
||||
auto centers = anglecalibration.get_centers();
|
||||
auto conversions = anglecalibration.get_conversions();
|
||||
auto offsets = anglecalibration.get_offsets();
|
||||
|
||||
std::cout.precision(17);
|
||||
|
||||
CHECK(centers.size() == 48);
|
||||
CHECK(conversions.size() == 48);
|
||||
CHECK(offsets.size() == 48);
|
||||
|
||||
CHECK(centers[9] == Catch::Approx(660.342326));
|
||||
CHECK(offsets[47] == Catch::Approx(5.8053312));
|
||||
CHECK(conversions[27] == Catch::Approx(-0.6581179125e-4));
|
||||
}
|
||||
|
||||
TEST_CASE("read bad channels",
|
||||
"[.anglecalibration][.mythenspecifications][.files]") {
|
||||
|
||||
MythenDetectorSpecifications mythen_detector;
|
||||
|
||||
std::string bad_channels_filename = test_data_path() /
|
||||
"AngleCalibration_Test_Data" /
|
||||
"bc2023_003_RING.chans";
|
||||
|
||||
REQUIRE(std::filesystem::exists(bad_channels_filename));
|
||||
|
||||
mythen_detector.read_bad_channels_from_file(bad_channels_filename);
|
||||
|
||||
CHECK(mythen_detector.get_bad_channels().size() == 61440);
|
||||
|
||||
CHECK(mythen_detector.get_bad_channels()[61437] == true);
|
||||
CHECK(std::all_of(mythen_detector.get_bad_channels().begin() + 30720,
|
||||
mythen_detector.get_bad_channels().begin() + 61439,
|
||||
[](const bool element) { return element; }));
|
||||
}
|
||||
|
||||
TEST_CASE("read unconnected modules",
|
||||
"[.anglecalibration][.mythenspecifications][.files]") {
|
||||
|
||||
MythenDetectorSpecifications mythen_detector;
|
||||
|
||||
std::string unconnected_modules_filename =
|
||||
test_data_path() / "AngleCalibration_Test_Data" / "ModOut.txt";
|
||||
|
||||
REQUIRE(std::filesystem::exists(unconnected_modules_filename));
|
||||
|
||||
mythen_detector.read_unconnected_modules_from_file(
|
||||
unconnected_modules_filename);
|
||||
|
||||
CHECK(mythen_detector.get_connected_modules().size() == 48);
|
||||
|
||||
CHECK(std::all_of(mythen_detector.get_connected_modules().begin(),
|
||||
mythen_detector.get_connected_modules().end(),
|
||||
[](const bool element) { return element; }));
|
||||
}
|
||||
|
||||
TEST_CASE("read flatfield", "[.anglecalibration][.flatfield][.files]") {
|
||||
|
||||
std::shared_ptr<MythenDetectorSpecifications> mythen_detector_ptr =
|
||||
std::make_shared<MythenDetectorSpecifications>();
|
||||
|
||||
FlatField flatfield(mythen_detector_ptr);
|
||||
|
||||
std::string flatfield_filename =
|
||||
test_data_path() / "AngleCalibration_Test_Data" /
|
||||
"Flatfield_E22p0keV_T11000eV_up_48M_a_LONG_Feb2023_open_WS_SUMC.raw";
|
||||
|
||||
REQUIRE(std::filesystem::exists(flatfield_filename));
|
||||
|
||||
flatfield.read_flatfield_from_file(flatfield_filename);
|
||||
|
||||
auto flatfield_data = flatfield.get_flatfield();
|
||||
|
||||
CHECK(flatfield_data.size() == 61440);
|
||||
|
||||
CHECK(flatfield_data[0] == 0);
|
||||
CHECK(flatfield_data[21] == 4234186);
|
||||
}
|
||||
|
||||
TEST_CASE("compare result with python code", "[.anglecalibration] [.files]") {
|
||||
|
||||
auto fpath = test_data_path() / "AngleCalibration_Test_Data";
|
||||
|
||||
REQUIRE(std::filesystem::exists(fpath));
|
||||
|
||||
std::shared_ptr<MythenDetectorSpecifications> mythen_detector_ptr =
|
||||
std::make_shared<MythenDetectorSpecifications>();
|
||||
|
||||
std::string bad_channels_filename = fpath / "bc2023_003_RING.chans";
|
||||
|
||||
REQUIRE(std::filesystem::exists(bad_channels_filename));
|
||||
|
||||
mythen_detector_ptr->read_bad_channels_from_file(bad_channels_filename);
|
||||
|
||||
std::string unconnected_modules_filename = fpath / "ModOut.txt";
|
||||
|
||||
REQUIRE(std::filesystem::exists(unconnected_modules_filename));
|
||||
|
||||
mythen_detector_ptr->read_unconnected_modules_from_file(
|
||||
unconnected_modules_filename);
|
||||
|
||||
std::shared_ptr<FlatField> flat_field_ptr =
|
||||
std::make_shared<FlatField>(mythen_detector_ptr);
|
||||
|
||||
std::string flatfield_filename =
|
||||
fpath /
|
||||
"Flatfield_E22p0keV_T11000eV_up_48M_a_LONG_Feb2023_open_WS_SUMC.raw";
|
||||
|
||||
REQUIRE(std::filesystem::exists(flatfield_filename));
|
||||
|
||||
flat_field_ptr->read_flatfield_from_file(flatfield_filename);
|
||||
|
||||
std::shared_ptr<MythenFileReader> mythen_file_reader_ptr =
|
||||
std::make_shared<MythenFileReader>(fpath,
|
||||
"ang1up_22keV_LaB60p3mm_48M_a_0");
|
||||
|
||||
AngleCalibration anglecalibration(mythen_detector_ptr, flat_field_ptr,
|
||||
mythen_file_reader_ptr);
|
||||
|
||||
std::string initial_angles_filename = fpath / "Angcal_2E_Feb2023_P29.off";
|
||||
|
||||
REQUIRE(std::filesystem::exists(initial_angles_filename));
|
||||
|
||||
anglecalibration.read_initial_calibration_from_file(
|
||||
initial_angles_filename);
|
||||
|
||||
anglecalibration.calculate_fixed_bin_angle_width_histogram(320, 340);
|
||||
|
||||
// anglecalibration.write_to_file("cpp_new_photon_counts.xye");
|
||||
|
||||
auto expected_filename_photons =
|
||||
test_data_path() / "AngleCalibration_Test_Data" / "new_photons.bin";
|
||||
|
||||
REQUIRE(std::filesystem::exists(expected_filename_photons));
|
||||
|
||||
auto expected_filename_errors =
|
||||
test_data_path() / "AngleCalibration_Test_Data" / "new_errors.bin";
|
||||
|
||||
REQUIRE(std::filesystem::exists(expected_filename_errors));
|
||||
|
||||
ssize_t new_num_bins = anglecalibration.get_new_num_bins();
|
||||
|
||||
auto python_output_errors = load<double, 1>(
|
||||
expected_filename_errors, std::array<ssize_t, 1>{new_num_bins});
|
||||
|
||||
auto python_output_photons = load<double, 1>(
|
||||
expected_filename_photons, std::array<ssize_t, 1>{new_num_bins});
|
||||
|
||||
CHECK(anglecalibration.get_new_photon_counts().equals(
|
||||
python_output_photons.view(),
|
||||
1e-8)); // not sure about the precision - the values do not
        // match to all decimal digits
|
||||
|
||||
CHECK(anglecalibration.get_new_statistical_errors().equals(
|
||||
python_output_errors.view(),
|
||||
1e-8)); //
|
||||
}
|
||||
|
||||
TEST_CASE("check conversion from DG to EE parameters", "[.anglecalibration]") {
|
||||
|
||||
std::shared_ptr<MythenDetectorSpecifications> mythen_detector_ptr =
|
||||
std::make_shared<MythenDetectorSpecifications>();
|
||||
|
||||
AngleCalibration anglecalibration(mythen_detector_ptr,
|
||||
std::shared_ptr<FlatField>{},
|
||||
std::shared_ptr<MythenFileReader>{});
|
||||
|
||||
// DG test parameters
|
||||
const double center = 642.197591224993;
|
||||
const double conversion = 0.657694036246975e-4;
|
||||
const double offset = 5.004892881251670;
|
||||
const ssize_t local_strip_index = 1;
|
||||
|
||||
double diffraction_angle_DG_param =
|
||||
anglecalibration.diffraction_angle_from_DG_parameters(
|
||||
center, conversion, offset, local_strip_index);
|
||||
|
||||
auto [distance_center, normal_distance, angle] =
|
||||
anglecalibration.convert_to_EE_parameters(center, conversion, offset);
|
||||
|
||||
double diffraction_angle_EE_param =
|
||||
anglecalibration.diffraction_angle_from_EE_parameters(
|
||||
distance_center, normal_distance, angle, local_strip_index);
|
||||
|
||||
CHECK(diffraction_angle_EE_param ==
|
||||
Catch::Approx(diffraction_angle_DG_param));
|
||||
|
||||
double strip_width_DG_param =
|
||||
anglecalibration.angular_strip_width_from_DG_parameters(
|
||||
center, conversion, offset, local_strip_index);
|
||||
|
||||
double strip_width_EE_param =
|
||||
anglecalibration.angular_strip_width_from_EE_parameters(
|
||||
distance_center, normal_distance, angle, local_strip_index);
|
||||
|
||||
CHECK(strip_width_DG_param == Catch::Approx(strip_width_EE_param));
|
||||
}
|
@ -21,23 +21,35 @@ using ClusterTypes =
|
||||
auto get_test_parameters() {
|
||||
return GENERATE(
|
||||
std::make_tuple(ClusterTypes{Cluster<int, 2, 2>{0, 0, {1, 2, 3, 1}}},
|
||||
Eta2<int>{2. / 3, 3. / 4, corner::cBottomLeft, 7}),
|
||||
Eta2<int>{2. / 3, 3. / 4,
|
||||
static_cast<int>(corner::cBottomLeft), 7}),
|
||||
std::make_tuple(
|
||||
ClusterTypes{Cluster<int, 3, 3>{0, 0, {1, 2, 3, 4, 5, 6, 1, 2, 7}}},
|
||||
Eta2<int>{6. / 11, 2. / 7, corner::cTopRight, 20}),
|
||||
Eta2<int>{6. / 11, 2. / 7, static_cast<int>(corner::cTopRight),
|
||||
20}),
|
||||
std::make_tuple(ClusterTypes{Cluster<int, 5, 5>{
|
||||
0, 0, {1, 6, 7, 6, 5, 4, 3, 2, 1, 8, 8, 9, 2,
|
||||
0, 0, {1, 6, 7, 6, 5, 4, 3, 2, 1, 2, 8, 9, 8,
|
||||
1, 4, 5, 6, 7, 8, 4, 1, 1, 1, 1, 1}}},
|
||||
Eta2<int>{9. / 17, 5. / 13, 8, 28}),
|
||||
Eta2<int>{8. / 17, 7. / 15, 9, 30}),
|
||||
std::make_tuple(
|
||||
ClusterTypes{Cluster<int, 4, 2>{0, 0, {1, 4, 7, 2, 5, 6, 4, 3}}},
|
||||
Eta2<int>{7. / 11, 6. / 10, 1, 21}),
|
||||
Eta2<int>{4. / 10, 4. / 11, 1, 21}),
|
||||
std::make_tuple(
|
||||
ClusterTypes{Cluster<int, 2, 3>{0, 0, {1, 3, 2, 3, 4, 2}}},
|
||||
Eta2<int>{3. / 5, 4. / 6, 1, 11}));
|
||||
Eta2<int>{3. / 5, 2. / 5, 1, 11}));
|
||||
}
|
||||
|
||||
TEST_CASE("calculate_eta2", "[.eta_calculation]") {
|
||||
TEST_CASE("compute_largest_2x2_subcluster", "[eta_calculation]") {
|
||||
auto [cluster, expected_eta] = get_test_parameters();
|
||||
|
||||
auto [sum, index] = std::visit(
|
||||
[](const auto &clustertype) { return clustertype.max_sum_2x2(); },
|
||||
cluster);
|
||||
CHECK(expected_eta.c == index);
|
||||
CHECK(expected_eta.sum == sum);
|
||||
}
|
||||
|
||||
TEST_CASE("calculate_eta2", "[eta_calculation]") {
|
||||
|
||||
auto [cluster, expected_eta] = get_test_parameters();
|
||||
|
||||
@ -50,3 +62,66 @@ TEST_CASE("calculate_eta2", "[.eta_calculation]") {
|
||||
CHECK(eta.c == expected_eta.c);
|
||||
CHECK(eta.sum == expected_eta.sum);
|
||||
}
|
||||
|
||||
// 3x3 cluster layout (rotated to match the cBottomLeft enum):
|
||||
// 6, 7, 8
|
||||
// 3, 4, 5
|
||||
// 0, 1, 2
|
||||
|
||||
TEST_CASE("Calculate eta2 for a 3x3 int32 cluster with the largest 2x2 sum in "
|
||||
"the bottom left",
|
||||
"[eta_calculation]") {
|
||||
|
||||
// Create a 3x3 cluster
|
||||
Cluster<int32_t, 3, 3> cl;
|
||||
cl.x = 0;
|
||||
cl.y = 0;
|
||||
cl.data[0] = 30;
|
||||
cl.data[1] = 23;
|
||||
cl.data[2] = 5;
|
||||
cl.data[3] = 20;
|
||||
cl.data[4] = 50;
|
||||
cl.data[5] = 3;
|
||||
cl.data[6] = 8;
|
||||
cl.data[7] = 2;
|
||||
cl.data[8] = 3;
|
||||
|
||||
// 8, 2, 3
|
||||
// 20, 50, 3
|
||||
// 30, 23, 5
|
||||
|
||||
auto eta = calculate_eta2(cl);
|
||||
CHECK(eta.c == static_cast<int>(corner::cBottomLeft));
|
||||
CHECK(eta.x == 50.0 / (20 + 50)); // 4/(3+4)
|
||||
CHECK(eta.y == 50.0 / (23 + 50)); // 4/(1+4)
|
||||
CHECK(eta.sum == 30 + 23 + 20 + 50);
|
||||
}
|
||||
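The assertions in this and the following test case are consistent with the following convention (stated here as an inferred summary, not taken from the library documentation): within the largest-sum 2x2 sub-cluster, take the pixel pair lying in the same row (respectively column) as the cluster centre and define

    \eta_x = \frac{E_\text{right}}{E_\text{left} + E_\text{right}}, \qquad \eta_y = \frac{E_\text{top}}{E_\text{bottom} + E_\text{top}}.

For the cBottomLeft case above this gives 50/(20+50) and 50/(23+50); for the cTopLeft case below, 80/(77+80) and 91/(80+91).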
|
||||
TEST_CASE("Calculate eta2 for a 3x3 int32 cluster with the largest 2x2 sum in "
|
||||
"the top left",
|
||||
"[eta_calculation]") {
|
||||
|
||||
// Create a 3x3 cluster
|
||||
Cluster<int32_t, 3, 3> cl;
|
||||
cl.x = 0;
|
||||
cl.y = 0;
|
||||
cl.data[0] = 8;
|
||||
cl.data[1] = 12;
|
||||
cl.data[2] = 5;
|
||||
cl.data[3] = 77;
|
||||
cl.data[4] = 80;
|
||||
cl.data[5] = 3;
|
||||
cl.data[6] = 82;
|
||||
cl.data[7] = 91;
|
||||
cl.data[8] = 3;
|
||||
|
||||
// 82, 91, 3
|
||||
// 77, 80, 3
|
||||
// 8, 12, 5
|
||||
|
||||
auto eta = calculate_eta2(cl);
|
||||
CHECK(eta.c == static_cast<int>(corner::cTopLeft));
|
||||
CHECK(eta.x == 80. / (77 + 80)); // 4/(3+4)
|
||||
CHECK(eta.y == 91.0 / (91 + 80)); // 7/(7+4)
|
||||
CHECK(eta.sum == 77 + 80 + 82 + 91);
|
||||
}
|
||||
|
@ -14,61 +14,8 @@
|
||||
|
||||
using namespace aare;
|
||||
|
||||
TEST_CASE("Correct Instantiation of Cluster and ClusterVector",
|
||||
"[.cluster][.instantiation]") {
|
||||
|
||||
CHECK(is_valid_cluster<double, 3, 3>);
|
||||
CHECK(is_valid_cluster<double, 3, 2>);
|
||||
CHECK(not is_valid_cluster<int, 0, 0>);
|
||||
CHECK(not is_valid_cluster<std::string, 2, 2>);
|
||||
CHECK(not is_valid_cluster<int, 2, 2, double>);
|
||||
|
||||
CHECK(not is_cluster_v<int>);
|
||||
CHECK(is_cluster_v<Cluster<int, 3, 3>>);
|
||||
}
|
||||
|
||||
using ClusterTypes =
|
||||
std::variant<Cluster<int, 2, 2>, Cluster<int, 3, 3>, Cluster<int, 5, 5>,
|
||||
Cluster<int, 4, 2>, Cluster<int, 2, 3>>;
|
||||
|
||||
auto get_test_sum_parameters() {
|
||||
return GENERATE(
|
||||
std::make_tuple(ClusterTypes{Cluster<int, 2, 2>{0, 0, {1, 2, 3, 1}}},
|
||||
std::make_pair(7, 0)),
|
||||
std::make_tuple(
|
||||
ClusterTypes{Cluster<int, 3, 3>{0, 0, {1, 2, 3, 4, 5, 6, 1, 2, 7}}},
|
||||
std::make_pair(20, 3)),
|
||||
std::make_tuple(ClusterTypes{Cluster<int, 5, 5>{
|
||||
0, 0, {1, 6, 7, 6, 5, 4, 3, 2, 1, 8, 8, 9, 2,
|
||||
1, 4, 5, 6, 7, 8, 4, 1, 1, 1, 1, 1}}},
|
||||
std::make_pair(28, 8)),
|
||||
std::make_tuple(
|
||||
ClusterTypes{Cluster<int, 4, 2>{0, 0, {1, 4, 7, 2, 5, 6, 4, 3}}},
|
||||
std::make_pair(21, 1)),
|
||||
std::make_tuple(
|
||||
ClusterTypes{Cluster<int, 2, 3>{0, 0, {1, 3, 2, 3, 4, 2}}},
|
||||
std::make_pair(11, 1)));
|
||||
}
|
||||
|
||||
TEST_CASE("compute_largest_2x2_subcluster", "[.cluster]") {
|
||||
auto [cluster, sum_pair] = get_test_sum_parameters();
|
||||
|
||||
auto sum = std::visit(
|
||||
[](const auto &clustertype) { return clustertype.max_sum_2x2(); },
|
||||
cluster);
|
||||
CHECK(sum_pair.first == sum.first);
|
||||
CHECK(sum_pair.second == sum.second);
|
||||
}
|
||||
|
||||
TEST_CASE("Test sum of Cluster", "[.cluster]") {
|
||||
Cluster<int, 2, 2> cluster{0, 0, {1, 2, 3, 4}};
|
||||
|
||||
CHECK(cluster.sum() == 10);
|
||||
|
||||
Cluster<int, 2, 3> cluster2x3{0, 0, {1, 3, 2, 3, 4, 2}};
|
||||
|
||||
CHECK(cluster2x3.sum() == 15);
|
||||
}
|
||||
}
|
@ -2,23 +2,32 @@
|
||||
#include "test_config.hpp"
|
||||
|
||||
#include "aare/defs.hpp"
|
||||
#include <algorithm>
|
||||
#include <catch2/catch_test_macros.hpp>
|
||||
#include <filesystem>
|
||||
|
||||
using aare::Cluster;
|
||||
using aare::ClusterFile;
|
||||
using aare::ClusterVector;
|
||||
|
||||
TEST_CASE("Read one frame from a a cluster file", "[.files]") {
|
||||
// We know that the frame has 97 clusters
|
||||
|
||||
TEST_CASE("Read one frame from a cluster file", "[.files]") {
|
||||
//We know that the frame has 97 clusters
|
||||
auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust";
|
||||
REQUIRE(std::filesystem::exists(fpath));
|
||||
|
||||
ClusterFile<Cluster<int32_t, 3, 3>> f(fpath);
|
||||
auto clusters = f.read_frame();
|
||||
REQUIRE(clusters.size() == 97);
|
||||
REQUIRE(clusters.frame_number() == 135);
|
||||
CHECK(clusters.size() == 97);
|
||||
CHECK(clusters.frame_number() == 135);
|
||||
CHECK(clusters[0].x == 1);
|
||||
CHECK(clusters[0].y == 200);
|
||||
int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
|
||||
CHECK(std::equal(std::begin(clusters[0].data), std::end(clusters[0].data),
|
||||
std::begin(expected_cluster_data)));
|
||||
}
|
||||
|
||||
|
||||
TEST_CASE("Read one frame using ROI", "[.files]") {
|
||||
// We know that the frame has 97 clusters
|
||||
auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust";
|
||||
@ -37,14 +46,22 @@ TEST_CASE("Read one frame using ROI", "[.files]") {
|
||||
|
||||
// Check that all clusters are within the ROI
|
||||
for (size_t i = 0; i < clusters.size(); i++) {
|
||||
auto c = clusters.at(i);
|
||||
auto c = clusters[i];
|
||||
REQUIRE(c.x >= roi.xmin);
|
||||
REQUIRE(c.x <= roi.xmax);
|
||||
REQUIRE(c.y >= roi.ymin);
|
||||
REQUIRE(c.y <= roi.ymax);
|
||||
}
|
||||
|
||||
CHECK(clusters[0].x == 1);
|
||||
CHECK(clusters[0].y == 200);
|
||||
int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
|
||||
CHECK(std::equal(std::begin(clusters[0].data), std::end(clusters[0].data),
|
||||
std::begin(expected_cluster_data)));
|
||||
}
|
||||
|
||||
|
||||
|
||||
TEST_CASE("Read clusters from single frame file", "[.files]") {
|
||||
|
||||
// frame_number, num_clusters [135] 97
|
||||
@ -147,6 +164,7 @@ TEST_CASE("Read clusters from single frame file", "[.files]") {
|
||||
// [ 97 296] [864 865 866 867 868 869 870 871 872]
|
||||
|
||||
auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust";
|
||||
|
||||
REQUIRE(std::filesystem::exists(fpath));
|
||||
|
||||
SECTION("Read fewer clusters than available") {
|
||||
@ -154,6 +172,12 @@ TEST_CASE("Read clusters from single frame file", "[.files]") {
|
||||
auto clusters = f.read_clusters(50);
|
||||
REQUIRE(clusters.size() == 50);
|
||||
REQUIRE(clusters.frame_number() == 135);
|
||||
int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
|
||||
REQUIRE(clusters[0].x == 1);
|
||||
REQUIRE(clusters[0].y == 200);
|
||||
CHECK(std::equal(std::begin(clusters[0].data),
|
||||
std::end(clusters[0].data),
|
||||
std::begin(expected_cluster_data)));
|
||||
}
|
||||
SECTION("Read more clusters than available") {
|
||||
ClusterFile<Cluster<int32_t, 3, 3>> f(fpath);
|
||||
@ -161,24 +185,167 @@ TEST_CASE("Read clusters from single frame file", "[.files]") {
|
||||
auto clusters = f.read_clusters(100);
|
||||
REQUIRE(clusters.size() == 97);
|
||||
REQUIRE(clusters.frame_number() == 135);
|
||||
int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
|
||||
REQUIRE(clusters[0].x == 1);
|
||||
REQUIRE(clusters[0].y == 200);
|
||||
CHECK(std::equal(std::begin(clusters[0].data),
|
||||
std::end(clusters[0].data),
|
||||
std::begin(expected_cluster_data)));
|
||||
}
|
||||
SECTION("Read all clusters") {
|
||||
ClusterFile<Cluster<int32_t, 3, 3>> f(fpath);
|
||||
auto clusters = f.read_clusters(97);
|
||||
REQUIRE(clusters.size() == 97);
|
||||
REQUIRE(clusters.frame_number() == 135);
|
||||
|
||||
REQUIRE(clusters.at(0).x == 1);
|
||||
REQUIRE(clusters.at(0).y == 200);
|
||||
REQUIRE(clusters[0].x == 1);
|
||||
REQUIRE(clusters[0].y == 200);
|
||||
int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
|
||||
CHECK(std::equal(std::begin(clusters[0].data),
|
||||
std::end(clusters[0].data),
|
||||
std::begin(expected_cluster_data)));
|
||||
}
|
||||
}
|
||||
|
||||
TEST_CASE("Read clusters", "[.files]") {
|
||||
// beam_En700eV_-40deg_300V_10us_d0_f0_100.clust
|
||||
auto fpath = test_data_path() / "clust" /
|
||||
"beam_En700eV_-40deg_300V_10us_d0_f0_100.clust";
|
||||
TEST_CASE("Read clusters from single frame file with ROI", "[.files]") {
|
||||
auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust";
|
||||
REQUIRE(std::filesystem::exists(fpath));
|
||||
|
||||
ClusterFile<Cluster<int32_t, 3, 3>> f(fpath);
|
||||
auto clusters = f.read_clusters(500);
|
||||
|
||||
aare::ROI roi;
|
||||
roi.xmin = 0;
|
||||
roi.xmax = 50;
|
||||
roi.ymin = 200;
|
||||
roi.ymax = 249;
|
||||
f.set_roi(roi);
|
||||
|
||||
auto clusters = f.read_clusters(10);
|
||||
|
||||
CHECK(clusters.size() == 10);
|
||||
CHECK(clusters.frame_number() == 135);
|
||||
CHECK(clusters[0].x == 1);
|
||||
CHECK(clusters[0].y == 200);
|
||||
int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
|
||||
CHECK(std::equal(std::begin(clusters[0].data), std::end(clusters[0].data),
|
||||
std::begin(expected_cluster_data)));
|
||||
}
|
||||
|
||||
TEST_CASE("Read cluster from multiple frame file", "[.files]") {
|
||||
|
||||
using ClusterType = Cluster<double, 2, 2>;
|
||||
|
||||
auto fpath =
|
||||
test_data_path() / "clust" / "Two_frames_2x2double_test_clusters.clust";
|
||||
|
||||
REQUIRE(std::filesystem::exists(fpath));
|
||||
|
||||
// Two_frames_2x2double_test_clusters.clust
|
||||
// frame number, num_clusters 0, 4
|
||||
//[10, 20], {0. ,0., 0., 0.}
|
||||
//[11, 30], {1., 1., 1., 1.}
|
||||
//[12, 40], {2., 2., 2., 2.}
|
||||
//[13, 50], {3., 3., 3., 3.}
|
||||
// 1,4
|
||||
//[10, 20], {4., 4., 4., 4.}
|
||||
//[11, 30], {5., 5., 5., 5.}
|
||||
//[12, 40], {6., 6., 6., 6.}
|
||||
//[13, 50], {7., 7., 7., 7.}
|
||||
|
||||
SECTION("Read clusters from both frames") {
|
||||
ClusterFile<ClusterType> f(fpath);
|
||||
auto clusters = f.read_clusters(2);
|
||||
REQUIRE(clusters.size() == 2);
|
||||
REQUIRE(clusters.frame_number() == 0);
|
||||
|
||||
auto clusters1 = f.read_clusters(3);
|
||||
|
||||
REQUIRE(clusters1.size() == 3);
|
||||
REQUIRE(clusters1.frame_number() == 1);
|
||||
}
|
||||
|
||||
SECTION("Read all clusters") {
|
||||
ClusterFile<ClusterType> f(fpath);
|
||||
auto clusters = f.read_clusters(8);
|
||||
REQUIRE(clusters.size() == 8);
|
||||
REQUIRE(clusters.frame_number() == 1);
|
||||
}
|
||||
|
||||
SECTION("Read clusters from one frame") {
|
||||
ClusterFile<ClusterType> f(fpath);
|
||||
auto clusters = f.read_clusters(2);
|
||||
REQUIRE(clusters.size() == 2);
|
||||
REQUIRE(clusters.frame_number() == 0);
|
||||
|
||||
auto clusters1 = f.read_clusters(1);
|
||||
|
||||
REQUIRE(clusters1.size() == 1);
|
||||
REQUIRE(clusters1.frame_number() == 0);
|
||||
}
|
||||
}
|
||||
|
||||
TEST_CASE("Write cluster with potential padding", "[.files][.ClusterFile]") {
|
||||
|
||||
using ClusterType = Cluster<double, 3, 3>;
|
||||
|
||||
REQUIRE(std::filesystem::exists(test_data_path() / "clust"));
|
||||
|
||||
auto fpath = test_data_path() / "clust" / "single_frame_2_clusters.clust";
|
||||
|
||||
ClusterFile<ClusterType> file(fpath, 1000, "w");
|
||||
|
||||
ClusterVector<ClusterType> clustervec(2);
|
||||
int16_t coordinate = 5;
|
||||
clustervec.push_back(ClusterType{
|
||||
coordinate, coordinate, {0., 0., 0., 0., 0., 0., 0., 0., 0.}});
|
||||
clustervec.push_back(ClusterType{
|
||||
coordinate, coordinate, {0., 0., 0., 0., 0., 0., 0., 0., 0.}});
|
||||
|
||||
file.write_frame(clustervec);
|
||||
|
||||
file.close();
|
||||
|
||||
file.open("r");
|
||||
|
||||
auto read_cluster_vector = file.read_frame();
|
||||
|
||||
CHECK(read_cluster_vector.size() == 2);
|
||||
CHECK(read_cluster_vector.frame_number() == 0);
|
||||
|
||||
CHECK(read_cluster_vector[0].x == clustervec[0].x);
|
||||
CHECK(read_cluster_vector[0].y == clustervec[0].y);
|
||||
CHECK(std::equal(
|
||||
clustervec[0].data.begin(), clustervec[0].data.end(),
|
||||
read_cluster_vector[0].data.begin(), [](double a, double b) {
|
||||
return std::abs(a - b) < std::numeric_limits<double>::epsilon();
|
||||
}));
|
||||
|
||||
CHECK(read_cluster_vector[1].x == clustervec[1].x);
|
||||
CHECK(read_cluster_vector[1].y == clustervec[1].y);
|
||||
CHECK(std::equal(
|
||||
clustervec[1].data.begin(), clustervec[1].data.end(),
|
||||
read_cluster_vector[1].data.begin(), [](double a, double b) {
|
||||
return std::abs(a - b) < std::numeric_limits<double>::epsilon();
|
||||
}));
|
||||
}
|
||||
|
||||
TEST_CASE("Read frame and modify cluster data", "[.files][.ClusterFile]") {
|
||||
auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust";
|
||||
REQUIRE(std::filesystem::exists(fpath));
|
||||
|
||||
ClusterFile<Cluster<int32_t, 3, 3>> f(fpath);
|
||||
|
||||
auto clusters = f.read_frame();
|
||||
CHECK(clusters.size() == 97);
|
||||
CHECK(clusters.frame_number() == 135);
|
||||
|
||||
int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
|
||||
clusters.push_back(
|
||||
Cluster<int32_t, 3, 3>{0, 0, {0, 1, 2, 3, 4, 5, 6, 7, 8}});
|
||||
|
||||
CHECK(clusters.size() == 98);
|
||||
CHECK(clusters[0].x == 1);
|
||||
CHECK(clusters[0].y == 200);
|
||||
|
||||
CHECK(std::equal(std::begin(clusters[0].data), std::end(clusters[0].data),
|
||||
std::begin(expected_cluster_data)));
|
||||
}
|
||||
|
99 src/ClusterFinderMT.test.cpp Normal file
@ -0,0 +1,99 @@
|
||||
|
||||
#include "aare/ClusterFinderMT.hpp"
|
||||
#include "aare/Cluster.hpp"
|
||||
#include "aare/ClusterCollector.hpp"
|
||||
#include "aare/File.hpp"
|
||||
|
||||
#include "test_config.hpp"
|
||||
|
||||
#include <catch2/catch_test_macros.hpp>
|
||||
#include <filesystem>
|
||||
#include <memory>
|
||||
|
||||
using namespace aare;
|
||||
|
||||
// wrapper function to access private member variables for testing
|
||||
template <typename ClusterType, typename FRAME_TYPE = uint16_t,
|
||||
typename PEDESTAL_TYPE = double>
|
||||
class ClusterFinderMTWrapper
|
||||
: public ClusterFinderMT<ClusterType, FRAME_TYPE, PEDESTAL_TYPE> {
|
||||
|
||||
public:
|
||||
ClusterFinderMTWrapper(Shape<2> image_size, PEDESTAL_TYPE nSigma = 5.0,
|
||||
size_t capacity = 2000, size_t n_threads = 3)
|
||||
: ClusterFinderMT<ClusterType, FRAME_TYPE, PEDESTAL_TYPE>(
|
||||
image_size, nSigma, capacity, n_threads) {}
|
||||
|
||||
size_t get_m_input_queues_size() const {
|
||||
return this->m_input_queues.size();
|
||||
}
|
||||
|
||||
size_t get_m_output_queues_size() const {
|
||||
return this->m_output_queues.size();
|
||||
}
|
||||
|
||||
size_t get_m_cluster_finders_size() const {
|
||||
return this->m_cluster_finders.size();
|
||||
}
|
||||
|
||||
bool m_output_queues_are_empty() const {
|
||||
for (auto &queue : this->m_output_queues) {
|
||||
if (!queue->isEmpty())
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool m_input_queues_are_empty() const {
|
||||
for (auto &queue : this->m_input_queues) {
|
||||
if (!queue->isEmpty())
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool m_sink_is_empty() const { return this->m_sink.isEmpty(); }
|
||||
|
||||
size_t m_sink_size() const { return this->m_sink.sizeGuess(); }
|
||||
};
|
||||
|
||||
TEST_CASE("multithreaded cluster finder", "[.files][.ClusterFinder]") {
|
||||
auto fpath = "/mnt/sls_det_storage/matterhorn_data/aare_test_data/"
|
||||
"Moench03new/cu_half_speed_master_4.json";
|
||||
|
||||
File file(fpath);
|
||||
|
||||
size_t n_threads = 2;
|
||||
size_t n_frames_pd = 10;
|
||||
|
||||
using ClusterType = Cluster<int32_t, 3, 3>;
|
||||
|
||||
ClusterFinderMTWrapper<ClusterType> cf(
|
||||
{static_cast<int64_t>(file.rows()), static_cast<int64_t>(file.cols())},
|
||||
5, 2000, n_threads); // FRAME_TYPE defaults to uint16_t
|
||||
|
||||
CHECK(cf.get_m_input_queues_size() == n_threads);
|
||||
CHECK(cf.get_m_output_queues_size() == n_threads);
|
||||
CHECK(cf.get_m_cluster_finders_size() == n_threads);
|
||||
CHECK(cf.m_output_queues_are_empty() == true);
|
||||
CHECK(cf.m_input_queues_are_empty() == true);
|
||||
|
||||
for (size_t i = 0; i < n_frames_pd; ++i) {
|
||||
cf.find_clusters(file.read_frame().view<uint16_t>());
|
||||
}
|
||||
|
||||
cf.stop();
|
||||
|
||||
CHECK(cf.m_output_queues_are_empty() == true);
|
||||
CHECK(cf.m_input_queues_are_empty() == true);
|
||||
|
||||
CHECK(cf.m_sink_size() == n_frames_pd);
|
||||
ClusterCollector<ClusterType> clustercollector(&cf);
|
||||
|
||||
clustercollector.stop();
|
||||
|
||||
CHECK(cf.m_sink_size() == 0);
|
||||
|
||||
auto clustervec = clustercollector.steal_clusters();
|
||||
// CHECK(clustervec.size() == ) //dont know how many clusters to expect
|
||||
}
|
@ -8,15 +8,14 @@
|
||||
using aare::Cluster;
|
||||
using aare::ClusterVector;
|
||||
|
||||
|
||||
TEST_CASE("item_size return the size of the cluster stored"){
|
||||
TEST_CASE("item_size return the size of the cluster stored") {
|
||||
using C1 = Cluster<int32_t, 2, 2>;
|
||||
ClusterVector<C1> cv(4);
|
||||
CHECK(cv.item_size() == sizeof(C1));
|
||||
|
||||
//Sanity check
|
||||
//2*2*4 = 16 bytes of data for the cluster
|
||||
// 2*2 = 4 bytes for the x and y coordinates
|
||||
// Sanity check
|
||||
// 2*2*4 = 16 bytes of data for the cluster
|
||||
// 2*2 = 4 bytes for the x and y coordinates
|
||||
REQUIRE(cv.item_size() == 20);
|
||||
|
||||
using C2 = Cluster<int32_t, 3, 3>;
|
||||
@ -30,8 +29,6 @@ TEST_CASE("item_size return the size of the cluster stored"){
|
||||
using C4 = Cluster<char, 10, 5>;
|
||||
ClusterVector<C4> cv4(4);
|
||||
CHECK(cv4.item_size() == sizeof(C4));
|
||||
|
||||
using C5 = Cluster<int32_t, 2, 3>;
|
||||
ClusterVector<C5> cv5(4);
|
||||
@ -39,12 +36,11 @@ TEST_CASE("item_size return the size of the cluster stored"){
|
||||
|
||||
using C6 = Cluster<double, 5, 5>;
|
||||
ClusterVector<C6> cv6(4);
|
||||
CHECK(cv6.item_size() == sizeof(C6)); // double uses padding!!!
|
||||
CHECK(cv6.item_size() == sizeof(C6));
|
||||
|
||||
using C7 = Cluster<double, 3, 3>;
|
||||
ClusterVector<C7> cv7(4);
|
||||
CHECK(cv7.item_size() == sizeof(C7));
|
||||
}
|
||||
|
||||
TEST_CASE("ClusterVector 2x2 int32_t capacity 4, push back then read",
|
||||
@ -64,7 +60,7 @@ TEST_CASE("ClusterVector 2x2 int32_t capacity 4, push back then read",
|
||||
REQUIRE(cv.size() == 1);
|
||||
REQUIRE(cv.capacity() == 4);
|
||||
|
||||
auto c2 = cv.at(0);
|
||||
auto c2 = cv[0];
|
||||
|
||||
// Check that the data is the same
|
||||
REQUIRE(c1.x == c2.x);
|
||||
@ -226,32 +222,6 @@ TEST_CASE("Concatenate two cluster vectors where we need to allocate",
|
||||
REQUIRE(ptr[3].y == 17);
|
||||
}
|
||||
|
||||
TEST_CASE("calculate cluster sum", "[.ClusterVector]") {
|
||||
ClusterVector<Cluster<int32_t, 2, 2>> cv1(2);
|
||||
Cluster<int32_t, 2, 2> c1 = {1, 2, {3, 4, 5, 6}};
|
||||
cv1.push_back(c1);
|
||||
Cluster<int32_t, 2, 2> c2 = {6, 7, {8, 9, 10, 11}};
|
||||
cv1.push_back(c2);
|
||||
|
||||
auto sum1 = cv1.sum();
|
||||
|
||||
std::vector<int32_t> expected_sum1{18, 38};
|
||||
|
||||
CHECK(sum1 == expected_sum1);
|
||||
|
||||
ClusterVector<Cluster<int32_t, 3, 3>> cv2(2);
|
||||
Cluster<int32_t, 3, 3> c3 = {1, 2, {3, 4, 5, 6, 1, 7, 8, 1, 1}};
|
||||
cv2.push_back(c3);
|
||||
Cluster<int32_t, 3, 3> c4 = {6, 7, {8, 9, 10, 11, 13, 5, 12, 2, 4}};
|
||||
cv2.push_back(c4);
|
||||
|
||||
auto sum2 = cv2.sum();
|
||||
|
||||
std::vector<int32_t> expected_sum2{36, 74};
|
||||
|
||||
CHECK(sum2 == expected_sum2);
|
||||
}
|
||||
|
||||
struct ClusterTestData {
|
||||
uint8_t ClusterSizeX;
|
||||
uint8_t ClusterSizeY;
|
||||
@@ -21,7 +21,7 @@ FilePtr &FilePtr::operator=(FilePtr &&other) {

FILE *FilePtr::get() { return fp_; }

int64_t FilePtr::tell() {
ssize_t FilePtr::tell() {
    auto pos = ftell(fp_);
    if (pos == -1)
        throw std::runtime_error(fmt::format("Error getting file position: {}", error_msg()));
249  src/Fit.cpp

@@ -34,6 +30,30 @@ NDArray<double, 1> pol1(NDView<double, 1> x, NDView<double, 1> par) {
    return y;
}

double scurve(const double x, const double * par) {
    return (par[0] + par[1] * x) + 0.5 * (1 + erf((x - par[2]) / (sqrt(2) * par[3]))) * (par[4] + par[5] * (x - par[2]));
}

NDArray<double, 1> scurve(NDView<double, 1> x, NDView<double, 1> par) {
    NDArray<double, 1> y({x.shape()}, 0);
    for (ssize_t i = 0; i < x.size(); i++) {
        y(i) = scurve(x(i), par.data());
    }
    return y;
}

double scurve2(const double x, const double * par) {
    return (par[0] + par[1] * x) + 0.5 * (1 - erf((x - par[2]) / (sqrt(2) * par[3]))) * (par[4] + par[5] * (x - par[2]));
}

NDArray<double, 1> scurve2(NDView<double, 1> x, NDView<double, 1> par) {
    NDArray<double, 1> y({x.shape()}, 0);
    for (ssize_t i = 0; i < x.size(); i++) {
        y(i) = scurve2(x(i), par.data());
    }
    return y;
}

} // namespace func

NDArray<double, 1> fit_gaus(NDView<double, 1> x, NDView<double, 1> y) {
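For reference, the model implemented by the new func::scurve and func::scurve2 above, transcribing the expressions with p0..p5 the six parameters passed via par, is

    f(x) = p0 + p1*x + 0.5 * (1 ± erf((x - p2) / (sqrt(2) * p3))) * (p4 + p5*(x - p2))

or in LaTeX form f_\pm(x) = p_0 + p_1 x + \tfrac{1}{2}\bigl(1 \pm \operatorname{erf}\bigl(\tfrac{x - p_2}{\sqrt{2}\,p_3}\bigr)\bigr)\bigl(p_4 + p_5 (x - p_2)\bigr): a linear term plus an erf step centred at p2 with width p3, whose height itself varies linearly around p2. scurve takes the + sign (rising edge), scurve2 the - sign (falling edge).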
@@ -273,4 +297,229 @@ NDArray<double, 3> fit_pol1(NDView<double, 1> x, NDView<double, 3> y,
    return result;
}

// ~~ S-CURVES ~~

// SCURVE --
std::array<double, 6> scurve_init_par(const NDView<double, 1> x, const NDView<double, 1> y){
    // Estimate the initial parameters for the fit
    std::array<double, 6> start_par{0, 0, 0, 0, 0, 0};

    auto ymax = std::max_element(y.begin(), y.end());
    auto ymin = std::min_element(y.begin(), y.end());
    start_par[4] = *ymin + (*ymax - *ymin) / 2;

    // Find the first x where the corresponding y value is above the threshold (start_par[4])
    for (ssize_t i = 0; i < y.size(); ++i) {
        if (y[i] >= start_par[4]) {
            start_par[2] = x[i];
            break; // Exit the loop after finding the first valid x
        }
    }

    start_par[3] = 2 * sqrt(start_par[2]);
    start_par[0] = 100;
    start_par[1] = 0.25;
    start_par[5] = 1;
    return start_par;
}

// - No error
NDArray<double, 1> fit_scurve(NDView<double, 1> x, NDView<double, 1> y) {
    NDArray<double, 1> result = scurve_init_par(x, y);
    lm_status_struct status;

    lmcurve(result.size(), result.data(), x.size(), x.data(), y.data(),
            aare::func::scurve, &lm_control_double, &status);

    return result;
}
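A minimal calling sketch for the no-error overload above. The header names and the Shape-based NDView construction are assumptions taken from the tests elsewhere in this compare, not part of the change itself:

    // Sketch only: "aare/Fit.hpp" is the assumed location of fit_scurve.
    #include "aare/Fit.hpp"
    #include "aare/NDView.hpp"

    #include <vector>

    double fitted_threshold(std::vector<double> &thr, std::vector<double> &counts) {
        // Wrap the existing buffers in 1D views (no copy).
        aare::NDView<double, 1> x(thr.data(),
                                  aare::Shape<1>{static_cast<ssize_t>(thr.size())});
        aare::NDView<double, 1> y(counts.data(),
                                  aare::Shape<1>{static_cast<ssize_t>(counts.size())});

        auto par = aare::fit_scurve(x, y); // six fitted parameters p0..p5
        return par(2);                     // p2: position of the erf edge
    }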

NDArray<double, 3> fit_scurve(NDView<double, 1> x, NDView<double, 3> y, int n_threads) {
    NDArray<double, 3> result({y.shape(0), y.shape(1), 6}, 0);

    auto process = [&x, &y, &result](ssize_t first_row, ssize_t last_row) {
        for (ssize_t row = first_row; row < last_row; row++) {
            for (ssize_t col = 0; col < y.shape(1); col++) {
                NDView<double, 1> values(&y(row, col, 0), {y.shape(2)});
                auto res = fit_scurve(x, values);
                result(row, col, 0) = res(0);
                result(row, col, 1) = res(1);
                result(row, col, 2) = res(2);
                result(row, col, 3) = res(3);
                result(row, col, 4) = res(4);
                result(row, col, 5) = res(5);
            }
        }
    };

    auto tasks = split_task(0, y.shape(0), n_threads);
    RunInParallel(process, tasks);
    return result;
}

// - Error
void fit_scurve(NDView<double, 1> x, NDView<double, 1> y, NDView<double, 1> y_err,
                NDView<double, 1> par_out, NDView<double, 1> par_err_out, double& chi2) {

    // Check that we have the correct sizes
    if (y.size() != x.size() || y.size() != y_err.size() ||
        par_out.size() != 6 || par_err_out.size() != 6) {
        throw std::runtime_error("Data, x, data_err must have the same size "
                                 "and par_out, par_err_out must have size 6");
    }

    lm_status_struct status;
    par_out = scurve_init_par(x, y);
    std::array<double, 36> cov = {0}; // size 6x6
    // std::array<double, 4> cov{0, 0, 0, 0};

    lmcurve2(par_out.size(), par_out.data(), par_err_out.data(), cov.data(),
             x.size(), x.data(), y.data(), y_err.data(), aare::func::scurve,
             &lm_control_double, &status);

    // Calculate chi2
    chi2 = 0;
    for (ssize_t i = 0; i < y.size(); i++) {
        chi2 += std::pow((y(i) - func::pol1(x(i), par_out.data())) / y_err(i), 2);
    }
}
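The chi2 accumulated in the loop above is the usual weighted sum of squared residuals,

    \chi^2 = \sum_i \left( \frac{y_i - f(x_i)}{\sigma_i} \right)^2

with sigma_i taken from y_err and f the model used to form each residual.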
|
||||
|
||||
void fit_scurve(NDView<double, 1> x, NDView<double, 3> y, NDView<double, 3> y_err,
|
||||
NDView<double, 3> par_out, NDView<double, 3> par_err_out, NDView<double, 2> chi2_out,
|
||||
int n_threads) {
|
||||
|
||||
auto process = [&](ssize_t first_row, ssize_t last_row) {
|
||||
for (ssize_t row = first_row; row < last_row; row++) {
|
||||
for (ssize_t col = 0; col < y.shape(1); col++) {
|
||||
NDView<double, 1> y_view(&y(row, col, 0), {y.shape(2)});
|
||||
NDView<double, 1> y_err_view(&y_err(row, col, 0),
|
||||
{y_err.shape(2)});
|
||||
NDView<double, 1> par_out_view(&par_out(row, col, 0),
|
||||
{par_out.shape(2)});
|
||||
NDView<double, 1> par_err_out_view(&par_err_out(row, col, 0),
|
||||
{par_err_out.shape(2)});
|
||||
|
||||
fit_scurve(x, y_view, y_err_view, par_out_view, par_err_out_view, chi2_out(row, col));
|
||||
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
auto tasks = split_task(0, y.shape(0), n_threads);
|
||||
RunInParallel(process, tasks);
|
||||
|
||||
}
|
||||
|
||||
// SCURVE2 ---
|
||||
|
||||
std::array<double, 6> scurve2_init_par(const NDView<double, 1> x, const NDView<double, 1> y){
|
||||
// Estimate the initial parameters for the fit
|
||||
std::array<double, 6> start_par{0, 0, 0, 0, 0, 0};
|
||||
|
||||
auto ymax = std::max_element(y.begin(), y.end());
|
||||
auto ymin = std::min_element(y.begin(), y.end());
|
||||
start_par[4] = *ymin + (*ymax - *ymin) / 2;
|
||||
|
||||
// Find the first x where the corresponding y value is above the threshold (start_par[4])
|
||||
for (ssize_t i = 0; i < y.size(); ++i) {
|
||||
if (y[i] <= start_par[4]) {
|
||||
start_par[2] = x[i];
|
||||
break; // Exit the loop after finding the first valid x
|
||||
}
|
||||
}
|
||||
|
||||
start_par[3] = 2 * sqrt(start_par[2]);
|
||||
start_par[0] = 100;
|
||||
start_par[1] = 0.25;
|
||||
start_par[5] = -1;
|
||||
return start_par;
|
||||
}
|
||||
|
||||
// - No error
|
||||
NDArray<double, 1> fit_scurve2(NDView<double, 1> x, NDView<double, 1> y) {
|
||||
NDArray<double, 1> result = scurve2_init_par(x, y);
|
||||
lm_status_struct status;
|
||||
|
||||
lmcurve(result.size(), result.data(), x.size(), x.data(), y.data(),
|
||||
aare::func::scurve2, &lm_control_double, &status);
|
||||
|
||||
return result;
|
||||
}

NDArray<double, 3> fit_scurve2(NDView<double, 1> x, NDView<double, 3> y, int n_threads) {
    NDArray<double, 3> result({y.shape(0), y.shape(1), 6}, 0);

    auto process = [&x, &y, &result](ssize_t first_row, ssize_t last_row) {
        for (ssize_t row = first_row; row < last_row; row++) {
            for (ssize_t col = 0; col < y.shape(1); col++) {
                NDView<double, 1> values(&y(row, col, 0), {y.shape(2)});
                auto res = fit_scurve2(x, values);
                result(row, col, 0) = res(0);
                result(row, col, 1) = res(1);
                result(row, col, 2) = res(2);
                result(row, col, 3) = res(3);
                result(row, col, 4) = res(4);
                result(row, col, 5) = res(5);
            }
        }
    };

    auto tasks = split_task(0, y.shape(0), n_threads);
    RunInParallel(process, tasks);
    return result;
}

// - Error
void fit_scurve2(NDView<double, 1> x, NDView<double, 1> y, NDView<double, 1> y_err,
                 NDView<double, 1> par_out, NDView<double, 1> par_err_out, double& chi2) {

    // Check that we have the correct sizes
    if (y.size() != x.size() || y.size() != y_err.size() ||
        par_out.size() != 6 || par_err_out.size() != 6) {
        throw std::runtime_error("Data, x, data_err must have the same size "
                                 "and par_out, par_err_out must have size 6");
    }

    lm_status_struct status;
    par_out = scurve2_init_par(x, y);
    std::array<double, 36> cov = {0}; // size 6x6
    // std::array<double, 4> cov{0, 0, 0, 0};

    lmcurve2(par_out.size(), par_out.data(), par_err_out.data(), cov.data(),
             x.size(), x.data(), y.data(), y_err.data(), aare::func::scurve2,
             &lm_control_double, &status);

    // Calculate chi2
    chi2 = 0;
    for (ssize_t i = 0; i < y.size(); i++) {
        chi2 += std::pow((y(i) - func::pol1(x(i), par_out.data())) / y_err(i), 2);
    }
}

void fit_scurve2(NDView<double, 1> x, NDView<double, 3> y, NDView<double, 3> y_err,
                 NDView<double, 3> par_out, NDView<double, 3> par_err_out, NDView<double, 2> chi2_out,
                 int n_threads) {

    auto process = [&](ssize_t first_row, ssize_t last_row) {
        for (ssize_t row = first_row; row < last_row; row++) {
            for (ssize_t col = 0; col < y.shape(1); col++) {
                NDView<double, 1> y_view(&y(row, col, 0), {y.shape(2)});
                NDView<double, 1> y_err_view(&y_err(row, col, 0),
                                             {y_err.shape(2)});
                NDView<double, 1> par_out_view(&par_out(row, col, 0),
                                               {par_out.shape(2)});
                NDView<double, 1> par_err_out_view(&par_err_out(row, col, 0),
                                                   {par_err_out.shape(2)});

                fit_scurve2(x, y_view, y_err_view, par_out_view, par_err_out_view, chi2_out(row, col));

            }
        }
    };

    auto tasks = split_task(0, y.shape(0), n_threads);
    RunInParallel(process, tasks);

}

} // namespace aare
110  src/Hdf5FileReader.test.cpp  Normal file

@@ -0,0 +1,110 @@
/************************************************
 * @file Hdf5FileReader.test.cpp
 * @short test case for reading hdf5 files
 ***********************************************/

#include <filesystem>

#include "test_config.hpp"

#include <H5Cpp.h>

#include "aare/Hdf5FileReader.hpp"
#include "aare/NDArray.hpp"

#include <catch2/catch_all.hpp>
#include <catch2/catch_test_macros.hpp>
#include <catch2/matchers/catch_matchers_floating_point.hpp>

using namespace aare;

TEST_CASE("read hdf5 file", "[.hdf5file][.files]") {

    // TODO generalize datasetpath
    std::string filename = test_data_path() / "AngleCalibration_Test_Data" /
                           "ang1up_22keV_LaB60p3mm_48M_a_0320.h5";

    REQUIRE(std::filesystem::exists(filename));

    HDF5FileReader file_reader;

    file_reader.open_file(filename);

    auto dataset = file_reader.get_dataset("/entry/data/data");

    auto shape = dataset.get_shape();

    CHECK(shape[0] == 61440);

    auto type = dataset.get_datatype();

    const std::type_info *type_info = dataset.get_cpp_type();

    CHECK(*type_info == typeid(uint32_t));

    SECTION("read dataset into NDArray") {

        NDArray<uint32_t, 1> dataset_array =
            dataset.store_as_ndarray<uint32_t, 1>();

        CHECK(dataset_array(0) == 866);
        CHECK(dataset_array(61439) == 1436);
    }

    SECTION("read dataset into Frame") {
        Frame frame = dataset.store_as_frame();
        CHECK(*(reinterpret_cast<uint32_t *>(frame.pixel_ptr(0, 0))) == 866);
        CHECK(*(reinterpret_cast<uint32_t *>(frame.pixel_ptr(0, 61439))) ==
              1436);
    }
    SECTION("read subset of dataset") {
        Frame frame(1, 10, Dtype(typeid(uint32_t)));

        Subset subset{std::vector<hsize_t>{10}, std::vector<hsize_t>{10}};

        dataset.read_into_buffer(frame.data(), subset);

        CHECK(*(reinterpret_cast<uint32_t *>(frame.pixel_ptr(0, 0))) == 664);
        CHECK(*(reinterpret_cast<uint32_t *>(frame.pixel_ptr(0, 9))) == 654);
    }
    /*
    SECTION("read scalar") {
    }
    */
}

TEST_CASE("test datatypes", "[.hdf5file]") {

    auto [dtype, expected_type_info] = GENERATE(
        std::make_tuple(H5::DataType(H5::PredType::NATIVE_INT), &typeid(int)),
        std::make_tuple(H5::DataType(H5::PredType::NATIVE_INT8),
                        &typeid(int8_t)),
        std::make_tuple(H5::DataType(H5::PredType::NATIVE_UINT16),
                        &typeid(uint16_t)),
        std::make_tuple(H5::DataType(H5::PredType::NATIVE_INT16),
                        &typeid(int16_t)),
        std::make_tuple(H5::DataType(H5::PredType::STD_U32LE),
                        &typeid(uint32_t)),
        std::make_tuple(H5::DataType(H5::PredType::STD_I32LE),
                        &typeid(int32_t)),
        std::make_tuple(H5::DataType(H5::PredType::NATIVE_INT32),
                        &typeid(int32_t)),
        std::make_tuple(H5::DataType(H5::PredType::IEEE_F64LE),
                        &typeid(double)),
        std::make_tuple(H5::DataType(H5::PredType::IEEE_F32LE), &typeid(float)),
        std::make_tuple(H5::DataType(H5::PredType::NATIVE_FLOAT),
                        &typeid(float)),
        std::make_tuple(H5::DataType(H5::PredType::NATIVE_DOUBLE),
                        &typeid(double)),
        std::make_tuple(H5::DataType(H5::PredType::NATIVE_CHAR),
                        &typeid(int8_t)));

    const std::type_info &type_info = deduce_cpp_type(dtype);

    CHECK(type_info == *expected_type_info);

    // TODO: handle bit swapping
    REQUIRE_THROWS(deduce_cpp_type(
        H5::DataType(H5::PredType::IEEE_F32BE))); // does not convert from big
                                                  // to little endian
}
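The first test above also serves as a usage recipe for the new reader; stripped to the essentials it looks like the sketch below (dataset path and element type are taken from that test and would differ for other files):

    // Sketch, following the pattern of the test above.
    #include "aare/Hdf5FileReader.hpp"
    #include "aare/NDArray.hpp"

    #include <string>

    aare::NDArray<uint32_t, 1> load_counts(const std::string &fname) {
        aare::HDF5FileReader reader;
        reader.open_file(fname);
        auto dataset = reader.get_dataset("/entry/data/data");
        // Read the whole dataset into a newly allocated 1D array.
        return dataset.store_as_ndarray<uint32_t, 1>();
    }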
@@ -89,7 +89,7 @@ void JungfrauDataFile::seek(size_t frame_index) {
                            : frame_index;
    auto byte_offset = frame_offset * (m_bytes_per_frame + header_size);
    m_fp.seek(byte_offset);
};
}

size_t JungfrauDataFile::tell() { return m_current_frame_index; }
size_t JungfrauDataFile::total_frames() const { return m_total_frames; }
@@ -235,4 +235,4 @@ std::filesystem::path JungfrauDataFile::fpath(size_t file_index) const {
    return m_path / fname;
}

} // namespace aare
} // namespace aare
33  src/MythenFileReader.test.cpp  Normal file

@@ -0,0 +1,33 @@
/************************************************
 * @file MythenFileReader.test.cpp
 * @short test case for angle calibration class
 ***********************************************/

#include "aare/MythenFileReader.hpp"

#include <filesystem>

#include "test_config.hpp"

#include <catch2/catch_all.hpp>
#include <catch2/catch_test_macros.hpp>
#include <catch2/matchers/catch_matchers_floating_point.hpp>

using namespace aare;

TEST_CASE("test mythenfile_reader", "[.mythenfilereader][.files]") {

    auto fpath = test_data_path() / "AngleCalibration_Test_Data";

    REQUIRE(std::filesystem::exists(fpath));

    MythenFileReader file_reader(fpath, "ang1up_22keV_LaB60p3mm_48M_a_0");

    auto frame = file_reader.read_frame(320);

    CHECK(frame.detector_angle == 0.99955);

    CHECK(frame.channel_mask == std::array<uint8_t, 3>{0, 0, 1});

    CHECK(frame.photon_counts.size() == 61440);
}
@@ -44,9 +44,9 @@ TEST_CASE("3D NDArray from NDView"){
    REQUIRE(image.size() == view.size());
    REQUIRE(image.data() != view.data());

    for(int64_t i=0; i<image.shape(0); i++){
        for(int64_t j=0; j<image.shape(1); j++){
            for(int64_t k=0; k<image.shape(2); k++){
    for(ssize_t i=0; i<image.shape(0); i++){
        for(ssize_t j=0; j<image.shape(1); j++){
            for(ssize_t k=0; k<image.shape(2); k++){
                REQUIRE(image(i, j, k) == view(i, j, k));
            }
        }
@@ -54,7 +54,7 @@ TEST_CASE("3D NDArray from NDView"){
}

TEST_CASE("1D image") {
    std::array<int64_t, 1> shape{{20}};
    std::array<ssize_t, 1> shape{{20}};
    NDArray<short, 1> img(shape, 3);
    REQUIRE(img.size() == 20);
    REQUIRE(img(5) == 3);
@@ -71,7 +71,7 @@ TEST_CASE("Accessing a const object") {
}

TEST_CASE("Indexing of a 2D image") {
    std::array<int64_t, 2> shape{{3, 7}};
    std::array<ssize_t, 2> shape{{3, 7}};
    NDArray<long> img(shape, 5);
    for (uint32_t i = 0; i != img.size(); ++i) {
        REQUIRE(img(i) == 5);
@@ -114,7 +114,7 @@ TEST_CASE("Divide double by int") {
}

TEST_CASE("Elementwise multiplication of 3D image") {
    std::array<int64_t, 3> shape{3, 4, 2};
    std::array<ssize_t, 3> shape{3, 4, 2};
    NDArray<double, 3> a{shape};
    NDArray<double, 3> b{shape};
    for (uint32_t i = 0; i != a.size(); ++i) {
@@ -179,9 +179,9 @@ TEST_CASE("Compare two images") {
}

TEST_CASE("Size and shape matches") {
    int64_t w = 15;
    int64_t h = 75;
    std::array<int64_t, 2> shape{w, h};
    ssize_t w = 15;
    ssize_t h = 75;
    std::array<ssize_t, 2> shape{w, h};
    NDArray<double> a{shape};
    REQUIRE(a.size() == w * h);
    REQUIRE(a.shape() == shape);
@@ -224,7 +224,7 @@ TEST_CASE("Bitwise and on data") {


TEST_CASE("Elementwise operations on images") {
    std::array<int64_t, 2> shape{5, 5};
    std::array<ssize_t, 2> shape{5, 5};
    double a_val = 3.0;
    double b_val = 8.0;
@@ -3,6 +3,7 @@

#include <iostream>
#include <vector>
#include <numeric>

using aare::NDView;
using aare::Shape;
@@ -21,10 +22,8 @@ TEST_CASE("Element reference 1D") {
}

TEST_CASE("Element reference 2D") {
    std::vector<int> vec;
    for (int i = 0; i != 12; ++i) {
        vec.push_back(i);
    }
    std::vector<int> vec(12);
    std::iota(vec.begin(), vec.end(), 0);

    NDView<int, 2> data(vec.data(), Shape<2>{3, 4});
    REQUIRE(vec.size() == static_cast<size_t>(data.size()));
@@ -58,10 +57,8 @@ TEST_CASE("Element reference 3D") {
}

TEST_CASE("Plus and miuns with single value") {
    std::vector<int> vec;
    for (int i = 0; i != 12; ++i) {
        vec.push_back(i);
    }
    std::vector<int> vec(12);
    std::iota(vec.begin(), vec.end(), 0);
    NDView<int, 2> data(vec.data(), Shape<2>{3, 4});
    data += 5;
    int i = 0;
@@ -116,10 +113,8 @@ TEST_CASE("elementwise assign") {
}

TEST_CASE("iterators") {
    std::vector<int> vec;
    for (int i = 0; i != 12; ++i) {
        vec.push_back(i);
    }
    std::vector<int> vec(12);
    std::iota(vec.begin(), vec.end(), 0);
    NDView<int, 1> data(vec.data(), Shape<1>{12});
    int i = 0;
    for (const auto item : data) {
@@ -147,7 +142,7 @@ TEST_CASE("iterators") {
// for (int i = 0; i != 12; ++i) {
//     vec.push_back(i);
// }
// std::vector<int64_t> shape{3, 4};
// std::vector<ssize_t> shape{3, 4};
// NDView<int, 2> data(vec.data(), shape);
// }

@@ -156,8 +151,8 @@ TEST_CASE("divide with another span") {
    std::vector<int> vec1{3, 2, 1};
    std::vector<int> result{3, 6, 3};

    NDView<int, 1> data0(vec0.data(), Shape<1>{static_cast<int64_t>(vec0.size())});
    NDView<int, 1> data1(vec1.data(), Shape<1>{static_cast<int64_t>(vec1.size())});
    NDView<int, 1> data0(vec0.data(), Shape<1>{static_cast<ssize_t>(vec0.size())});
    NDView<int, 1> data1(vec1.data(), Shape<1>{static_cast<ssize_t>(vec1.size())});

    data0 /= data1;

@@ -167,27 +162,31 @@
}

TEST_CASE("Retrieve shape") {
    std::vector<int> vec;
    for (int i = 0; i != 12; ++i) {
        vec.push_back(i);
    }
    std::vector<int> vec(12);
    std::iota(vec.begin(), vec.end(), 0);
    NDView<int, 2> data(vec.data(), Shape<2>{3, 4});
    REQUIRE(data.shape()[0] == 3);
    REQUIRE(data.shape()[1] == 4);
}

TEST_CASE("compare two views") {
    std::vector<int> vec1;
    for (int i = 0; i != 12; ++i) {
        vec1.push_back(i);
    }
    std::vector<int> vec1(12);
    std::iota(vec1.begin(), vec1.end(), 0);
    NDView<int, 2> view1(vec1.data(), Shape<2>{3, 4});

    std::vector<int> vec2;
    for (int i = 0; i != 12; ++i) {
        vec2.push_back(i);
    }
    std::vector<int> vec2(12);
    std::iota(vec2.begin(), vec2.end(), 0);
    NDView<int, 2> view2(vec2.data(), Shape<2>{3, 4});

    REQUIRE((view1 == view2));
}


TEST_CASE("Create a view over a vector"){
    std::vector<int> vec(12);
    std::iota(vec.begin(), vec.end(), 0);
    auto v = aare::make_view(vec);
    REQUIRE(v.shape()[0] == 12);
    REQUIRE(v[0] == 0);
    REQUIRE(v[11] == 11);
}
@@ -72,8 +72,8 @@ void NumpyFile::get_frame_into(size_t frame_number, std::byte *image_buf) {
    }
}

size_t NumpyFile::pixels_per_frame() { return m_pixels_per_frame; };
size_t NumpyFile::bytes_per_frame() { return m_bytes_per_frame; };
size_t NumpyFile::pixels_per_frame() { return m_pixels_per_frame; }
size_t NumpyFile::bytes_per_frame() { return m_bytes_per_frame; }

std::vector<Frame> NumpyFile::read_n(size_t n_frames) {
    // TODO: implement this in a more efficient way
@@ -197,4 +197,4 @@ void NumpyFile::load_metadata() {
    m_header = {dtype, fortran_order, shape};
}

} // namespace aare
} // namespace aare
@@ -34,7 +34,7 @@ RawFile::RawFile(const std::filesystem::path &fname, const std::string &mode)
    }
}

Frame RawFile::read_frame() { return get_frame(m_current_frame++); };
Frame RawFile::read_frame() { return get_frame(m_current_frame++); }

Frame RawFile::read_frame(size_t frame_number) {
    seek(frame_number);
@@ -52,13 +52,13 @@ void RawFile::read_into(std::byte *image_buf, size_t n_frames) {

void RawFile::read_into(std::byte *image_buf) {
    return get_frame_into(m_current_frame++, image_buf);
};
}


void RawFile::read_into(std::byte *image_buf, DetectorHeader *header) {

    return get_frame_into(m_current_frame++, image_buf, header);
};
}

void RawFile::read_into(std::byte *image_buf, size_t n_frames, DetectorHeader *header) {
    // return get_frame_into(m_current_frame++, image_buf, header);
@@ -70,7 +70,7 @@ void RawFile::read_into(std::byte *image_buf, size_t n_frames, DetectorHeader *h
        header+=n_mod();
    }

};
}

size_t RawFile::n_mod() const { return n_subfile_parts; }

@@ -94,9 +94,9 @@ void RawFile::seek(size_t frame_index) {
                        frame_index, total_frames()));
    }
    m_current_frame = frame_index;
};
}

size_t RawFile::tell() { return m_current_frame; };
size_t RawFile::tell() { return m_current_frame; }

size_t RawFile::total_frames() const { return m_master.frames_in_file(); }
size_t RawFile::rows() const { return m_geometry.pixels_y; }
@@ -360,4 +360,4 @@ RawFile::~RawFile() {



} // namespace aare
} // namespace aare
@@ -87,7 +87,7 @@ int ScanParameters::start() const { return m_start; }
int ScanParameters::stop() const { return m_stop; }
void ScanParameters::increment_stop(){
    m_stop += 1;
};
}
int ScanParameters::step() const { return m_step; }
const std::string &ScanParameters::dac() const { return m_dac; }
bool ScanParameters::enabled() const { return m_enabled; }
@@ -417,4 +417,4 @@ void RawMasterFile::parse_raw(const std::filesystem::path &fpath) {
    if(m_frames_in_file==0)
        m_frames_in_file = m_total_frames_expected;
}
} // namespace aare
} // namespace aare
|
||||
#include "aare/RawSubFile.hpp"
|
||||
#include "aare/PixelMap.hpp"
|
||||
#include "aare/utils/ifstream_helpers.hpp"
|
||||
#include <cstring> // memcpy
|
||||
#include <fmt/core.h>
|
||||
#include <iostream>
|
||||
|
||||
|
||||
|
||||
namespace aare {
|
||||
|
||||
RawSubFile::RawSubFile(const std::filesystem::path &fname,
|
||||
@ -20,7 +23,7 @@ RawSubFile::RawSubFile(const std::filesystem::path &fname,
|
||||
}
|
||||
|
||||
if (std::filesystem::exists(fname)) {
|
||||
n_frames = std::filesystem::file_size(fname) /
|
||||
m_num_frames = std::filesystem::file_size(fname) /
|
||||
(sizeof(DetectorHeader) + rows * cols * bitdepth / 8);
|
||||
} else {
|
||||
throw std::runtime_error(
|
||||
@ -35,7 +38,7 @@ RawSubFile::RawSubFile(const std::filesystem::path &fname,
|
||||
}
|
||||
|
||||
#ifdef AARE_VERBOSE
|
||||
fmt::print("Opened file: {} with {} frames\n", m_fname.string(), n_frames);
|
||||
fmt::print("Opened file: {} with {} frames\n", m_fname.string(), m_num_frames);
|
||||
fmt::print("m_rows: {}, m_cols: {}, m_bitdepth: {}\n", m_rows, m_cols,
|
||||
m_bitdepth);
|
||||
fmt::print("file size: {}\n", std::filesystem::file_size(fname));
|
||||
@ -43,8 +46,8 @@ RawSubFile::RawSubFile(const std::filesystem::path &fname,
|
||||
}
|
||||
|
||||
void RawSubFile::seek(size_t frame_index) {
|
||||
if (frame_index >= n_frames) {
|
||||
throw std::runtime_error(LOCATION + fmt::format("Frame index {} out of range in a file with {} frames", frame_index, n_frames));
|
||||
if (frame_index >= m_num_frames) {
|
||||
throw std::runtime_error(LOCATION + fmt::format("Frame index {} out of range in a file with {} frames", frame_index, m_num_frames));
|
||||
}
|
||||
m_file.seekg((sizeof(DetectorHeader) + bytes_per_frame()) * frame_index);
|
||||
}
|
||||
@ -60,6 +63,10 @@ void RawSubFile::read_into(std::byte *image_buf, DetectorHeader *header) {
|
||||
m_file.seekg(sizeof(DetectorHeader), std::ios::cur);
|
||||
}
|
||||
|
||||
if (m_file.fail()){
|
||||
throw std::runtime_error(LOCATION + ifstream_error_msg(m_file));
|
||||
}
|
||||
|
||||
// TODO! expand support for different bitdepths
|
||||
if (m_pixel_map) {
|
||||
// read into a temporary buffer and then copy the data to the buffer
|
||||
@ -79,8 +86,24 @@ void RawSubFile::read_into(std::byte *image_buf, DetectorHeader *header) {
|
||||
// read directly into the buffer
|
||||
m_file.read(reinterpret_cast<char *>(image_buf), bytes_per_frame());
|
||||
}
|
||||
|
||||
if (m_file.fail()){
|
||||
throw std::runtime_error(LOCATION + ifstream_error_msg(m_file));
|
||||
}
|
||||
}
|
||||
|
||||
void RawSubFile::read_into(std::byte *image_buf, size_t n_frames, DetectorHeader *header) {
|
||||
for (size_t i = 0; i < n_frames; i++) {
|
||||
read_into(image_buf, header);
|
||||
image_buf += bytes_per_frame();
|
||||
if (header) {
|
||||
++header;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
template <typename T>
|
||||
void RawSubFile::read_with_map(std::byte *image_buf) {
|
||||
auto part_buffer = new std::byte[bytes_per_frame()];
|
||||
|
@@ -1,5 +1,5 @@
#include "aare/decode.hpp"

#include <cmath>
namespace aare {

uint16_t adc_sar_05_decode64to16(uint64_t input){
@@ -22,8 +22,12 @@ uint16_t adc_sar_05_decode64to16(uint64_t input){
}

void adc_sar_05_decode64to16(NDView<uint64_t, 2> input, NDView<uint16_t,2> output){
    for(int64_t i = 0; i < input.shape(0); i++){
        for(int64_t j = 0; j < input.shape(1); j++){
    if(input.shape() != output.shape()){
        throw std::invalid_argument(LOCATION + " input and output shapes must match");
    }

    for(ssize_t i = 0; i < input.shape(0); i++){
        for(ssize_t j = 0; j < input.shape(1); j++){
            output(i,j) = adc_sar_05_decode64to16(input(i,j));
        }
    }
@@ -49,13 +53,50 @@ uint16_t adc_sar_04_decode64to16(uint64_t input){
}

void adc_sar_04_decode64to16(NDView<uint64_t, 2> input, NDView<uint16_t,2> output){
    for(int64_t i = 0; i < input.shape(0); i++){
        for(int64_t j = 0; j < input.shape(1); j++){
    if(input.shape() != output.shape()){
        throw std::invalid_argument(LOCATION + " input and output shapes must match");
    }
    for(ssize_t i = 0; i < input.shape(0); i++){
        for(ssize_t j = 0; j < input.shape(1); j++){
            output(i,j) = adc_sar_04_decode64to16(input(i,j));
        }
    }
}

double apply_custom_weights(uint16_t input, const NDView<double, 1> weights) {
    if(weights.size() > 16){
        throw std::invalid_argument("weights size must be less than or equal to 16");
    }

    double result = 0.0;
    for (ssize_t i = 0; i < weights.size(); ++i) {
        result += ((input >> i) & 1) * std::pow(weights[i], i);
    }
    return result;

}

void apply_custom_weights(NDView<uint16_t, 1> input, NDView<double, 1> output, const NDView<double,1> weights) {
    if(input.shape() != output.shape()){
        throw std::invalid_argument(LOCATION + " input and output shapes must match");
    }

    //Calculate weights to avoid repeatedly calling std::pow
    std::vector<double> weights_powers(weights.size());
    for (ssize_t i = 0; i < weights.size(); ++i) {
        weights_powers[i] = std::pow(weights[i], i);
    }

    // Apply custom weights to each element in the input array
    for (ssize_t i = 0; i < input.shape(0); i++) {
        double result = 0.0;
        for (size_t bit_index = 0; bit_index < weights_powers.size(); ++bit_index) {
            result += ((input(i) >> bit_index) & 1) * weights_powers[bit_index];
        }
        output(i) = result;
    }
}


} // namespace aare
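Written out, the scalar apply_custom_weights above computes, for an input word with bits b_i = (input >> i) & 1 and a weight vector w,

    value = \sum_{i} b_i \, w_i^{\,i}

so each set bit contributes the i-th power of its own weight. The vectorised overload only differs in that it precomputes w_i^i once (weights_powers) and reuses it for every element.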
80  src/decode.test.cpp  Normal file

@@ -0,0 +1,80 @@
#include "aare/decode.hpp"

#include <catch2/matchers/catch_matchers_floating_point.hpp>
#include <catch2/catch_test_macros.hpp>
#include "aare/NDArray.hpp"
using Catch::Matchers::WithinAbs;
#include <vector>

TEST_CASE("test_adc_sar_05_decode64to16"){
    uint64_t input = 0;
    uint16_t output = aare::adc_sar_05_decode64to16(input);
    CHECK(output == 0);


    // bit 29 on th input is bit 0 on the output
    input = 1UL << 29;
    output = aare::adc_sar_05_decode64to16(input);
    CHECK(output == 1);

    // test all bits by iteratting through the bitlist
    std::vector<int> bitlist = {29, 19, 28, 18, 31, 21, 27, 20, 24, 23, 25, 22};
    for (size_t i = 0; i < bitlist.size(); i++) {
        input = 1UL << bitlist[i];
        output = aare::adc_sar_05_decode64to16(input);
        CHECK(output == (1 << i));
    }


    // test a few "random" values
    input = 0;
    input |= (1UL << 29);
    input |= (1UL << 19);
    input |= (1UL << 28);
    output = aare::adc_sar_05_decode64to16(input);
    CHECK(output == 7UL);


    input = 0;
    input |= (1UL << 18);
    input |= (1UL << 27);
    input |= (1UL << 25);
    output = aare::adc_sar_05_decode64to16(input);
    CHECK(output == 1096UL);

    input = 0;
    input |= (1UL << 25);
    input |= (1UL << 22);
    output = aare::adc_sar_05_decode64to16(input);
    CHECK(output == 3072UL);
}


TEST_CASE("test_apply_custom_weights") {

    uint16_t input = 1;
    aare::NDArray<double, 1> weights_data({3}, 0.0);
    weights_data(0) = 1.7;
    weights_data(1) = 2.1;
    weights_data(2) = 1.8;

    auto weights = weights_data.view();


    double output = aare::apply_custom_weights(input, weights);
    CHECK_THAT(output, WithinAbs(1.0, 0.001));

    input = 1 << 1;
    output = aare::apply_custom_weights(input, weights);
    CHECK_THAT(output, WithinAbs(2.1, 0.001));


    input = 1 << 2;
    output = aare::apply_custom_weights(input, weights);
    CHECK_THAT(output, WithinAbs(3.24, 0.001));

    input = 0b111;
    output = aare::apply_custom_weights(input, weights);
    CHECK_THAT(output, WithinAbs(6.34, 0.001));

}
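To see where the expected values in test_apply_custom_weights come from, take the weight vector (1.7, 2.1, 1.8) used above; each set bit i contributes w_i^i:

    0b001 -> 1.7^0              = 1.0
    0b010 -> 2.1^1              = 2.1
    0b100 -> 1.8^2              = 3.24
    0b111 -> 1.0 + 2.1 + 3.24   = 6.34

which is exactly the sequence of CHECK_THAT expectations in the test.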
18  src/utils/ifstream_helpers.cpp  Normal file

@@ -0,0 +1,18 @@
#include "aare/utils/ifstream_helpers.hpp"

namespace aare {

std::string ifstream_error_msg(std::ifstream &ifs) {
    std::ios_base::iostate state = ifs.rdstate();
    if (state & std::ios_base::eofbit) {
        return " End of file reached";
    } else if (state & std::ios_base::badbit) {
        return " Bad file stream";
    } else if (state & std::ios_base::failbit) {
        return " File read failed";
    }else{
        return " Unknown/no error";
    }
}

} // namespace aare
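This helper is what the RawSubFile changes earlier in this compare use to turn a failed stream into a readable exception. A minimal self-contained sketch of the same calling pattern (the function name read_block is invented here for illustration):

    // Sketch mirroring the checks added in RawSubFile::read_into above.
    #include "aare/utils/ifstream_helpers.hpp"

    #include <fstream>
    #include <stdexcept>

    void read_block(std::ifstream &file, char *buf, std::streamsize n) {
        file.read(buf, n);
        if (file.fail()) {
            // ifstream_error_msg() maps eofbit/badbit/failbit to a short description.
            throw std::runtime_error("read_block:" + aare::ifstream_error_msg(file));
        }
    }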
57  update_version.py  Normal file

@@ -0,0 +1,57 @@
# SPDX-License-Identifier: LGPL-3.0-or-other
# Copyright (C) 2021 Contributors to the Aare Package
"""
Script to update VERSION file with semantic versioning if provided as an argument, or with 0.0.0 if no argument is provided.
"""

import sys
import os
import re

from packaging.version import Version, InvalidVersion


SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))

def is_integer(value):
    try:
        int(value)
    except ValueError:
        return False
    else:
        return True


def get_version():

    # Check at least one argument is passed
    if len(sys.argv) < 2:
        return "0.0.0"

    version = sys.argv[1]

    try:
        v = Version(version) # normalize check if version follows PEP 440 specification

        version_normalized = version.replace("-", ".")

        version_normalized = re.sub(r'0*(\d+)', lambda m : str(int(m.group(0))), version_normalized) #remove leading zeros

        return version_normalized

    except InvalidVersion as e:
        print(f"Invalid version {version}. Version format must follow semantic versioning format of python PEP 440 version identification specification.")
        sys.exit(1)


def write_version_to_file(version):
    version_file_path = os.path.join(SCRIPT_DIR, "VERSION")
    with open(version_file_path, "w") as version_file:
        version_file.write(version)
    print(f"Version {version} written to VERSION file.")

# Main script
if __name__ == "__main__":

    version = get_version()
    write_version_to_file(version)
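As written, running the script as `python update_version.py 2025.02.01` passes the PEP 440 check, has its leading zeros stripped, and writes `2025.2.1` to the VERSION file next to the script; running it with no argument writes `0.0.0`, and an argument that is not a valid PEP 440 version prints an error and exits with status 1.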