35 Commits

Author SHA1 Message Date
b7a47576a1 Multi threaded fitting and returning chi2 (#132)
Co-authored-by: Patrick <patrick.sieberer@psi.ch>
Co-authored-by: JulianHeymes <julian.heymes@psi.ch>
Co-authored-by: Dhanya Thattil <dhanya.thattil@psi.ch>
2025-02-19 07:19:59 +01:00
dadf5f4869 Added fitting, fixed roi etc (#129)
Co-authored-by: Patrick <patrick.sieberer@psi.ch>
Co-authored-by: JulianHeymes <julian.heymes@psi.ch>
2025-02-12 16:50:31 +01:00
7d6223d52d Cluster finder improvements (#113) 2024-12-16 14:42:18 +01:00
da67f58323 Cluster finder improvements (#112) 2024-12-16 14:26:35 +01:00
e6098c02ef bumped version 2024-12-16 14:24:46 +01:00
29b1dc8df3 missing header 2024-12-13 14:57:36 +01:00
f88b53387f WIP 2024-12-12 17:58:04 +01:00
a0f481c0ee mod pedestal 2024-12-12 14:34:10 +01:00
b3a9e9576b WIP 2024-12-11 16:27:36 +01:00
60534add92 WIP 2024-12-11 09:54:33 +01:00
7f2a23d5b1 accumulating clusters in one array 2024-12-10 22:00:12 +01:00
6a150e8d98 WIP 2024-12-10 17:21:05 +01:00
b43003966f build pkg on all branches, deploy docs on main 2024-11-29 16:41:42 +01:00
c2d039a5bd fix conda build 2024-11-29 16:37:42 +01:00
6fd52f6b8d added missing enums (#111)
- Missing enums
- Matching values to slsDetectorPackage
- tests
2024-11-29 15:28:19 +01:00
659f1f36c5 AARE_INSTALL_PYTHONEXT (#109)
- added AARE_INSTALL_PYTHONEXT option to also install the python files in the
aare folder
2024-11-29 15:28:02 +01:00
0047d15de1 removed print flip 2024-11-29 15:00:18 +01:00
a1b7fb8fc8 added missing enums 2024-11-29 14:56:39 +01:00
2e4a491d7a removed CMAKE_INSTALL_PREFIX, which is not needed to specify the destination folder 2024-11-29 14:38:32 +01:00
fdce2f69b9 python 3.10 required in cmake 2024-11-29 11:07:05 +01:00
ada4d41f4a bugfix on iteration and returning master file (#110) 2024-11-29 08:52:04 +01:00
115dfc0abf bugfix on iteration and returning master file 2024-11-28 21:14:40 +01:00
31b834c3fd added AARE_INSTALL_PYTHONEXT option so that make install also installs the python files in the aare folder 2024-11-28 15:18:13 +01:00
feed4860a6 Developer (#108)
- Support for very old moench
- read_n in RawFile
2024-11-27 21:22:01 +01:00
0df8e4bb7d added support for very old moench files 2024-11-27 16:27:55 +01:00
8bf9ac55ce modified read_n also for File and RawFile 2024-11-27 09:31:57 +01:00
2d33fd4813 Streamlined build, new transforms (#106) 2024-11-26 16:27:00 +01:00
996a8861f6 roll back conda-build 2024-11-26 15:53:06 +01:00
06670a7e24 read_n returns remaining frames (#105)
Modified read_n to return the frames that are still available if fewer than
the requested number remain.

```python
#f is a CtbRawFile containing 10 frames

f.read_n(7) # you get 7 frames
f.read_n(7) # you get 3 frames
f.read_n(7) # RuntimeError
```

Also added support for chunk_size when iterating over a file:

```python
# The file contains 10 frames


with CtbRawFile(fname, chunk_size = 7) as f:
    for headers, frames in f:
        #do something with the data
        # iteration 1: 7 frames
        # iteration 2: 3 frames
        # iteration 3: iteration stops
```
2024-11-26 14:07:21 +01:00
8e3d997bed read_n returns remaining frames 2024-11-26 12:07:17 +01:00
a3f813f9b4 Modified moench05 transform (#103)
Moench05 transforms: 
- moench05: Works with the updated firmware and better data compression
(adcenable10g=0xFF0F)
- moench05_old: Works with the previous data and can be used with
adcenable10g=0xFFFFFFFF
- moench05_1g: For the 1g data acquisition only with adcenable=0x2202
2024-11-26 09:02:33 +01:00
d48482e9da Modified moench05 transform: new firmware (moench05), legacy firmware (moench05_old), 1g readout (moench05_1g) 2024-11-25 16:39:08 +01:00
8f729fc83e Developer (#102) 2024-11-21 10:27:26 +01:00
f9a2d49244 removed extra print 2024-11-21 10:22:22 +01:00
9f7cdbcb48 conversion warnings 2024-11-18 18:18:55 +01:00
78 changed files with 4202 additions and 859 deletions


@@ -1,7 +1,9 @@
-name: Deploy to slsdetectorgroup conda channel
+name: Build pkgs and deploy if on main

 on:
-  workflow_dispatch
+  push:
+    branches:
+      - main

 jobs:
   build:
@@ -28,7 +30,7 @@ jobs:
           channels: conda-forge
       - name: Prepare
-        run: conda install conda-build conda-verify pytest anaconda-client
+        run: conda install conda-build=24.9 conda-verify pytest anaconda-client
       - name: Enable upload
         run: conda config --set anaconda_upload yes


@@ -1,14 +1,10 @@
-name: Build packages but don't deploy
+name: Build pkgs and deploy if on main

 on:
-  workflow_dispatch:
   push:
     branches:
-      - main
       - developer
-      #run on PRs as well?

 jobs:
   build:
     strategy:
@@ -34,10 +30,11 @@ jobs:
           channels: conda-forge
       - name: Prepare
-        run: conda install conda-build conda-verify pytest anaconda-client
+        run: conda install conda-build=24.9 conda-verify pytest anaconda-client
+      - name: Disable upload
+        run: conda config --set anaconda_upload no
      - name: Build
-       env:
-         CONDA_TOKEN: ${{ secrets.CONDA_TOKEN }}
-       run: conda build conda-recipe
+       run: conda build conda-recipe


@@ -3,8 +3,7 @@ name: Build the package using cmake then documentation
 on:
   workflow_dispatch:
   push:
-    branches:
-      - main

 permissions:
@@ -58,6 +57,7 @@ jobs:
       url: ${{ steps.deployment.outputs.page_url }}
     runs-on: ubuntu-latest
     needs: build
+    if: github.ref == 'refs/heads/main'
     steps:
       - name: Deploy to GitHub Pages
         id: deployment


@@ -39,7 +39,8 @@ option(AARE_IN_GITHUB_ACTIONS "Running in Github Actions" OFF)
 option(AARE_DOCS "Build documentation" OFF)
 option(AARE_VERBOSE "Verbose output" OFF)
 option(AARE_CUSTOM_ASSERT "Use custom assert" OFF)
+option(AARE_INSTALL_PYTHONEXT "Install the python extension in the install tree under CMAKE_INSTALL_PREFIX/aare/" OFF)
+option(AARE_ASAN "Enable AddressSanitizer" OFF)

 # Configure which of the dependencies to use FetchContent for
 option(AARE_FETCH_FMT "Use FetchContent to download fmt" ON)
@@ -47,6 +48,7 @@ option(AARE_FETCH_PYBIND11 "Use FetchContent to download pybind11" ON)
 option(AARE_FETCH_CATCH "Use FetchContent to download catch2" ON)
 option(AARE_FETCH_JSON "Use FetchContent to download nlohmann::json" ON)
 option(AARE_FETCH_ZMQ "Use FetchContent to download libzmq" ON)
+option(AARE_FETCH_LMFIT "Use FetchContent to download lmfit" ON)

 #Convenience option to use system libraries only (no FetchContent)
@@ -75,6 +77,31 @@ endif()
 set(CMAKE_EXPORT_COMPILE_COMMANDS ON)

+if(AARE_FETCH_LMFIT)
+    set(lmfit_patch git apply ${CMAKE_CURRENT_SOURCE_DIR}/patches/lmfit.patch)
+    FetchContent_Declare(
+        lmfit
+        GIT_REPOSITORY https://jugit.fz-juelich.de/mlz/lmfit.git
+        GIT_TAG main
+        PATCH_COMMAND ${lmfit_patch}
+        UPDATE_DISCONNECTED 1
+        EXCLUDE_FROM_ALL 1
+    )
+    #Disable what we don't need from lmfit
+    set(BUILD_TESTING OFF CACHE BOOL "")
+    set(LMFIT_CPPTEST OFF CACHE BOOL "")
+    set(LIB_MAN OFF CACHE BOOL "")
+    set(LMFIT_CPPTEST OFF CACHE BOOL "")
+    set(BUILD_SHARED_LIBS OFF CACHE BOOL "")
+
+    FetchContent_MakeAvailable(lmfit)
+    set_property(TARGET lmfit PROPERTY POSITION_INDEPENDENT_CODE ON)
+else()
+    find_package(lmfit REQUIRED)
+endif()
+
 if(AARE_FETCH_ZMQ)
     # Fetchcontent_Declare is deprecated need to find a way to update this
     # for now setting the policy to old is enough
@@ -126,8 +153,8 @@ if (AARE_FETCH_FMT)
         LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
         ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
         RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
         INCLUDES DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}
     )
 else()
     find_package(fmt 6 REQUIRED)
 endif()
@@ -145,7 +172,6 @@ if (AARE_FETCH_JSON)
     install(
         TARGETS nlohmann_json
         EXPORT "${TARGETS_EXPORT_NAME}"
     )
     message(STATUS "target: ${NLOHMANN_JSON_TARGET_NAME}")
 else()
@@ -224,13 +250,6 @@ if(CMAKE_BUILD_TYPE STREQUAL "Release")
     target_compile_options(aare_compiler_flags INTERFACE -O3)
 else()
     message(STATUS "Debug build")
-    target_compile_options(
-        aare_compiler_flags
-        INTERFACE
-        -Og
-        -ggdb3
-    )
 endif()

 # Common flags for GCC and Clang
@@ -241,6 +260,7 @@ target_compile_options(
     -Wextra
     -pedantic
     -Wshadow
+    -Wold-style-cast
     -Wnon-virtual-dtor
     -Woverloaded-virtual
     -Wdouble-promotion
@@ -254,7 +274,21 @@ target_compile_options(
 endif() #GCC/Clang specific

+if(AARE_ASAN)
+    message(STATUS "AddressSanitizer enabled")
+    target_compile_options(
+        aare_compiler_flags
+        INTERFACE
+        -fsanitize=address,undefined,pointer-compare
+        -fno-omit-frame-pointer
+    )
+    target_link_libraries(
+        aare_compiler_flags
+        INTERFACE
+        -fsanitize=address,undefined,pointer-compare
+        -fno-omit-frame-pointer
+    )
+endif()

@@ -273,11 +307,15 @@ set(PUBLICHEADERS
     include/aare/ClusterFinder.hpp
     include/aare/ClusterFile.hpp
     include/aare/CtbRawFile.hpp
+    include/aare/ClusterVector.hpp
+    include/aare/decode.hpp
     include/aare/defs.hpp
     include/aare/Dtype.hpp
     include/aare/File.hpp
+    include/aare/Fit.hpp
     include/aare/FileInterface.hpp
     include/aare/Frame.hpp
+    include/aare/geo_helpers.hpp
     include/aare/NDArray.hpp
     include/aare/NDView.hpp
     include/aare/NumpyFile.hpp
@@ -288,6 +326,7 @@ set(PUBLICHEADERS
     include/aare/RawMasterFile.hpp
     include/aare/RawSubFile.hpp
     include/aare/VarClusterFinder.hpp
+    include/aare/utils/task.hpp
 )

@@ -297,14 +336,18 @@ set(SourceFiles
     ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFile.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/defs.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/Dtype.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/src/decode.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/Frame.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/File.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/src/Fit.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/src/geo_helpers.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyHelpers.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/PixelMap.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/RawFile.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/RawSubFile.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/RawMasterFile.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/src/utils/task.cpp
 )

@@ -314,6 +357,8 @@ target_include_directories(aare_core PUBLIC
     "$<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>"
 )

 target_link_libraries(
     aare_core
     PUBLIC
@@ -322,6 +367,8 @@ target_link_libraries(
     ${STD_FS_LIB} # from helpers.cmake
     PRIVATE
     aare_compiler_flags
+    "$<BUILD_INTERFACE:lmfit>"
 )

 set_target_properties(aare_core PROPERTIES
@@ -338,14 +385,17 @@ if(AARE_TESTS)
     ${CMAKE_CURRENT_SOURCE_DIR}/src/defs.test.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/Dtype.test.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/Frame.test.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/src/geo_helpers.test.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/RawMasterFile.test.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/NDArray.test.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/NDView.test.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFinder.test.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterVector.test.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/Pedestal.test.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.test.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyHelpers.test.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/RawFile.test.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/src/utils/task.test.cpp
 )

 target_sources(tests PRIVATE ${TestSources} )
@@ -413,7 +463,7 @@ endif()
 add_custom_target(
     clang-tidy
-    COMMAND find \( -path "./src/*" -a -not -path "./src/python/*" -a \( -name "*.cpp" -not -name "*.test.cpp"\) \) -not -name "CircularFifo.hpp" -not -name "ProducerConsumerQueue.hpp" -not -name "VariableSizeClusterFinder.hpp" | xargs -I {} -n 1 -P 10 bash -c "${CLANG_TIDY_COMMAND} --config-file=.clang-tidy -p build {}"
+    COMMAND find \( -path "./src/*" -a -not -path "./src/python/*" -a \( -name "*.cpp" -not -name "*.test.cpp" \) \) -not -name "CircularFifo.hpp" -not -name "ProducerConsumerQueue.hpp" -not -name "VariableSizeClusterFinder.hpp" | xargs -I {} -n 1 -P 10 bash -c "${CLANG_TIDY_COMMAND} --config-file=.clang-tidy -p build {}"
     WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
     COMMENT "linting with clang-tidy"
     VERBATIM
@@ -423,4 +473,4 @@ if(AARE_MASTER_PROJECT)
     set(CMAKE_INSTALL_DIR "share/cmake/${PROJECT_NAME}")
     set(PROJECT_LIBRARIES aare-core aare-compiler-flags )
     include(cmake/package_config.cmake)
 endif()


@@ -1,6 +1,7 @@
 package:
   name: aare
-  version: 2024.11.15.dev0 #TODO! how to not duplicate this?
+  version: 2025.2.18 #TODO! how to not duplicate this?

 source:


@@ -12,28 +12,7 @@ set(SPHINX_BUILD ${CMAKE_CURRENT_BINARY_DIR})
 file(GLOB SPHINX_SOURCE_FILES CONFIGURE_DEPENDS "src/*.rst")

-# set(SPHINX_SOURCE_FILES
-#     src/index.rst
-#     src/Installation.rst
-#     src/Requirements.rst
-#     src/NDArray.rst
-#     src/NDView.rst
-#     src/File.rst
-#     src/Frame.rst
-#     src/Dtype.rst
-#     src/ClusterFinder.rst
-#     src/ClusterFile.rst
-#     src/Pedestal.rst
-#     src/RawFile.rst
-#     src/RawSubFile.rst
-#     src/RawMasterFile.rst
-#     src/VarClusterFinder.rst
-#     src/pyVarClusterFinder.rst
-#     src/pyFile.rst
-#     src/pyCtbRawFile.rst
-#     src/pyRawFile.rst
-#     src/pyRawMasterFile.rst
-# )

 foreach(filename ${SPHINX_SOURCE_FILES})


@@ -29,7 +29,6 @@ version = '@PROJECT_VERSION@'
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
 # ones.
 extensions = ['breathe',
-              'sphinx_rtd_theme',
               'sphinx.ext.autodoc',
               'sphinx.ext.napoleon',
 ]


@ -0,0 +1,7 @@
ClusterFinderMT
==================
.. doxygenclass:: aare::ClusterFinderMT
   :members:
   :undoc-members:


@ -0,0 +1,6 @@
ClusterVector
=============
.. doxygenclass:: aare::ClusterVector
   :members:
   :undoc-members:


@@ -30,10 +30,13 @@ AARE
    pyFile
    pyCtbRawFile
    pyClusterFile
+   pyClusterVector
    pyRawFile
    pyRawMasterFile
    pyVarClusterFinder
+   pyFit

 .. toctree::
    :caption: C++ API
@@ -45,7 +48,9 @@ AARE
    File
    Dtype
    ClusterFinder
+   ClusterFinderMT
    ClusterFile
+   ClusterVector
    Pedestal
    RawFile
    RawSubFile


@ -0,0 +1,33 @@
ClusterVector
================
The ClusterVector holds clusters from the ClusterFinder. Since it is templated
in C++, the python class name carries a suffix indicating the data type:
``_i`` for integer, ``_f`` for float, and ``_d`` for double.

At the moment the functionality exposed to python is limited, and it is not
possible to push_back clusters to the vector. The intended use case is to pass
it to C++ functions that accept a ClusterVector or to view it as a numpy array.

**View ClusterVector as numpy array**

.. code:: python

    import numpy as np
    from aare import ClusterFile

    with ClusterFile("path/to/file") as f:
        cluster_vector = f.read_frame()

    # Create a copy of the cluster data in a numpy array
    clusters = np.array(cluster_vector)

    # Avoid copying the data by passing copy=False
    clusters = np.array(cluster_vector, copy=False)

.. py:currentmodule:: aare
.. autoclass:: ClusterVector_i
   :members:
   :undoc-members:
   :show-inheritance:
   :inherited-members:

docs/src/pyFit.rst (new file)

@ -0,0 +1,19 @@
Fit
========
.. py:currentmodule:: aare
**Functions**
.. autofunction:: gaus
.. autofunction:: pol1
**Fitting**
.. autofunction:: fit_gaus
.. autofunction:: fit_pol1


@ -0,0 +1,97 @@
#pragma once
#include <chrono>
#include <fmt/color.h>
#include <fmt/format.h>
#include <memory>
#include <thread>
#include "aare/ProducerConsumerQueue.hpp"
namespace aare {
template <class ItemType> class CircularFifo {
uint32_t fifo_size;
aare::ProducerConsumerQueue<ItemType> free_slots;
aare::ProducerConsumerQueue<ItemType> filled_slots;
public:
CircularFifo() : CircularFifo(100){};
CircularFifo(uint32_t size) : fifo_size(size), free_slots(size + 1), filled_slots(size + 1) {
// TODO! how do we deal with alignment for writing? alignas???
// Do we give the user a chance to provide memory locations?
// Templated allocator?
for (size_t i = 0; i < fifo_size; ++i) {
free_slots.write(ItemType{});
}
}
bool next() {
// TODO! avoid default constructing ItemType
ItemType it;
if (!filled_slots.read(it))
return false;
if (!free_slots.write(std::move(it)))
return false;
return true;
}
~CircularFifo() {}
using value_type = ItemType;
auto numFilledSlots() const noexcept { return filled_slots.sizeGuess(); }
auto numFreeSlots() const noexcept { return free_slots.sizeGuess(); }
auto isFull() const noexcept { return filled_slots.isFull(); }
ItemType pop_free() {
ItemType v;
while (!free_slots.read(v))
;
return std::move(v);
// return v;
}
bool try_pop_free(ItemType &v) { return free_slots.read(v); }
ItemType pop_value(std::chrono::nanoseconds wait, std::atomic<bool> &stopped) {
ItemType v;
while (!filled_slots.read(v) && !stopped) {
std::this_thread::sleep_for(wait);
}
return std::move(v);
}
ItemType pop_value() {
ItemType v;
while (!filled_slots.read(v))
;
return std::move(v);
}
ItemType *frontPtr() { return filled_slots.frontPtr(); }
// TODO! Add function to move item from filled to free to be used
// with the frontPtr function
template <class... Args> void push_value(Args &&...recordArgs) {
while (!filled_slots.write(std::forward<Args>(recordArgs)...))
;
}
template <class... Args> bool try_push_value(Args &&...recordArgs) {
return filled_slots.write(std::forward<Args>(recordArgs)...);
}
template <class... Args> void push_free(Args &&...recordArgs) {
while (!free_slots.write(std::forward<Args>(recordArgs)...))
;
}
template <class... Args> bool try_push_free(Args &&...recordArgs) {
return free_slots.write(std::forward<Args>(recordArgs)...);
}
};
} // namespace aare
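For orientation, a minimal sketch of how this fifo is meant to be driven from a producer and a consumer thread; the item type and fifo size below are made up for illustration:

```cpp
#include "aare/CircularFifo.hpp"
#include <vector>

int main() {
    aare::CircularFifo<std::vector<int>> fifo(8); // 8 pre-allocated slots

    // Producer: take a free slot, fill it, publish it to the filled queue
    auto item = fifo.pop_free();
    item.assign(100, 42);
    fifo.push_value(std::move(item));

    // Consumer: take a filled slot, use it, then recycle it
    auto filled = fifo.pop_value();
    // ... process filled ...
    fifo.push_free(std::move(filled));
}
```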


@ -0,0 +1,52 @@
#pragma once
#include <atomic>
#include <thread>
#include "aare/ProducerConsumerQueue.hpp"
#include "aare/ClusterVector.hpp"
#include "aare/ClusterFinderMT.hpp"
namespace aare {
class ClusterCollector{
ProducerConsumerQueue<ClusterVector<int>>* m_source;
std::atomic<bool> m_stop_requested{false};
std::atomic<bool> m_stopped{true};
std::chrono::milliseconds m_default_wait{1};
std::thread m_thread;
std::vector<ClusterVector<int>> m_clusters;
void process(){
m_stopped = false;
fmt::print("ClusterCollector started\n");
while (!m_stop_requested || !m_source->isEmpty()) {
if (ClusterVector<int> *clusters = m_source->frontPtr();
clusters != nullptr) {
m_clusters.push_back(std::move(*clusters));
m_source->popFront();
}else{
std::this_thread::sleep_for(m_default_wait);
}
}
fmt::print("ClusterCollector stopped\n");
m_stopped = true;
}
public:
ClusterCollector(ClusterFinderMT<uint16_t, double, int32_t>* source){
m_source = source->sink();
m_thread = std::thread(&ClusterCollector::process, this);
}
void stop(){
m_stop_requested = true;
m_thread.join();
}
std::vector<ClusterVector<int>> steal_clusters(){
if(!m_stopped){
throw std::runtime_error("ClusterCollector is still running");
}
return std::move(m_clusters);
}
};
} // namespace aare
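A rough sketch of the intended wiring (the header paths, image size and frame source are assumptions, and ClusterFinderMT is the class defined later in this diff): the collector drains the finder's sink queue on its own thread, and the clusters are retrieved after both have been stopped.

```cpp
#include "aare/ClusterCollector.hpp"
#include "aare/ClusterFinderMT.hpp"

void collect_clusters(std::vector<aare::NDArray<uint16_t, 2>> &frames) {
    aare::ClusterFinderMT<uint16_t, double, int32_t> finder({512, 1024}, {3, 3});
    aare::ClusterCollector collector(&finder);

    for (auto &frame : frames)
        finder.find_clusters(frame.view());

    finder.stop();    // processing threads drain their input queues first
    collector.stop(); // then the collector empties the sink
    auto clusters = collector.steal_clusters(); // std::vector<ClusterVector<int>>
}
```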


@@ -1,12 +1,14 @@
 #pragma once

+#include "aare/ClusterVector.hpp"
+#include "aare/NDArray.hpp"
 #include "aare/defs.hpp"

 #include <filesystem>
 #include <fstream>

 namespace aare {

-struct Cluster {
+struct Cluster3x3 {
     int16_t x;
     int16_t y;
     int32_t data[9];
@@ -31,6 +33,12 @@ typedef enum {
     pTopRight = 8
 } pixel;

+struct Eta2 {
+    double x;
+    double y;
+    corner c;
+};
+
 struct ClusterAnalysis {
     uint32_t c;
     int32_t tot;
@@ -38,28 +46,90 @@ struct ClusterAnalysis {
     double etay;
 };

-/*
-Binary cluster file. Expects data to be layed out as:
-int32_t frame_number
-uint32_t number_of_clusters
-int16_t x, int16_t y, int32_t data[9] x number_of_clusters
-int32_t frame_number
-uint32_t number_of_clusters
-....
-*/
+/**
+ * @brief Class to read and write cluster files
+ * Expects data to be laid out as:
+ *
+ *
+ *       int32_t frame_number
+ *       uint32_t number_of_clusters
+ *       int16_t x, int16_t y, int32_t data[9] x number_of_clusters
+ *       int32_t frame_number
+ *       uint32_t number_of_clusters
+ *       etc.
+ */
 class ClusterFile {
     FILE *fp{};
     uint32_t m_num_left{};
     size_t m_chunk_size{};
+    const std::string m_mode;

   public:
-    ClusterFile(const std::filesystem::path &fname, size_t chunk_size = 1000);
-    std::vector<Cluster> read_clusters(size_t n_clusters);
-    std::vector<Cluster> read_frame(int32_t &out_fnum);
-    std::vector<Cluster>
-    read_cluster_with_cut(size_t n_clusters, double *noise_map, int nx, int ny);
+    /**
+     * @brief Construct a new Cluster File object
+     * @param fname path to the file
+     * @param chunk_size number of clusters to read at a time when iterating
+     * over the file
+     * @param mode mode to open the file in. "r" for reading, "w" for writing,
+     * "a" for appending
+     * @throws std::runtime_error if the file could not be opened
+     */
+    ClusterFile(const std::filesystem::path &fname, size_t chunk_size = 1000,
+                const std::string &mode = "r");
+    ~ClusterFile();

-    int analyze_data(int32_t *data, int32_t *t2, int32_t *t3, char *quad,
-                     double *eta2x, double *eta2y, double *eta3x, double *eta3y);
-    int analyze_cluster(Cluster cl, int32_t *t2, int32_t *t3, char *quad,
-                        double *eta2x, double *eta2y, double *eta3x,
-                        double *eta3y);
+    /**
+     * @brief Read n_clusters clusters from the file discarding frame numbers.
+     * If EOF is reached the returned vector will have less than n_clusters
+     * clusters
+     */
+    ClusterVector<int32_t> read_clusters(size_t n_clusters);
+
+    /**
+     * @brief Read a single frame from the file and return the clusters. The
+     * cluster vector will have the frame number set.
+     * @throws std::runtime_error if the file is not opened for reading or the file pointer not
+     * at the beginning of a frame
+     */
+    ClusterVector<int32_t> read_frame();
+
+    void write_frame(const ClusterVector<int32_t> &clusters);
+
+    // Need to be migrated to support NDArray and return a ClusterVector
+    // std::vector<Cluster3x3>
+    // read_cluster_with_cut(size_t n_clusters, double *noise_map, int nx, int ny);
+
+    /**
+     * @brief Return the chunk size
+     */
     size_t chunk_size() const { return m_chunk_size; }
+
+    /**
+     * @brief Close the file. If not closed the file will be closed in the destructor
+     */
+    void close();
 };

+int analyze_data(int32_t *data, int32_t *t2, int32_t *t3, char *quad,
+                 double *eta2x, double *eta2y, double *eta3x, double *eta3y);
+int analyze_cluster(Cluster3x3 &cl, int32_t *t2, int32_t *t3, char *quad,
+                    double *eta2x, double *eta2y, double *eta3x, double *eta3y);
+
+NDArray<double, 2> calculate_eta2(ClusterVector<int> &clusters);
+Eta2 calculate_eta2(Cluster3x3 &cl);
+
 } // namespace aare
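A short usage sketch against the interface above; the file name is invented, and passing the ClusterVector<int32_t> to calculate_eta2(ClusterVector<int>&) assumes int and int32_t are the same type on the target platform:

```cpp
#include "aare/ClusterFile.hpp"

int main() {
    aare::ClusterFile f("clusters.clust", /*chunk_size=*/1000, "r");

    // Read up to 1000 clusters, ignoring frame boundaries
    auto clusters = f.read_clusters(1000);

    // Eta2 position estimate for every cluster in the vector
    auto eta = aare::calculate_eta2(clusters); // NDArray<double, 2>

    f.close();
}
```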


@ -0,0 +1,56 @@
#pragma once
#include <atomic>
#include <filesystem>
#include <thread>
#include "aare/ProducerConsumerQueue.hpp"
#include "aare/ClusterVector.hpp"
#include "aare/ClusterFinderMT.hpp"
namespace aare{
class ClusterFileSink{
ProducerConsumerQueue<ClusterVector<int>>* m_source;
std::atomic<bool> m_stop_requested{false};
std::atomic<bool> m_stopped{true};
std::chrono::milliseconds m_default_wait{1};
std::thread m_thread;
std::ofstream m_file;
void process(){
m_stopped = false;
fmt::print("ClusterFileSink started\n");
while (!m_stop_requested || !m_source->isEmpty()) {
if (ClusterVector<int> *clusters = m_source->frontPtr();
clusters != nullptr) {
// Write clusters to file
int32_t frame_number = clusters->frame_number(); //TODO! Should we store frame number already as int?
uint32_t num_clusters = clusters->size();
m_file.write(reinterpret_cast<const char*>(&frame_number), sizeof(frame_number));
m_file.write(reinterpret_cast<const char*>(&num_clusters), sizeof(num_clusters));
m_file.write(reinterpret_cast<const char*>(clusters->data()), clusters->size() * clusters->item_size());
m_source->popFront();
}else{
std::this_thread::sleep_for(m_default_wait);
}
}
fmt::print("ClusterFileSink stopped\n");
m_stopped = true;
}
public:
ClusterFileSink(ClusterFinderMT<uint16_t, double, int32_t>* source, const std::filesystem::path& fname){
m_source = source->sink();
m_thread = std::thread(&ClusterFileSink::process, this);
m_file.open(fname, std::ios::binary);
}
void stop(){
m_stop_requested = true;
m_thread.join();
m_file.close();
}
};
} // namespace aare
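Sketch of pairing the sink with a ClusterFinderMT so clusters stream straight to disk; the header path, image size and output file name are illustrative:

```cpp
#include "aare/ClusterFileSink.hpp"
#include "aare/ClusterFinderMT.hpp"

void stream_to_disk(std::vector<aare::NDArray<uint16_t, 2>> &frames) {
    aare::ClusterFinderMT<uint16_t, double, int32_t> finder({512, 1024}, {3, 3});
    aare::ClusterFileSink sink(&finder, "clusters.bin");

    for (size_t i = 0; i < frames.size(); ++i)
        finder.find_clusters(frames[i].view(), i);

    finder.stop(); // let the processing threads finish their queues
    sink.stop();   // then flush the remaining clusters and close the file
}
```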


@ -1,4 +1,6 @@
#pragma once #pragma once
#include "aare/ClusterFile.hpp"
#include "aare/ClusterVector.hpp"
#include "aare/Dtype.hpp" #include "aare/Dtype.hpp"
#include "aare/NDArray.hpp" #include "aare/NDArray.hpp"
#include "aare/NDView.hpp" #include "aare/NDView.hpp"
@ -8,251 +10,139 @@
namespace aare { namespace aare {
/** enum to define the event types */ template <typename FRAME_TYPE = uint16_t, typename PEDESTAL_TYPE = double,
enum eventType { typename CT = int32_t>
PEDESTAL, /** pedestal */
NEIGHBOUR, /** neighbour i.e. below threshold, but in the cluster of a
photon */
PHOTON, /** photon i.e. above threshold */
PHOTON_MAX, /** maximum of a cluster satisfying the photon conditions */
NEGATIVE_PEDESTAL, /** negative value, will not be accounted for as pedestal
in order to avoid drift of the pedestal towards
negative values */
UNDEFINED_EVENT = -1 /** undefined */
};
template <typename FRAME_TYPE = uint16_t, typename PEDESTAL_TYPE = double>
class ClusterFinder { class ClusterFinder {
Shape<2> m_image_size; Shape<2> m_image_size;
const int m_cluster_sizeX; const int m_cluster_sizeX;
const int m_cluster_sizeY; const int m_cluster_sizeY;
const double m_threshold; const PEDESTAL_TYPE m_nSigma;
const double m_nSigma; const PEDESTAL_TYPE c2;
const double c2; const PEDESTAL_TYPE c3;
const double c3;
Pedestal<PEDESTAL_TYPE> m_pedestal; Pedestal<PEDESTAL_TYPE> m_pedestal;
ClusterVector<CT> m_clusters;
public: public:
ClusterFinder(Shape<2> image_size, Shape<2>cluster_size, double nSigma = 5.0, /**
double threshold = 0.0) * @brief Construct a new ClusterFinder object
: m_image_size(image_size), m_cluster_sizeX(cluster_size[0]), m_cluster_sizeY(cluster_size[1]), * @param image_size size of the image
m_threshold(threshold), m_nSigma(nSigma), * @param cluster_size size of the cluster (x, y)
* @param nSigma number of sigma above the pedestal to consider a photon
* @param capacity initial capacity of the cluster vector
*
*/
ClusterFinder(Shape<2> image_size, Shape<2> cluster_size,
PEDESTAL_TYPE nSigma = 5.0, size_t capacity = 1000000)
: m_image_size(image_size), m_cluster_sizeX(cluster_size[0]),
m_cluster_sizeY(cluster_size[1]),
m_nSigma(nSigma),
c2(sqrt((m_cluster_sizeY + 1) / 2 * (m_cluster_sizeX + 1) / 2)), c2(sqrt((m_cluster_sizeY + 1) / 2 * (m_cluster_sizeX + 1) / 2)),
c3(sqrt(m_cluster_sizeX * m_cluster_sizeY)), c3(sqrt(m_cluster_sizeX * m_cluster_sizeY)),
m_pedestal(image_size[0], image_size[1]) { m_pedestal(image_size[0], image_size[1]),
m_clusters(m_cluster_sizeX, m_cluster_sizeY, capacity) {};
// c2 = sqrt((cluster_sizeY + 1) / 2 * (cluster_sizeX + 1) / 2);
// c3 = sqrt(cluster_sizeX * cluster_sizeY);
};
void push_pedestal_frame(NDView<FRAME_TYPE, 2> frame) { void push_pedestal_frame(NDView<FRAME_TYPE, 2> frame) {
m_pedestal.push(frame); m_pedestal.push(frame);
} }
NDArray<PEDESTAL_TYPE, 2> pedestal() { NDArray<PEDESTAL_TYPE, 2> pedestal() { return m_pedestal.mean(); }
return m_pedestal.mean(); NDArray<PEDESTAL_TYPE, 2> noise() { return m_pedestal.std(); }
void clear_pedestal() { m_pedestal.clear(); }
/**
* @brief Move the clusters from the ClusterVector in the ClusterFinder to a
* new ClusterVector and return it.
* @param realloc_same_capacity if true the new ClusterVector will have the
* same capacity as the old one
*
*/
ClusterVector<CT> steal_clusters(bool realloc_same_capacity = false) {
ClusterVector<CT> tmp = std::move(m_clusters);
if (realloc_same_capacity)
m_clusters = ClusterVector<CT>(m_cluster_sizeX, m_cluster_sizeY,
tmp.capacity());
else
m_clusters = ClusterVector<CT>(m_cluster_sizeX, m_cluster_sizeY);
return tmp;
} }
void find_clusters(NDView<FRAME_TYPE, 2> frame, uint64_t frame_number = 0) {
std::vector<DynamicCluster> // // TODO! deal with even size clusters
find_clusters_without_threshold(NDView<FRAME_TYPE, 2> frame, // // currently 3,3 -> +/- 1
// Pedestal<PEDESTAL_TYPE> &pedestal, // // 4,4 -> +/- 2
bool late_update = false) { int dy = m_cluster_sizeY / 2;
struct pedestal_update { int dx = m_cluster_sizeX / 2;
int x; m_clusters.set_frame_number(frame_number);
int y; std::vector<CT> cluster_data(m_cluster_sizeX * m_cluster_sizeY);
FRAME_TYPE value;
};
std::vector<pedestal_update> pedestal_updates;
std::vector<DynamicCluster> clusters;
std::vector<std::vector<eventType>> eventMask;
for (int i = 0; i < frame.shape(0); i++) {
eventMask.push_back(std::vector<eventType>(frame.shape(1)));
}
long double val;
long double max;
for (int iy = 0; iy < frame.shape(0); iy++) { for (int iy = 0; iy < frame.shape(0); iy++) {
for (int ix = 0; ix < frame.shape(1); ix++) { for (int ix = 0; ix < frame.shape(1); ix++) {
// initialize max and total
max = std::numeric_limits<FRAME_TYPE>::min();
long double total = 0;
eventMask[iy][ix] = PEDESTAL;
for (short ir = -(m_cluster_sizeY / 2); PEDESTAL_TYPE max = std::numeric_limits<FRAME_TYPE>::min();
ir < (m_cluster_sizeY / 2) + 1; ir++) { PEDESTAL_TYPE total = 0;
for (short ic = -(m_cluster_sizeX / 2);
ic < (m_cluster_sizeX / 2) + 1; ic++) { // What can we short circuit here?
PEDESTAL_TYPE rms = m_pedestal.std(iy, ix);
PEDESTAL_TYPE value = (frame(iy, ix) - m_pedestal.mean(iy, ix));
if (value < -m_nSigma * rms)
continue; // NEGATIVE_PEDESTAL go to next pixel
// TODO! No pedestal update???
for (int ir = -dy; ir < dy + 1; ir++) {
for (int ic = -dx; ic < dx + 1; ic++) {
if (ix + ic >= 0 && ix + ic < frame.shape(1) && if (ix + ic >= 0 && ix + ic < frame.shape(1) &&
iy + ir >= 0 && iy + ir < frame.shape(0)) { iy + ir >= 0 && iy + ir < frame.shape(0)) {
val = frame(iy + ir, ix + ic) - PEDESTAL_TYPE val =
m_pedestal.mean(iy + ir, ix + ic); frame(iy + ir, ix + ic) -
m_pedestal.mean(iy + ir, ix + ic);
total += val; total += val;
if (val > max) { max = std::max(max, val);
max = val;
}
} }
} }
} }
auto rms = m_pedestal.std(iy, ix);
if (frame(iy, ix) - m_pedestal.mean(iy, ix) < -m_nSigma * rms) {
eventMask[iy][ix] = NEGATIVE_PEDESTAL;
continue;
} else if (max > m_nSigma * rms) {
eventMask[iy][ix] = PHOTON;
if ((max > m_nSigma * rms)) {
if (value < max)
continue; // Not max go to the next pixel
// but also no pedestal update
} else if (total > c3 * m_nSigma * rms) { } else if (total > c3 * m_nSigma * rms) {
eventMask[iy][ix] = PHOTON; // pass
} else { } else {
if (late_update) { // m_pedestal.push(iy, ix, frame(iy, ix)); // Safe option
pedestal_updates.push_back({ix, iy, frame(iy, ix)}); m_pedestal.push_fast(iy, ix, frame(iy, ix)); // Assume we have reached n_samples in the pedestal, slight performance improvement
} else { continue; // It was a pedestal value nothing to store
m_pedestal.push(iy, ix, frame(iy, ix));
}
continue;
} }
if (eventMask[iy][ix] == PHOTON &&
(frame(iy, ix) - m_pedestal.mean(iy, ix)) >= max) {
eventMask[iy][ix] = PHOTON_MAX;
DynamicCluster cluster(m_cluster_sizeX, m_cluster_sizeY,
Dtype(typeid(PEDESTAL_TYPE)));
cluster.x = ix;
cluster.y = iy;
short i = 0;
for (short ir = -(m_cluster_sizeY / 2); // Store cluster
ir < (m_cluster_sizeY / 2) + 1; ir++) { if (value == max) {
for (short ic = -(m_cluster_sizeX / 2); // Zero out the cluster data
ic < (m_cluster_sizeX / 2) + 1; ic++) { std::fill(cluster_data.begin(), cluster_data.end(), 0);
// Fill the cluster data since we have a photon to store
// It's worth redoing the loop since most of the time we
// don't have a photon
int i = 0;
for (int ir = -dy; ir < dy + 1; ir++) {
for (int ic = -dx; ic < dx + 1; ic++) {
if (ix + ic >= 0 && ix + ic < frame.shape(1) && if (ix + ic >= 0 && ix + ic < frame.shape(1) &&
iy + ir >= 0 && iy + ir < frame.shape(0)) { iy + ir >= 0 && iy + ir < frame.shape(0)) {
PEDESTAL_TYPE tmp = CT tmp =
static_cast<PEDESTAL_TYPE>( static_cast<CT>(frame(iy + ir, ix + ic)) -
frame(iy + ir, ix + ic)) -
m_pedestal.mean(iy + ir, ix + ic); m_pedestal.mean(iy + ir, ix + ic);
cluster.set<PEDESTAL_TYPE>(i, tmp); cluster_data[i] =
tmp; // Watch for out of bounds access
i++; i++;
} }
} }
} }
clusters.push_back(cluster);
// Add the cluster to the output ClusterVector
m_clusters.push_back(
ix, iy,
reinterpret_cast<std::byte *>(cluster_data.data()));
} }
} }
} }
if (late_update) {
for (auto &update : pedestal_updates) {
m_pedestal.push(update.y, update.x, update.value);
}
}
return clusters;
}
// template <typename FRAME_TYPE, typename PEDESTAL_TYPE>
std::vector<DynamicCluster>
find_clusters_with_threshold(NDView<FRAME_TYPE, 2> frame,
Pedestal<PEDESTAL_TYPE> &pedestal) {
assert(m_threshold > 0);
std::vector<DynamicCluster> clusters;
std::vector<std::vector<eventType>> eventMask;
for (int i = 0; i < frame.shape(0); i++) {
eventMask.push_back(std::vector<eventType>(frame.shape(1)));
}
double tthr, tthr1, tthr2;
NDArray<FRAME_TYPE, 2> rest({frame.shape(0), frame.shape(1)});
NDArray<int, 2> nph({frame.shape(0), frame.shape(1)});
// convert to n photons
// nph = (frame-pedestal.mean()+0.5*m_threshold)/m_threshold; // can be
// optimized with expression templates?
for (int iy = 0; iy < frame.shape(0); iy++) {
for (int ix = 0; ix < frame.shape(1); ix++) {
auto val = frame(iy, ix) - pedestal.mean(iy, ix);
nph(iy, ix) = (val + 0.5 * m_threshold) / m_threshold;
nph(iy, ix) = nph(iy, ix) < 0 ? 0 : nph(iy, ix);
rest(iy, ix) = val - nph(iy, ix) * m_threshold;
}
}
// iterate over frame pixels
for (int iy = 0; iy < frame.shape(0); iy++) {
for (int ix = 0; ix < frame.shape(1); ix++) {
eventMask[iy][ix] = PEDESTAL;
// initialize max and total
FRAME_TYPE max = std::numeric_limits<FRAME_TYPE>::min();
long double total = 0;
if (rest(iy, ix) <= 0.25 * m_threshold) {
pedestal.push(iy, ix, frame(iy, ix));
continue;
}
eventMask[iy][ix] = NEIGHBOUR;
// iterate over cluster pixels around the current pixel (ix,iy)
for (short ir = -(m_cluster_sizeY / 2);
ir < (m_cluster_sizeY / 2) + 1; ir++) {
for (short ic = -(m_cluster_sizeX / 2);
ic < (m_cluster_sizeX / 2) + 1; ic++) {
if (ix + ic >= 0 && ix + ic < frame.shape(1) &&
iy + ir >= 0 && iy + ir < frame.shape(0)) {
auto val = frame(iy + ir, ix + ic) -
pedestal.mean(iy + ir, ix + ic);
total += val;
if (val > max) {
max = val;
}
}
}
}
auto rms = pedestal.std(iy, ix);
if (m_nSigma == 0) {
tthr = m_threshold;
tthr1 = m_threshold;
tthr2 = m_threshold;
} else {
tthr = m_nSigma * rms;
tthr1 = m_nSigma * rms * c3;
tthr2 = m_nSigma * rms * c2;
if (m_threshold > 2 * tthr)
tthr = m_threshold - tthr;
if (m_threshold > 2 * tthr1)
tthr1 = tthr - tthr1;
if (m_threshold > 2 * tthr2)
tthr2 = tthr - tthr2;
}
if (total > tthr1 || max > tthr) {
eventMask[iy][ix] = PHOTON;
nph(iy, ix) += 1;
rest(iy, ix) -= m_threshold;
} else {
pedestal.push(iy, ix, frame(iy, ix));
continue;
}
if (eventMask[iy][ix] == PHOTON &&
frame(iy, ix) - pedestal.mean(iy, ix) >= max) {
eventMask[iy][ix] = PHOTON_MAX;
DynamicCluster cluster(m_cluster_sizeX, m_cluster_sizeY,
Dtype(typeid(FRAME_TYPE)));
cluster.x = ix;
cluster.y = iy;
short i = 0;
for (short ir = -(m_cluster_sizeY / 2);
ir < (m_cluster_sizeY / 2) + 1; ir++) {
for (short ic = -(m_cluster_sizeX / 2);
ic < (m_cluster_sizeX / 2) + 1; ic++) {
if (ix + ic >= 0 && ix + ic < frame.shape(1) &&
iy + ir >= 0 && iy + ir < frame.shape(0)) {
auto tmp = frame(iy + ir, ix + ic) -
pedestal.mean(iy + ir, ix + ic);
cluster.set<FRAME_TYPE>(i, tmp);
i++;
}
}
}
clusters.push_back(cluster);
}
}
}
return clusters;
} }
}; };
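The single-threaded flow implied by the new interface (right-hand column) above, as a minimal sketch with invented image size and frame views: pedestal frames are pushed first, data frames are then searched, and the accumulated clusters are stolen from the finder.

```cpp
#include "aare/ClusterFinder.hpp"

void find(aare::NDView<uint16_t, 2> dark, aare::NDView<uint16_t, 2> data) {
    aare::ClusterFinder<uint16_t, double, int32_t> cf({512, 1024}, {3, 3});

    cf.push_pedestal_frame(dark);               // build up the pedestal estimate
    cf.find_clusters(data, /*frame_number=*/1); // search one data frame

    auto clusters = cf.steal_clusters(); // ClusterVector<int32_t>
    auto sums = clusters.sum();          // summed signal per cluster
}
```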


@ -0,0 +1,268 @@
#pragma once
#include <atomic>
#include <cstdint>
#include <memory>
#include <thread>
#include <vector>
#include "aare/ClusterFinder.hpp"
#include "aare/NDArray.hpp"
#include "aare/ProducerConsumerQueue.hpp"
namespace aare {
enum class FrameType {
DATA,
PEDESTAL,
};
struct FrameWrapper {
FrameType type;
uint64_t frame_number;
NDArray<uint16_t, 2> data;
};
/**
* @brief ClusterFinderMT is a multi-threaded version of ClusterFinder. It uses
* a producer-consumer queue to distribute the frames to the threads. The
* clusters are collected in a single output queue.
* @tparam FRAME_TYPE type of the frame data
* @tparam PEDESTAL_TYPE type of the pedestal data
* @tparam CT type of the cluster data
*/
template <typename FRAME_TYPE = uint16_t, typename PEDESTAL_TYPE = double,
typename CT = int32_t>
class ClusterFinderMT {
size_t m_current_thread{0};
size_t m_n_threads{0};
using Finder = ClusterFinder<FRAME_TYPE, PEDESTAL_TYPE, CT>;
using InputQueue = ProducerConsumerQueue<FrameWrapper>;
using OutputQueue = ProducerConsumerQueue<ClusterVector<int>>;
std::vector<std::unique_ptr<InputQueue>> m_input_queues;
std::vector<std::unique_ptr<OutputQueue>> m_output_queues;
OutputQueue m_sink{1000}; // All clusters go into this queue
std::vector<std::unique_ptr<Finder>> m_cluster_finders;
std::vector<std::thread> m_threads;
std::thread m_collect_thread;
std::chrono::milliseconds m_default_wait{1};
std::atomic<bool> m_stop_requested{false};
std::atomic<bool> m_processing_threads_stopped{true};
/**
* @brief Function called by the processing threads. It reads the frames
* from the input queue and processes them.
*/
void process(int thread_id) {
auto cf = m_cluster_finders[thread_id].get();
auto q = m_input_queues[thread_id].get();
bool realloc_same_capacity = true;
while (!m_stop_requested || !q->isEmpty()) {
if (FrameWrapper *frame = q->frontPtr(); frame != nullptr) {
switch (frame->type) {
case FrameType::DATA:
cf->find_clusters(frame->data.view(), frame->frame_number);
m_output_queues[thread_id]->write(cf->steal_clusters(realloc_same_capacity));
break;
case FrameType::PEDESTAL:
m_cluster_finders[thread_id]->push_pedestal_frame(
frame->data.view());
break;
}
// frame is processed now discard it
m_input_queues[thread_id]->popFront();
} else {
std::this_thread::sleep_for(m_default_wait);
}
}
}
/**
* @brief Collect all the clusters from the output queues and write them to
* the sink
*/
void collect() {
bool empty = true;
while (!m_stop_requested || !empty || !m_processing_threads_stopped) {
empty = true;
for (auto &queue : m_output_queues) {
if (!queue->isEmpty()) {
while (!m_sink.write(std::move(*queue->frontPtr()))) {
std::this_thread::sleep_for(m_default_wait);
}
queue->popFront();
empty = false;
}
}
}
}
public:
/**
* @brief Construct a new ClusterFinderMT object
* @param image_size size of the image
* @param cluster_size size of the cluster
* @param nSigma number of sigma above the pedestal to consider a photon
* @param capacity initial capacity of the cluster vector. Should match the
* expected number of clusters per frame.
* @param n_threads number of threads to use
*/
ClusterFinderMT(Shape<2> image_size, Shape<2> cluster_size,
PEDESTAL_TYPE nSigma = 5.0, size_t capacity = 2000,
size_t n_threads = 3)
: m_n_threads(n_threads) {
for (size_t i = 0; i < n_threads; i++) {
m_cluster_finders.push_back(
std::make_unique<ClusterFinder<FRAME_TYPE, PEDESTAL_TYPE, CT>>(
image_size, cluster_size, nSigma, capacity));
}
for (size_t i = 0; i < n_threads; i++) {
m_input_queues.emplace_back(std::make_unique<InputQueue>(200));
m_output_queues.emplace_back(std::make_unique<OutputQueue>(200));
}
//TODO! Should we start automatically?
start();
}
/**
* @brief Return the sink queue where all the clusters are collected
* @warning You need to empty this queue otherwise the cluster finder will wait forever
*/
ProducerConsumerQueue<ClusterVector<int>> *sink() { return &m_sink; }
/**
* @brief Start all processing threads
*/
void start() {
m_processing_threads_stopped = false;
m_stop_requested = false;
for (size_t i = 0; i < m_n_threads; i++) {
m_threads.push_back(
std::thread(&ClusterFinderMT::process, this, i));
}
m_collect_thread = std::thread(&ClusterFinderMT::collect, this);
}
/**
* @brief Stop all processing threads
*/
void stop() {
m_stop_requested = true;
for (auto &thread : m_threads) {
thread.join();
}
m_threads.clear();
m_processing_threads_stopped = true;
m_collect_thread.join();
}
/**
* @brief Wait for all the queues to be empty. Mostly used for timing tests.
*/
void sync() {
for (auto &q : m_input_queues) {
while (!q->isEmpty()) {
std::this_thread::sleep_for(m_default_wait);
}
}
for (auto &q : m_output_queues) {
while (!q->isEmpty()) {
std::this_thread::sleep_for(m_default_wait);
}
}
while (!m_sink.isEmpty()) {
std::this_thread::sleep_for(m_default_wait);
}
}
/**
* @brief Push a pedestal frame to all the cluster finders. The frame is
* expected to be dark. No photon finding is done, just a pedestal update.
*/
void push_pedestal_frame(NDView<FRAME_TYPE, 2> frame) {
FrameWrapper fw{FrameType::PEDESTAL, 0,
NDArray(frame)}; // TODO! copies the data!
for (auto &queue : m_input_queues) {
while (!queue->write(fw)) {
std::this_thread::sleep_for(m_default_wait);
}
}
}
/**
* @brief Push the frame to the queue of the next available thread. Function
* returns once the frame is in a queue.
* @note Spin locks with a default wait if the queue is full.
*/
void find_clusters(NDView<FRAME_TYPE, 2> frame, uint64_t frame_number = 0) {
FrameWrapper fw{FrameType::DATA, frame_number,
NDArray(frame)}; // TODO! copies the data!
while (!m_input_queues[m_current_thread % m_n_threads]->write(fw)) {
std::this_thread::sleep_for(m_default_wait);
}
m_current_thread++;
}
void clear_pedestal() {
if (!m_processing_threads_stopped) {
throw std::runtime_error("ClusterFinderMT is still running");
}
for (auto &cf : m_cluster_finders) {
cf->clear_pedestal();
}
}
/**
* @brief Return the pedestal currently used by the cluster finder
* @param thread_index index of the thread
*/
auto pedestal(size_t thread_index = 0) {
if (m_cluster_finders.empty()) {
throw std::runtime_error("No cluster finders available");
}
if (!m_processing_threads_stopped) {
throw std::runtime_error("ClusterFinderMT is still running");
}
if (thread_index >= m_cluster_finders.size()) {
throw std::runtime_error("Thread index out of range");
}
return m_cluster_finders[thread_index]->pedestal();
}
/**
* @brief Return the noise currently used by the cluster finder
* @param thread_index index of the thread
*/
auto noise(size_t thread_index = 0) {
if (m_cluster_finders.empty()) {
throw std::runtime_error("No cluster finders available");
}
if (!m_processing_threads_stopped) {
throw std::runtime_error("ClusterFinderMT is still running");
}
if (thread_index >= m_cluster_finders.size()) {
throw std::runtime_error("Thread index out of range");
}
return m_cluster_finders[thread_index]->noise();
}
// void push(FrameWrapper&& frame) {
// //TODO! need to loop until we are successful
// auto rc = m_input_queue.write(std::move(frame));
// fmt::print("pushed frame {}\n", rc);
// }
};
} // namespace aare
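A sketch of the pedestal workflow with the multi-threaded finder (dark-frame source and image size invented): dark frames only update the pedestal, sync() waits for the queues to drain, and pedestal()/noise() can be inspected once the threads are stopped.

```cpp
#include "aare/ClusterFinderMT.hpp"

void calibrate(std::vector<aare::NDArray<uint16_t, 2>> &dark_frames) {
    aare::ClusterFinderMT<uint16_t, double, int32_t> finder({512, 1024}, {3, 3},
                                                            /*nSigma=*/5.0);

    for (auto &f : dark_frames)
        finder.push_pedestal_frame(f.view()); // no photon finding, pedestal only

    finder.sync(); // wait until all input queues are empty
    finder.stop(); // pedestal()/noise() require the threads to be stopped

    auto ped = finder.pedestal(0); // pedestal estimate held by thread 0
    auto rms = finder.noise(0);    // corresponding noise (std) map
}
```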


@ -0,0 +1,275 @@
#pragma once
#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>
#include <numeric>
#include <vector>
#include <fmt/core.h>
namespace aare {
/**
* @brief ClusterVector is a container for clusters of various sizes. It uses a
* contiguous memory buffer to store the clusters. It is templated on the data
* type and the coordinate type of the clusters.
* @note push_back can invalidate pointers to elements in the container
* @warning ClusterVector is currently move only to catch unintended copies, but
* this might change since there are probably use cases where copying is needed.
* @tparam T data type of the pixels in the cluster
* @tparam CoordType data type of the x and y coordinates of the cluster
* (normally int16_t)
*/
template <typename T, typename CoordType = int16_t> class ClusterVector {
using value_type = T;
size_t m_cluster_size_x;
size_t m_cluster_size_y;
std::byte *m_data{};
size_t m_size{0};
size_t m_capacity;
uint64_t m_frame_number{0}; // TODO! Check frame number size and type
/*
Format string used in the python bindings to create a numpy
array from the buffer
= - native byte order
h - short
d - double
i - int
*/
constexpr static char m_fmt_base[] = "=h:x:\nh:y:\n({},{}){}:data:";
public:
/**
* @brief Construct a new ClusterVector object
* @param cluster_size_x size of the cluster in x direction
* @param cluster_size_y size of the cluster in y direction
* @param capacity initial capacity of the buffer in number of clusters
* @param frame_number frame number of the clusters. Default is 0, which is
* also used to indicate that the clusters come from many frames
*/
ClusterVector(size_t cluster_size_x = 3, size_t cluster_size_y = 3,
size_t capacity = 1024, uint64_t frame_number = 0)
: m_cluster_size_x(cluster_size_x), m_cluster_size_y(cluster_size_y),
m_capacity(capacity), m_frame_number(frame_number) {
allocate_buffer(capacity);
}
~ClusterVector() { delete[] m_data; }
// Move constructor
ClusterVector(ClusterVector &&other) noexcept
: m_cluster_size_x(other.m_cluster_size_x),
m_cluster_size_y(other.m_cluster_size_y), m_data(other.m_data),
m_size(other.m_size), m_capacity(other.m_capacity),
m_frame_number(other.m_frame_number) {
other.m_data = nullptr;
other.m_size = 0;
other.m_capacity = 0;
}
// Move assignment operator
ClusterVector &operator=(ClusterVector &&other) noexcept {
if (this != &other) {
delete[] m_data;
m_cluster_size_x = other.m_cluster_size_x;
m_cluster_size_y = other.m_cluster_size_y;
m_data = other.m_data;
m_size = other.m_size;
m_capacity = other.m_capacity;
m_frame_number = other.m_frame_number;
other.m_data = nullptr;
other.m_size = 0;
other.m_capacity = 0;
other.m_frame_number = 0;
}
return *this;
}
/**
* @brief Reserve space for at least capacity clusters
* @param capacity number of clusters to reserve space for
* @note If capacity is less than the current capacity, the function does
* nothing.
*/
void reserve(size_t capacity) {
if (capacity > m_capacity) {
allocate_buffer(capacity);
}
}
/**
* @brief Add a cluster to the vector
* @param x x-coordinate of the cluster
* @param y y-coordinate of the cluster
* @param data pointer to the data of the cluster
* @warning The data pointer must point to a buffer of size cluster_size_x *
* cluster_size_y * sizeof(T)
*/
void push_back(CoordType x, CoordType y, const std::byte *data) {
if (m_size == m_capacity) {
allocate_buffer(m_capacity * 2);
}
std::byte *ptr = element_ptr(m_size);
*reinterpret_cast<CoordType *>(ptr) = x;
ptr += sizeof(CoordType);
*reinterpret_cast<CoordType *>(ptr) = y;
ptr += sizeof(CoordType);
std::copy(data, data + m_cluster_size_x * m_cluster_size_y * sizeof(T),
ptr);
m_size++;
}
ClusterVector &operator+=(const ClusterVector &other) {
if (m_size + other.m_size > m_capacity) {
allocate_buffer(m_capacity + other.m_size);
}
std::copy(other.m_data, other.m_data + other.m_size * item_size(),
m_data + m_size * item_size());
m_size += other.m_size;
return *this;
}
/**
* @brief Sum the pixels in each cluster
* @return std::vector<T> vector of sums for each cluster
*/
std::vector<T> sum() {
std::vector<T> sums(m_size);
const size_t stride = item_size();
const size_t n_pixels = m_cluster_size_x * m_cluster_size_y;
std::byte *ptr = m_data + 2 * sizeof(CoordType); // skip x and y
for (size_t i = 0; i < m_size; i++) {
sums[i] =
std::accumulate(reinterpret_cast<T *>(ptr),
reinterpret_cast<T *>(ptr) + n_pixels, T{});
ptr += stride;
}
return sums;
}
/**
* @brief Return the maximum sum of the 2x2 subclusters in each cluster
* @return std::vector<T> vector of sums for each cluster
* @throws std::runtime_error if the cluster size is not 3x3
* @warning Only 3x3 clusters are supported for the 2x2 sum.
*/
std::vector<T> sum_2x2() {
std::vector<T> sums(m_size);
const size_t stride = item_size();
if (m_cluster_size_x != 3 || m_cluster_size_y != 3) {
throw std::runtime_error(
"Only 3x3 clusters are supported for the 2x2 sum.");
}
std::byte *ptr = m_data + 2 * sizeof(CoordType); // skip x and y
for (size_t i = 0; i < m_size; i++) {
std::array<T, 4> total;
auto T_ptr = reinterpret_cast<T *>(ptr);
total[0] = T_ptr[0] + T_ptr[1] + T_ptr[3] + T_ptr[4];
total[1] = T_ptr[1] + T_ptr[2] + T_ptr[4] + T_ptr[5];
total[2] = T_ptr[3] + T_ptr[4] + T_ptr[6] + T_ptr[7];
total[3] = T_ptr[4] + T_ptr[5] + T_ptr[7] + T_ptr[8];
sums[i] = *std::max_element(total.begin(), total.end());
ptr += stride;
}
return sums;
}
/**
* @brief Return the number of clusters in the vector
*/
size_t size() const { return m_size; }
/**
* @brief Return the capacity of the buffer in number of clusters. This is
* the number of clusters that can be stored in the current buffer without
* reallocation.
*/
size_t capacity() const { return m_capacity; }
/**
* @brief Return the size in bytes of a single cluster
*/
size_t item_size() const {
return 2 * sizeof(CoordType) +
m_cluster_size_x * m_cluster_size_y * sizeof(T);
}
/**
* @brief Return the offset in bytes for the i-th cluster
*/
size_t element_offset(size_t i) const { return item_size() * i; }
/**
* @brief Return a pointer to the i-th cluster
*/
std::byte *element_ptr(size_t i) { return m_data + element_offset(i); }
/**
* @brief Return a pointer to the i-th cluster
*/
const std::byte *element_ptr(size_t i) const {
return m_data + element_offset(i);
}
size_t cluster_size_x() const { return m_cluster_size_x; }
size_t cluster_size_y() const { return m_cluster_size_y; }
std::byte *data() { return m_data; }
std::byte const *data() const { return m_data; }
/**
* @brief Return a reference to the i-th cluster casted to type V
* @tparam V type of the cluster
*/
template <typename V> V &at(size_t i) {
return *reinterpret_cast<V *>(element_ptr(i));
}
const std::string_view fmt_base() const {
// TODO! how do we match on coord_t?
return m_fmt_base;
}
/**
* @brief Return the frame number of the clusters. 0 is used to indicate
* that the clusters come from many frames
*/
uint64_t frame_number() const { return m_frame_number; }
void set_frame_number(uint64_t frame_number) {
m_frame_number = frame_number;
}
/**
* @brief Resize the vector to contain new_size clusters. If new_size is
* greater than the current capacity, a new buffer is allocated. If the size
* is smaller no memory is freed, size is just updated.
* @param new_size new size of the vector
* @warning The additional clusters are not initialized
*/
void resize(size_t new_size) {
// TODO! Should we initialize the new clusters?
if (new_size > m_capacity) {
allocate_buffer(new_size);
}
m_size = new_size;
}
private:
void allocate_buffer(size_t new_capacity) {
size_t num_bytes = item_size() * new_capacity;
std::byte *new_data = new std::byte[num_bytes]{};
std::copy(m_data, m_data + item_size() * m_size, new_data);
delete[] m_data;
m_data = new_data;
m_capacity = new_capacity;
}
};
} // namespace aare
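A small sketch of filling and reading a ClusterVector directly, using made-up coordinates and pixel values; note that push_back takes the pixel data as a raw std::byte pointer:

```cpp
#include "aare/ClusterVector.hpp"
#include <array>
#include <cstddef>

int main() {
    aare::ClusterVector<int32_t> cv(3, 3, /*capacity=*/100, /*frame_number=*/7);

    std::array<int32_t, 9> pixels{0, 1, 0, 1, 5, 1, 0, 1, 0};
    cv.push_back(10, 20, reinterpret_cast<const std::byte *>(pixels.data()));

    auto sums = cv.sum();     // total signal per cluster
    auto best = cv.sum_2x2(); // largest 2x2 sub-cluster sum (3x3 clusters only)

    // 2 int16_t coordinates + 9 int32_t pixels = 40 bytes per stored cluster
    size_t bytes_used = cv.item_size() * cv.size();
}
```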


@@ -36,6 +36,8 @@ class File {
     File(File &&other) noexcept;
     File& operator=(File &&other) noexcept;
     ~File() = default;

+    // void close(); //!< close the file

     Frame read_frame(); //!< read one frame from the file at the current position
     Frame read_frame(size_t frame_index); //!< read one frame at the position given by frame number
@@ -44,6 +46,7 @@ class File {
     void read_into(std::byte *image_buf);
     void read_into(std::byte *image_buf, size_t n_frames);

+    size_t frame_number(); //!< get the frame number at the current position
     size_t frame_number(size_t frame_index); //!< get the frame number at the given frame index
     size_t bytes_per_frame() const;
     size_t pixels_per_frame() const;

include/aare/Fit.hpp (new file)

@ -0,0 +1,92 @@
#pragma once
#include <cmath>
#include <fmt/core.h>
#include <vector>
#include "aare/NDArray.hpp"
namespace aare {
namespace func {
double gaus(const double x, const double *par);
NDArray<double, 1> gaus(NDView<double, 1> x, NDView<double, 1> par);
double pol1(const double x, const double *par);
NDArray<double, 1> pol1(NDView<double, 1> x, NDView<double, 1> par);
} // namespace func
/**
* @brief Estimate the initial parameters for a Gaussian fit
*/
std::array<double, 3> gaus_init_par(const NDView<double, 1> x, const NDView<double, 1> y);
std::array<double, 2> pol1_init_par(const NDView<double, 1> x, const NDView<double, 1> y);
static constexpr int DEFAULT_NUM_THREADS = 4;
/**
* @brief Fit a 1D Gaussian to data.
* @param x x values
* @param y y values
*/
NDArray<double, 1> fit_gaus(NDView<double, 1> x, NDView<double, 1> y);
/**
* @brief Fit a 1D Gaussian to each pixel. Data layout [row, col, values]
* @param x x values
* @param y y values, layout [row, col, values]
* @param n_threads number of threads to use
*/
NDArray<double, 3> fit_gaus(NDView<double, 1> x, NDView<double, 3> y,
int n_threads = DEFAULT_NUM_THREADS);
/**
* @brief Fit a 1D Gaussian with error estimates
* @param x x values
* @param y y values, layout [row, col, values]
* @param y_err error in y, layout [row, col, values]
* @param par_out output parameters
* @param par_err_out output error parameters
*/
void fit_gaus(NDView<double, 1> x, NDView<double, 1> y, NDView<double, 1> y_err,
NDView<double, 1> par_out, NDView<double, 1> par_err_out,
double& chi2);
/**
* @brief Fit a 1D Gaussian to each pixel with error estimates. Data layout
* [row, col, values]
* @param x x values
* @param y y values, layout [row, col, values]
* @param y_err error in y, layout [row, col, values]
* @param par_out output parameters, layout [row, col, values]
* @param par_err_out output parameter errors, layout [row, col, values]
* @param n_threads number of threads to use
*/
void fit_gaus(NDView<double, 1> x, NDView<double, 3> y, NDView<double, 3> y_err,
NDView<double, 3> par_out, NDView<double, 3> par_err_out, NDView<double, 2> chi2_out,
int n_threads = DEFAULT_NUM_THREADS
);
NDArray<double, 1> fit_pol1(NDView<double, 1> x, NDView<double, 1> y);
NDArray<double, 3> fit_pol1(NDView<double, 1> x, NDView<double, 3> y,
int n_threads = DEFAULT_NUM_THREADS);
void fit_pol1(NDView<double, 1> x, NDView<double, 1> y, NDView<double, 1> y_err,
NDView<double, 1> par_out, NDView<double, 1> par_err_out, double& chi2);
// TODO! not sure we need to offer the different version in C++
void fit_pol1(NDView<double, 1> x, NDView<double, 3> y, NDView<double, 3> y_err,
NDView<double, 3> par_out, NDView<double, 3> par_err_out,NDView<double, 2> chi2_out,
int n_threads = DEFAULT_NUM_THREADS);
} // namespace aare
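The declarations above cover both a single-spectrum fit and per-pixel fits over a [row, col, values] stack. A minimal sketch of the single-spectrum path, assuming only the interfaces shown in this diff (the NDView is built from a raw pointer and a shape, as done elsewhere in the bindings), could look like this:

```cpp
// Hedged sketch, not part of the diff: fit one Gaussian to a small spectrum.
#include "aare/Fit.hpp"
#include "aare/NDView.hpp"
#include <cmath>
#include <fmt/core.h>
#include <vector>

int main() {
    using namespace aare;

    // Build a noiseless Gaussian with A=100, mu=25, sigma=5 (made-up numbers)
    std::vector<double> xs(50), ys(50);
    for (size_t i = 0; i < xs.size(); ++i) {
        xs[i] = static_cast<double>(i);
        ys[i] = 100.0 * std::exp(-0.5 * std::pow((xs[i] - 25.0) / 5.0, 2));
    }

    NDView<double, 1> x(xs.data(), {static_cast<int64_t>(xs.size())});
    NDView<double, 1> y(ys.data(), {static_cast<int64_t>(ys.size())});

    // Returns the three Gaussian parameters (assumed order: amplitude, mean, sigma)
    auto par = fit_gaus(x, y);
    fmt::print("fitted parameters: {} {} {}\n", par(0), par(1), par(2));
}
```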

View File

@ -69,6 +69,11 @@ class NDArray : public ArrayExpr<NDArray<T, Ndim>, Ndim> {
std::copy(v.begin(), v.end(), begin()); std::copy(v.begin(), v.end(), begin());
} }
template<size_t Size>
NDArray(const std::array<T, Size>& arr) : NDArray<T,1>({Size}) {
std::copy(arr.begin(), arr.end(), begin());
}
// Move constructor // Move constructor
NDArray(NDArray &&other) noexcept NDArray(NDArray &&other) noexcept
: shape_(other.shape_), strides_(c_strides<Ndim>(shape_)), : shape_(other.shape_), strides_(c_strides<Ndim>(shape_)),
@ -87,7 +92,7 @@ class NDArray : public ArrayExpr<NDArray<T, Ndim>, Ndim> {
// Conversion operator from array expression to array // Conversion operator from array expression to array
template <typename E> template <typename E>
NDArray(ArrayExpr<E, Ndim> &&expr) : NDArray(expr.shape()) { NDArray(ArrayExpr<E, Ndim> &&expr) : NDArray(expr.shape()) {
for (int i = 0; i < size_; ++i) { for (size_t i = 0; i < size_; ++i) {
data_[i] = expr[i]; data_[i] = expr[i];
} }
} }
@ -105,6 +110,20 @@ class NDArray : public ArrayExpr<NDArray<T, Ndim>, Ndim> {
NDArray &operator-=(const NDArray &other); NDArray &operator-=(const NDArray &other);
NDArray &operator*=(const NDArray &other); NDArray &operator*=(const NDArray &other);
//Write directly to the data array, or create a new one
template<size_t Size>
NDArray<T,1>& operator=(const std::array<T,Size> &other){
if(Size != size_){
delete[] data_;
size_ = Size;
data_ = new T[size_];
}
for (size_t i = 0; i < Size; ++i) {
data_[i] = other[i];
}
return *this;
}
// NDArray& operator/=(const NDArray& other); // NDArray& operator/=(const NDArray& other);
template <typename V> NDArray &operator/=(const NDArray<V, Ndim> &other) { template <typename V> NDArray &operator/=(const NDArray<V, Ndim> &other) {
@ -135,6 +154,11 @@ class NDArray : public ArrayExpr<NDArray<T, Ndim>, Ndim> {
NDArray &operator&=(const T & /*mask*/); NDArray &operator&=(const T & /*mask*/);
void sqrt() { void sqrt() {
for (int i = 0; i < size_; ++i) { for (int i = 0; i < size_; ++i) {
data_[i] = std::sqrt(data_[i]); data_[i] = std::sqrt(data_[i]);
@ -159,11 +183,11 @@ class NDArray : public ArrayExpr<NDArray<T, Ndim>, Ndim> {
} }
// TODO! is int the right type for index? // TODO! is int the right type for index?
T &operator()(int i) { return data_[i]; } T &operator()(int64_t i) { return data_[i]; }
const T &operator()(int i) const { return data_[i]; } const T &operator()(int64_t i) const { return data_[i]; }
T &operator[](int i) { return data_[i]; } T &operator[](int64_t i) { return data_[i]; }
const T &operator[](int i) const { return data_[i]; } const T &operator[](int64_t i) const { return data_[i]; }
T *data() { return data_; } T *data() { return data_; }
std::byte *buffer() { return reinterpret_cast<std::byte *>(data_); } std::byte *buffer() { return reinterpret_cast<std::byte *>(data_); }
@ -318,6 +342,9 @@ NDArray<T, Ndim> &NDArray<T, Ndim>::operator+=(const T &value) {
return *this; return *this;
} }
template <typename T, int64_t Ndim> template <typename T, int64_t Ndim>
NDArray<T, Ndim> NDArray<T, Ndim>::operator+(const T &value) { NDArray<T, Ndim> NDArray<T, Ndim>::operator+(const T &value) {
NDArray result = *this; NDArray result = *this;
@ -418,4 +445,6 @@ NDArray<T, Ndim> load(const std::string &pathname,
return img; return img;
} }
} // namespace aare } // namespace aare

View File

@ -1,5 +1,5 @@
#pragma once #pragma once
#include "aare/defs.hpp"
#include "aare/ArrayExpr.hpp" #include "aare/ArrayExpr.hpp"
#include <algorithm> #include <algorithm>
@ -99,6 +99,15 @@ template <typename T, int64_t Ndim = 2> class NDView : public ArrayExpr<NDView<T
NDView &operator/=(const NDView &other) { return elemenwise(other, std::divides<T>()); } NDView &operator/=(const NDView &other) { return elemenwise(other, std::divides<T>()); }
template<size_t Size>
NDView& operator=(const std::array<T, Size> &arr) {
if(size() != arr.size())
throw std::runtime_error(LOCATION + "Array and NDView size mismatch");
std::copy(arr.begin(), arr.end(), begin());
return *this;
}
NDView &operator=(const T val) { NDView &operator=(const T val) {
for (auto it = begin(); it != end(); ++it) for (auto it = begin(); it != end(); ++it)
*it = val; *it = val;
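Both of the std::array additions above, the NDArray constructor and assignment in the previous hunk and the size-checked NDView assignment here, are small conveniences for feeding fixed-size parameter sets into the containers. A hedged sketch of how they might be used:

```cpp
// Hedged sketch of the std::array interop added above (not part of the diff)
#include "aare/NDArray.hpp"
#include "aare/NDView.hpp"
#include <array>

int main() {
    using namespace aare;

    // Construct a 1D NDArray directly from a std::array
    std::array<double, 3> par{1.0, 2.0, 3.0};
    NDArray<double, 1> a(par);

    // Assigning a std::array reuses the buffer when the sizes match,
    // otherwise the data array is reallocated
    a = std::array<double, 3>{4.0, 5.0, 6.0};

    // The NDView assignment is size-checked and throws on mismatch
    double buf[3] = {};
    NDView<double, 1> v(buf, {3});
    v = std::array<double, 3>{7.0, 8.0, 9.0};
}
```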

View File

@ -18,34 +18,48 @@ template <typename SUM_TYPE = double> class Pedestal {
uint32_t m_samples; uint32_t m_samples;
NDArray<uint32_t, 2> m_cur_samples; NDArray<uint32_t, 2> m_cur_samples;
//TODO! in case of int needs to be changed to uint64_t
NDArray<SUM_TYPE, 2> m_sum; NDArray<SUM_TYPE, 2> m_sum;
NDArray<SUM_TYPE, 2> m_sum2; NDArray<SUM_TYPE, 2> m_sum2;
//Cache mean since it is used over and over in the ClusterFinder
//This optimization is related to the access pattern of the ClusterFinder
//Relies on having more reads than pushes to the pedestal
NDArray<SUM_TYPE, 2> m_mean;
public: public:
Pedestal(uint32_t rows, uint32_t cols, uint32_t n_samples = 1000) Pedestal(uint32_t rows, uint32_t cols, uint32_t n_samples = 1000)
: m_rows(rows), m_cols(cols), m_samples(n_samples), : m_rows(rows), m_cols(cols), m_samples(n_samples),
m_cur_samples(NDArray<uint32_t, 2>({rows, cols}, 0)), m_cur_samples(NDArray<uint32_t, 2>({rows, cols}, 0)),
m_sum(NDArray<SUM_TYPE, 2>({rows, cols})), m_sum(NDArray<SUM_TYPE, 2>({rows, cols})),
m_sum2(NDArray<SUM_TYPE, 2>({rows, cols})) { m_sum2(NDArray<SUM_TYPE, 2>({rows, cols})),
m_mean(NDArray<SUM_TYPE, 2>({rows, cols})) {
assert(rows > 0 && cols > 0 && n_samples > 0); assert(rows > 0 && cols > 0 && n_samples > 0);
m_sum = 0; m_sum = 0;
m_sum2 = 0; m_sum2 = 0;
m_mean = 0;
} }
~Pedestal() = default; ~Pedestal() = default;
NDArray<SUM_TYPE, 2> mean() { NDArray<SUM_TYPE, 2> mean() {
NDArray<SUM_TYPE, 2> mean_array({m_rows, m_cols}); return m_mean;
for (uint32_t i = 0; i < m_rows * m_cols; i++) {
mean_array(i / m_cols, i % m_cols) = mean(i / m_cols, i % m_cols);
}
return mean_array;
} }
SUM_TYPE mean(const uint32_t row, const uint32_t col) const { SUM_TYPE mean(const uint32_t row, const uint32_t col) const {
return m_mean(row, col);
}
SUM_TYPE std(const uint32_t row, const uint32_t col) const {
return std::sqrt(variance(row, col));
}
SUM_TYPE variance(const uint32_t row, const uint32_t col) const {
if (m_cur_samples(row, col) == 0) { if (m_cur_samples(row, col) == 0) {
return 0.0; return 0.0;
} }
return m_sum(row, col) / m_cur_samples(row, col); return m_sum2(row, col) / m_cur_samples(row, col) -
mean(row, col) * mean(row, col);
} }
NDArray<SUM_TYPE, 2> variance() { NDArray<SUM_TYPE, 2> variance() {
@ -57,13 +71,7 @@ template <typename SUM_TYPE = double> class Pedestal {
return variance_array; return variance_array;
} }
SUM_TYPE variance(const uint32_t row, const uint32_t col) const {
if (m_cur_samples(row, col) == 0) {
return 0.0;
}
return m_sum2(row, col) / m_cur_samples(row, col) -
mean(row, col) * mean(row, col);
}
NDArray<SUM_TYPE, 2> std() { NDArray<SUM_TYPE, 2> std() {
NDArray<SUM_TYPE, 2> standard_deviation_array({m_rows, m_cols}); NDArray<SUM_TYPE, 2> standard_deviation_array({m_rows, m_cols});
@ -75,14 +83,13 @@ template <typename SUM_TYPE = double> class Pedestal {
return standard_deviation_array; return standard_deviation_array;
} }
SUM_TYPE std(const uint32_t row, const uint32_t col) const {
return std::sqrt(variance(row, col));
}
void clear() { void clear() {
for (uint32_t i = 0; i < m_rows * m_cols; i++) { m_sum = 0;
clear(i / m_cols, i % m_cols); m_sum2 = 0;
} m_cur_samples = 0;
m_mean = 0;
} }
@ -91,8 +98,11 @@ template <typename SUM_TYPE = double> class Pedestal {
m_sum(row, col) = 0; m_sum(row, col) = 0;
m_sum2(row, col) = 0; m_sum2(row, col) = 0;
m_cur_samples(row, col) = 0; m_cur_samples(row, col) = 0;
m_mean(row, col) = 0;
} }
// frame level operations
template <typename T> void push(NDView<T, 2> frame) { template <typename T> void push(NDView<T, 2> frame) {
assert(frame.size() == m_rows * m_cols); assert(frame.size() == m_rows * m_cols);
@ -102,17 +112,37 @@ template <typename SUM_TYPE = double> class Pedestal {
"Frame shape does not match pedestal shape"); "Frame shape does not match pedestal shape");
} }
for (uint32_t row = 0; row < m_rows; row++) { for (size_t row = 0; row < m_rows; row++) {
for (uint32_t col = 0; col < m_cols; col++) { for (size_t col = 0; col < m_cols; col++) {
push<T>(row, col, frame(row, col)); push<T>(row, col, frame(row, col));
} }
} }
// // TODO: test the effect of #pragma omp parallel for
// for (uint32_t index = 0; index < m_rows * m_cols; index++) {
// push<T>(index / m_cols, index % m_cols, frame(index));
// }
} }
/**
* Push but don't update the cached mean. Speeds up the process
* when initializing the pedestal.
*
*/
template <typename T> void push_no_update(NDView<T, 2> frame) {
assert(frame.size() == m_rows * m_cols);
// TODO! move away from m_rows, m_cols
if (frame.shape() != std::array<int64_t, 2>{m_rows, m_cols}) {
throw std::runtime_error(
"Frame shape does not match pedestal shape");
}
for (size_t row = 0; row < m_rows; row++) {
for (size_t col = 0; col < m_cols; col++) {
push_no_update<T>(row, col, frame(row, col));
}
}
}
template <typename T> void push(Frame &frame) { template <typename T> void push(Frame &frame) {
assert(frame.rows() == static_cast<size_t>(m_rows) && assert(frame.rows() == static_cast<size_t>(m_rows) &&
frame.cols() == static_cast<size_t>(m_cols)); frame.cols() == static_cast<size_t>(m_cols));
@ -132,18 +162,48 @@ template <typename SUM_TYPE = double> class Pedestal {
template <typename T> template <typename T>
void push(const uint32_t row, const uint32_t col, const T val_) { void push(const uint32_t row, const uint32_t col, const T val_) {
SUM_TYPE val = static_cast<SUM_TYPE>(val_); SUM_TYPE val = static_cast<SUM_TYPE>(val_);
const uint32_t idx = index(row, col); if (m_cur_samples(row, col) < m_samples) {
if (m_cur_samples(idx) < m_samples) { m_sum(row, col) += val;
m_sum(idx) += val; m_sum2(row, col) += val * val;
m_sum2(idx) += val * val; m_cur_samples(row, col)++;
m_cur_samples(idx)++;
} else { } else {
m_sum(idx) += val - m_sum(idx) / m_cur_samples(idx); m_sum(row, col) += val - m_sum(row, col) / m_samples;
m_sum2(idx) += val * val - m_sum2(idx) / m_cur_samples(idx); m_sum2(row, col) += val * val - m_sum2(row, col) / m_samples;
}
//Since we just did a push we know that m_cur_samples(row, col) is at least 1
m_mean(row, col) = m_sum(row, col) / m_cur_samples(row, col);
}
template <typename T>
void push_no_update(const uint32_t row, const uint32_t col, const T val_) {
SUM_TYPE val = static_cast<SUM_TYPE>(val_);
if (m_cur_samples(row, col) < m_samples) {
m_sum(row, col) += val;
m_sum2(row, col) += val * val;
m_cur_samples(row, col)++;
} else {
m_sum(row, col) += val - m_sum(row, col) / m_cur_samples(row, col);
m_sum2(row, col) += val * val - m_sum2(row, col) / m_cur_samples(row, col);
} }
} }
uint32_t index(const uint32_t row, const uint32_t col) const {
return row * m_cols + col; /**
}; * @brief Update the mean of the pedestal. This is used after having done
* push_no_update. It is not necessary to call this function after push.
*/
void update_mean(){
m_mean = m_sum / m_cur_samples;
}
template<typename T>
void push_fast(const uint32_t row, const uint32_t col, const T val_){
//Assume we reached the steady state where all pixels have
//m_samples samples
SUM_TYPE val = static_cast<SUM_TYPE>(val_);
m_sum(row, col) += val - m_sum(row, col) / m_samples;
m_sum2(row, col) += val * val - m_sum2(row, col) / m_samples;
m_mean(row, col) = m_sum(row, col) / m_samples;
}
}; };
} // namespace aare } // namespace aare
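The cached mean changes the recommended call pattern: while filling the pedestal there is no need to recompute the per-pixel mean on every push, and in the steady state the sums become running estimates updated on each sample. A hedged usage sketch, assuming the interface shown above:

```cpp
// Hedged usage sketch for the updated Pedestal (not part of the diff)
#include "aare/NDView.hpp"
#include "aare/Pedestal.hpp"
#include <cstdint>
#include <vector>

int main() {
    using namespace aare;
    constexpr int64_t rows = 4, cols = 4;

    std::vector<uint16_t> raw(rows * cols, 100);
    NDView<uint16_t, 2> frame(raw.data(), {rows, cols});

    Pedestal<double> pd(rows, cols, 1000);

    // Initialization: accumulate without touching the cached mean...
    for (int i = 0; i < 500; ++i)
        pd.push_no_update(frame);
    pd.update_mean(); // ...then refresh the cache once

    // Steady state: push() keeps the cached mean consistent on every call
    pd.push(frame);

    double m = pd.mean(0, 0);
    double s = pd.std(0, 0);
    (void)m;
    (void)s;
}
```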

View File

@ -7,6 +7,8 @@ namespace aare {
NDArray<ssize_t, 2> GenerateMoench03PixelMap(); NDArray<ssize_t, 2> GenerateMoench03PixelMap();
NDArray<ssize_t, 2> GenerateMoench05PixelMap(); NDArray<ssize_t, 2> GenerateMoench05PixelMap();
NDArray<ssize_t, 2> GenerateMoench05PixelMap1g();
NDArray<ssize_t, 2> GenerateMoench05PixelMapOld();
//Matterhorn02 //Matterhorn02
NDArray<ssize_t, 2>GenerateMH02SingleCounterPixelMap(); NDArray<ssize_t, 2>GenerateMH02SingleCounterPixelMap();

View File

@ -0,0 +1,203 @@
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// @author Bo Hu (bhu@fb.com)
// @author Jordan DeLong (delong.j@fb.com)
// Changes made by PSD Detector Group:
// Copied: Line 34 constexpr std::size_t hardware_destructive_interference_size = 128; from folly/lang/Align.h
// Changed extension to .hpp
// Changed namespace to aare
#pragma once
#include <atomic>
#include <cassert>
#include <cstdlib>
#include <memory>
#include <stdexcept>
#include <type_traits>
#include <utility>
constexpr std::size_t hardware_destructive_interference_size = 128;
namespace aare {
/*
* ProducerConsumerQueue is a one producer and one consumer queue
* without locks.
*/
template <class T> struct ProducerConsumerQueue {
typedef T value_type;
ProducerConsumerQueue(const ProducerConsumerQueue &) = delete;
ProducerConsumerQueue &operator=(const ProducerConsumerQueue &) = delete;
ProducerConsumerQueue(ProducerConsumerQueue &&other){
size_ = other.size_;
records_ = other.records_;
other.records_ = nullptr;
readIndex_ = other.readIndex_.load(std::memory_order_acquire);
writeIndex_ = other.writeIndex_.load(std::memory_order_acquire);
}
ProducerConsumerQueue &operator=(ProducerConsumerQueue &&other){
size_ = other.size_;
records_ = other.records_;
other.records_ = nullptr;
readIndex_ = other.readIndex_.load(std::memory_order_acquire);
writeIndex_ = other.writeIndex_.load(std::memory_order_acquire);
return *this;
}
ProducerConsumerQueue():ProducerConsumerQueue(2){};
// size must be >= 2.
//
// Also, note that the number of usable slots in the queue at any
// given time is actually (size-1), so if you start with an empty queue,
// isFull() will return true after size-1 insertions.
explicit ProducerConsumerQueue(uint32_t size)
: size_(size), records_(static_cast<T *>(std::malloc(sizeof(T) * size))), readIndex_(0), writeIndex_(0) {
assert(size >= 2);
if (!records_) {
throw std::bad_alloc();
}
}
~ProducerConsumerQueue() {
// We need to destruct anything that may still exist in our queue.
// (No real synchronization needed at destructor time: only one
// thread can be doing this.)
if (!std::is_trivially_destructible<T>::value) {
size_t readIndex = readIndex_;
size_t endIndex = writeIndex_;
while (readIndex != endIndex) {
records_[readIndex].~T();
if (++readIndex == size_) {
readIndex = 0;
}
}
}
std::free(records_);
}
template <class... Args> bool write(Args &&...recordArgs) {
auto const currentWrite = writeIndex_.load(std::memory_order_relaxed);
auto nextRecord = currentWrite + 1;
if (nextRecord == size_) {
nextRecord = 0;
}
if (nextRecord != readIndex_.load(std::memory_order_acquire)) {
new (&records_[currentWrite]) T(std::forward<Args>(recordArgs)...);
writeIndex_.store(nextRecord, std::memory_order_release);
return true;
}
// queue is full
return false;
}
// move (or copy) the value at the front of the queue to given variable
bool read(T &record) {
auto const currentRead = readIndex_.load(std::memory_order_relaxed);
if (currentRead == writeIndex_.load(std::memory_order_acquire)) {
// queue is empty
return false;
}
auto nextRecord = currentRead + 1;
if (nextRecord == size_) {
nextRecord = 0;
}
record = std::move(records_[currentRead]);
records_[currentRead].~T();
readIndex_.store(nextRecord, std::memory_order_release);
return true;
}
// pointer to the value at the front of the queue (for use in-place) or
// nullptr if empty.
T *frontPtr() {
auto const currentRead = readIndex_.load(std::memory_order_relaxed);
if (currentRead == writeIndex_.load(std::memory_order_acquire)) {
// queue is empty
return nullptr;
}
return &records_[currentRead];
}
// queue must not be empty
void popFront() {
auto const currentRead = readIndex_.load(std::memory_order_relaxed);
assert(currentRead != writeIndex_.load(std::memory_order_acquire));
auto nextRecord = currentRead + 1;
if (nextRecord == size_) {
nextRecord = 0;
}
records_[currentRead].~T();
readIndex_.store(nextRecord, std::memory_order_release);
}
bool isEmpty() const {
return readIndex_.load(std::memory_order_acquire) == writeIndex_.load(std::memory_order_acquire);
}
bool isFull() const {
auto nextRecord = writeIndex_.load(std::memory_order_acquire) + 1;
if (nextRecord == size_) {
nextRecord = 0;
}
if (nextRecord != readIndex_.load(std::memory_order_acquire)) {
return false;
}
// queue is full
return true;
}
// * If called by consumer, then true size may be more (because producer may
// be adding items concurrently).
// * If called by producer, then true size may be less (because consumer may
// be removing items concurrently).
// * It is undefined to call this from any other thread.
size_t sizeGuess() const {
int ret = writeIndex_.load(std::memory_order_acquire) - readIndex_.load(std::memory_order_acquire);
if (ret < 0) {
ret += size_;
}
return ret;
}
// maximum number of items in the queue.
size_t capacity() const { return size_ - 1; }
private:
using AtomicIndex = std::atomic<unsigned int>;
char pad0_[hardware_destructive_interference_size];
// const uint32_t size_;
uint32_t size_;
// T *const records_;
T* records_;
alignas(hardware_destructive_interference_size) AtomicIndex readIndex_;
alignas(hardware_destructive_interference_size) AtomicIndex writeIndex_;
char pad1_[hardware_destructive_interference_size - sizeof(AtomicIndex)];
};
} // namespace aare
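This is the folly single-producer/single-consumer ring buffer with the adjustments listed in the header comment. A hedged sketch of typical use follows; the include path is an assumption, since the file name is not visible in this view:

```cpp
// Hedged sketch: one producer thread and one consumer thread sharing the queue
#include "aare/ProducerConsumerQueue.hpp" // assumed header name
#include <cstdio>
#include <thread>

int main() {
    aare::ProducerConsumerQueue<int> q(1024); // capacity() == 1023 usable slots

    std::thread producer([&q] {
        for (int i = 0; i < 100; ++i)
            while (!q.write(i)) {
                // queue full: spin until the consumer catches up
            }
    });

    std::thread consumer([&q] {
        int value = 0;
        for (int received = 0; received < 100;)
            if (q.read(value))
                ++received;
    });

    producer.join();
    consumer.join();
    std::printf("all items passed through the queue\n");
}
```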

View File

@ -34,15 +34,19 @@ class RawFile : public FileInterface {
size_t n_subfile_parts{}; // d0,d1...dn size_t n_subfile_parts{}; // d0,d1...dn
//TODO! move to vector of SubFile instead of pointers //TODO! move to vector of SubFile instead of pointers
std::vector<std::vector<RawSubFile *>> subfiles; //subfiles[f0,f1...fn][d0,d1...dn] std::vector<std::vector<RawSubFile *>> subfiles; //subfiles[f0,f1...fn][d0,d1...dn]
std::vector<xy> positions; // std::vector<xy> positions;
std::vector<ModuleGeometry> m_module_pixel_0;
ModuleConfig cfg{0, 0}; ModuleConfig cfg{0, 0};
RawMasterFile m_master; RawMasterFile m_master;
size_t m_current_frame{}; size_t m_current_frame{};
size_t m_rows{};
size_t m_cols{}; // std::vector<ModuleGeometry> m_module_pixel_0;
// size_t m_rows{};
// size_t m_cols{};
DetectorGeometry m_geometry;
public: public:
/** /**
@ -111,11 +115,12 @@ class RawFile : public FileInterface {
*/ */
static DetectorHeader read_header(const std::filesystem::path &fname); static DetectorHeader read_header(const std::filesystem::path &fname);
void update_geometry_with_roi(); // void update_geometry_with_roi();
int find_number_of_subfiles(); int find_number_of_subfiles();
void open_subfiles(); void open_subfiles();
void find_geometry(); void find_geometry();
}; };
} // namespace aare } // namespace aare

View File

@ -14,6 +14,7 @@ namespace aare {
* @brief Implementation used in RawMasterFile to parse the file name * @brief Implementation used in RawMasterFile to parse the file name
*/ */
class RawFileNameComponents { class RawFileNameComponents {
bool m_old_scheme{false};
std::filesystem::path m_base_path{}; std::filesystem::path m_base_path{};
std::string m_base_name{}; std::string m_base_name{};
std::string m_ext{}; std::string m_ext{};
@ -35,6 +36,7 @@ class RawFileNameComponents {
const std::string &base_name() const; const std::string &base_name() const;
const std::string &ext() const; const std::string &ext() const;
int file_index() const; int file_index() const;
void set_old_scheme(bool old_scheme);
}; };
class ScanParameters { class ScanParameters {
@ -60,17 +62,6 @@ class ScanParameters {
}; };
struct ROI{
size_t xmin{};
size_t xmax{};
size_t ymin{};
size_t ymax{};
size_t height() const { return ymax - ymin; }
size_t width() const { return xmax - xmin; }
}__attribute__((packed));
/** /**
* @brief Class for parsing a master file either in our .json format or the old * @brief Class for parsing a master file either in our .json format or the old
* .raw format * .raw format
@ -88,10 +79,10 @@ class RawMasterFile {
size_t m_pixels_x{}; size_t m_pixels_x{};
size_t m_bitdepth{}; size_t m_bitdepth{};
xy m_geometry; xy m_geometry{};
size_t m_max_frames_per_file{}; size_t m_max_frames_per_file{};
uint32_t m_adc_mask{}; // uint32_t m_adc_mask{}; // TODO! implement reading
FrameDiscardPolicy m_frame_discard_policy{}; FrameDiscardPolicy m_frame_discard_policy{};
size_t m_frame_padding{}; size_t m_frame_padding{};

View File

@ -16,6 +16,7 @@ namespace aare {
class RawSubFile { class RawSubFile {
protected: protected:
std::ifstream m_file; std::ifstream m_file;
DetectorType m_detector_type;
size_t m_bitdepth; size_t m_bitdepth;
std::filesystem::path m_fname; std::filesystem::path m_fname;
size_t m_rows{}; size_t m_rows{};
@ -25,7 +26,7 @@ class RawSubFile {
uint32_t m_pos_row{}; uint32_t m_pos_row{};
uint32_t m_pos_col{}; uint32_t m_pos_col{};
DetectorType m_detector_type;
std::optional<NDArray<ssize_t, 2>> m_pixel_map; std::optional<NDArray<ssize_t, 2>> m_pixel_map;
public: public:
@ -65,6 +66,9 @@ class RawSubFile {
size_t pixels_per_frame() const { return m_rows * m_cols; } size_t pixels_per_frame() const { return m_rows * m_cols; }
size_t bytes_per_pixel() const { return m_bitdepth / 8; } size_t bytes_per_pixel() const { return m_bitdepth / 8; }
private:
template <typename T>
void read_with_map(std::byte *image_buf);
}; };

View File

@ -226,7 +226,7 @@ template <typename T> void VarClusterFinder<T>::single_pass(NDView<T, 2> img) {
template <typename T> void VarClusterFinder<T>::first_pass() { template <typename T> void VarClusterFinder<T>::first_pass() {
for (int i = 0; i < original_.size(); ++i) { for (size_t i = 0; i < original_.size(); ++i) {
if (use_noise_map) if (use_noise_map)
threshold_ = 5 * noiseMap(i); threshold_ = 5 * noiseMap(i);
binary_(i) = (original_(i) > threshold_); binary_(i) = (original_(i) > threshold_);
@ -250,17 +250,17 @@ template <typename T> void VarClusterFinder<T>::first_pass() {
template <typename T> void VarClusterFinder<T>::second_pass() { template <typename T> void VarClusterFinder<T>::second_pass() {
for (int64_t i = 0; i != labeled_.size(); ++i) { for (size_t i = 0; i != labeled_.size(); ++i) {
auto current_label = labeled_(i); auto cl = labeled_(i);
if (current_label != 0) { if (cl != 0) {
auto it = child.find(current_label); auto it = child.find(cl);
while (it != child.end()) { while (it != child.end()) {
current_label = it->second; cl = it->second;
it = child.find(current_label); it = child.find(cl);
// do this once before doing the second pass? // do this once before doing the second pass?
// all values point to the final one... // all values point to the final one...
} }
labeled_(i) = current_label; labeled_(i) = cl;
} }
} }
} }
@ -271,7 +271,7 @@ template <typename T> void VarClusterFinder<T>::store_clusters() {
// Do we always have monotonic increasing // Do we always have monotonic increasing
// labels? Then vector? // labels? Then vector?
// here the translation is label -> Hit // here the translation is label -> Hit
std::unordered_map<int, Hit> h_size; std::unordered_map<int, Hit> h_map;
for (int i = 0; i < shape_[0]; ++i) { for (int i = 0; i < shape_[0]; ++i) {
for (int j = 0; j < shape_[1]; ++j) { for (int j = 0; j < shape_[1]; ++j) {
if (labeled_(i, j) != 0 || false if (labeled_(i, j) != 0 || false
@ -280,7 +280,7 @@ template <typename T> void VarClusterFinder<T>::store_clusters() {
// (i+1 < shape_[0] and labeled_(i+1, j) != 0) or // (i+1 < shape_[0] and labeled_(i+1, j) != 0) or
// (j+1 < shape_[1] and labeled_(i, j+1) != 0) // (j+1 < shape_[1] and labeled_(i, j+1) != 0)
) { ) {
Hit &record = h_size[labeled_(i, j)]; Hit &record = h_map[labeled_(i, j)];
if (record.size < MAX_CLUSTER_SIZE) { if (record.size < MAX_CLUSTER_SIZE) {
record.rows[record.size] = i; record.rows[record.size] = i;
record.cols[record.size] = j; record.cols[record.size] = j;
@ -300,7 +300,7 @@ template <typename T> void VarClusterFinder<T>::store_clusters() {
} }
} }
for (const auto &h : h_size) for (const auto &h : h_map)
hits.push_back(h.second); hits.push_back(h.second);
} }

include/aare/decode.hpp Normal file
View File

@ -0,0 +1,13 @@
#pragma once
#include <cstdint>
#include <aare/NDView.hpp>
namespace aare {
uint16_t adc_sar_05_decode64to16(uint64_t input);
uint16_t adc_sar_04_decode64to16(uint64_t input);
void adc_sar_05_decode64to16(NDView<uint64_t, 2> input, NDView<uint16_t,2> output);
void adc_sar_04_decode64to16(NDView<uint64_t, 2> input, NDView<uint16_t,2> output);
} // namespace aare
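Both decoders come in a single-word and a bulk (NDView) flavour. A hedged sketch of the bulk path, with the NDViews constructed from raw pointers as done in the Python bindings:

```cpp
// Hedged sketch, not part of the diff: bulk-decode packed 64-bit ADC words
#include "aare/NDView.hpp"
#include "aare/decode.hpp"
#include <cstdint>
#include <vector>

int main() {
    using namespace aare;
    constexpr int64_t rows = 1, cols = 8;

    std::vector<uint64_t> packed(rows * cols, 0); // raw 64-bit samples
    std::vector<uint16_t> decoded(rows * cols, 0);

    NDView<uint64_t, 2> in(packed.data(), {rows, cols});
    NDView<uint16_t, 2> out(decoded.data(), {rows, cols});
    adc_sar_05_decode64to16(in, out);

    // Single-word overload
    uint16_t one = adc_sar_05_decode64to16(packed[0]);
    (void)one;
}
```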

View File

@ -47,7 +47,7 @@ class DynamicCluster {
int cluster_sizeY; int cluster_sizeY;
int16_t x; int16_t x;
int16_t y; int16_t y;
Dtype dt; Dtype dt; // 4 bytes
private: private:
std::byte *m_data; std::byte *m_data;
@ -93,7 +93,7 @@ class DynamicCluster {
(sizeof(T) == dt.bytes()) (sizeof(T) == dt.bytes())
? 0 ? 0
: throw std::invalid_argument("[ERROR] Type size mismatch"); : throw std::invalid_argument("[ERROR] Type size mismatch");
return memcpy(m_data + idx * dt.bytes(), &val, (size_t)dt.bytes()); return memcpy(m_data + idx * dt.bytes(), &val, dt.bytes());
} }
template <typename T> std::string to_string() const { template <typename T> std::string to_string() const {
@ -179,25 +179,67 @@ template <typename T> struct t_xy {
using xy = t_xy<uint32_t>; using xy = t_xy<uint32_t>;
/**
* @brief Class to hold the geometry of a module. Where pixel 0 is located and the size of the module
*/
struct ModuleGeometry{ struct ModuleGeometry{
int x{}; int origin_x{};
int y{}; int origin_y{};
int height{}; int height{};
int width{}; int width{};
int row_index{};
int col_index{};
}; };
/**
* @brief Class to hold the geometry of a detector. Number of modules, their size and where pixel 0
* for each module is located
*/
struct DetectorGeometry{
int modules_x{};
int modules_y{};
int pixels_x{};
int pixels_y{};
int module_gap_row{};
int module_gap_col{};
std::vector<ModuleGeometry> module_pixel_0;
};
struct ROI{
int64_t xmin{};
int64_t xmax{};
int64_t ymin{};
int64_t ymax{};
int64_t height() const { return ymax - ymin; }
int64_t width() const { return xmax - xmin; }
};
using dynamic_shape = std::vector<int64_t>; using dynamic_shape = std::vector<int64_t>;
//TODO! Can we uniform enums between the libraries? //TODO! Can we uniform enums between the libraries?
/**
* @brief Enum class to identify different detectors.
* The values are the same as in slsDetectorPackage
* Different spelling to avoid confusion with the slsDetectorPackage
*/
enum class DetectorType { enum class DetectorType {
Jungfrau, //Standard detectors match the enum values from slsDetectorPackage
Generic,
Eiger, Eiger,
Mythen3, Gotthard,
Moench, Jungfrau,
Moench03,
Moench03_old,
ChipTestBoard, ChipTestBoard,
Moench,
Mythen3,
Gotthard2,
Xilinx_ChipTestBoard,
//Additional detectors used for defining processing. Variants of the standard ones.
Moench03=100,
Moench03_old,
Unknown Unknown
}; };

View File

@ -0,0 +1,16 @@
#pragma once
#include "aare/defs.hpp"
#include "aare/RawMasterFile.hpp" //ROI refactor away
namespace aare{
/**
* @brief Update the detector geometry given a region of interest
*
* @param geo
* @param roi
* @return DetectorGeometry
*/
DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, ROI roi);
} // namespace aare
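A hedged sketch of what calling the helper could look like; all field values below are made up, and the include name for this header is assumed since it is not visible here:

```cpp
// Hedged sketch, not part of the diff: crop a two-module geometry to an ROI
#include "aare/defs.hpp"
// #include "aare/geo_helpers.hpp" // assumed header name for the declaration above

namespace aare {
DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, ROI roi);
}

int main() {
    using namespace aare;

    DetectorGeometry geo;
    geo.modules_x = 1;
    geo.modules_y = 2;
    geo.pixels_x = 1024;
    geo.pixels_y = 1024;
    // origin_x, origin_y, height, width, row_index, col_index
    geo.module_pixel_0.push_back({0, 0, 512, 1024, 0, 0});
    geo.module_pixel_0.push_back({0, 512, 512, 1024, 1, 0});

    ROI roi{100, 200, 0, 300}; // xmin, xmax, ymin, ymax
    DetectorGeometry cropped = update_geometry_with_roi(geo, roi);
    // cropped is expected to describe only the pixels inside the ROI
    (void)cropped;
}
```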

View File

@ -0,0 +1,18 @@
#include <thread>
#include <vector>
#include <utility>
namespace aare {
template<typename F>
void RunInParallel(F func, const std::vector<std::pair<int, int>>& tasks) {
// auto tasks = split_task(0, y.shape(0), n_threads);
std::vector<std::thread> threads;
for (auto &task : tasks) {
threads.push_back(std::thread(func, task.first, task.second));
}
for (auto &thread : threads) {
thread.join();
}
}
} // namespace aare

View File

@ -0,0 +1,8 @@
#include <utility>
#include <vector>
namespace aare {
std::vector<std::pair<int, int>> split_task(int first, int last, int n_threads);
} // namespace aare
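These two small helpers back the n_threads option of the fitting routines: split_task partitions an index range into contiguous chunks and RunInParallel launches one thread per chunk and joins them. A hedged sketch; the include paths are assumptions, as they are not visible in this view:

```cpp
// Hedged sketch, not part of the diff: run a row-wise job on 4 threads
#include "aare/utils/par.hpp"  // assumed location of RunInParallel
#include "aare/utils/task.hpp" // assumed location of split_task
#include <cstdio>

int main() {
    const int n_rows = 100;
    const int n_threads = 4;

    // Partition [0, n_rows) into n_threads contiguous chunks,
    // e.g. (0,25), (25,50), (50,75), (75,100)
    auto tasks = aare::split_task(0, n_rows, n_threads);

    // One std::thread per chunk, joined before returning
    aare::RunInParallel(
        [](int first, int last) { std::printf("rows %d-%d\n", first, last); },
        tasks);
}
```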

patches/lmfit.patch Normal file
View File

@ -0,0 +1,13 @@
diff --git a/lib/CMakeLists.txt b/lib/CMakeLists.txt
index 4efb7ed..6533660 100644
--- a/lib/CMakeLists.txt
+++ b/lib/CMakeLists.txt
@@ -11,7 +11,7 @@ target_compile_definitions(${lib} PRIVATE "LMFIT_EXPORT") # for Windows DLL expo
target_include_directories(${lib}
PUBLIC
- $<BUILD_INTERFACE:${CMAKE_SOURCE_DIR}/>
+ $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/>
$<INSTALL_INTERFACE:include/>
)

View File

@ -4,7 +4,7 @@ build-backend = "scikit_build_core.build"
[project] [project]
name = "aare" name = "aare"
version = "2024.11.15.dev0" version = "2025.2.18"
[tool.scikit-build] [tool.scikit-build]
@ -12,4 +12,5 @@ cmake.verbose = true
[tool.scikit-build.cmake.define] [tool.scikit-build.cmake.define]
AARE_PYTHON_BINDINGS = "ON" AARE_PYTHON_BINDINGS = "ON"
AARE_SYSTEM_LIBRARIES = "ON" AARE_SYSTEM_LIBRARIES = "ON"
AARE_INSTALL_PYTHONEXT = "ON"

View File

@ -1,5 +1,5 @@
find_package (Python 3.10 COMPONENTS Interpreter Development) find_package (Python 3.10 COMPONENTS Interpreter Development REQUIRED)
# Download or find pybind11 depending on configuration # Download or find pybind11 depending on configuration
if(AARE_FETCH_PYBIND11) if(AARE_FETCH_PYBIND11)
@ -28,8 +28,11 @@ target_link_libraries(_aare PRIVATE aare_core aare_compiler_flags)
set( PYTHON_FILES set( PYTHON_FILES
aare/__init__.py aare/__init__.py
aare/CtbRawFile.py aare/CtbRawFile.py
aare/func.py
aare/RawFile.py
aare/transform.py aare/transform.py
aare/ScanParameters.py aare/ScanParameters.py
aare/utils.py
) )
# Copy the python files to the build directory # Copy the python files to the build directory
@ -41,10 +44,24 @@ set_target_properties(_aare PROPERTIES
LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/aare LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/aare
) )
set(PYTHON_EXAMPLES
examples/play.py
examples/fits.py
)
# Copy the examples/scripts to the build directory # Copy the python examples to the build directory
configure_file(examples/play.py ${CMAKE_BINARY_DIR}/play.py) foreach(FILE ${PYTHON_EXAMPLES})
configure_file(${FILE} ${CMAKE_BINARY_DIR}/${FILE} )
message(STATUS "Copying ${FILE} to ${CMAKE_BINARY_DIR}/${FILE}")
endforeach(FILE ${PYTHON_EXAMPLES})
install(TARGETS _aare DESTINATION aare) if(AARE_INSTALL_PYTHONEXT)
install(TARGETS _aare
EXPORT "${TARGETS_EXPORT_NAME}"
LIBRARY DESTINATION aare
)
install(FILES ${PYTHON_FILES} DESTINATION aare)
endif()

View File

@ -2,18 +2,21 @@
from . import _aare from . import _aare
import numpy as np import numpy as np
from .ScanParameters import ScanParameters from .ScanParameters import ScanParameters
class CtbRawFile(_aare.CtbRawFile): class CtbRawFile(_aare.CtbRawFile):
"""File reader for the CTB raw file format. """File reader for the CTB raw file format.
Args: Args:
fname (pathlib.Path | str): Path to the file to be read. fname (pathlib.Path | str): Path to the file to be read.
chunk_size (int): Number of frames to read at a time. Default is 1.
transform (function): Function to apply to the data after reading it. transform (function): Function to apply to the data after reading it.
The function should take a numpy array of type uint8 and return one The function should take a numpy array of type uint8 and return one
or several numpy arrays. or several numpy arrays.
""" """
def __init__(self, fname, transform = None): def __init__(self, fname, chunk_size = 1, transform = None):
super().__init__(fname) super().__init__(fname)
self.transform = transform self._chunk_size = chunk_size
self._transform = transform
def read_frame(self, frame_index: int | None = None ) -> tuple: def read_frame(self, frame_index: int | None = None ) -> tuple:
@ -42,8 +45,8 @@ class CtbRawFile(_aare.CtbRawFile):
header = header[0] header = header[0]
if self.transform: if self._transform:
res = self.transform(data) res = self._transform(data)
if isinstance(res, tuple): if isinstance(res, tuple):
return header, *res return header, *res
else: else:
@ -59,6 +62,10 @@ class CtbRawFile(_aare.CtbRawFile):
Uses the position of the file pointer :py:meth:`~CtbRawFile.tell` to determine Uses the position of the file pointer :py:meth:`~CtbRawFile.tell` to determine
where to start reading from. where to start reading from.
If the number of frames requested is larger than the number of frames left in the file,
the function will read the remaining frames. If no frames are left in the file
a RuntimeError is raised.
Args: Args:
n_frames (int): Number of frames to read. n_frames (int): Number of frames to read.
@ -68,6 +75,12 @@ class CtbRawFile(_aare.CtbRawFile):
Raises: Raises:
RuntimeError: If EOF is reached. RuntimeError: If EOF is reached.
""" """
# Calculate the number of frames to actually read
n_frames = min(n_frames, self.frames_in_file - self.tell())
if n_frames == 0:
raise RuntimeError("No frames left in file.")
# Do the first read to figure out what we have # Do the first read to figure out what we have
tmp_header, tmp_data = self.read_frame() tmp_header, tmp_data = self.read_frame()
@ -87,10 +100,12 @@ class CtbRawFile(_aare.CtbRawFile):
def read(self) -> tuple: def read(self) -> tuple:
"""Read the entire file. """Read the entire file.
Seeks to the beginning of the file before reading.
Returns: Returns:
tuple: header, data tuple: header, data
""" """
self.seek(0)
return self.read_n(self.frames_in_file) return self.read_n(self.frames_in_file)
def seek(self, frame_index:int) -> None: def seek(self, frame_index:int) -> None:
@ -101,7 +116,7 @@ class CtbRawFile(_aare.CtbRawFile):
""" """
super().seek(frame_index) super().seek(frame_index)
def tell() -> int: def tell(self) -> int:
"""Return the current frame position in the file. """Return the current frame position in the file.
Returns: Returns:
@ -164,7 +179,12 @@ class CtbRawFile(_aare.CtbRawFile):
def __next__(self): def __next__(self):
try: try:
return self.read_frame() if self._chunk_size == 1:
return self.read_frame()
else:
return self.read_n(self._chunk_size)
except RuntimeError: except RuntimeError:
# TODO! find a good way to check that we actually have the right exception # TODO! find a good way to check that we actually have the right exception
raise StopIteration raise StopIteration

python/aare/RawFile.py Normal file
View File

@ -0,0 +1,66 @@
from . import _aare
import numpy as np
from .ScanParameters import ScanParameters
class RawFile(_aare.RawFile):
def __init__(self, fname, chunk_size = 1):
super().__init__(fname)
self._chunk_size = chunk_size
def read(self) -> tuple:
"""Read the entire file.
Seeks to the beginning of the file before reading.
Returns:
tuple: header, data
"""
self.seek(0)
return self.read_n(self.total_frames)
@property
def scan_parameters(self):
"""Return the scan parameters.
Returns:
ScanParameters: Scan parameters.
"""
return ScanParameters(self.master.scan_parameters)
@property
def master(self):
"""Return the master file.
Returns:
RawMasterFile: Master file.
"""
return super().master
def __len__(self) -> int:
"""Return the number of frames in the file.
Returns:
int: Number of frames in file.
"""
return super().frames_in_file
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
def __iter__(self):
return self
def __next__(self):
try:
if self._chunk_size == 1:
return self.read_frame()
else:
return self.read_n(self._chunk_size)
except RuntimeError:
# TODO! find a good way to check that we actually have the right exception
raise StopIteration

View File

@ -2,10 +2,22 @@
from . import _aare from . import _aare
from ._aare import File, RawFile, RawMasterFile, RawSubFile from ._aare import File, RawMasterFile, RawSubFile
from ._aare import Pedestal, ClusterFinder, VarClusterFinder from ._aare import Pedestal_d, Pedestal_f, ClusterFinder, VarClusterFinder
from ._aare import DetectorType from ._aare import DetectorType
from ._aare import ClusterFile from ._aare import ClusterFile
from ._aare import hitmap
from ._aare import ClusterFinderMT, ClusterCollector, ClusterFileSink, ClusterVector_i
from ._aare import fit_gaus, fit_pol1
from .CtbRawFile import CtbRawFile from .CtbRawFile import CtbRawFile
from .ScanParameters import ScanParameters from .RawFile import RawFile
from .ScanParameters import ScanParameters
from .utils import random_pixels, random_pixel, flat_list
#make functions available in the top level API
from .func import *

python/aare/func.py Normal file
View File

@ -0,0 +1 @@
from ._aare import gaus, pol1

View File

@ -2,6 +2,14 @@ import numpy as np
from . import _aare from . import _aare
class AdcSar04Transform64to16:
def __call__(self, data):
return _aare.adc_sar_04_decode64to16(data)
class AdcSar05Transform64to16:
def __call__(self, data):
return _aare.adc_sar_05_decode64to16(data)
class Moench05Transform: class Moench05Transform:
#Could be moved to C++ without changing the interface #Could be moved to C++ without changing the interface
def __init__(self): def __init__(self):
@ -9,6 +17,24 @@ class Moench05Transform:
def __call__(self, data): def __call__(self, data):
return np.take(data.view(np.uint16), self.pixel_map) return np.take(data.view(np.uint16), self.pixel_map)
class Moench05Transform1g:
#Could be moved to C++ without changing the interface
def __init__(self):
self.pixel_map = _aare.GenerateMoench05PixelMap1g()
def __call__(self, data):
return np.take(data.view(np.uint16), self.pixel_map)
class Moench05TransformOld:
#Could be moved to C++ without changing the interface
def __init__(self):
self.pixel_map = _aare.GenerateMoench05PixelMapOld()
def __call__(self, data):
return np.take(data.view(np.uint16), self.pixel_map)
class Matterhorn02Transform: class Matterhorn02Transform:
@ -25,4 +51,8 @@ class Matterhorn02Transform:
#on import generate the pixel maps to avoid doing it every time #on import generate the pixel maps to avoid doing it every time
moench05 = Moench05Transform() moench05 = Moench05Transform()
matterhorn02 = Matterhorn02Transform() moench05_1g = Moench05Transform1g()
moench05_old = Moench05TransformOld()
matterhorn02 = Matterhorn02Transform()
adc_sar_04_64to16 = AdcSar04Transform64to16()
adc_sar_05_64to16 = AdcSar05Transform64to16()

python/aare/utils.py Normal file
View File

@ -0,0 +1,27 @@
import numpy as np
def random_pixels(n_pixels, xmin=0, xmax=512, ymin=0, ymax=1024):
"""Return a list of random pixels.
Args:
n_pixels (int): Number of pixels to return.
xmin, xmax (int): Column range to draw from (xmax exclusive).
ymin, ymax (int): Row range to draw from (ymax exclusive).
Returns:
list: List of (row, col) tuples.
"""
return [(np.random.randint(ymin, ymax), np.random.randint(xmin, xmax)) for _ in range(n_pixels)]
def random_pixel(xmin=0, xmax=512, ymin=0, ymax=1024):
"""Return a random pixel.
Returns:
tuple: (row, col)
"""
return random_pixels(1, xmin, xmax, ymin, ymax)[0]
def flat_list(xss):
"""Flatten a list of lists."""
return [x for xs in xss for x in xs]

python/examples/fits.py Normal file
View File

@ -0,0 +1,79 @@
import matplotlib.pyplot as plt
import numpy as np
from aare import fit_gaus, fit_pol1
from aare import gaus, pol1
textpm = f"±" #
textmu = f"μ" #
textsigma = f"σ" #
# ================================= Gauss fit =================================
# Parameters
mu = np.random.uniform(1, 100) # Mean of Gaussian
sigma = np.random.uniform(4, 20) # Standard deviation
num_points = 10000 # Number of points for smooth distribution
noise_sigma = 100
# Generate Gaussian distribution
data = np.random.normal(mu, sigma, num_points)
# Generate errors for each point
errors = np.abs(np.random.normal(0, sigma, num_points)) # Errors with mean 0, std sigma
# Create subplot
fig0, ax0 = plt.subplots(1, 1, num=0, figsize=(12, 8))
x = np.histogram(data, bins=30)[1][:-1] + 0.05
y = np.histogram(data, bins=30)[0]
yerr = errors[:30]
# Add the errors as error bars in the step plot
ax0.errorbar(x, y, yerr=yerr, fmt=". ", capsize=5)
ax0.grid()
par, err = fit_gaus(x, y, yerr)
print(par, err)
x = np.linspace(x[0], x[-1], 1000)
ax0.plot(x, gaus(x, par), marker="")
ax0.set(xlabel="x", ylabel="Counts", title=f"A0 = {par[0]:0.2f}{textpm}{err[0]:0.2f}\n"
f"{textmu} = {par[1]:0.2f}{textpm}{err[1]:0.2f}\n"
f"{textsigma} = {par[2]:0.2f}{textpm}{err[2]:0.2f}\n"
f"(init: {textmu}: {mu:0.2f}, {textsigma}: {sigma:0.2f})")
fig0.tight_layout()
# ================================= pol1 fit =================================
# Parameters
n_points = 40
# Generate random slope and intercept (origin)
slope = np.random.uniform(-10, 10) # Random slope between -10 and 10
intercept = np.random.uniform(-10, 10) # Random intercept between -10 and 10
# Generate random x values
x_values = np.random.uniform(-10, 10, n_points)
# Calculate y values based on the linear function y = mx + b + error
errors = np.abs(np.random.normal(0, np.random.uniform(1, 5), n_points))
var_points = np.random.normal(0, np.random.uniform(0.1, 2), n_points)
y_values = slope * x_values + intercept + var_points
fig1, ax1 = plt.subplots(1, 1, num=1, figsize=(12, 8))
ax1.errorbar(x_values, y_values, yerr=errors, fmt=". ", capsize=5)
par, err = fit_pol1(x_values, y_values, errors)
x = np.linspace(np.min(x_values), np.max(x_values), 1000)
ax1.plot(x, pol1(x, par), marker="")
ax1.set(xlabel="x", ylabel="y", title=f"a = {par[0]:0.2f}{textpm}{err[0]:0.2f}\n"
f"b = {par[1]:0.2f}{textpm}{err[1]:0.2f}\n"
f"(init: {slope:0.2f}, {intercept:0.2f})")
fig1.tight_layout()
plt.show()

View File

@ -1,15 +1,50 @@
import sys
sys.path.append('/home/l_msdetect/erik/aare/build')
#Our normal python imports
from pathlib import Path
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
import numpy as np import numpy as np
plt.ion() import boost_histogram as bh
from pathlib import Path import time
from aare import ClusterFile
base = Path('~/data/aare_test_data/clusters').expanduser()
f = ClusterFile(base / 'beam_En700eV_-40deg_300V_10us_d0_f0_100.clust')
# f = ClusterFile(base / 'single_frame_97_clustrers.clust')
for i in range(10): import aare
fn, cl = f.read_frame()
print(fn, cl.size) data = np.random.normal(10, 1, 1000)
hist = bh.Histogram(bh.axis.Regular(10, 0, 20))
hist.fill(data)
x = hist.axes[0].centers
y = hist.values()
y_err = np.sqrt(y)+1
res = aare.fit_gaus(x, y, y_err, chi2 = True)
t_elapsed = time.perf_counter()-t0
print(f'Histogram filling took: {t_elapsed:.3f}s {total_clusters/t_elapsed/1e6:.3f}M clusters/s')
histogram_data = hist3d.counts()
x = hist3d.axes[2].edges[:-1]
y = histogram_data[100,100,:]
xx = np.linspace(x[0], x[-1])
# fig, ax = plt.subplots()
# ax.step(x, y, where = 'post')
y_err = np.sqrt(y)
y_err = np.zeros(y.size)
y_err += 1
# par = fit_gaus2(y,x, y_err)
# ax.plot(xx, gaus(xx,par))
# print(par)
res = fit_gaus(y,x)
res2 = fit_gaus(y,x, y_err)
print(res)
print(res2)

View File

@ -1,4 +1,8 @@
#include "aare/ClusterCollector.hpp"
#include "aare/ClusterFileSink.hpp"
#include "aare/ClusterFinder.hpp" #include "aare/ClusterFinder.hpp"
#include "aare/ClusterFinderMT.hpp"
#include "aare/ClusterVector.hpp"
#include "aare/NDView.hpp" #include "aare/NDView.hpp"
#include "aare/Pedestal.hpp" #include "aare/Pedestal.hpp"
#include "np_helper.hpp" #include "np_helper.hpp"
@ -7,31 +11,176 @@
#include <filesystem> #include <filesystem>
#include <pybind11/pybind11.h> #include <pybind11/pybind11.h>
#include <pybind11/stl.h> #include <pybind11/stl.h>
#include <pybind11/stl_bind.h>
namespace py = pybind11; namespace py = pybind11;
using pd_type = double;
void define_cluster_finder_bindings(py::module &m) { template <typename T>
py::class_<ClusterFinder<uint16_t, double>>(m, "ClusterFinder") void define_cluster_vector(py::module &m, const std::string &typestr) {
.def(py::init<Shape<2>, Shape<2>>()) auto class_name = fmt::format("ClusterVector_{}", typestr);
py::class_<ClusterVector<T>>(m, class_name.c_str(), py::buffer_protocol())
.def(py::init<int, int>())
.def_property_readonly("size", &ClusterVector<T>::size)
.def("item_size", &ClusterVector<T>::item_size)
.def_property_readonly("fmt",
[typestr](ClusterVector<T> &self) {
return fmt::format(
self.fmt_base(), self.cluster_size_x(),
self.cluster_size_y(), typestr);
})
.def("sum",
[](ClusterVector<T> &self) {
auto *vec = new std::vector<T>(self.sum());
return return_vector(vec);
})
.def("sum_2x2", [](ClusterVector<T> &self) {
auto *vec = new std::vector<T>(self.sum_2x2());
return return_vector(vec);
})
.def_property_readonly("capacity", &ClusterVector<T>::capacity)
.def_property("frame_number", &ClusterVector<T>::frame_number,
&ClusterVector<T>::set_frame_number)
.def_buffer([typestr](ClusterVector<T> &self) -> py::buffer_info {
return py::buffer_info(
self.data(), /* Pointer to buffer */
self.item_size(), /* Size of one scalar */
fmt::format(self.fmt_base(), self.cluster_size_x(),
self.cluster_size_y(),
typestr), /* Format descriptor */
1, /* Number of dimensions */
{self.size()}, /* Buffer dimensions */
{self.item_size()} /* Strides (in bytes) for each index */
);
});
}
void define_cluster_finder_mt_bindings(py::module &m) {
py::class_<ClusterFinderMT<uint16_t, pd_type>>(m, "ClusterFinderMT")
.def(py::init<Shape<2>, Shape<2>, pd_type, size_t, size_t>(),
py::arg("image_size"), py::arg("cluster_size"),
py::arg("n_sigma") = 5.0, py::arg("capacity") = 2048,
py::arg("n_threads") = 3)
.def("push_pedestal_frame", .def("push_pedestal_frame",
[](ClusterFinder<uint16_t, double> &self, [](ClusterFinderMT<uint16_t, pd_type> &self,
py::array_t<uint16_t> frame) { py::array_t<uint16_t> frame) {
auto view = make_view_2d(frame); auto view = make_view_2d(frame);
self.push_pedestal_frame(view); self.push_pedestal_frame(view);
}) })
.def(
"find_clusters",
[](ClusterFinderMT<uint16_t, pd_type> &self,
py::array_t<uint16_t> frame, uint64_t frame_number) {
auto view = make_view_2d(frame);
self.find_clusters(view, frame_number);
return;
},
py::arg(), py::arg("frame_number") = 0)
.def("clear_pedestal", &ClusterFinderMT<uint16_t, pd_type>::clear_pedestal)
.def("sync", &ClusterFinderMT<uint16_t, pd_type>::sync)
.def("stop", &ClusterFinderMT<uint16_t, pd_type>::stop)
.def("start", &ClusterFinderMT<uint16_t, pd_type>::start)
.def("pedestal", .def("pedestal",
[](ClusterFinder<uint16_t, double> &self) { [](ClusterFinderMT<uint16_t, pd_type> &self, size_t thread_index) {
auto m = new NDArray<double, 2>{}; auto pd = new NDArray<pd_type, 2>{};
*m = self.pedestal(); *pd = self.pedestal(thread_index);
return return_image_data(m); return return_image_data(pd);
}) },py::arg("thread_index") = 0)
.def("find_clusters_without_threshold", .def("noise",
[](ClusterFinder<uint16_t, double> &self, [](ClusterFinderMT<uint16_t, pd_type> &self, size_t thread_index) {
auto arr = new NDArray<pd_type, 2>{};
*arr = self.noise(thread_index);
return return_image_data(arr);
},py::arg("thread_index") = 0);
}
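The binding above only exposes part of the multi-threaded finder. A hedged sketch of driving it directly from C++, with the constructor signature and method names inferred from the pybind11 code (treat the details as assumptions):

```cpp
// Hedged sketch, API inferred from the bindings above (not a verified example)
#include "aare/ClusterFinderMT.hpp"
#include "aare/NDView.hpp"
#include <cstdint>
#include <vector>

int main() {
    using namespace aare;

    // image_size, cluster_size, n_sigma, capacity, n_threads
    ClusterFinderMT<uint16_t, double> finder({512, 1024}, {3, 3}, 5.0, 2048, 3);

    std::vector<uint16_t> buf(512 * 1024, 0);
    NDView<uint16_t, 2> frame(buf.data(), {512, 1024});

    for (int i = 0; i < 100; ++i)
        finder.push_pedestal_frame(frame); // build the pedestal first

    finder.find_clusters(frame, 0); // queue a frame for processing
    finder.sync();                  // wait for the queued work to finish
    finder.stop();                  // stop the worker threads
}
```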
void define_cluster_collector_bindings(py::module &m) {
py::class_<ClusterCollector>(m, "ClusterCollector")
.def(py::init<ClusterFinderMT<uint16_t, double, int32_t> *>())
.def("stop", &ClusterCollector::stop)
.def(
"steal_clusters",
[](ClusterCollector &self) {
auto v =
new std::vector<ClusterVector<int>>(self.steal_clusters());
return v;
},
py::return_value_policy::take_ownership);
}
void define_cluster_file_sink_bindings(py::module &m) {
py::class_<ClusterFileSink>(m, "ClusterFileSink")
.def(py::init<ClusterFinderMT<uint16_t, double, int32_t> *,
const std::filesystem::path &>())
.def("stop", &ClusterFileSink::stop);
}
void define_cluster_finder_bindings(py::module &m) {
py::class_<ClusterFinder<uint16_t, pd_type>>(m, "ClusterFinder")
.def(py::init<Shape<2>, Shape<2>, pd_type, size_t>(),
py::arg("image_size"), py::arg("cluster_size"),
py::arg("n_sigma") = 5.0, py::arg("capacity") = 1'000'000)
.def("push_pedestal_frame",
[](ClusterFinder<uint16_t, pd_type> &self,
py::array_t<uint16_t> frame) { py::array_t<uint16_t> frame) {
auto view = make_view_2d(frame); auto view = make_view_2d(frame);
auto clusters = self.find_clusters_without_threshold(view); self.push_pedestal_frame(view);
return clusters; })
}); .def("clear_pedestal", &ClusterFinder<uint16_t, pd_type>::clear_pedestal)
.def_property_readonly("pedestal",
[](ClusterFinder<uint16_t, pd_type> &self) {
auto pd = new NDArray<pd_type, 2>{};
*pd = self.pedestal();
return return_image_data(pd);
})
.def_property_readonly("noise",
[](ClusterFinder<uint16_t, pd_type> &self) {
auto arr = new NDArray<pd_type, 2>{};
*arr = self.noise();
return return_image_data(arr);
})
.def(
"steal_clusters",
[](ClusterFinder<uint16_t, pd_type> &self,
bool realloc_same_capacity) {
auto v = new ClusterVector<int>(
self.steal_clusters(realloc_same_capacity));
return v;
},
py::arg("realloc_same_capacity") = false)
.def(
"find_clusters",
[](ClusterFinder<uint16_t, pd_type> &self,
py::array_t<uint16_t> frame, uint64_t frame_number) {
auto view = make_view_2d(frame);
self.find_clusters(view, frame_number);
return;
},
py::arg(), py::arg("frame_number") = 0);
m.def("hitmap",
[](std::array<size_t, 2> image_size, ClusterVector<int32_t> &cv) {
py::array_t<int32_t> hitmap(image_size);
auto r = hitmap.mutable_unchecked<2>();
// Initialize hitmap to 0
for (py::ssize_t i = 0; i < r.shape(0); i++)
for (py::ssize_t j = 0; j < r.shape(1); j++)
r(i, j) = 0;
size_t stride = cv.item_size();
auto ptr = cv.data();
for (size_t i = 0; i < cv.size(); i++) {
auto x = *reinterpret_cast<int16_t *>(ptr);
auto y = *reinterpret_cast<int16_t *>(ptr + sizeof(int16_t));
r(y, x) += 1;
ptr += stride;
}
return hitmap;
});
define_cluster_vector<int>(m, "i");
define_cluster_vector<double>(m, "d");
define_cluster_vector<float>(m, "f");
py::class_<DynamicCluster>(m, "DynamicCluster", py::buffer_protocol()) py::class_<DynamicCluster>(m, "DynamicCluster", py::buffer_protocol())
.def(py::init<int, int, Dtype>()) .def(py::init<int, int, Dtype>())

View File

@ -1,7 +1,6 @@
#include "aare/ClusterFile.hpp" #include "aare/ClusterFile.hpp"
#include "aare/defs.hpp" #include "aare/defs.hpp"
#include <cstdint> #include <cstdint>
#include <filesystem> #include <filesystem>
#include <pybind11/iostream.h> #include <pybind11/iostream.h>
@ -11,40 +10,63 @@
#include <pybind11/stl/filesystem.h> #include <pybind11/stl/filesystem.h>
#include <string> #include <string>
//Disable warnings for unused parameters, as we ignore some
//in the __exit__ method
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
namespace py = pybind11; namespace py = pybind11;
using namespace ::aare; using namespace ::aare;
void define_cluster_file_io_bindings(py::module &m) { void define_cluster_file_io_bindings(py::module &m) {
PYBIND11_NUMPY_DTYPE(Cluster, x, y, data); PYBIND11_NUMPY_DTYPE(Cluster3x3, x, y, data);
py::class_<ClusterFile>(m, "ClusterFile") py::class_<ClusterFile>(m, "ClusterFile")
.def(py::init<const std::filesystem::path &, size_t>(), py::arg(), py::arg("chunk_size") = 1000) .def(py::init<const std::filesystem::path &, size_t,
const std::string &>(),
py::arg(), py::arg("chunk_size") = 1000, py::arg("mode") = "r")
.def("read_clusters", .def("read_clusters",
[](ClusterFile &self, size_t n_clusters) { [](ClusterFile &self, size_t n_clusters) {
auto* vec = new std::vector<Cluster>(self.read_clusters(n_clusters)); auto v = new ClusterVector<int32_t>(self.read_clusters(n_clusters));
return return_vector(vec); return v;
}) },py::return_value_policy::take_ownership)
.def("read_frame", .def("read_frame",
[](ClusterFile &self) { [](ClusterFile &self) {
int32_t frame_number; auto v = new ClusterVector<int32_t>(self.read_frame());
auto* vec = new std::vector<Cluster>(self.read_frame(frame_number)); return v;
return py::make_tuple(frame_number, return_vector(vec));
})
.def("read_cluster_with_cut",
[](ClusterFile &self, size_t n_clusters, py::array_t<double> noise_map, int nx, int ny) {
auto view = make_view_2d(noise_map);
auto* vec = new std::vector<Cluster>(self.read_cluster_with_cut(n_clusters, view.data(), nx, ny));
return return_vector(vec);
}) })
.def("write_frame", &ClusterFile::write_frame)
// .def("read_cluster_with_cut",
// [](ClusterFile &self, size_t n_clusters,
// py::array_t<double> noise_map, int nx, int ny) {
// auto view = make_view_2d(noise_map);
// auto *vec =
// new std::vector<Cluster3x3>(self.read_cluster_with_cut(
// n_clusters, view.data(), nx, ny));
// return return_vector(vec);
// })
.def("__enter__", [](ClusterFile &self) { return &self; }) .def("__enter__", [](ClusterFile &self) { return &self; })
.def("__exit__", [](ClusterFile &self, py::args args) { return; }) .def("__exit__",
[](ClusterFile &self,
const std::optional<pybind11::type> &exc_type,
const std::optional<pybind11::object> &exc_value,
const std::optional<pybind11::object> &traceback) {
self.close();
})
.def("__iter__", [](ClusterFile &self) { return &self; }) .def("__iter__", [](ClusterFile &self) { return &self; })
.def("__next__", [](ClusterFile &self) { .def("__next__", [](ClusterFile &self) {
auto vec = new std::vector<Cluster>(self.read_clusters(self.chunk_size())); auto v = new ClusterVector<int32_t>(self.read_clusters(self.chunk_size()));
if(vec->size() == 0) { if (v->size() == 0) {
throw py::stop_iteration(); throw py::stop_iteration();
} }
return return_vector(vec); return v;
}); });
} m.def("calculate_eta2", []( aare::ClusterVector<int32_t> &clusters) {
auto eta2 = new NDArray<double, 2>(calculate_eta2(clusters));
return return_image_data(eta2);
});
}
#pragma GCC diagnostic pop
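For completeness, the same reader can be used directly from C++. The constructor arguments and return types below are inferred from the bindings (chunk size, mode string, ClusterVector<int32_t>), and the file name is purely illustrative:

```cpp
// Hedged sketch, API inferred from the bindings above; "clusters.clust" is a made-up path
#include "aare/ClusterFile.hpp"
#include "aare/ClusterVector.hpp"
#include <fmt/core.h>

int main() {
    aare::ClusterFile f("clusters.clust", 1000, "r"); // path, chunk_size, mode

    auto clusters = f.read_clusters(1000); // ClusterVector<int32_t>
    fmt::print("read {} clusters from frame {}\n", clusters.size(),
               clusters.frame_number());

    auto eta2 = aare::calculate_eta2(clusters); // NDArray<double, 2>, one row per cluster
    (void)eta2;

    f.close();
}
```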

View File

@ -0,0 +1,99 @@
#include "aare/CtbRawFile.hpp"
#include "aare/File.hpp"
#include "aare/Frame.hpp"
#include "aare/RawFile.hpp"
#include "aare/RawMasterFile.hpp"
#include "aare/RawSubFile.hpp"
#include "aare/defs.hpp"
#include "aare/decode.hpp"
// #include "aare/fClusterFileV2.hpp"
#include <cstdint>
#include <filesystem>
#include <pybind11/iostream.h>
#include <pybind11/numpy.h>
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <pybind11/stl/filesystem.h>
#include <string>
namespace py = pybind11;
using namespace ::aare;
void define_ctb_raw_file_io_bindings(py::module &m) {
m.def("adc_sar_05_decode64to16", [](py::array_t<uint8_t> input) {
if(input.ndim() != 2){
throw std::runtime_error("Only 2D arrays are supported at this moment");
}
//Create a 2D output array with the same shape as the input
std::vector<ssize_t> shape{input.shape(0), input.shape(1)/8};
py::array_t<uint16_t> output(shape);
//Create a view of the input and output arrays
NDView<uint64_t, 2> input_view(reinterpret_cast<uint64_t*>(input.mutable_data()), {output.shape(0), output.shape(1)});
NDView<uint16_t, 2> output_view(output.mutable_data(), {output.shape(0), output.shape(1)});
adc_sar_05_decode64to16(input_view, output_view);
return output;
});
m.def("adc_sar_04_decode64to16", [](py::array_t<uint8_t> input) {
if(input.ndim() != 2){
throw std::runtime_error("Only 2D arrays are supported at this moment");
}
//Create a 2D output array with the same shape as the input
std::vector<ssize_t> shape{input.shape(0), input.shape(1)/8};
py::array_t<uint16_t> output(shape);
//Create a view of the input and output arrays
NDView<uint64_t, 2> input_view(reinterpret_cast<uint64_t*>(input.mutable_data()), {output.shape(0), output.shape(1)});
NDView<uint16_t, 2> output_view(output.mutable_data(), {output.shape(0), output.shape(1)});
adc_sar_04_decode64to16(input_view, output_view);
return output;
});
py::class_<CtbRawFile>(m, "CtbRawFile")
.def(py::init<const std::filesystem::path &>())
.def("read_frame",
[](CtbRawFile &self) {
size_t image_size = self.image_size_in_bytes();
py::array image;
std::vector<ssize_t> shape;
shape.reserve(2);
shape.push_back(1);
shape.push_back(image_size);
py::array_t<DetectorHeader> header(1);
// always read bytes
image = py::array_t<uint8_t>(shape);
self.read_into(
reinterpret_cast<std::byte *>(image.mutable_data()),
header.mutable_data());
return py::make_tuple(header, image);
})
.def("seek", &CtbRawFile::seek)
.def("tell", &CtbRawFile::tell)
.def("master", &CtbRawFile::master)
.def_property_readonly("image_size_in_bytes",
&CtbRawFile::image_size_in_bytes)
.def_property_readonly("frames_in_file", &CtbRawFile::frames_in_file);
}

View File

@ -20,6 +20,11 @@
namespace py = pybind11;
using namespace ::aare;

//Disable warnings for unused parameters, as we ignore some
//in the __exit__ method
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"

void define_file_io_bindings(py::module &m) {

@ -38,36 +43,7 @@ void define_file_io_bindings(py::module &m) {
                      bunchId, timestamp, modId, row, column, reserved,
                      debug, roundRNumber, detType, version, packetMask);
py::class_<CtbRawFile>(m, "CtbRawFile")
.def(py::init<const std::filesystem::path &>())
.def("read_frame",
[](CtbRawFile &self) {
size_t image_size = self.image_size_in_bytes();
py::array image;
std::vector<ssize_t> shape;
shape.reserve(2);
shape.push_back(1);
shape.push_back(image_size);
py::array_t<DetectorHeader> header(1);
// always read bytes
image = py::array_t<uint8_t>(shape);
self.read_into(
reinterpret_cast<std::byte *>(image.mutable_data()),
header.mutable_data());
return py::make_tuple(header, image);
})
.def("seek", &CtbRawFile::seek)
.def("tell", &CtbRawFile::tell)
.def("master", &CtbRawFile::master)
.def_property_readonly("image_size_in_bytes",
&CtbRawFile::image_size_in_bytes)
.def_property_readonly("frames_in_file", &CtbRawFile::frames_in_file);
py::class_<File>(m, "File") py::class_<File>(m, "File")
.def(py::init([](const std::filesystem::path &fname) { .def(py::init([](const std::filesystem::path &fname) {
@ -80,7 +56,8 @@ void define_file_io_bindings(py::module &m) {
.def(py::init<const std::filesystem::path &, const std::string &, .def(py::init<const std::filesystem::path &, const std::string &,
const FileConfig &>()) const FileConfig &>())
.def("frame_number", &File::frame_number) .def("frame_number", py::overload_cast<>(&File::frame_number))
.def("frame_number", py::overload_cast<size_t>(&File::frame_number))
.def_property_readonly("bytes_per_frame", &File::bytes_per_frame) .def_property_readonly("bytes_per_frame", &File::bytes_per_frame)
.def_property_readonly("pixels_per_frame", &File::pixels_per_frame) .def_property_readonly("pixels_per_frame", &File::pixels_per_frame)
.def("seek", &File::seek) .def("seek", &File::seek)
@ -133,13 +110,15 @@ void define_file_io_bindings(py::module &m) {
            return image;
        })
        .def("read_n", [](File &self, size_t n_frames) {
            // adjust for actual frames left in the file
            n_frames = std::min(n_frames, self.total_frames() - self.tell());
            if (n_frames == 0) {
                throw std::runtime_error("No frames left in file");
            }
            std::vector<size_t> shape{n_frames, self.rows(), self.cols()};

            py::array image;
            const uint8_t item_size = self.bytes_per_pixel();
            if (item_size == 1) {
                image = py::array_t<uint8_t>(shape);
            } else if (item_size == 2) {

@ -150,8 +129,41 @@ void define_file_io_bindings(py::module &m) {
            self.read_into(reinterpret_cast<std::byte *>(image.mutable_data()),
                           n_frames);
            return image;
})
.def("__enter__", [](File &self) { return &self; })
.def("__exit__",
[](File &self,
const std::optional<pybind11::type> &exc_type,
const std::optional<pybind11::object> &exc_value,
const std::optional<pybind11::object> &traceback) {
// self.close();
})
.def("__iter__", [](File &self) { return &self; })
.def("__next__", [](File &self) {
try{
const uint8_t item_size = self.bytes_per_pixel();
py::array image;
std::vector<ssize_t> shape;
shape.reserve(2);
shape.push_back(self.rows());
shape.push_back(self.cols());
if (item_size == 1) {
image = py::array_t<uint8_t>(shape);
} else if (item_size == 2) {
image = py::array_t<uint16_t>(shape);
} else if (item_size == 4) {
image = py::array_t<uint32_t>(shape);
}
self.read_into(
reinterpret_cast<std::byte *>(image.mutable_data()));
return image;
}catch(std::runtime_error &e){
throw py::stop_iteration();
}
        });
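With the __enter__/__exit__ and __iter__/__next__ additions above, a File can be used as a context manager and iterated frame by frame from Python. A short sketch; the file name and import path are placeholders:

```python
# Iterate frame by frame; __next__ raises StopIteration when read_into
# throws at the end of the file, as in the lambda above.
from aare import File

with File("run_master_0.json") as f:
    for frame in f:
        print(frame.shape, frame.dtype)
```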
py::class_<FileConfig>(m, "FileConfig") py::class_<FileConfig>(m, "FileConfig")
.def(py::init<>()) .def(py::init<>())
.def_readwrite("rows", &FileConfig::rows) .def_readwrite("rows", &FileConfig::rows)
@ -194,7 +206,7 @@ void define_file_io_bindings(py::module &m) {
return fmt::format("<ROI: xmin: {} xmax: {} ymin: {} ymax: {}>", self.xmin, self.xmax, self.ymin, self.ymax); return fmt::format("<ROI: xmin: {} xmax: {} ymin: {} ymax: {}>", self.xmin, self.xmax, self.ymin, self.ymax);
}) })
.def("__iter__", [](const ROI &self) { .def("__iter__", [](const ROI &self) {
return py::make_iterator(&self.xmin, &self.ymax+1); return py::make_iterator(&self.xmin, &self.ymax+1); //NOLINT
}); });
@ -231,7 +243,7 @@ void define_file_io_bindings(py::module &m) {
return image; return image;
}); });
#pragma GCC diagnostic pop
    // py::class_<ClusterHeader>(m, "ClusterHeader")
    //     .def(py::init<>())
    //     .def_readwrite("frame_number", &ClusterHeader::frame_number)

python/src/fit.hpp (new file, 250 lines)

@ -0,0 +1,250 @@
#include <cstdint>
#include <filesystem>
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <pybind11/stl_bind.h>
#include "aare/Fit.hpp"
namespace py = pybind11;
using namespace pybind11::literals;
void define_fit_bindings(py::module &m) {
// TODO! Evaluate without converting to double
m.def(
"gaus",
[](py::array_t<double, py::array::c_style | py::array::forcecast> x,
py::array_t<double, py::array::c_style | py::array::forcecast> par) {
auto x_view = make_view_1d(x);
auto par_view = make_view_1d(par);
auto y = new NDArray<double, 1>{aare::func::gaus(x_view, par_view)};
return return_image_data(y);
},
R"(
Evaluate a 1D Gaussian function for all points in x using parameters par.
Parameters
----------
x : array_like
The points at which to evaluate the Gaussian function.
par : array_like
The parameters of the Gaussian function. The first element is the amplitude, the second element is the mean, and the third element is the standard deviation.
)",
py::arg("x"), py::arg("par"));
m.def(
"pol1",
[](py::array_t<double, py::array::c_style | py::array::forcecast> x,
py::array_t<double, py::array::c_style | py::array::forcecast> par) {
auto x_view = make_view_1d(x);
auto par_view = make_view_1d(par);
auto y = new NDArray<double, 1>{aare::func::pol1(x_view, par_view)};
return return_image_data(y);
},
R"(
Evaluate a 1D polynomial function for all points in x using parameters par. (p0+p1*x)
Parameters
----------
x : array_like
The points at which to evaluate the polynomial function.
par : array_like
The parameters of the polynomial function. The first element is the intercept, and the second element is the slope.
)",
py::arg("x"), py::arg("par"));
m.def(
"fit_gaus",
[](py::array_t<double, py::array::c_style | py::array::forcecast> x,
py::array_t<double, py::array::c_style | py::array::forcecast> y,
int n_threads) {
if (y.ndim() == 3) {
auto par = new NDArray<double, 3>{};
auto y_view = make_view_3d(y);
auto x_view = make_view_1d(x);
*par = aare::fit_gaus(x_view, y_view, n_threads);
return return_image_data(par);
} else if (y.ndim() == 1) {
auto par = new NDArray<double, 1>{};
auto y_view = make_view_1d(y);
auto x_view = make_view_1d(x);
*par = aare::fit_gaus(x_view, y_view);
return return_image_data(par);
} else {
throw std::runtime_error("Data must be 1D or 3D");
}
},
R"(
Fit a 1D Gaussian to data.
Parameters
----------
x : array_like
The x values.
y : array_like
The y values.
n_threads : int, optional
The number of threads to use. Default is 4.
)",
py::arg("x"), py::arg("y"), py::arg("n_threads") = 4);
m.def(
"fit_gaus",
[](py::array_t<double, py::array::c_style | py::array::forcecast> x,
py::array_t<double, py::array::c_style | py::array::forcecast> y,
py::array_t<double, py::array::c_style | py::array::forcecast> y_err,
int n_threads) {
if (y.ndim() == 3) {
// Allocate memory for the output
// Need to have pointers to allow python to manage
// the memory
auto par = new NDArray<double, 3>({y.shape(0), y.shape(1), 3});
auto par_err =
new NDArray<double, 3>({y.shape(0), y.shape(1), 3});
auto chi2 = new NDArray<double, 2>({y.shape(0), y.shape(1)});
// Make views of the numpy arrays
auto y_view = make_view_3d(y);
auto y_view_err = make_view_3d(y_err);
auto x_view = make_view_1d(x);
aare::fit_gaus(x_view, y_view, y_view_err, par->view(),
par_err->view(), chi2->view(), n_threads);
return py::dict("par"_a = return_image_data(par),
"par_err"_a = return_image_data(par_err),
"chi2"_a = return_image_data(chi2),
"Ndf"_a = y.shape(2) - 3);
} else if (y.ndim() == 1) {
// Allocate memory for the output
// Need to have pointers to allow python to manage
// the memory
auto par = new NDArray<double, 1>({3});
auto par_err = new NDArray<double, 1>({3});
// Decode the numpy arrays
auto y_view = make_view_1d(y);
auto y_view_err = make_view_1d(y_err);
auto x_view = make_view_1d(x);
double chi2 = 0;
aare::fit_gaus(x_view, y_view, y_view_err, par->view(),
par_err->view(), chi2);
return py::dict("par"_a = return_image_data(par),
"par_err"_a = return_image_data(par_err),
"chi2"_a = chi2, "Ndf"_a = y.size() - 3);
} else {
throw std::runtime_error("Data must be 1D or 3D");
}
},
R"(
Fit a 1D Gaussian to data with error estimates.
Parameters
----------
x : array_like
The x values.
y : array_like
The y values.
y_err : array_like
The error in the y values.
n_threads : int, optional
The number of threads to use. Default is 4.
)",
py::arg("x"), py::arg("y"), py::arg("y_err"), py::arg("n_threads") = 4);
m.def(
"fit_pol1",
[](py::array_t<double, py::array::c_style | py::array::forcecast> x,
py::array_t<double, py::array::c_style | py::array::forcecast> y,
int n_threads) {
if (y.ndim() == 3) {
auto par = new NDArray<double, 3>{};
auto x_view = make_view_1d(x);
auto y_view = make_view_3d(y);
*par = aare::fit_pol1(x_view, y_view, n_threads);
return return_image_data(par);
} else if (y.ndim() == 1) {
auto par = new NDArray<double, 1>{};
auto x_view = make_view_1d(x);
auto y_view = make_view_1d(y);
*par = aare::fit_pol1(x_view, y_view);
return return_image_data(par);
} else {
throw std::runtime_error("Data must be 1D or 3D");
}
},
py::arg("x"), py::arg("y"), py::arg("n_threads") = 4);
m.def(
"fit_pol1",
[](py::array_t<double, py::array::c_style | py::array::forcecast> x,
py::array_t<double, py::array::c_style | py::array::forcecast> y,
py::array_t<double, py::array::c_style | py::array::forcecast> y_err,
int n_threads) {
if (y.ndim() == 3) {
auto par = new NDArray<double, 3>({y.shape(0), y.shape(1), 2});
auto par_err =
new NDArray<double, 3>({y.shape(0), y.shape(1), 2});
auto y_view = make_view_3d(y);
auto y_view_err = make_view_3d(y_err);
auto x_view = make_view_1d(x);
auto chi2 = new NDArray<double, 2>({y.shape(0), y.shape(1)});
aare::fit_pol1(x_view, y_view, y_view_err, par->view(),
par_err->view(), chi2->view(), n_threads);
return py::dict("par"_a = return_image_data(par),
"par_err"_a = return_image_data(par_err),
"chi2"_a = return_image_data(chi2),
"Ndf"_a = y.shape(2) - 2);
} else if (y.ndim() == 1) {
auto par = new NDArray<double, 1>({2});
auto par_err = new NDArray<double, 1>({2});
auto y_view = make_view_1d(y);
auto y_view_err = make_view_1d(y_err);
auto x_view = make_view_1d(x);
double chi2 = 0;
aare::fit_pol1(x_view, y_view, y_view_err, par->view(),
par_err->view(), chi2);
return py::dict("par"_a = return_image_data(par),
"par_err"_a = return_image_data(par_err),
"chi2"_a = chi2, "Ndf"_a = y.size() - 2);
} else {
throw std::runtime_error("Data must be 1D or 3D");
}
},
R"(
Fit a 1D polynomial to data with error estimates.
Parameters
----------
x : array_like
The x values.
y : array_like
The y values.
y_err : array_like
The error in the y values.
n_threads : int, optional
The number of threads to use. Default is 4.
)",
py::arg("x"), py::arg("y"), py::arg("y_err"), py::arg("n_threads") = 4);
}
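A sketch of the resulting Python API for the error-weighted Gaussian fit. The data is synthetic and the top-level import path is an assumption; with y_err given, fit_gaus returns a dict with "par", "par_err", "chi2" and "Ndf", without it only the fitted parameters:

```python
import numpy as np
from aare import fit_gaus, gaus  # assumed re-export of the bindings above

x = np.linspace(-5, 5, 101)
true_par = np.array([10.0, 0.5, 1.2])                 # amplitude, mean, sigma
y = gaus(x, true_par) + np.random.normal(0, 0.1, x.size)
y_err = np.full_like(y, 0.1)

res = fit_gaus(x, y, y_err)
print(res["par"], res["chi2"] / res["Ndf"])           # reduced chi2 should be ~1
```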


@ -1,12 +1,14 @@
//Files with bindings to the different classes
#include "file.hpp"
#include "raw_file.hpp"
#include "ctb_raw_file.hpp"
#include "raw_master_file.hpp"
#include "var_cluster.hpp"
#include "pixel_map.hpp"
#include "pedestal.hpp"
#include "cluster.hpp"
#include "cluster_file.hpp"
#include "fit.hpp"

//Pybind stuff
#include <pybind11/pybind11.h>

@ -17,11 +19,17 @@ namespace py = pybind11;

PYBIND11_MODULE(_aare, m) {
    define_file_io_bindings(m);
    define_raw_file_io_bindings(m);
    define_ctb_raw_file_io_bindings(m);
    define_raw_master_file_bindings(m);
    define_var_cluster_finder_bindings(m);
    define_pixel_map_bindings(m);
    define_pedestal_bindings<double>(m, "Pedestal_d");
    define_pedestal_bindings<float>(m, "Pedestal_f");
    define_cluster_finder_bindings(m);
    define_cluster_finder_mt_bindings(m);
    define_cluster_file_io_bindings(m);
    define_cluster_collector_bindings(m);
    define_cluster_file_sink_bindings(m);
    define_fit_bindings(m);
}


@ -39,65 +39,6 @@ template <typename T> py::array return_vector(std::vector<T> *vec) {
                          free_when_done); // numpy array references this parent
}
// template <typename Reader> py::array do_read(Reader &r, size_t n_frames) {
// py::array image;
// if (n_frames == 0)
// n_frames = r.total_frames();
// std::array<ssize_t, 3> shape{static_cast<ssize_t>(n_frames), r.rows(),
// r.cols()};
// const uint8_t item_size = r.bytes_per_pixel();
// if (item_size == 1) {
// image = py::array_t<uint8_t, py::array::c_style | py::array::forcecast>(
// shape);
// } else if (item_size == 2) {
// image =
// py::array_t<uint16_t, py::array::c_style | py::array::forcecast>(
// shape);
// } else if (item_size == 4) {
// image =
// py::array_t<uint32_t, py::array::c_style | py::array::forcecast>(
// shape);
// }
// r.read_into(reinterpret_cast<std::byte *>(image.mutable_data()), n_frames);
// return image;
// }
// py::array return_frame(pl::Frame *ptr) {
// py::capsule free_when_done(ptr, [](void *f) {
// pl::Frame *foo = reinterpret_cast<pl::Frame *>(f);
// delete foo;
// });
// const uint8_t item_size = ptr->bytes_per_pixel();
// std::vector<ssize_t> shape;
// for (auto val : ptr->shape())
// if (val > 1)
// shape.push_back(val);
// std::vector<ssize_t> strides;
// if (shape.size() == 1)
// strides.push_back(item_size);
// else if (shape.size() == 2) {
// strides.push_back(item_size * shape[1]);
// strides.push_back(item_size);
// }
// if (item_size == 1)
// return py::array_t<uint8_t>(
// shape, strides,
// reinterpret_cast<uint8_t *>(ptr->data()), free_when_done);
// else if (item_size == 2)
// return py::array_t<uint16_t>(shape, strides,
// reinterpret_cast<uint16_t *>(ptr->data()),
// free_when_done);
// else if (item_size == 4)
// return py::array_t<uint32_t>(shape, strides,
// reinterpret_cast<uint32_t *>(ptr->data()),
// free_when_done);
// return {};
// }
// todo rewrite generic
template <class T, int Flags> auto get_shape_3d(py::array_t<T, Flags> arr) {
    return aare::Shape<3>{arr.shape(0), arr.shape(1), arr.shape(2)};

@ -111,6 +52,13 @@ template <class T, int Flags> auto get_shape_2d(py::array_t<T, Flags> arr) {
    return aare::Shape<2>{arr.shape(0), arr.shape(1)};
}
template <class T, int Flags> auto get_shape_1d(py::array_t<T, Flags> arr) {
return aare::Shape<1>{arr.shape(0)};
}
template <class T, int Flags> auto make_view_2d(py::array_t<T, Flags> arr) {
    return aare::NDView<T, 2>(arr.mutable_data(), get_shape_2d<T, Flags>(arr));
}
template <class T, int Flags> auto make_view_1d(py::array_t<T, Flags> arr) {
    return aare::NDView<T, 1>(arr.mutable_data(), get_shape_1d<T, Flags>(arr));
}


@ -15,19 +15,19 @@ template <typename SUM_TYPE> void define_pedestal_bindings(py::module &m, const
        .def(py::init<int, int>())
        .def("mean",
             [](Pedestal<SUM_TYPE> &self) {
                 auto mea = new NDArray<SUM_TYPE, 2>{};
                 *mea = self.mean();
                 return return_image_data(mea);
             })
        .def("variance", [](Pedestal<SUM_TYPE> &self) {
            auto var = new NDArray<SUM_TYPE, 2>{};
            *var = self.variance();
            return return_image_data(var);
        })
        .def("std", [](Pedestal<SUM_TYPE> &self) {
            auto std = new NDArray<SUM_TYPE, 2>{};
            *std = self.std();
            return return_image_data(std);
        })
        .def("clear", py::overload_cast<>(&Pedestal<SUM_TYPE>::clear))
        .def_property_readonly("rows", &Pedestal<SUM_TYPE>::rows)

@ -43,5 +43,10 @@ template <typename SUM_TYPE> void define_pedestal_bindings(py::module &m, const
        .def("push", [](Pedestal<SUM_TYPE> &pedestal, py::array_t<uint16_t> &f) {
            auto v = make_view_2d(f);
            pedestal.push(v);
        })
        .def("push_no_update", [](Pedestal<SUM_TYPE> &pedestal, py::array_t<uint16_t, py::array::c_style> &f) {
            auto v = make_view_2d(f);
            pedestal.push_no_update(v);
        }, py::arg().noconvert())
        .def("update_mean", &Pedestal<SUM_TYPE>::update_mean);
}
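The new push_no_update/update_mean pair lets Python code accumulate many frames and recompute the statistics once at the end instead of per frame. A possible usage sketch; the class name follows the Pedestal_d renaming in this PR, while the import path and the synthetic frames are assumptions:

```python
import numpy as np
from aare import _aare  # assumed import path for the compiled module

ped = _aare.Pedestal_d(400, 400)
for _ in range(1000):
    frame = np.random.randint(0, 100, size=(400, 400)).astype(np.uint16)
    ped.push_no_update(frame)   # accumulate only
ped.update_mean()               # compute the mean once
print(ped.mean().shape)
```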


@ -20,6 +20,14 @@ void define_pixel_map_bindings(py::module &m) {
.def("GenerateMoench05PixelMap", []() { .def("GenerateMoench05PixelMap", []() {
auto ptr = new NDArray<ssize_t,2>(GenerateMoench05PixelMap()); auto ptr = new NDArray<ssize_t,2>(GenerateMoench05PixelMap());
return return_image_data(ptr); return return_image_data(ptr);
})
.def("GenerateMoench05PixelMap1g", []() {
auto ptr = new NDArray<ssize_t,2>(GenerateMoench05PixelMap1g());
return return_image_data(ptr);
})
.def("GenerateMoench05PixelMapOld", []() {
auto ptr = new NDArray<ssize_t,2>(GenerateMoench05PixelMapOld());
return return_image_data(ptr);
        })
        .def("GenerateMH02SingleCounterPixelMap", []() {
            auto ptr = new NDArray<ssize_t,2>(GenerateMH02SingleCounterPixelMap());


@ -25,7 +25,6 @@ void define_raw_file_io_bindings(py::module &m) {
        .def(py::init<const std::filesystem::path &>())
        .def("read_frame",
             [](RawFile &self) {
                 py::array image;
                 std::vector<ssize_t> shape;
                 shape.reserve(2);

@ -49,32 +48,44 @@ void define_raw_file_io_bindings(py::module &m) {
                 return py::make_tuple(header, image);
             })
        .def(
            "read_n",
            [](RawFile &self, size_t n_frames) {
                // adjust for actual frames left in the file
                n_frames =
                    std::min(n_frames, self.total_frames() - self.tell());
                if (n_frames == 0) {
                    throw std::runtime_error("No frames left in file");
                }
                std::vector<size_t> shape{n_frames, self.rows(), self.cols()};

                // return headers from all subfiles
                py::array_t<DetectorHeader> header;
                if (self.n_mod() == 1) {
                    header = py::array_t<DetectorHeader>(n_frames);
                } else {
                    header = py::array_t<DetectorHeader>({self.n_mod(), n_frames});
                }
                // py::array_t<DetectorHeader> header({self.n_mod(), n_frames});

                py::array image;
                const uint8_t item_size = self.bytes_per_pixel();
                if (item_size == 1) {
                    image = py::array_t<uint8_t>(shape);
                } else if (item_size == 2) {
                    image = py::array_t<uint16_t>(shape);
                } else if (item_size == 4) {
                    image = py::array_t<uint32_t>(shape);
                }
                self.read_into(
                    reinterpret_cast<std::byte *>(image.mutable_data()),
                    n_frames, header.mutable_data());

                return py::make_tuple(header, image);
            },
            R"(
            Read n frames from the file.
            )")
        .def("frame_number", &RawFile::frame_number)
        .def_property_readonly("bytes_per_frame", &RawFile::bytes_per_frame)
        .def_property_readonly("pixels_per_frame", &RawFile::pixels_per_frame)


@ -1,23 +1,74 @@
#include "aare/ClusterFile.hpp" #include "aare/ClusterFile.hpp"
#include <algorithm>
namespace aare { namespace aare {
ClusterFile::ClusterFile(const std::filesystem::path &fname, size_t chunk_size): m_chunk_size(chunk_size) { ClusterFile::ClusterFile(const std::filesystem::path &fname, size_t chunk_size,
fp = fopen(fname.c_str(), "rb"); const std::string &mode)
if (!fp) { : m_chunk_size(chunk_size), m_mode(mode) {
throw std::runtime_error("Could not open file: " + fname.string());
if (mode == "r") {
fp = fopen(fname.c_str(), "rb");
if (!fp) {
throw std::runtime_error("Could not open file for reading: " +
fname.string());
}
} else if (mode == "w") {
fp = fopen(fname.c_str(), "wb");
if (!fp) {
throw std::runtime_error("Could not open file for writing: " +
fname.string());
}
} else if (mode == "a") {
fp = fopen(fname.c_str(), "ab");
if (!fp) {
throw std::runtime_error("Could not open file for appending: " +
fname.string());
}
} else {
throw std::runtime_error("Unsupported mode: " + mode);
} }
} }
ClusterFile::~ClusterFile() { close(); }
void ClusterFile::close() {
if (fp) {
fclose(fp);
fp = nullptr;
}
}
void ClusterFile::write_frame(const ClusterVector<int32_t> &clusters) {
if (m_mode != "w" && m_mode != "a") {
throw std::runtime_error("File not opened for writing");
}
if (!(clusters.cluster_size_x() == 3) &&
!(clusters.cluster_size_y() == 3)) {
throw std::runtime_error("Only 3x3 clusters are supported");
}
int32_t frame_number = clusters.frame_number();
fwrite(&frame_number, sizeof(frame_number), 1, fp);
uint32_t n_clusters = clusters.size();
fwrite(&n_clusters, sizeof(n_clusters), 1, fp);
fwrite(clusters.data(), clusters.item_size(), clusters.size(), fp);
}
ClusterVector<int32_t> ClusterFile::read_clusters(size_t n_clusters) {
if (m_mode != "r") {
throw std::runtime_error("File not opened for reading");
}
ClusterVector<int32_t> clusters(3,3, n_clusters);
    int32_t iframe = 0; // frame number needs to be 4 bytes!
    size_t nph_read = 0;
    uint32_t nn = m_num_left;
    uint32_t nph = m_num_left; // number of clusters in frame needs to be 4

    // auto buf = reinterpret_cast<Cluster3x3 *>(clusters.data());
    auto buf = clusters.data();
    // if there are photons left from previous frame read them first
    if (nph) {
        if (nph > n_clusters) {

@ -27,7 +78,8 @@ std::vector<Cluster> ClusterFile::read_clusters(size_t n_clusters) {
        } else {
            nn = nph;
        }
        nph_read += fread((buf + nph_read*clusters.item_size()),
                          clusters.item_size(), nn, fp);
        m_num_left = nph - nn; // write back the number of photons left
    }
@ -41,8 +93,8 @@ std::vector<Cluster> ClusterFile::read_clusters(size_t n_clusters) {
            else
                nn = nph;

            nph_read += fread((buf + nph_read*clusters.item_size()),
                              clusters.item_size(), nn, fp);
            m_num_left = nph - nn;
        }
        if (nph_read >= n_clusters)

@ -56,161 +108,239 @@ std::vector<Cluster> ClusterFile::read_clusters(size_t n_clusters) {
    return clusters;
}
ClusterVector<int32_t> ClusterFile::read_frame() {
    if (m_mode != "r") {
        throw std::runtime_error("File not opened for reading");
    }
    if (m_num_left) {
        throw std::runtime_error(
            "There are still photons left in the last frame");
    }

    int32_t frame_number;
    if (fread(&frame_number, sizeof(frame_number), 1, fp) != 1) {
        throw std::runtime_error("Could not read frame number");
    }

    int32_t n_clusters; // Saved as 32bit integer in the cluster file
    if (fread(&n_clusters, sizeof(n_clusters), 1, fp) != 1) {
        throw std::runtime_error("Could not read number of clusters");
    }
    // std::vector<Cluster3x3> clusters(n_clusters);
    ClusterVector<int32_t> clusters(3, 3, n_clusters);
    clusters.set_frame_number(frame_number);

    if (fread(clusters.data(), clusters.item_size(), n_clusters, fp) !=
        static_cast<size_t>(n_clusters)) {
        throw std::runtime_error("Could not read clusters");
    }
    clusters.resize(n_clusters);
    return clusters;
}
std::vector<Cluster> ClusterFile::read_cluster_with_cut(size_t n_clusters,
double *noise_map,
int nx, int ny) {
std::vector<Cluster> clusters(n_clusters); // std::vector<Cluster3x3> ClusterFile::read_cluster_with_cut(size_t n_clusters,
// size_t read_clusters_with_cut(FILE *fp, size_t n_clusters, Cluster *buf, // double *noise_map,
// uint32_t *n_left, double *noise_map, int // int nx, int ny) {
// nx, int ny) { // if (m_mode != "r") {
int iframe = 0; // throw std::runtime_error("File not opened for reading");
// uint32_t nph = *n_left; // }
uint32_t nph = m_num_left; // std::vector<Cluster3x3> clusters(n_clusters);
// uint32_t nn = *n_left; // // size_t read_clusters_with_cut(FILE *fp, size_t n_clusters, Cluster *buf,
uint32_t nn = m_num_left; // // uint32_t *n_left, double *noise_map, int
size_t nph_read = 0; // // nx, int ny) {
// int iframe = 0;
// // uint32_t nph = *n_left;
// uint32_t nph = m_num_left;
// // uint32_t nn = *n_left;
// uint32_t nn = m_num_left;
// size_t nph_read = 0;
int32_t t2max, tot1; // int32_t t2max, tot1;
int32_t tot3; // int32_t tot3;
// Cluster *ptr = buf; // // Cluster *ptr = buf;
Cluster *ptr = clusters.data(); // Cluster3x3 *ptr = clusters.data();
int good = 1; // int good = 1;
double noise; // double noise;
// read photons left from previous frame // // read photons left from previous frame
if (noise_map) // if (noise_map)
printf("Using noise map\n"); // printf("Using noise map\n");
if (nph) { // if (nph) {
if (nph > n_clusters) { // if (nph > n_clusters) {
// if we have more photons left in the frame then photons to // // if we have more photons left in the frame then photons to
// read we read directly the requested number // // read we read directly the requested number
nn = n_clusters; // nn = n_clusters;
} else { // } else {
nn = nph; // nn = nph;
} // }
for (size_t iph = 0; iph < nn; iph++) { // for (size_t iph = 0; iph < nn; iph++) {
// read photons 1 by 1 // // read photons 1 by 1
size_t n_read = fread((void *)(ptr), sizeof(Cluster), 1, fp); // size_t n_read =
if (n_read != 1) { // fread(reinterpret_cast<void *>(ptr), sizeof(Cluster3x3), 1, fp);
clusters.resize(nph_read); // if (n_read != 1) {
return clusters; // clusters.resize(nph_read);
} // return clusters;
// TODO! error handling on read // }
good = 1; // // TODO! error handling on read
if (noise_map) { // good = 1;
if (ptr->x >= 0 && ptr->x < nx && ptr->y >= 0 && ptr->y < ny) { // if (noise_map) {
tot1 = ptr->data[4]; // if (ptr->x >= 0 && ptr->x < nx && ptr->y >= 0 && ptr->y < ny) {
analyze_cluster(*ptr, &t2max, &tot3, NULL, NULL, NULL, NULL, // tot1 = ptr->data[4];
NULL); // analyze_cluster(*ptr, &t2max, &tot3, NULL, NULL, NULL, NULL,
noise = noise_map[ptr->y * nx + ptr->x]; // NULL);
if (tot1 > noise || t2max > 2 * noise || tot3 > 3 * noise) { // noise = noise_map[ptr->y * nx + ptr->x];
; // if (tot1 > noise || t2max > 2 * noise || tot3 > 3 * noise) {
} else { // ;
good = 0; // } else {
printf("%d %d %f %d %d %d\n", ptr->x, ptr->y, noise, // good = 0;
tot1, t2max, tot3); // printf("%d %d %f %d %d %d\n", ptr->x, ptr->y, noise,
} // tot1, t2max, tot3);
} else { // }
printf("Bad pixel number %d %d\n", ptr->x, ptr->y); // } else {
good = 0; // printf("Bad pixel number %d %d\n", ptr->x, ptr->y);
} // good = 0;
} // }
if (good) { // }
ptr++; // if (good) {
nph_read++; // ptr++;
} // nph_read++;
(m_num_left)--; // }
if (nph_read >= n_clusters) // (m_num_left)--;
break; // if (nph_read >= n_clusters)
} // break;
// }
// }
// if (nph_read < n_clusters) {
// // // keep on reading frames and photons until reaching
// // n_clusters
// while (fread(&iframe, sizeof(iframe), 1, fp)) {
// // // printf("%d\n",nph_read);
// if (fread(&nph, sizeof(nph), 1, fp)) {
// // // printf("** %d\n",nph);
// m_num_left = nph;
// for (size_t iph = 0; iph < nph; iph++) {
// // // read photons 1 by 1
// size_t n_read = fread(reinterpret_cast<void *>(ptr),
// sizeof(Cluster3x3), 1, fp);
// if (n_read != 1) {
// clusters.resize(nph_read);
// return clusters;
// // return nph_read;
// }
// good = 1;
// if (noise_map) {
// if (ptr->x >= 0 && ptr->x < nx && ptr->y >= 0 &&
// ptr->y < ny) {
// tot1 = ptr->data[4];
// analyze_cluster(*ptr, &t2max, &tot3, NULL, NULL,
// NULL, NULL, NULL);
// // noise = noise_map[ptr->y * nx + ptr->x];
// noise = noise_map[ptr->y + ny * ptr->x];
// if (tot1 > noise || t2max > 2 * noise ||
// tot3 > 3 * noise) {
// ;
// } else
// good = 0;
// } else {
// printf("Bad pixel number %d %d\n", ptr->x, ptr->y);
// good = 0;
// }
// }
// if (good) {
// ptr++;
// nph_read++;
// }
// (m_num_left)--;
// if (nph_read >= n_clusters)
// break;
// }
// }
// if (nph_read >= n_clusters)
// break;
// }
// }
// // printf("%d\n",nph_read);
// clusters.resize(nph_read);
// return clusters;
// }
NDArray<double, 2> calculate_eta2(ClusterVector<int> &clusters) {
//TODO! make work with 2x2 clusters
NDArray<double, 2> eta2({static_cast<int64_t>(clusters.size()), 2});
for (size_t i = 0; i < clusters.size(); i++) {
auto e = calculate_eta2(clusters.at<Cluster3x3>(i));
eta2(i, 0) = e.x;
eta2(i, 1) = e.y;
} }
if (nph_read < n_clusters) { return eta2;
// // keep on reading frames and photons until reaching n_clusters
while (fread(&iframe, sizeof(iframe), 1, fp)) {
// // printf("%d\n",nph_read);
if (fread(&nph, sizeof(nph), 1, fp)) {
// // printf("** %d\n",nph);
m_num_left = nph;
for (size_t iph = 0; iph < nph; iph++) {
// // read photons 1 by 1
size_t n_read =
fread((void *)(ptr), sizeof(Cluster), 1, fp);
if (n_read != 1) {
clusters.resize(nph_read);
return clusters;
// return nph_read;
}
good = 1;
if (noise_map) {
if (ptr->x >= 0 && ptr->x < nx && ptr->y >= 0 &&
ptr->y < ny) {
tot1 = ptr->data[4];
analyze_cluster(*ptr, &t2max, &tot3, NULL,
NULL,
NULL, NULL, NULL);
// noise = noise_map[ptr->y * nx + ptr->x];
noise = noise_map[ptr->y + ny * ptr->x];
if (tot1 > noise || t2max > 2 * noise ||
tot3 > 3 * noise) {
;
} else
good = 0;
} else {
printf("Bad pixel number %d %d\n", ptr->x,
ptr->y); good = 0;
}
}
if (good) {
ptr++;
nph_read++;
}
(m_num_left)--;
if (nph_read >= n_clusters)
break;
}
}
if (nph_read >= n_clusters)
break;
}
}
// printf("%d\n",nph_read);
clusters.resize(nph_read);
return clusters;
} }
/**
 * @brief Calculate the eta2 values for a 3x3 cluster and return them in an Eta2 struct
 * containing etay, etax and the corner of the cluster.
*/
Eta2 calculate_eta2(Cluster3x3 &cl) {
Eta2 eta{};
std::array<int32_t, 4> tot2;
tot2[0] = cl.data[0] + cl.data[1] + cl.data[3] + cl.data[4];
tot2[1] = cl.data[1] + cl.data[2] + cl.data[4] + cl.data[5];
tot2[2] = cl.data[3] + cl.data[4] + cl.data[6] + cl.data[7];
tot2[3] = cl.data[4] + cl.data[5] + cl.data[7] + cl.data[8];
auto c = std::max_element(tot2.begin(), tot2.end()) - tot2.begin();
switch (c) {
case cBottomLeft:
if ((cl.data[3] + cl.data[4]) != 0)
eta.x =
static_cast<double>(cl.data[4]) / (cl.data[3] + cl.data[4]);
if ((cl.data[1] + cl.data[4]) != 0)
eta.y =
static_cast<double>(cl.data[4]) / (cl.data[1] + cl.data[4]);
eta.c = cBottomLeft;
break;
case cBottomRight:
if ((cl.data[2] + cl.data[5]) != 0)
eta.x =
static_cast<double>(cl.data[5]) / (cl.data[4] + cl.data[5]);
if ((cl.data[1] + cl.data[4]) != 0)
eta.y =
static_cast<double>(cl.data[4]) / (cl.data[1] + cl.data[4]);
eta.c = cBottomRight;
break;
case cTopLeft:
if ((cl.data[7] + cl.data[4]) != 0)
eta.x =
static_cast<double>(cl.data[4]) / (cl.data[3] + cl.data[4]);
if ((cl.data[7] + cl.data[4]) != 0)
eta.y =
static_cast<double>(cl.data[7]) / (cl.data[7] + cl.data[4]);
eta.c = cTopLeft;
break;
case cTopRight:
if ((cl.data[5] + cl.data[4]) != 0)
eta.x =
static_cast<double>(cl.data[5]) / (cl.data[5] + cl.data[4]);
if ((cl.data[7] + cl.data[4]) != 0)
eta.y =
static_cast<double>(cl.data[7]) / (cl.data[7] + cl.data[4]);
eta.c = cTopRight;
break;
// no default to allow compiler to warn about missing cases
}
return eta;
}
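Written out as formulas, each corner branch above takes the 2x2 sub-cluster with the largest sum and forms charge-sharing ratios from the centre pixel d_4 and its neighbours; for the bottom-left corner (the other corners use the neighbours on their side of the maximum):

```latex
\eta_x = \frac{d_4}{d_3 + d_4}, \qquad \eta_y = \frac{d_4}{d_1 + d_4}
```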
int analyze_cluster(Cluster3x3 &cl, int32_t *t2, int32_t *t3, char *quad,
                    double *eta2x, double *eta2y, double *eta3x,
                    double *eta3y) {
    return analyze_data(cl.data, t2, t3, quad, eta2x, eta2y, eta3x, eta3y);
}

int analyze_data(int32_t *data, int32_t *t2, int32_t *t3, char *quad,
                 double *eta2x, double *eta2y, double *eta3x, double *eta3y) {
    int ok = 1;
@ -252,12 +382,14 @@ int ClusterFile::analyze_data(int32_t *data, int32_t *t2, int32_t *t3, char *qua
                c = i;
            }
        }
        // printf("*** %d %d %d %d --
        // %d\n",tot2[0],tot2[1],tot2[2],tot2[3],t2max);
        if (quad)
            *quad = c;
        if (t2)
            *t2 = t2max;
    }

    if (t3)
        *t3 = tot3;
@ -269,27 +401,27 @@ int ClusterFile::analyze_data(int32_t *data, int32_t *t2, int32_t *t3, char *qua
        switch (c) {
        case cBottomLeft:
            if (eta2x && (data[3] + data[4]) != 0)
                *eta2x = static_cast<double>(data[4]) / (data[3] + data[4]);
            if (eta2y && (data[1] + data[4]) != 0)
                *eta2y = static_cast<double>(data[4]) / (data[1] + data[4]);
            break;
        case cBottomRight:
            if (eta2x && (data[2] + data[5]) != 0)
                *eta2x = static_cast<double>(data[5]) / (data[4] + data[5]);
            if (eta2y && (data[1] + data[4]) != 0)
                *eta2y = static_cast<double>(data[4]) / (data[1] + data[4]);
            break;
        case cTopLeft:
            if (eta2x && (data[7] + data[4]) != 0)
                *eta2x = static_cast<double>(data[4]) / (data[3] + data[4]);
            if (eta2y && (data[7] + data[4]) != 0)
                *eta2y = static_cast<double>(data[7]) / (data[7] + data[4]);
            break;
        case cTopRight:
            if (eta2x && t2max != 0)
                *eta2x = static_cast<double>(data[5]) / (data[5] + data[4]);
            if (eta2y && t2max != 0)
                *eta2y = static_cast<double>(data[7]) / (data[7] + data[4]);
            break;
        default:;
        }
@ -297,16 +429,14 @@ int ClusterFile::analyze_data(int32_t *data, int32_t *t2, int32_t *t3, char *qua
    if (eta3x || eta3y) {
        if (eta3x && (data[3] + data[4] + data[5]) != 0)
            *eta3x = static_cast<double>(-data[3] + data[3 + 2]) /
                     (data[3] + data[4] + data[5]);
        if (eta3y && (data[1] + data[4] + data[7]) != 0)
            *eta3y = static_cast<double>(-data[1] + data[2 * 3 + 1]) /
                     (data[1] + data[4] + data[7]);
    }

    return ok;
}

} // namespace aare

src/ClusterVector.test.cpp (new file, 198 lines)

@ -0,0 +1,198 @@
#include <cstdint>
#include "aare/ClusterVector.hpp"
#include <catch2/matchers/catch_matchers_floating_point.hpp>
#include <catch2/catch_test_macros.hpp>
using aare::ClusterVector;
struct Cluster_i2x2 {
int16_t x;
int16_t y;
int32_t data[4];
};
TEST_CASE("ClusterVector 2x2 int32_t capacity 4, push back then read") {
ClusterVector<int32_t> cv(2, 2, 4);
REQUIRE(cv.capacity() == 4);
REQUIRE(cv.size() == 0);
REQUIRE(cv.cluster_size_x() == 2);
REQUIRE(cv.cluster_size_y() == 2);
// int16_t, int16_t, 2x2 int32_t = 20 bytes
REQUIRE(cv.item_size() == 20);
//Create a cluster and push back into the vector
Cluster_i2x2 c1 = {1, 2, {3, 4, 5, 6}};
cv.push_back(c1.x, c1.y, reinterpret_cast<std::byte*>(&c1.data[0]));
REQUIRE(cv.size() == 1);
REQUIRE(cv.capacity() == 4);
//Read the cluster back out using copy. TODO! Can we improve the API?
Cluster_i2x2 c2;
std::byte *ptr = cv.element_ptr(0);
std::copy(ptr, ptr + cv.item_size(), reinterpret_cast<std::byte*>(&c2));
//Check that the data is the same
REQUIRE(c1.x == c2.x);
REQUIRE(c1.y == c2.y);
for(size_t i = 0; i < 4; i++) {
REQUIRE(c1.data[i] == c2.data[i]);
}
}
TEST_CASE("Summing 3x1 clusters of int64"){
struct Cluster_l3x1{
int16_t x;
int16_t y;
int32_t data[3];
};
ClusterVector<int32_t> cv(3, 1, 2);
REQUIRE(cv.capacity() == 2);
REQUIRE(cv.size() == 0);
REQUIRE(cv.cluster_size_x() == 3);
REQUIRE(cv.cluster_size_y() == 1);
//Create a cluster and push back into the vector
Cluster_l3x1 c1 = {1, 2, {3, 4, 5}};
cv.push_back(c1.x, c1.y, reinterpret_cast<std::byte*>(&c1.data[0]));
REQUIRE(cv.capacity() == 2);
REQUIRE(cv.size() == 1);
Cluster_l3x1 c2 = {6, 7, {8, 9, 10}};
cv.push_back(c2.x, c2.y, reinterpret_cast<std::byte*>(&c2.data[0]));
REQUIRE(cv.capacity() == 2);
REQUIRE(cv.size() == 2);
Cluster_l3x1 c3 = {11, 12, {13, 14, 15}};
cv.push_back(c3.x, c3.y, reinterpret_cast<std::byte*>(&c3.data[0]));
REQUIRE(cv.capacity() == 4);
REQUIRE(cv.size() == 3);
auto sums = cv.sum();
REQUIRE(sums.size() == 3);
REQUIRE(sums[0] == 12);
REQUIRE(sums[1] == 27);
REQUIRE(sums[2] == 42);
}
TEST_CASE("Storing floats"){
struct Cluster_f4x2{
int16_t x;
int16_t y;
float data[8];
};
ClusterVector<float> cv(2, 4, 10);
REQUIRE(cv.capacity() == 10);
REQUIRE(cv.size() == 0);
REQUIRE(cv.cluster_size_x() == 2);
REQUIRE(cv.cluster_size_y() == 4);
//Create a cluster and push back into the vector
Cluster_f4x2 c1 = {1, 2, {3.0, 4.0, 5.0, 6.0,3.0, 4.0, 5.0, 6.0}};
cv.push_back(c1.x, c1.y, reinterpret_cast<std::byte*>(&c1.data[0]));
REQUIRE(cv.capacity() == 10);
REQUIRE(cv.size() == 1);
Cluster_f4x2 c2 = {6, 7, {8.0, 9.0, 10.0, 11.0,8.0, 9.0, 10.0, 11.0}};
cv.push_back(c2.x, c2.y, reinterpret_cast<std::byte*>(&c2.data[0]));
REQUIRE(cv.capacity() == 10);
REQUIRE(cv.size() == 2);
auto sums = cv.sum();
REQUIRE(sums.size() == 2);
REQUIRE_THAT(sums[0], Catch::Matchers::WithinAbs(36.0, 1e-6));
REQUIRE_THAT(sums[1], Catch::Matchers::WithinAbs(76.0, 1e-6));
}
TEST_CASE("Push back more than initial capacity"){
ClusterVector<int32_t> cv(2, 2, 2);
auto initial_data = cv.data();
Cluster_i2x2 c1 = {1, 2, {3, 4, 5, 6}};
cv.push_back(c1.x, c1.y, reinterpret_cast<std::byte*>(&c1.data[0]));
REQUIRE(cv.size() == 1);
REQUIRE(cv.capacity() == 2);
Cluster_i2x2 c2 = {6, 7, {8, 9, 10, 11}};
cv.push_back(c2.x, c2.y, reinterpret_cast<std::byte*>(&c2.data[0]));
REQUIRE(cv.size() == 2);
REQUIRE(cv.capacity() == 2);
Cluster_i2x2 c3 = {11, 12, {13, 14, 15, 16}};
cv.push_back(c3.x, c3.y, reinterpret_cast<std::byte*>(&c3.data[0]));
REQUIRE(cv.size() == 3);
REQUIRE(cv.capacity() == 4);
Cluster_i2x2* ptr = reinterpret_cast<Cluster_i2x2*>(cv.data());
REQUIRE(ptr[0].x == 1);
REQUIRE(ptr[0].y == 2);
REQUIRE(ptr[1].x == 6);
REQUIRE(ptr[1].y == 7);
REQUIRE(ptr[2].x == 11);
REQUIRE(ptr[2].y == 12);
//We should have allocated a new buffer, since we outgrew the initial capacity
REQUIRE(initial_data != cv.data());
}
TEST_CASE("Concatenate two cluster vectors where the first has enough capacity"){
ClusterVector<int32_t> cv1(2, 2, 12);
Cluster_i2x2 c1 = {1, 2, {3, 4, 5, 6}};
cv1.push_back(c1.x, c1.y, reinterpret_cast<std::byte*>(&c1.data[0]));
Cluster_i2x2 c2 = {6, 7, {8, 9, 10, 11}};
cv1.push_back(c2.x, c2.y, reinterpret_cast<std::byte*>(&c2.data[0]));
ClusterVector<int32_t> cv2(2, 2, 2);
Cluster_i2x2 c3 = {11, 12, {13, 14, 15, 16}};
cv2.push_back(c3.x, c3.y, reinterpret_cast<std::byte*>(&c3.data[0]));
Cluster_i2x2 c4 = {16, 17, {18, 19, 20, 21}};
cv2.push_back(c4.x, c4.y, reinterpret_cast<std::byte*>(&c4.data[0]));
cv1 += cv2;
REQUIRE(cv1.size() == 4);
REQUIRE(cv1.capacity() == 12);
Cluster_i2x2* ptr = reinterpret_cast<Cluster_i2x2*>(cv1.data());
REQUIRE(ptr[0].x == 1);
REQUIRE(ptr[0].y == 2);
REQUIRE(ptr[1].x == 6);
REQUIRE(ptr[1].y == 7);
REQUIRE(ptr[2].x == 11);
REQUIRE(ptr[2].y == 12);
REQUIRE(ptr[3].x == 16);
REQUIRE(ptr[3].y == 17);
}
TEST_CASE("Concatenate two cluster vectors where we need to allocate"){
ClusterVector<int32_t> cv1(2, 2, 2);
Cluster_i2x2 c1 = {1, 2, {3, 4, 5, 6}};
cv1.push_back(c1.x, c1.y, reinterpret_cast<std::byte*>(&c1.data[0]));
Cluster_i2x2 c2 = {6, 7, {8, 9, 10, 11}};
cv1.push_back(c2.x, c2.y, reinterpret_cast<std::byte*>(&c2.data[0]));
ClusterVector<int32_t> cv2(2, 2, 2);
Cluster_i2x2 c3 = {11, 12, {13, 14, 15, 16}};
cv2.push_back(c3.x, c3.y, reinterpret_cast<std::byte*>(&c3.data[0]));
Cluster_i2x2 c4 = {16, 17, {18, 19, 20, 21}};
cv2.push_back(c4.x, c4.y, reinterpret_cast<std::byte*>(&c4.data[0]));
cv1 += cv2;
REQUIRE(cv1.size() == 4);
REQUIRE(cv1.capacity() == 4);
Cluster_i2x2* ptr = reinterpret_cast<Cluster_i2x2*>(cv1.data());
REQUIRE(ptr[0].x == 1);
REQUIRE(ptr[0].y == 2);
REQUIRE(ptr[1].x == 6);
REQUIRE(ptr[1].y == 7);
REQUIRE(ptr[2].x == 11);
REQUIRE(ptr[2].y == 12);
REQUIRE(ptr[3].x == 16);
REQUIRE(ptr[3].y == 17);
}


@ -16,7 +16,7 @@ CtbRawFile::CtbRawFile(const std::filesystem::path &fname) : m_master(fname) {
void CtbRawFile::read_into(std::byte *image_buf, DetectorHeader* header) {
    if(m_current_frame >= m_master.frames_in_file()){
        throw std::runtime_error(LOCATION + " End of file reached");
    }

    if(m_current_frame != 0 && m_current_frame % m_master.max_frames_per_file() == 0){


@ -45,6 +45,8 @@ File& File::operator=(File &&other) noexcept {
    return *this;
}

// void File::close() { file_impl->close(); }

Frame File::read_frame() { return file_impl->read_frame(); }
Frame File::read_frame(size_t frame_index) {
    return file_impl->read_frame(frame_index);

@ -58,6 +60,8 @@ void File::read_into(std::byte *image_buf) { file_impl->read_into(image_buf); }
void File::read_into(std::byte *image_buf, size_t n_frames) {
    file_impl->read_into(image_buf, n_frames);
}

size_t File::frame_number() { return file_impl->frame_number(tell()); }
size_t File::frame_number(size_t frame_index) {
    return file_impl->frame_number(frame_index);
}

src/Fit.cpp (new file, 276 lines)

@ -0,0 +1,276 @@
#include "aare/Fit.hpp"
#include "aare/utils/task.hpp"
#include "aare/utils/par.hpp"
#include <lmcurve2.h>
#include <lmfit.hpp>
#include <thread>
#include <array>
namespace aare {
namespace func {
double gaus(const double x, const double *par) {
return par[0] * exp(-pow(x - par[1], 2) / (2 * pow(par[2], 2)));
}
NDArray<double, 1> gaus(NDView<double, 1> x, NDView<double, 1> par) {
NDArray<double, 1> y({x.shape(0)}, 0);
for (size_t i = 0; i < x.size(); i++) {
y(i) = gaus(x(i), par.data());
}
return y;
}
double pol1(const double x, const double *par) { return par[0] * x + par[1]; }
NDArray<double, 1> pol1(NDView<double, 1> x, NDView<double, 1> par) {
NDArray<double, 1> y({x.shape()}, 0);
for (size_t i = 0; i < x.size(); i++) {
y(i) = pol1(x(i), par.data());
}
return y;
}
} // namespace func
NDArray<double, 1> fit_gaus(NDView<double, 1> x, NDView<double, 1> y) {
NDArray<double, 1> result = gaus_init_par(x, y);
lm_status_struct status;
lmcurve(result.size(), result.data(), x.size(), x.data(), y.data(),
aare::func::gaus, &lm_control_double, &status);
return result;
}
NDArray<double, 3> fit_gaus(NDView<double, 1> x, NDView<double, 3> y,
int n_threads) {
NDArray<double, 3> result({y.shape(0), y.shape(1), 3}, 0);
auto process = [&x, &y, &result](ssize_t first_row, ssize_t last_row) {
for (ssize_t row = first_row; row < last_row; row++) {
for (ssize_t col = 0; col < y.shape(1); col++) {
NDView<double, 1> values(&y(row, col, 0), {y.shape(2)});
auto res = fit_gaus(x, values);
result(row, col, 0) = res(0);
result(row, col, 1) = res(1);
result(row, col, 2) = res(2);
}
}
};
auto tasks = split_task(0, y.shape(0), n_threads);
RunInParallel(process, tasks);
return result;
}
std::array<double, 3> gaus_init_par(const NDView<double, 1> x, const NDView<double, 1> y) {
std::array<double, 3> start_par{0, 0, 0};
auto e = std::max_element(y.begin(), y.end());
auto idx = std::distance(y.begin(), e);
start_par[0] = *e; // For amplitude we use the maximum value
start_par[1] =
x[idx]; // For the mean we use the x value of the maximum value
// For sigma we estimate the fwhm and divide by 2.35
// assuming equally spaced x values
auto delta = x[1] - x[0];
start_par[2] =
std::count_if(y.begin(), y.end(),
[e, delta](double val) { return val > *e / 2; }) *
delta / 2.35;
return start_par;
}
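The divisor 2.35 is the Gaussian FWHM-to-sigma conversion: counting the bins above half of the peak value and multiplying by the bin spacing estimates the FWHM, from which

```latex
\mathrm{FWHM} = 2\sqrt{2\ln 2}\,\sigma \approx 2.355\,\sigma
\quad\Longrightarrow\quad
\sigma \approx \frac{N_{y > y_{\max}/2}\,\Delta x}{2.35}
```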
std::array<double, 2> pol1_init_par(const NDView<double, 1> x, const NDView<double, 1> y){
// Estimate the initial parameters for the fit
std::array<double, 2> start_par{0, 0};
auto y2 = std::max_element(y.begin(), y.end());
auto x2 = x[std::distance(y.begin(), y2)];
auto y1 = std::min_element(y.begin(), y.end());
auto x1 = x[std::distance(y.begin(), y1)];
    start_par[0] =
        (*y2 - *y1) / (x2 - x1); // Slope estimated from the min and max data points
    start_par[1] =
        *y1 - ((*y2 - *y1) / (x2 - x1)) *
                  x1; // Intercept from extrapolating that line back to x = 0
return start_par;
}
void fit_gaus(NDView<double, 1> x, NDView<double, 1> y, NDView<double, 1> y_err,
NDView<double, 1> par_out, NDView<double, 1> par_err_out,
double &chi2) {
// Check that we have the correct sizes
if (y.size() != x.size() || y.size() != y_err.size() ||
par_out.size() != 3 || par_err_out.size() != 3) {
throw std::runtime_error("Data, x, data_err must have the same size "
"and par_out, par_err_out must have size 3");
}
// /* Collection of output parameters for status info. */
// typedef struct {
// double fnorm; /* norm of the residue vector fvec. */
// int nfev; /* actual number of iterations. */
// int outcome; /* Status indicator. Nonnegative values are used as
// index
// for the message text lm_infmsg, set in lmmin.c. */
// int userbreak; /* Set when function evaluation requests termination.
// */
// } lm_status_struct;
lm_status_struct status;
par_out = gaus_init_par(x, y);
std::array<double, 9> cov{0, 0, 0, 0, 0, 0, 0 , 0 , 0};
// void lmcurve2( const int n_par, double *par, double *parerr, double *covar, const int m_dat, const double *t, const double *y, const double *dy, double (*f)( const double ti, const double *par ), const lm_control_struct *control, lm_status_struct *status);
// n_par - Number of free variables. Length of parameter vector par.
// par - Parameter vector. On input, it must contain a reasonable guess. On output, it contains the solution found to minimize ||r||.
// parerr - Parameter uncertainties vector. Array of length n_par or NULL. On output, unless it or covar is NULL, it contains the weighted parameter uncertainties for the found parameters.
// covar - Covariance matrix. Array of length n_par * n_par or NULL. On output, unless it is NULL, it contains the covariance matrix.
    // m_dat - Number of data points. Length of vectors t, y, dy. Must satisfy n_par <= m_dat.
    // t - Array of length m_dat. Contains the abscissae (time, or "x") for which function f will be evaluated.
// y - Array of length m_dat. Contains the ordinate values that shall be fitted.
// dy - Array of length m_dat. Contains the standard deviations of the values y.
// f - A user-supplied parametric function f(ti;par).
// control - Parameter collection for tuning the fit procedure. In most cases, the default &lm_control_double is adequate. If f is only computed with single-precision accuracy, &lm_control_float should be used. Parameters are explained in lmmin2(3).
// status - A record used to return information about the minimization process: For details, see lmmin2(3).
lmcurve2(par_out.size(), par_out.data(), par_err_out.data(), cov.data(),
x.size(), x.data(), y.data(), y_err.data(), aare::func::gaus,
&lm_control_double, &status);
// Calculate chi2
chi2 = 0;
for (size_t i = 0; i < y.size(); i++) {
chi2 += std::pow((y(i) - func::gaus(x(i), par_out.data())) / y_err(i), 2);
}
}
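The loop above is the standard error-weighted residual sum; the Ndf reported by the Python bindings is the number of data points minus the three Gaussian parameters:

```latex
\chi^2 = \sum_i \left(\frac{y_i - f(x_i;\,\hat p)}{\sigma_i}\right)^2,
\qquad N_\mathrm{df} = N_\mathrm{points} - N_\mathrm{par}
```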
void fit_gaus(NDView<double, 1> x, NDView<double, 3> y, NDView<double, 3> y_err,
NDView<double, 3> par_out, NDView<double, 3> par_err_out, NDView<double, 2> chi2_out,
int n_threads) {
auto process = [&](ssize_t first_row, ssize_t last_row) {
for (ssize_t row = first_row; row < last_row; row++) {
for (ssize_t col = 0; col < y.shape(1); col++) {
NDView<double, 1> y_view(&y(row, col, 0), {y.shape(2)});
NDView<double, 1> y_err_view(&y_err(row, col, 0),
{y_err.shape(2)});
NDView<double, 1> par_out_view(&par_out(row, col, 0),
{par_out.shape(2)});
NDView<double, 1> par_err_out_view(&par_err_out(row, col, 0),
{par_err_out.shape(2)});
fit_gaus(x, y_view, y_err_view, par_out_view, par_err_out_view,
chi2_out(row, col));
}
}
};
auto tasks = split_task(0, y.shape(0), n_threads);
RunInParallel(process, tasks);
}
void fit_pol1(NDView<double, 1> x, NDView<double, 1> y, NDView<double, 1> y_err,
NDView<double, 1> par_out, NDView<double, 1> par_err_out, double& chi2) {
// Check that we have the correct sizes
if (y.size() != x.size() || y.size() != y_err.size() ||
par_out.size() != 2 || par_err_out.size() != 2) {
throw std::runtime_error("Data, x, data_err must have the same size "
"and par_out, par_err_out must have size 2");
}
lm_status_struct status;
par_out = pol1_init_par(x, y);
std::array<double, 4> cov{0, 0, 0, 0};
lmcurve2(par_out.size(), par_out.data(), par_err_out.data(), cov.data(),
x.size(), x.data(), y.data(), y_err.data(), aare::func::pol1,
&lm_control_double, &status);
// Calculate chi2
chi2 = 0;
for (size_t i = 0; i < y.size(); i++) {
chi2 += std::pow((y(i) - func::pol1(x(i), par_out.data())) / y_err(i), 2);
}
}
void fit_pol1(NDView<double, 1> x, NDView<double, 3> y, NDView<double, 3> y_err,
NDView<double, 3> par_out, NDView<double, 3> par_err_out, NDView<double, 2> chi2_out,
int n_threads) {
auto process = [&](ssize_t first_row, ssize_t last_row) {
for (ssize_t row = first_row; row < last_row; row++) {
for (ssize_t col = 0; col < y.shape(1); col++) {
NDView<double, 1> y_view(&y(row, col, 0), {y.shape(2)});
NDView<double, 1> y_err_view(&y_err(row, col, 0),
{y_err.shape(2)});
NDView<double, 1> par_out_view(&par_out(row, col, 0),
{par_out.shape(2)});
NDView<double, 1> par_err_out_view(&par_err_out(row, col, 0),
{par_err_out.shape(2)});
fit_pol1(x, y_view, y_err_view, par_out_view, par_err_out_view, chi2_out(row, col));
}
}
};
auto tasks = split_task(0, y.shape(0), n_threads);
RunInParallel(process, tasks);
}
NDArray<double, 1> fit_pol1(NDView<double, 1> x, NDView<double, 1> y) {
// // Check that we have the correct sizes
// if (y.size() != x.size() || y.size() != y_err.size() ||
// par_out.size() != 2 || par_err_out.size() != 2) {
// throw std::runtime_error("Data, x, data_err must have the same size "
// "and par_out, par_err_out must have size 2");
// }
NDArray<double, 1> par = pol1_init_par(x, y);
lm_status_struct status;
lmcurve(par.size(), par.data(), x.size(), x.data(), y.data(),
aare::func::pol1, &lm_control_double, &status);
return par;
}
NDArray<double, 3> fit_pol1(NDView<double, 1> x, NDView<double, 3> y,
int n_threads) {
NDArray<double, 3> result({y.shape(0), y.shape(1), 2}, 0);
auto process = [&](ssize_t first_row, ssize_t last_row) {
for (ssize_t row = first_row; row < last_row; row++) {
for (ssize_t col = 0; col < y.shape(1); col++) {
NDView<double, 1> values(&y(row, col, 0), {y.shape(2)});
auto res = fit_pol1(x, values);
result(row, col, 0) = res(0);
result(row, col, 1) = res(1);
}
}
};
auto tasks = split_task(0, y.shape(0), n_threads);
RunInParallel(process, tasks);
return result;
}
} // namespace aare


@ -19,7 +19,7 @@ TEST_CASE("Construct a frame") {
    // data should be initialized to 0
    for (size_t i = 0; i < rows; i++) {
        for (size_t j = 0; j < cols; j++) {
            uint8_t *data = reinterpret_cast<uint8_t *>(frame.pixel_ptr(i, j));
            REQUIRE(data != nullptr);
            REQUIRE(*data == 0);
        }

@ -40,7 +40,7 @@ TEST_CASE("Set a value in a 8 bit frame") {
    // only the value we did set should be non-zero
    for (size_t i = 0; i < rows; i++) {
        for (size_t j = 0; j < cols; j++) {
            uint8_t *data = reinterpret_cast<uint8_t *>(frame.pixel_ptr(i, j));
            REQUIRE(data != nullptr);
            if (i == 5 && j == 7) {
                REQUIRE(*data == value);

@ -65,7 +65,7 @@ TEST_CASE("Set a value in a 64 bit frame") {
    // only the value we did set should be non-zero
    for (size_t i = 0; i < rows; i++) {
        for (size_t j = 0; j < cols; j++) {
            uint64_t *data = reinterpret_cast<uint64_t *>(frame.pixel_ptr(i, j));
            REQUIRE(data != nullptr);
            if (i == 5 && j == 7) {
                REQUIRE(*data == value);

@ -149,4 +149,5 @@ TEST_CASE("test explicit copy constructor") {
    REQUIRE(frame2.bitdepth() == bitdepth);
    REQUIRE(frame2.bytes() == rows * cols * bitdepth / 8);
    REQUIRE(frame2.data() != data);
}

View File

@ -379,4 +379,32 @@ TEST_CASE("Elementwise operations on images") {
REQUIRE(A(i) == a_val); REQUIRE(A(i) == a_val);
} }
} }
}
TEST_CASE("Assign an std::array to a 1D NDArray") {
NDArray<int, 1> a{{5}, 0};
std::array<int, 5> b{1, 2, 3, 4, 5};
a = b;
for (uint32_t i = 0; i < a.size(); ++i) {
REQUIRE(a(i) == b[i]);
}
}
TEST_CASE("Assign an std::array to a 1D NDArray of a different size") {
NDArray<int, 1> a{{3}, 0};
std::array<int, 5> b{1, 2, 3, 4, 5};
a = b;
REQUIRE(a.size() == 5);
for (uint32_t i = 0; i < a.size(); ++i) {
REQUIRE(a(i) == b[i]);
}
}
TEST_CASE("Construct an NDArray from an std::array") {
std::array<int, 5> b{1, 2, 3, 4, 5};
NDArray<int, 1> a(b);
for (uint32_t i = 0; i < a.size(); ++i) {
REQUIRE(a(i) == b[i]);
}
} }

View File

@ -31,6 +31,49 @@ NDArray<ssize_t, 2> GenerateMoench03PixelMap() {
} }
NDArray<ssize_t, 2> GenerateMoench05PixelMap() { NDArray<ssize_t, 2> GenerateMoench05PixelMap() {
std::array<int, 3> adc_numbers = {5, 9, 1};
NDArray<ssize_t, 2> order_map({160, 150});
int n_pixel = 0;
for (int row = 0; row < 160; row++) {
for (int i_col = 0; i_col < 50; i_col++) {
n_pixel = row * 50 + i_col;
for (int i_sc = 0; i_sc < 3; i_sc++) {
int col = 50 * i_sc + i_col;
int adc_nr = adc_numbers[i_sc];
int i_analog = n_pixel * 12 + adc_nr;
// analog_frame[row * 150 + col] = analog_data[i_analog] & 0x3FFF;
order_map(row, col) = i_analog;
}
}
}
return order_map;
}
NDArray<ssize_t, 2> GenerateMoench05PixelMap1g() {
std::array<int, 3> adc_numbers = {1, 2, 0};
NDArray<ssize_t, 2> order_map({160, 150});
int n_pixel = 0;
for (int row = 0; row < 160; row++) {
for (int i_col = 0; i_col < 50; i_col++) {
n_pixel = row * 50 + i_col;
for (int i_sc = 0; i_sc < 3; i_sc++) {
int col = 50 * i_sc + i_col;
int adc_nr = adc_numbers[i_sc];
int i_analog = n_pixel * 3 + adc_nr;
// analog_frame[row * 150 + col] = analog_data[i_analog] & 0x3FFF;
order_map(row, col) = i_analog;
}
}
}
return order_map;
}
NDArray<ssize_t, 2> GenerateMoench05PixelMapOld() {
std::array<int, 3> adc_numbers = {9, 13, 1}; std::array<int, 3> adc_numbers = {9, 13, 1};
NDArray<ssize_t, 2> order_map({160, 150}); NDArray<ssize_t, 2> order_map({160, 150});
int n_pixel = 0; int n_pixel = 0;
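The order maps above simply record, for every destination pixel, the index of its sample in the raw ADC stream, so applying one is a plain gather. A minimal sketch of that step (buffer types and 16-bit samples are assumptions; RawSubFile::read_with_map later in this change does the same thing on its internal buffers):

```cpp
#include <cstdint>
#include <vector>

// raw: analog samples in arrival order
// map: flattened order map (e.g. the 160*150 entries from GenerateMoench05PixelMap)
// out: the reordered image, one sample per pixel
void apply_pixel_map(const std::vector<uint16_t> &raw,
                     const std::vector<int64_t> &map,
                     std::vector<uint16_t> &out) {
    out.resize(map.size());
    for (size_t i = 0; i < map.size(); ++i) {
        out[i] = raw[static_cast<size_t>(map[i])]; // gather the sample for pixel i
    }
}
```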

View File

@ -1,6 +1,7 @@
#include "aare/RawFile.hpp" #include "aare/RawFile.hpp"
#include "aare/PixelMap.hpp" #include "aare/PixelMap.hpp"
#include "aare/defs.hpp" #include "aare/defs.hpp"
#include "aare/geo_helpers.hpp"
#include <fmt/format.h> #include <fmt/format.h>
#include <nlohmann/json.hpp> #include <nlohmann/json.hpp>
@ -17,9 +18,15 @@ RawFile::RawFile(const std::filesystem::path &fname, const std::string &mode)
n_subfiles = find_number_of_subfiles(); // f0,f1...fn n_subfiles = find_number_of_subfiles(); // f0,f1...fn
n_subfile_parts = n_subfile_parts =
m_master.geometry().col * m_master.geometry().row; // d0,d1...dn m_master.geometry().col * m_master.geometry().row; // d0,d1...dn
find_geometry();
update_geometry_with_roi();
find_geometry();
if (m_master.roi()){
m_geometry = update_geometry_with_roi(m_geometry, m_master.roi().value());
}
open_subfiles(); open_subfiles();
} else { } else {
throw std::runtime_error(LOCATION + throw std::runtime_error(LOCATION +
@ -69,9 +76,13 @@ size_t RawFile::n_mod() const { return n_subfile_parts; }
size_t RawFile::bytes_per_frame() { size_t RawFile::bytes_per_frame() {
return m_rows * m_cols * m_master.bitdepth() / 8; // return m_rows * m_cols * m_master.bitdepth() / 8;
return m_geometry.pixels_x * m_geometry.pixels_y * m_master.bitdepth() / 8;
}
size_t RawFile::pixels_per_frame() {
// return m_rows * m_cols;
return m_geometry.pixels_x * m_geometry.pixels_y;
} }
size_t RawFile::pixels_per_frame() { return m_rows * m_cols; }
DetectorType RawFile::detector_type() const { return m_master.detector_type(); } DetectorType RawFile::detector_type() const { return m_master.detector_type(); }
@ -89,8 +100,8 @@ void RawFile::seek(size_t frame_index) {
size_t RawFile::tell() { return m_current_frame; }; size_t RawFile::tell() { return m_current_frame; };
size_t RawFile::total_frames() const { return m_master.frames_in_file(); } size_t RawFile::total_frames() const { return m_master.frames_in_file(); }
size_t RawFile::rows() const { return m_rows; } size_t RawFile::rows() const { return m_geometry.pixels_y; }
size_t RawFile::cols() const { return m_cols; } size_t RawFile::cols() const { return m_geometry.pixels_x; }
size_t RawFile::bitdepth() const { return m_master.bitdepth(); } size_t RawFile::bitdepth() const { return m_master.bitdepth(); }
xy RawFile::geometry() { return m_master.geometry(); } xy RawFile::geometry() { return m_master.geometry(); }
@ -99,14 +110,11 @@ void RawFile::open_subfiles() {
for (size_t i = 0; i != n_subfiles; ++i) { for (size_t i = 0; i != n_subfiles; ++i) {
auto v = std::vector<RawSubFile *>(n_subfile_parts); auto v = std::vector<RawSubFile *>(n_subfile_parts);
for (size_t j = 0; j != n_subfile_parts; ++j) { for (size_t j = 0; j != n_subfile_parts; ++j) {
fmt::print("{} pos: {},{}\n", j,positions[j].row, positions[j].col); auto pos = m_geometry.module_pixel_0[j];
auto pos = m_module_pixel_0[j];
fmt::print("{} pos: {},{}\n", j,pos.y, pos.x);
v[j] = new RawSubFile(m_master.data_fname(j, i), v[j] = new RawSubFile(m_master.data_fname(j, i),
m_master.detector_type(), pos.height, m_master.detector_type(), pos.height,
pos.width, m_master.bitdepth(), pos.width, m_master.bitdepth(),
positions[j].row, positions[j].col); pos.row_index, pos.col_index);
} }
subfiles.push_back(v); subfiles.push_back(v);
@ -149,112 +157,49 @@ int RawFile::find_number_of_subfiles() {
RawMasterFile RawFile::master() const { return m_master; } RawMasterFile RawFile::master() const { return m_master; }
/**
* @brief Find the geometry of the detector by opening all the subfiles and
* reading the headers.
*/
void RawFile::find_geometry() { void RawFile::find_geometry() {
//Hold the maximal row and column number found
//Later used for calculating the total number of rows and columns
uint16_t r{}; uint16_t r{};
uint16_t c{}; uint16_t c{};
for (size_t i = 0; i < n_subfile_parts; i++) { for (size_t i = 0; i < n_subfile_parts; i++) {
auto h = this->read_header(m_master.data_fname(i, 0)); auto h = read_header(m_master.data_fname(i, 0));
r = std::max(r, h.row); r = std::max(r, h.row);
c = std::max(c, h.column); c = std::max(c, h.column);
positions.push_back({h.row, h.column}); // positions.push_back({h.row, h.column});
ModuleGeometry g; ModuleGeometry g;
g.x = h.column * m_master.pixels_x(); g.origin_x = h.column * m_master.pixels_x();
g.y = h.row * m_master.pixels_y(); g.origin_y = h.row * m_master.pixels_y();
g.row_index = h.row;
g.col_index = h.column;
g.width = m_master.pixels_x(); g.width = m_master.pixels_x();
g.height = m_master.pixels_y(); g.height = m_master.pixels_y();
m_module_pixel_0.push_back(g); m_geometry.module_pixel_0.push_back(g);
} }
r++; r++;
c++; c++;
m_rows = (r * m_master.pixels_y()); m_geometry.pixels_y = (r * m_master.pixels_y());
m_cols = (c * m_master.pixels_x()); m_geometry.pixels_x = (c * m_master.pixels_x());
m_geometry.modules_x = c;
m_rows += static_cast<size_t>((r - 1) * cfg.module_gap_row); m_geometry.modules_y = r;
m_geometry.pixels_y += static_cast<size_t>((r - 1) * cfg.module_gap_row);
#ifdef AARE_VERBOSE
fmt::print("\nRawFile::find_geometry()\n");
for (size_t i = 0; i < m_module_pixel_0.size(); i++) {
fmt::print("Module {} at position: (r:{},c:{})\n", i,
m_module_pixel_0[i].y, m_module_pixel_0[i].x);
}
fmt::print("Image size: {}x{}\n\n", m_rows, m_cols);
#endif
}
void RawFile::update_geometry_with_roi() {
// TODO! implement this
if (m_master.roi()) {
auto roi = m_master.roi().value();
// TODO! can we do this cleaner?
int pos_y = 0;
int pos_y_increment = 0;
for (size_t row = 0; row < m_master.geometry().row; row++) {
int pos_x = 0;
for (size_t col = 0; col < m_master.geometry().col; col++) {
auto &m = m_module_pixel_0[row * m_master.geometry().col + col];
auto original_height = m.height;
auto original_width = m.width;
// module is to the left of the roi
if (m.x + m.width < roi.xmin) {
m.width = 0;
// roi is in module
} else {
// here we only arrive when the roi is in or to the left of
// the module
if (roi.xmin > m.x) {
m.width -= roi.xmin - m.x;
}
if (roi.xmax < m.x + m.width) {
m.width -= m.x + original_width - roi.xmax;
}
m.x = pos_x;
pos_x += m.width;
}
if (m.y + m.height < roi.ymin) {
m.height = 0;
} else {
if ((roi.ymin > m.y) && (roi.ymin < m.y + m.height)) {
m.height -= roi.ymin - m.y;
}
if (roi.ymax < m.y + m.height) {
m.height -= m.y + original_height - roi.ymax;
}
m.y = pos_y;
pos_y_increment = m.height;
}
}
// increment pos_y
pos_y += pos_y_increment;
}
m_rows = roi.height();
m_cols = roi.width();
}
#ifdef AARE_VERBOSE
fmt::print("RawFile::update_geometry_with_roi()\n");
for (const auto &m : m_module_pixel_0) {
fmt::print("Module at position: (r:{}, c:{}, h:{}, w:{})\n", m.y, m.x,
m.height, m.width);
}
fmt::print("Updated image size: {}x{}\n\n", m_rows, m_cols);
fmt::print("\n");
#endif
} }
Frame RawFile::get_frame(size_t frame_index) { Frame RawFile::get_frame(size_t frame_index) {
auto f = Frame(m_rows, m_cols, Dtype::from_bitdepth(m_master.bitdepth())); auto f = Frame(m_geometry.pixels_y, m_geometry.pixels_x, Dtype::from_bitdepth(m_master.bitdepth()));
std::byte *frame_buffer = f.data(); std::byte *frame_buffer = f.data();
get_frame_into(frame_index, frame_buffer); get_frame_into(frame_index, frame_buffer);
return f; return f;
@ -266,8 +211,7 @@ size_t RawFile::bytes_per_pixel() const {
} }
void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, DetectorHeader *header) { void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, DetectorHeader *header) {
if (frame_index >= total_frames()) {
if (frame_index > total_frames()) {
throw std::runtime_error(LOCATION + "Frame number out of range"); throw std::runtime_error(LOCATION + "Frame number out of range");
} }
std::vector<size_t> frame_numbers(n_subfile_parts); std::vector<size_t> frame_numbers(n_subfile_parts);
@ -279,6 +223,10 @@ void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, Detect
if (n_subfile_parts != 1) { if (n_subfile_parts != 1) {
for (size_t part_idx = 0; part_idx != n_subfile_parts; ++part_idx) { for (size_t part_idx = 0; part_idx != n_subfile_parts; ++part_idx) {
auto subfile_id = frame_index / m_master.max_frames_per_file(); auto subfile_id = frame_index / m_master.max_frames_per_file();
if (subfile_id >= subfiles.size()) {
throw std::runtime_error(LOCATION +
" Subfile out of range. Possible missing data.");
}
frame_numbers[part_idx] = frame_numbers[part_idx] =
subfiles[subfile_id][part_idx]->frame_number( subfiles[subfile_id][part_idx]->frame_number(
frame_index % m_master.max_frames_per_file()); frame_index % m_master.max_frames_per_file());
@ -312,14 +260,19 @@ void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, Detect
for (size_t part_idx = 0; part_idx != n_subfile_parts; ++part_idx) { for (size_t part_idx = 0; part_idx != n_subfile_parts; ++part_idx) {
auto corrected_idx = frame_indices[part_idx]; auto corrected_idx = frame_indices[part_idx];
auto subfile_id = corrected_idx / m_master.max_frames_per_file(); auto subfile_id = corrected_idx / m_master.max_frames_per_file();
if (subfile_id >= subfiles.size()) {
throw std::runtime_error(LOCATION +
" Subfile out of range. Possible missing data.");
}
// This is where we start writing // This is where we start writing
auto offset = (m_module_pixel_0[part_idx].y * m_cols + auto offset = (m_geometry.module_pixel_0[part_idx].origin_y * m_geometry.pixels_x +
m_module_pixel_0[part_idx].x)*m_master.bitdepth()/8; m_geometry.module_pixel_0[part_idx].origin_x)*m_master.bitdepth()/8;
if (m_module_pixel_0[part_idx].x!=0) if (m_geometry.module_pixel_0[part_idx].origin_x!=0)
throw std::runtime_error(LOCATION + "Implementation error. x pos not 0."); throw std::runtime_error(LOCATION + "Implementation error. x pos not 0.");
//TODO! Risk for out of range access
subfiles[subfile_id][part_idx]->seek(corrected_idx % m_master.max_frames_per_file()); subfiles[subfile_id][part_idx]->seek(corrected_idx % m_master.max_frames_per_file());
subfiles[subfile_id][part_idx]->read_into(frame_buffer + offset, header); subfiles[subfile_id][part_idx]->read_into(frame_buffer + offset, header);
if (header) if (header)
@ -340,21 +293,25 @@ void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, Detect
// level // level
for (size_t part_idx = 0; part_idx != n_subfile_parts; ++part_idx) { for (size_t part_idx = 0; part_idx != n_subfile_parts; ++part_idx) {
auto pos = m_module_pixel_0[part_idx]; auto pos = m_geometry.module_pixel_0[part_idx];
auto corrected_idx = frame_indices[part_idx]; auto corrected_idx = frame_indices[part_idx];
auto subfile_id = corrected_idx / m_master.max_frames_per_file(); auto subfile_id = corrected_idx / m_master.max_frames_per_file();
if (subfile_id >= subfiles.size()) {
throw std::runtime_error(LOCATION +
" Subfile out of range. Possible missing data.");
}
subfiles[subfile_id][part_idx]->seek(corrected_idx % m_master.max_frames_per_file()); subfiles[subfile_id][part_idx]->seek(corrected_idx % m_master.max_frames_per_file());
subfiles[subfile_id][part_idx]->read_into(part_buffer, header); subfiles[subfile_id][part_idx]->read_into(part_buffer, header);
if(header) if(header)
++header; ++header;
for (size_t cur_row = 0; cur_row < (pos.height); for (size_t cur_row = 0; cur_row < static_cast<size_t>(pos.height);
cur_row++) { cur_row++) {
auto irow = (pos.y + cur_row); auto irow = (pos.origin_y + cur_row);
auto icol = pos.x; auto icol = pos.origin_x;
auto dest = (irow * this->m_cols + icol); auto dest = (irow * this->m_geometry.pixels_x + icol);
dest = dest * m_master.bitdepth() / 8; dest = dest * m_master.bitdepth() / 8;
memcpy(frame_buffer + dest, memcpy(frame_buffer + dest,
part_buffer + cur_row * pos.width * part_buffer + cur_row * pos.width *
@ -400,4 +357,8 @@ RawFile::~RawFile() {
} }
} }
} // namespace aare } // namespace aare

View File

@ -1,10 +1,13 @@
#include "aare/File.hpp" #include "aare/File.hpp"
#include "aare/RawMasterFile.hpp" //needed for ROI
#include "aare/RawFile.hpp"
#include <catch2/catch_test_macros.hpp> #include <catch2/catch_test_macros.hpp>
#include <filesystem> #include <filesystem>
#include "test_config.hpp" #include "test_config.hpp"
using aare::File; using aare::File;
TEST_CASE("Read number of frames from a jungfrau raw file", "[.integration]") { TEST_CASE("Read number of frames from a jungfrau raw file", "[.integration]") {
@ -148,3 +151,5 @@ TEST_CASE("Read file with unordered frames", "[.integration]") {
File f(fpath); File f(fpath);
REQUIRE_THROWS((f.read_frame())); REQUIRE_THROWS((f.read_frame()));
} }

View File

@ -37,11 +37,24 @@ std::filesystem::path RawFileNameComponents::master_fname() const {
} }
std::filesystem::path RawFileNameComponents::data_fname(size_t mod_id, std::filesystem::path RawFileNameComponents::data_fname(size_t mod_id,
size_t file_id) const { size_t file_id
return m_base_path / fmt::format("{}_d{}_f{}_{}.raw", m_base_name, mod_id, ) const {
std::string fmt = "{}_d{}_f{}_{}.raw";
//Before version X we used to name the data files f000000000000
if (m_old_scheme) {
fmt = "{}_d{}_f{:012}_{}.raw";
}
return m_base_path / fmt::format(fmt, m_base_name, mod_id,
file_id, m_file_index); file_id, m_file_index);
} }
void RawFileNameComponents::set_old_scheme(bool old_scheme) {
m_old_scheme = old_scheme;
}
const std::filesystem::path &RawFileNameComponents::base_path() const { const std::filesystem::path &RawFileNameComponents::base_path() const {
return m_base_path; return m_base_path;
} }
@ -314,10 +327,22 @@ void RawMasterFile::parse_raw(const std::filesystem::path &fpath) {
// do the actual parsing // do the actual parsing
if (key == "Version") { if (key == "Version") {
m_version = value; m_version = value;
//TODO! Which old versions can we handle?
auto v = std::stod(value);
//TODO! figure out exactly when we did the change
//This enables padding of f to 12 digits
if (v<4.0)
m_fnc.set_old_scheme(true);
} else if (key == "TimeStamp") { } else if (key == "TimeStamp") {
} else if (key == "Detector Type") { } else if (key == "Detector Type") {
m_type = StringTo<DetectorType>(value); m_type = StringTo<DetectorType>(value);
if(m_type==DetectorType::Moench){
m_type = DetectorType::Moench03_old;
}
} else if (key == "Timing Mode") { } else if (key == "Timing Mode") {
m_timing_mode = StringTo<TimingMode>(value); m_timing_mode = StringTo<TimingMode>(value);
} else if (key == "Image Size") { } else if (key == "Image Size") {
@ -352,6 +377,12 @@ void RawMasterFile::parse_raw(const std::filesystem::path &fpath) {
pos = value.find(','); pos = value.find(',');
m_pixels_x = std::stoi(value.substr(1, pos)); m_pixels_x = std::stoi(value.substr(1, pos));
m_pixels_y = std::stoi(value.substr(pos + 1)); m_pixels_y = std::stoi(value.substr(pos + 1));
}else if(key == "row"){
pos = value.find('p');
m_pixels_y = std::stoi(value.substr(0, pos));
}else if(key == "col"){
pos = value.find('p');
m_pixels_x = std::stoi(value.substr(0, pos));
} else if (key == "Total Frames") { } else if (key == "Total Frames") {
m_total_frames_expected = std::stoi(value); m_total_frames_expected = std::stoi(value);
} else if (key == "Dynamic Range") { } else if (key == "Dynamic Range") {
@ -360,6 +391,9 @@ void RawMasterFile::parse_raw(const std::filesystem::path &fpath) {
m_quad = std::stoi(value); m_quad = std::stoi(value);
} else if (key == "Max Frames Per File") { } else if (key == "Max Frames Per File") {
m_max_frames_per_file = std::stoi(value); m_max_frames_per_file = std::stoi(value);
}else if(key == "Max. Frames Per File"){
//Version 3.0 way of writing it
m_max_frames_per_file = std::stoi(value);
} else if (key == "Geometry") { } else if (key == "Geometry") {
pos = value.find(','); pos = value.find(',');
m_geometry = { m_geometry = {
@ -368,5 +402,19 @@ void RawMasterFile::parse_raw(const std::filesystem::path &fpath) {
} }
} }
} }
if (m_pixels_x == 400 && m_pixels_y == 400) {
m_type = DetectorType::Moench03_old;
}
//TODO! Look for d0, d1...dn and update geometry
if(m_geometry.col == 0 && m_geometry.row == 0){
m_geometry = {1,1};
fmt::print("Warning: No geometry found in master file. Assuming 1x1\n");
}
//TODO! Read files and find actual frames
if(m_frames_in_file==0)
m_frames_in_file = m_total_frames_expected;
} }
} // namespace aare } // namespace aare

View File

@ -31,6 +31,16 @@ TEST_CASE("Construction of master file name and data files"){
REQUIRE(m.data_fname(1, 1) == "test_d1_f1_1.raw"); REQUIRE(m.data_fname(1, 1) == "test_d1_f1_1.raw");
} }
TEST_CASE("Construction of master file name and data files using old scheme"){
RawFileNameComponents m("test_master_1.raw");
m.set_old_scheme(true);
REQUIRE(m.master_fname() == "test_master_1.raw");
REQUIRE(m.data_fname(0, 0) == "test_d0_f000000000000_1.raw");
REQUIRE(m.data_fname(1, 0) == "test_d1_f000000000000_1.raw");
REQUIRE(m.data_fname(0, 1) == "test_d0_f000000000001_1.raw");
REQUIRE(m.data_fname(1, 1) == "test_d1_f000000000001_1.raw");
}
TEST_CASE("Master file name does not fit pattern"){ TEST_CASE("Master file name does not fit pattern"){
REQUIRE_THROWS(RawFileNameComponents("somefile.json")); REQUIRE_THROWS(RawFileNameComponents("somefile.json"));
REQUIRE_THROWS(RawFileNameComponents("another_test_d0_f0_1.raw")); REQUIRE_THROWS(RawFileNameComponents("another_test_d0_f0_1.raw"));

View File

@ -9,14 +9,14 @@ namespace aare {
RawSubFile::RawSubFile(const std::filesystem::path &fname, RawSubFile::RawSubFile(const std::filesystem::path &fname,
DetectorType detector, size_t rows, size_t cols, DetectorType detector, size_t rows, size_t cols,
size_t bitdepth, uint32_t pos_row, uint32_t pos_col) size_t bitdepth, uint32_t pos_row, uint32_t pos_col)
: m_bitdepth(bitdepth), m_fname(fname), m_rows(rows), m_cols(cols), : m_detector_type(detector), m_bitdepth(bitdepth), m_fname(fname),
m_detector_type(detector), m_rows(rows), m_cols(cols),
m_bytes_per_frame((m_bitdepth / 8) * m_rows * m_cols), m_pos_row(pos_row), m_pos_col(pos_col) { m_bytes_per_frame((m_bitdepth / 8) * m_rows * m_cols), m_pos_row(pos_row),
m_pos_col(pos_col) {
if (m_detector_type == DetectorType::Moench03_old) { if (m_detector_type == DetectorType::Moench03_old) {
m_pixel_map = GenerateMoench03PixelMap(); m_pixel_map = GenerateMoench03PixelMap();
}else if(m_detector_type == DetectorType::Eiger && m_pos_row % 2 == 0){ } else if (m_detector_type == DetectorType::Eiger && m_pos_row % 2 == 0) {
m_pixel_map = GenerateEigerFlipRowsPixelMap(); m_pixel_map = GenerateEigerFlipRowsPixelMap();
fmt::print("Flipping rows\n");
} }
if (std::filesystem::exists(fname)) { if (std::filesystem::exists(fname)) {
@ -44,7 +44,7 @@ RawSubFile::RawSubFile(const std::filesystem::path &fname,
void RawSubFile::seek(size_t frame_index) { void RawSubFile::seek(size_t frame_index) {
if (frame_index >= n_frames) { if (frame_index >= n_frames) {
throw std::runtime_error("Frame number out of range"); throw std::runtime_error(LOCATION + fmt::format("Frame index {} out of range in a file with {} frames", frame_index, n_frames));
} }
m_file.seekg((sizeof(DetectorHeader) + bytes_per_frame()) * frame_index); m_file.seekg((sizeof(DetectorHeader) + bytes_per_frame()) * frame_index);
} }
@ -53,37 +53,48 @@ size_t RawSubFile::tell() {
return m_file.tellg() / (sizeof(DetectorHeader) + bytes_per_frame()); return m_file.tellg() / (sizeof(DetectorHeader) + bytes_per_frame());
} }
void RawSubFile::read_into(std::byte *image_buf, DetectorHeader *header) { void RawSubFile::read_into(std::byte *image_buf, DetectorHeader *header) {
if(header){ if (header) {
m_file.read(reinterpret_cast<char *>(header), sizeof(DetectorHeader)); m_file.read(reinterpret_cast<char *>(header), sizeof(DetectorHeader));
} else { } else {
m_file.seekg(sizeof(DetectorHeader), std::ios::cur); m_file.seekg(sizeof(DetectorHeader), std::ios::cur);
} }
//TODO! expand support for different bitdepths // TODO! expand support for different bitdepths
if(m_pixel_map){ if (m_pixel_map) {
// read into a temporary buffer and then copy the data to the buffer // read into a temporary buffer and then copy the data to the buffer
// in the correct order // in the correct order
// currently this only supports 16 bit data! // TODO! add 4 bit support
auto part_buffer = new std::byte[bytes_per_frame()]; if(m_bitdepth == 8){
m_file.read(reinterpret_cast<char *>(part_buffer), bytes_per_frame()); read_with_map<uint8_t>(image_buf);
auto *data = reinterpret_cast<uint16_t *>(image_buf); }else if (m_bitdepth == 16) {
auto *part_data = reinterpret_cast<uint16_t *>(part_buffer); read_with_map<uint16_t>(image_buf);
for (size_t i = 0; i < pixels_per_frame(); i++) { } else if (m_bitdepth == 32) {
data[i] = part_data[(*m_pixel_map)(i)]; read_with_map<uint32_t>(image_buf);
}else{
throw std::runtime_error("Unsupported bitdepth for read with pixel map");
} }
delete[] part_buffer;
} else { } else {
// read directly into the buffer // read directly into the buffer
m_file.read(reinterpret_cast<char *>(image_buf), bytes_per_frame()); m_file.read(reinterpret_cast<char *>(image_buf), bytes_per_frame());
} }
} }
template <typename T>
void RawSubFile::read_with_map(std::byte *image_buf) {
auto part_buffer = new std::byte[bytes_per_frame()];
m_file.read(reinterpret_cast<char *>(part_buffer), bytes_per_frame());
auto *data = reinterpret_cast<T *>(image_buf);
auto *part_data = reinterpret_cast<T *>(part_buffer);
for (size_t i = 0; i < pixels_per_frame(); i++) {
data[i] = part_data[(*m_pixel_map)(i)];
}
delete[] part_buffer;
}
size_t RawSubFile::rows() const { return m_rows; } size_t RawSubFile::rows() const { return m_rows; }
size_t RawSubFile::cols() const { return m_cols; } size_t RawSubFile::cols() const { return m_cols; }
void RawSubFile::get_part(std::byte *buffer, size_t frame_index) { void RawSubFile::get_part(std::byte *buffer, size_t frame_index) {
seek(frame_index); seek(frame_index);
read_into(buffer, nullptr); read_into(buffer, nullptr);
@ -96,5 +107,4 @@ size_t RawSubFile::frame_number(size_t frame_index) {
return h.frameNumber; return h.frameNumber;
} }
} // namespace aare } // namespace aare
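For reference, a sketch of reading a single sub file through the interface shown above (illustrative only: the include paths, the 400x400/16-bit Moench03_old geometry and the DetectorHeader usage are assumptions on top of what this diff shows):

```cpp
#include "aare/RawSubFile.hpp" // assumed header location
#include "aare/defs.hpp"
#include <cstddef>
#include <filesystem>
#include <vector>

void read_first_frame(const std::filesystem::path &fname) {
    // One 400x400, 16 bit Moench03 (old firmware) part at module position (0,0)
    aare::RawSubFile sub(fname, aare::DetectorType::Moench03_old,
                         400, 400, 16, /*pos_row=*/0, /*pos_col=*/0);

    std::vector<std::byte> buf(400 * 400 * 16 / 8);
    aare::DetectorHeader header{};

    sub.seek(0);                        // throws if the index is past the last frame
    sub.read_into(buf.data(), &header); // header plus (possibly reordered) pixel data
}
```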

src/decode.cpp (new file, 61 lines)

@ -0,0 +1,61 @@
#include "aare/decode.hpp"
namespace aare {
uint16_t adc_sar_05_decode64to16(uint64_t input){
//we want bits 29,19,28,18,31,21,27,20,24,23,25,22 (LSB->MSB) and then pad to 16 bits
uint16_t output = 0;
output |= ((input >> 22) & 1) << 11;
output |= ((input >> 25) & 1) << 10;
output |= ((input >> 23) & 1) << 9;
output |= ((input >> 24) & 1) << 8;
output |= ((input >> 20) & 1) << 7;
output |= ((input >> 27) & 1) << 6;
output |= ((input >> 21) & 1) << 5;
output |= ((input >> 31) & 1) << 4;
output |= ((input >> 18) & 1) << 3;
output |= ((input >> 28) & 1) << 2;
output |= ((input >> 19) & 1) << 1;
output |= ((input >> 29) & 1) << 0;
return output;
}
void adc_sar_05_decode64to16(NDView<uint64_t, 2> input, NDView<uint16_t,2> output){
for(int64_t i = 0; i < input.shape(0); i++){
for(int64_t j = 0; j < input.shape(1); j++){
output(i,j) = adc_sar_05_decode64to16(input(i,j));
}
}
}
uint16_t adc_sar_04_decode64to16(uint64_t input){
// bit_map = array([15,17,19,21,23,4,6,8,10,12,14,16] LSB->MSB
uint16_t output = 0;
output |= ((input >> 16) & 1) << 11;
output |= ((input >> 14) & 1) << 10;
output |= ((input >> 12) & 1) << 9;
output |= ((input >> 10) & 1) << 8;
output |= ((input >> 8) & 1) << 7;
output |= ((input >> 6) & 1) << 6;
output |= ((input >> 4) & 1) << 5;
output |= ((input >> 23) & 1) << 4;
output |= ((input >> 21) & 1) << 3;
output |= ((input >> 19) & 1) << 2;
output |= ((input >> 17) & 1) << 1;
output |= ((input >> 15) & 1) << 0;
return output;
}
void adc_sar_04_decode64to16(NDView<uint64_t, 2> input, NDView<uint16_t,2> output){
for(int64_t i = 0; i < input.shape(0); i++){
for(int64_t j = 0; j < input.shape(1); j++){
output(i,j) = adc_sar_04_decode64to16(input(i,j));
}
}
}
} // namespace aare
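A short sketch of driving the decoders above; the header and both overloads are exactly the ones defined in this file, only the calling code is illustrative:

```cpp
#include "aare/decode.hpp"
#include <cstdint>

void decode_block(aare::NDView<uint64_t, 2> raw, aare::NDView<uint16_t, 2> out) {
    // Decode a single 64 bit sample (assumes raw is non-empty)
    uint16_t first = aare::adc_sar_05_decode64to16(raw(0, 0));
    static_cast<void>(first);

    // ...or a whole 2D block; out must have the same shape as raw
    aare::adc_sar_05_decode64to16(raw, out);
}
```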

View File

@ -21,23 +21,37 @@ void assert_failed(const std::string &msg)
*/ */
template <> std::string ToString(DetectorType arg) { template <> std::string ToString(DetectorType arg) {
switch (arg) { switch (arg) {
case DetectorType::Jungfrau: case DetectorType::Generic:
return "Jungfrau"; return "Generic";
case DetectorType::Eiger: case DetectorType::Eiger:
return "Eiger"; return "Eiger";
case DetectorType::Mythen3: case DetectorType::Gotthard:
return "Mythen3"; return "Gotthard";
case DetectorType::Jungfrau:
return "Jungfrau";
case DetectorType::ChipTestBoard:
return "ChipTestBoard";
case DetectorType::Moench: case DetectorType::Moench:
return "Moench"; return "Moench";
case DetectorType::Mythen3:
return "Mythen3";
case DetectorType::Gotthard2:
return "Gotthard2";
case DetectorType::Xilinx_ChipTestBoard:
return "Xilinx_ChipTestBoard";
//Custom ones
case DetectorType::Moench03: case DetectorType::Moench03:
return "Moench03"; return "Moench03";
case DetectorType::Moench03_old: case DetectorType::Moench03_old:
return "Moench03_old"; return "Moench03_old";
case DetectorType::ChipTestBoard: case DetectorType::Unknown:
return "ChipTestBoard";
default:
return "Unknown"; return "Unknown";
//no default case to trigger compiler warning if not all
//enum values are handled
} }
throw std::runtime_error("Could not decode detector to string");
} }
/** /**
@ -47,21 +61,34 @@ template <> std::string ToString(DetectorType arg) {
* @throw runtime_error if the string does not match any DetectorType * @throw runtime_error if the string does not match any DetectorType
*/ */
template <> DetectorType StringTo(const std::string &arg) { template <> DetectorType StringTo(const std::string &arg) {
if (arg == "Jungfrau") if (arg == "Generic")
return DetectorType::Jungfrau; return DetectorType::Generic;
if (arg == "Eiger") if (arg == "Eiger")
return DetectorType::Eiger; return DetectorType::Eiger;
if (arg == "Mythen3") if (arg == "Gotthard")
return DetectorType::Mythen3; return DetectorType::Gotthard;
if (arg == "Jungfrau")
return DetectorType::Jungfrau;
if (arg == "ChipTestBoard")
return DetectorType::ChipTestBoard;
if (arg == "Moench") if (arg == "Moench")
return DetectorType::Moench; return DetectorType::Moench;
if (arg == "Mythen3")
return DetectorType::Mythen3;
if (arg == "Gotthard2")
return DetectorType::Gotthard2;
if (arg == "Xilinx_ChipTestBoard")
return DetectorType::Xilinx_ChipTestBoard;
//Custom ones
if (arg == "Moench03") if (arg == "Moench03")
return DetectorType::Moench03; return DetectorType::Moench03;
if (arg == "Moench03_old") if (arg == "Moench03_old")
return DetectorType::Moench03_old; return DetectorType::Moench03_old;
if (arg == "ChipTestBoard") if (arg == "Unknown")
return DetectorType::ChipTestBoard; return DetectorType::Unknown;
throw std::runtime_error("Could not decode dector from: \"" + arg + "\"");
throw std::runtime_error("Could not decode detector from: \"" + arg + "\"");
} }
/** /**

View File

@ -1,12 +1,59 @@
#include "aare/defs.hpp" #include "aare/defs.hpp"
// #include "aare/utils/floats.hpp"
#include <catch2/catch_test_macros.hpp> #include <catch2/catch_test_macros.hpp>
#include <string> #include <string>
using aare::ToString;
using aare::StringTo;
TEST_CASE("Enum to string conversion") { TEST_CASE("Enum to string conversion") {
// By the way I don't think the enum string conversions should be in the defs.hpp file // TODO! By the way I don't think the enum string conversions should be in the defs.hpp file
// but let's use this to show a test // but let's use this to show a test
REQUIRE(ToString(aare::DetectorType::Generic) == "Generic");
REQUIRE(ToString(aare::DetectorType::Eiger) == "Eiger");
REQUIRE(ToString(aare::DetectorType::Gotthard) == "Gotthard");
REQUIRE(ToString(aare::DetectorType::Jungfrau) == "Jungfrau"); REQUIRE(ToString(aare::DetectorType::Jungfrau) == "Jungfrau");
REQUIRE(ToString(aare::DetectorType::ChipTestBoard) == "ChipTestBoard");
REQUIRE(ToString(aare::DetectorType::Moench) == "Moench");
REQUIRE(ToString(aare::DetectorType::Mythen3) == "Mythen3");
REQUIRE(ToString(aare::DetectorType::Gotthard2) == "Gotthard2");
REQUIRE(ToString(aare::DetectorType::Xilinx_ChipTestBoard) == "Xilinx_ChipTestBoard");
REQUIRE(ToString(aare::DetectorType::Moench03) == "Moench03");
REQUIRE(ToString(aare::DetectorType::Moench03_old) == "Moench03_old");
REQUIRE(ToString(aare::DetectorType::Unknown) == "Unknown");
}
TEST_CASE("String to enum"){
REQUIRE(StringTo<aare::DetectorType>("Generic") == aare::DetectorType::Generic);
REQUIRE(StringTo<aare::DetectorType>("Eiger") == aare::DetectorType::Eiger);
REQUIRE(StringTo<aare::DetectorType>("Gotthard") == aare::DetectorType::Gotthard);
REQUIRE(StringTo<aare::DetectorType>("Jungfrau") == aare::DetectorType::Jungfrau);
REQUIRE(StringTo<aare::DetectorType>("ChipTestBoard") == aare::DetectorType::ChipTestBoard);
REQUIRE(StringTo<aare::DetectorType>("Moench") == aare::DetectorType::Moench);
REQUIRE(StringTo<aare::DetectorType>("Mythen3") == aare::DetectorType::Mythen3);
REQUIRE(StringTo<aare::DetectorType>("Gotthard2") == aare::DetectorType::Gotthard2);
REQUIRE(StringTo<aare::DetectorType>("Xilinx_ChipTestBoard") == aare::DetectorType::Xilinx_ChipTestBoard);
REQUIRE(StringTo<aare::DetectorType>("Moench03") == aare::DetectorType::Moench03);
REQUIRE(StringTo<aare::DetectorType>("Moench03_old") == aare::DetectorType::Moench03_old);
REQUIRE(StringTo<aare::DetectorType>("Unknown") == aare::DetectorType::Unknown);
}
TEST_CASE("Enum values"){
//Since some of the enums are written to file we need to make sure
//they match the values in the slsDetectorPackage
REQUIRE(static_cast<int>(aare::DetectorType::Generic) == 0);
REQUIRE(static_cast<int>(aare::DetectorType::Eiger) == 1);
REQUIRE(static_cast<int>(aare::DetectorType::Gotthard) == 2);
REQUIRE(static_cast<int>(aare::DetectorType::Jungfrau) == 3);
REQUIRE(static_cast<int>(aare::DetectorType::ChipTestBoard) == 4);
REQUIRE(static_cast<int>(aare::DetectorType::Moench) == 5);
REQUIRE(static_cast<int>(aare::DetectorType::Mythen3) == 6);
REQUIRE(static_cast<int>(aare::DetectorType::Gotthard2) == 7);
REQUIRE(static_cast<int>(aare::DetectorType::Xilinx_ChipTestBoard) == 8);
//Not part of slsDetectorPackage (aare-specific values)
REQUIRE(static_cast<int>(aare::DetectorType::Moench03) == 100);
} }
TEST_CASE("DynamicCluster creation") { TEST_CASE("DynamicCluster creation") {

src/geo_helpers.cpp (new file, 71 lines)

@ -0,0 +1,71 @@
#include "aare/geo_helpers.hpp"
#include "fmt/core.h"
namespace aare{
DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI roi) {
#ifdef AARE_VERBOSE
fmt::println("update_geometry_with_roi() called with ROI: {} {} {} {}",
roi.xmin, roi.xmax, roi.ymin, roi.ymax);
fmt::println("Geometry: {} {} {} {} {} {}",
geo.modules_x, geo.modules_y, geo.pixels_x, geo.pixels_y, geo.module_gap_row, geo.module_gap_col);
#endif
int pos_y = 0;
int pos_y_increment = 0;
for (int row = 0; row < geo.modules_y; row++) {
int pos_x = 0;
for (int col = 0; col < geo.modules_x; col++) {
auto &m = geo.module_pixel_0[row * geo.modules_x + col];
auto original_height = m.height;
auto original_width = m.width;
// module is to the left of the roi
if (m.origin_x + m.width < roi.xmin) {
m.width = 0;
// roi is in module
} else {
// here we only arrive when the roi is in or to the left of
// the module
if (roi.xmin > m.origin_x) {
m.width -= roi.xmin - m.origin_x;
}
if (roi.xmax < m.origin_x + original_width) {
m.width -= m.origin_x + original_width - roi.xmax;
}
m.origin_x = pos_x;
pos_x += m.width;
}
if (m.origin_y + m.height < roi.ymin) {
m.height = 0;
} else {
if ((roi.ymin > m.origin_y) && (roi.ymin < m.origin_y + m.height)) {
m.height -= roi.ymin - m.origin_y;
}
if (roi.ymax < m.origin_y + original_height) {
m.height -= m.origin_y + original_height - roi.ymax;
}
m.origin_y = pos_y;
pos_y_increment = m.height;
}
#ifdef AARE_VERBOSE
fmt::println("Module {} {} {} {}", m.origin_x, m.origin_y, m.width, m.height);
#endif
}
// increment pos_y
pos_y += pos_y_increment;
}
// m_rows = roi.height();
// m_cols = roi.width();
geo.pixels_x = roi.width();
geo.pixels_y = roi.height();
return geo;
}
} // namespace aare
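In words: a module that overlaps the ROI keeps only the overlapping span. With module origin x0, width w and ROI [xmin, xmax), the clipping above amounts to (and likewise in the y direction):

    w' = w - max(0, xmin - x0) - max(0, x0 + w - xmax)

For three 1024-wide modules and an ROI spanning x = 700 to 2500 (the case tested below), this gives 1024 - 700 = 324, 1024, and 1024 - 572 = 452 pixels respectively.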

src/geo_helpers.test.cpp (new file, 230 lines)

@ -0,0 +1,230 @@
#include "aare/File.hpp"
#include "aare/RawMasterFile.hpp" //needed for ROI
#include "aare/RawFile.hpp"
#include <catch2/catch_test_macros.hpp>
#include <filesystem>
#include "aare/geo_helpers.hpp"
#include "test_config.hpp"
TEST_CASE("Simple ROIs on one module"){
// DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI roi)
aare::DetectorGeometry geo;
aare::ModuleGeometry mod;
mod.origin_x = 0;
mod.origin_y = 0;
mod.width = 1024;
mod.height = 512;
geo.pixels_x = 1024;
geo.pixels_y = 512;
geo.modules_x = 1;
geo.modules_y = 1;
geo.module_pixel_0.push_back(mod);
SECTION("ROI is the whole module"){
aare::ROI roi;
roi.xmin = 0;
roi.xmax = 1024;
roi.ymin = 0;
roi.ymax = 512;
auto updated_geo = aare::update_geometry_with_roi(geo, roi);
REQUIRE(updated_geo.pixels_x == 1024);
REQUIRE(updated_geo.pixels_y == 512);
REQUIRE(updated_geo.modules_x == 1);
REQUIRE(updated_geo.modules_y == 1);
REQUIRE(updated_geo.module_pixel_0[0].height == 512);
REQUIRE(updated_geo.module_pixel_0[0].width == 1024);
}
SECTION("ROI is the top left corner of the module"){
aare::ROI roi;
roi.xmin = 100;
roi.xmax = 200;
roi.ymin = 150;
roi.ymax = 200;
auto updated_geo = aare::update_geometry_with_roi(geo, roi);
REQUIRE(updated_geo.pixels_x == 100);
REQUIRE(updated_geo.pixels_y == 50);
REQUIRE(updated_geo.modules_x == 1);
REQUIRE(updated_geo.modules_y == 1);
REQUIRE(updated_geo.module_pixel_0[0].height == 50);
REQUIRE(updated_geo.module_pixel_0[0].width == 100);
}
SECTION("ROI is a small square"){
aare::ROI roi;
roi.xmin = 1000;
roi.xmax = 1010;
roi.ymin = 500;
roi.ymax = 510;
auto updated_geo = aare::update_geometry_with_roi(geo, roi);
REQUIRE(updated_geo.pixels_x == 10);
REQUIRE(updated_geo.pixels_y == 10);
REQUIRE(updated_geo.modules_x == 1);
REQUIRE(updated_geo.modules_y == 1);
REQUIRE(updated_geo.module_pixel_0[0].height == 10);
REQUIRE(updated_geo.module_pixel_0[0].width == 10);
}
SECTION("ROI is a few columns"){
aare::ROI roi;
roi.xmin = 750;
roi.xmax = 800;
roi.ymin = 0;
roi.ymax = 512;
auto updated_geo = aare::update_geometry_with_roi(geo, roi);
REQUIRE(updated_geo.pixels_x == 50);
REQUIRE(updated_geo.pixels_y == 512);
REQUIRE(updated_geo.modules_x == 1);
REQUIRE(updated_geo.modules_y == 1);
REQUIRE(updated_geo.module_pixel_0[0].height == 512);
REQUIRE(updated_geo.module_pixel_0[0].width == 50);
}
}
TEST_CASE("Two modules side by side"){
// DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI roi)
aare::DetectorGeometry geo;
aare::ModuleGeometry mod;
mod.origin_x = 0;
mod.origin_y = 0;
mod.width = 1024;
mod.height = 512;
geo.pixels_x = 2048;
geo.pixels_y = 512;
geo.modules_x = 2;
geo.modules_y = 1;
geo.module_pixel_0.push_back(mod);
mod.origin_x = 1024;
geo.module_pixel_0.push_back(mod);
SECTION("ROI is the whole image"){
aare::ROI roi;
roi.xmin = 0;
roi.xmax = 2048;
roi.ymin = 0;
roi.ymax = 512;
auto updated_geo = aare::update_geometry_with_roi(geo, roi);
REQUIRE(updated_geo.pixels_x == 2048);
REQUIRE(updated_geo.pixels_y == 512);
REQUIRE(updated_geo.modules_x == 2);
REQUIRE(updated_geo.modules_y == 1);
}
SECTION("rectangle on both modules"){
aare::ROI roi;
roi.xmin = 800;
roi.xmax = 1300;
roi.ymin = 200;
roi.ymax = 499;
auto updated_geo = aare::update_geometry_with_roi(geo, roi);
REQUIRE(updated_geo.pixels_x == 500);
REQUIRE(updated_geo.pixels_y == 299);
REQUIRE(updated_geo.modules_x == 2);
REQUIRE(updated_geo.modules_y == 1);
REQUIRE(updated_geo.module_pixel_0[0].height == 299);
REQUIRE(updated_geo.module_pixel_0[0].width == 224);
REQUIRE(updated_geo.module_pixel_0[1].height == 299);
REQUIRE(updated_geo.module_pixel_0[1].width == 276);
}
}
TEST_CASE("Three modules side by side"){
// DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI roi)
aare::DetectorGeometry geo;
aare::ROI roi;
roi.xmin = 700;
roi.xmax = 2500;
roi.ymin = 0;
roi.ymax = 123;
aare::ModuleGeometry mod;
mod.origin_x = 0;
mod.origin_y = 0;
mod.width = 1024;
mod.height = 512;
geo.pixels_x = 3072;
geo.pixels_y = 512;
geo.modules_x = 3;
geo.modules_y = 1;
geo.module_pixel_0.push_back(mod);
mod.origin_x = 1024;
geo.module_pixel_0.push_back(mod);
mod.origin_x = 2048;
geo.module_pixel_0.push_back(mod);
auto updated_geo = aare::update_geometry_with_roi(geo, roi);
REQUIRE(updated_geo.pixels_x == 1800);
REQUIRE(updated_geo.pixels_y == 123);
REQUIRE(updated_geo.modules_x == 3);
REQUIRE(updated_geo.modules_y == 1);
REQUIRE(updated_geo.module_pixel_0[0].height == 123);
REQUIRE(updated_geo.module_pixel_0[0].width == 324);
REQUIRE(updated_geo.module_pixel_0[1].height == 123);
REQUIRE(updated_geo.module_pixel_0[1].width == 1024);
REQUIRE(updated_geo.module_pixel_0[2].height == 123);
REQUIRE(updated_geo.module_pixel_0[2].width == 452);
}
TEST_CASE("Four modules as a square"){
// DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI roi)
aare::DetectorGeometry geo;
aare::ROI roi;
roi.xmin = 500;
roi.xmax = 2000;
roi.ymin = 500;
roi.ymax = 600;
aare::ModuleGeometry mod;
mod.origin_x = 0;
mod.origin_y = 0;
mod.width = 1024;
mod.height = 512;
geo.pixels_x = 2048;
geo.pixels_y = 1024;
geo.modules_x = 2;
geo.modules_y = 2;
geo.module_pixel_0.push_back(mod);
mod.origin_x = 1024;
geo.module_pixel_0.push_back(mod);
mod.origin_x = 0;
mod.origin_y = 512;
geo.module_pixel_0.push_back(mod);
mod.origin_x = 1024;
geo.module_pixel_0.push_back(mod);
auto updated_geo = aare::update_geometry_with_roi(geo, roi);
REQUIRE(updated_geo.pixels_x == 1500);
REQUIRE(updated_geo.pixels_y == 100);
REQUIRE(updated_geo.modules_x == 2);
REQUIRE(updated_geo.modules_y == 2);
REQUIRE(updated_geo.module_pixel_0[0].height == 12);
REQUIRE(updated_geo.module_pixel_0[0].width == 524);
REQUIRE(updated_geo.module_pixel_0[1].height == 12);
REQUIRE(updated_geo.module_pixel_0[1].width == 976);
REQUIRE(updated_geo.module_pixel_0[2].height == 88);
REQUIRE(updated_geo.module_pixel_0[2].width == 524);
REQUIRE(updated_geo.module_pixel_0[3].height == 88);
REQUIRE(updated_geo.module_pixel_0[3].width == 976);
}

src/utils/task.cpp (new file, 30 lines)

@ -0,0 +1,30 @@
#include "aare/utils/task.hpp"
namespace aare {
std::vector<std::pair<int, int>> split_task(int first, int last,
int n_threads) {
std::vector<std::pair<int, int>> vec;
vec.reserve(n_threads);
int n_frames = last - first;
// More threads than frames: one single-frame task per frame
if (n_threads >= n_frames) {
for (int i = 0; i != n_frames; ++i) {
vec.push_back({first + i, first + i + 1});
}
return vec;
}
// Otherwise split evenly; the last task absorbs the remainder
int step = (n_frames) / n_threads;
for (int i = 0; i != n_threads; ++i) {
int start = first + step * i;
int stop = first + step * (i + 1);
if (i == n_threads - 1)
stop = last;
vec.push_back({start, stop});
}
return vec;
}
} // namespace aare

src/utils/task.test.cpp (new file, 32 lines)

@ -0,0 +1,32 @@
#include "aare/utils/task.hpp"
#include <catch2/matchers/catch_matchers_floating_point.hpp>
#include <catch2/catch_test_macros.hpp>
TEST_CASE("Split a range into multiple tasks"){
auto tasks = aare::split_task(0, 10, 3);
REQUIRE(tasks.size() == 3);
REQUIRE(tasks[0].first == 0);
REQUIRE(tasks[0].second == 3);
REQUIRE(tasks[1].first == 3);
REQUIRE(tasks[1].second == 6);
REQUIRE(tasks[2].first == 6);
REQUIRE(tasks[2].second == 10);
tasks = aare::split_task(0, 10, 1);
REQUIRE(tasks.size() == 1);
REQUIRE(tasks[0].first == 0);
REQUIRE(tasks[0].second == 10);
tasks = aare::split_task(0, 10, 10);
REQUIRE(tasks.size() == 10);
for (int i = 0; i < 10; i++){
REQUIRE(tasks[i].first == i);
REQUIRE(tasks[i].second == i+1);
}
}

View File

@ -17,8 +17,8 @@ endif()
list(APPEND CMAKE_MODULE_PATH ${Catch2_SOURCE_DIR}/extras) list(APPEND CMAKE_MODULE_PATH ${Catch2_SOURCE_DIR}/extras)
add_executable(tests test.cpp) add_executable(tests test.cpp)
target_link_libraries(tests PRIVATE Catch2::Catch2WithMain) target_link_libraries(tests PRIVATE Catch2::Catch2WithMain aare_core aare_compiler_flags)
# target_compile_options(tests PRIVATE -fno-omit-frame-pointer -fsanitize=address)
set_target_properties(tests PROPERTIES set_target_properties(tests PROPERTIES
RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR} RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}
OUTPUT_NAME run_tests OUTPUT_NAME run_tests
@ -34,7 +34,7 @@ set(TestSources
target_sources(tests PRIVATE ${TestSources} ) target_sources(tests PRIVATE ${TestSources} )
#Work around to remove, this is not the way to do it =) #Work around to remove, this is not the way to do it =)
target_link_libraries(tests PRIVATE aare_core aare_compiler_flags) # target_link_libraries(tests PRIVATE aare_core aare_compiler_flags)
#configure a header to pass test file paths #configure a header to pass test file paths

View File

@ -3,6 +3,7 @@
#include <climits> #include <climits>
#include <filesystem> #include <filesystem>
#include <fstream> #include <fstream>
#include <fmt/core.h>
TEST_CASE("Test suite can find data assets", "[.integration]") { TEST_CASE("Test suite can find data assets", "[.integration]") {
auto fpath = test_data_path() / "numpy" / "test_numpy_file.npy"; auto fpath = test_data_path() / "numpy" / "test_numpy_file.npy";
@ -18,4 +19,20 @@ TEST_CASE("Test suite can open data assets", "[.integration]") {
TEST_CASE("Test float32 and char8") { TEST_CASE("Test float32 and char8") {
REQUIRE(sizeof(float) == 4); REQUIRE(sizeof(float) == 4);
REQUIRE(CHAR_BIT == 8); REQUIRE(CHAR_BIT == 8);
} }
/**
* Uncomment the following tests to verify that asan is working
*/
// TEST_CASE("trigger asan stack"){
// int arr[5] = {1,2,3,4,5};
// int val = arr[7];
// fmt::print("val: {}\n", val);
// }
// TEST_CASE("trigger asan heap"){
// auto *ptr = new int[5];
// ptr[70] = 5;
// fmt::print("ptr: {}\n", ptr[70]);
// }