Mirror of https://github.com/slsdetectorgroup/aare.git, synced 2025-04-20 05:40:03 +02:00

Commit dadf5f4869 (parent 7d6223d52d): Added fitting, fixed roi etc (#129)
Co-authored-by: Patrick <patrick.sieberer@psi.ch>
Co-authored-by: JulianHeymes <julian.heymes@psi.ch>
@@ -4,7 +4,6 @@ on:
   push:
     branches:
       - main
-      - developer

 jobs:
   build:
@@ -34,7 +33,6 @@ jobs:
         run: conda install conda-build=24.9 conda-verify pytest anaconda-client

       - name: Enable upload
-        if: github.ref == 'refs/heads/main'
         run: conda config --set anaconda_upload yes

       - name: Build
New file: .github/workflows/build_conda.yml (vendored, +40 lines)
@@ -0,0 +1,40 @@
name: Build pkgs and deploy if on main

on:
  push:
    branches:
      - developer

jobs:
  build:
    strategy:
      fail-fast: false
      matrix:
        platform: [ubuntu-latest, ] # macos-12, windows-2019]
        python-version: ["3.12",]

    runs-on: ${{ matrix.platform }}

    # The setup-miniconda action needs this to activate miniconda
    defaults:
      run:
        shell: "bash -l {0}"

    steps:
      - uses: actions/checkout@v4

      - name: Get conda
        uses: conda-incubator/setup-miniconda@v3.0.4
        with:
          python-version: ${{ matrix.python-version }}
          channels: conda-forge

      - name: Prepare
        run: conda install conda-build=24.9 conda-verify pytest anaconda-client

      - name: Disable upload
        run: conda config --set anaconda_upload no

      - name: Build
        run: conda build conda-recipe
CMakeLists.txt:
@@ -48,6 +48,7 @@ option(AARE_FETCH_PYBIND11 "Use FetchContent to download pybind11" ON)
 option(AARE_FETCH_CATCH "Use FetchContent to download catch2" ON)
 option(AARE_FETCH_JSON "Use FetchContent to download nlohmann::json" ON)
 option(AARE_FETCH_ZMQ "Use FetchContent to download libzmq" ON)
+option(AARE_FETCH_LMFIT "Use FetchContent to download lmfit" ON)


 #Convenience option to use system libraries only (no FetchContent)
@@ -76,6 +77,34 @@ endif()

 set(CMAKE_EXPORT_COMPILE_COMMANDS ON)

+if(AARE_FETCH_LMFIT)
+    set(lmfit_patch git apply ${CMAKE_CURRENT_SOURCE_DIR}/patches/lmfit.patch)
+    FetchContent_Declare(
+        lmfit
+        GIT_REPOSITORY https://jugit.fz-juelich.de/mlz/lmfit.git
+        GIT_TAG main
+        PATCH_COMMAND ${lmfit_patch}
+        UPDATE_DISCONNECTED 1
+        EXCLUDE_FROM_ALL
+    )
+    #Disable what we don't need from lmfit
+    set(BUILD_TESTING OFF CACHE BOOL "")
+    set(LMFIT_CPPTEST OFF CACHE BOOL "")
+    set(LIB_MAN OFF CACHE BOOL "")
+    set(LMFIT_CPPTEST OFF CACHE BOOL "")
+    set(BUILD_SHARED_LIBS OFF CACHE BOOL "")
+
+    FetchContent_MakeAvailable(lmfit)
+    set_property(TARGET lmfit PROPERTY POSITION_INDEPENDENT_CODE ON)
+
+    target_include_directories (lmfit PUBLIC "${libzmq_SOURCE_DIR}/lib")
+    message(STATUS "lmfit include dir: ${lmfit_SOURCE_DIR}/lib")
+else()
+    find_package(lmfit REQUIRED)
+endif()
+
+
 if(AARE_FETCH_ZMQ)
     # Fetchcontent_Declare is deprecated need to find a way to update this
     # for now setting the policy to old is enough
@@ -146,7 +175,6 @@ if (AARE_FETCH_JSON)
     install(
         TARGETS nlohmann_json
         EXPORT "${TARGETS_EXPORT_NAME}"
-
     )
     message(STATUS "target: ${NLOHMANN_JSON_TARGET_NAME}")
 else()
@@ -283,11 +311,14 @@ set(PUBLICHEADERS
     include/aare/ClusterFile.hpp
     include/aare/CtbRawFile.hpp
     include/aare/ClusterVector.hpp
+    include/aare/decode.hpp
     include/aare/defs.hpp
     include/aare/Dtype.hpp
     include/aare/File.hpp
+    include/aare/Fit.hpp
     include/aare/FileInterface.hpp
     include/aare/Frame.hpp
+    include/aare/geo_helpers.hpp
     include/aare/NDArray.hpp
     include/aare/NDView.hpp
     include/aare/NumpyFile.hpp
@@ -298,6 +329,7 @@ set(PUBLICHEADERS
     include/aare/RawMasterFile.hpp
     include/aare/RawSubFile.hpp
     include/aare/VarClusterFinder.hpp
+    include/aare/utils/task.hpp

 )

@@ -307,14 +339,18 @@ set(SourceFiles
     ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFile.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/defs.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/Dtype.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/src/decode.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/Frame.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/File.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/src/Fit.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/src/geo_helpers.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyHelpers.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/PixelMap.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/RawFile.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/RawSubFile.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/RawMasterFile.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/src/utils/task.cpp
 )

@@ -334,6 +370,7 @@ target_link_libraries(
     ${STD_FS_LIB} # from helpers.cmake
     PRIVATE
     aare_compiler_flags
+    lmfit
 )

 set_target_properties(aare_core PROPERTIES
@@ -350,6 +387,7 @@ if(AARE_TESTS)
     ${CMAKE_CURRENT_SOURCE_DIR}/src/defs.test.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/Dtype.test.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/Frame.test.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/src/geo_helpers.test.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/RawMasterFile.test.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/NDArray.test.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/NDView.test.cpp
@@ -359,6 +397,7 @@ if(AARE_TESTS)
     ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.test.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyHelpers.test.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/src/RawFile.test.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/src/utils/task.test.cpp

     )
     target_sources(tests PRIVATE ${TestSources} )
conda-recipe/meta.yaml:
@@ -1,6 +1,6 @@
 package:
   name: aare
-  version: 2024.12.16.dev0 #TODO! how to not duplicate this?
+  version: 2025.2.12 #TODO! how to not duplicate this?


 source:
docs/CMakeLists.txt:
@@ -12,28 +12,7 @@ set(SPHINX_BUILD ${CMAKE_CURRENT_BINARY_DIR})


 file(GLOB SPHINX_SOURCE_FILES CONFIGURE_DEPENDS "src/*.rst")
-# set(SPHINX_SOURCE_FILES
-#     src/index.rst
-#     src/Installation.rst
-#     src/Requirements.rst
-#     src/NDArray.rst
-#     src/NDView.rst
-#     src/File.rst
-#     src/Frame.rst
-#     src/Dtype.rst
-#     src/ClusterFinder.rst
-#     src/ClusterFile.rst
-#     src/Pedestal.rst
-#     src/RawFile.rst
-#     src/RawSubFile.rst
-#     src/RawMasterFile.rst
-#     src/VarClusterFinder.rst
-#     src/pyVarClusterFinder.rst
-#     src/pyFile.rst
-#     src/pyCtbRawFile.rst
-#     src/pyRawFile.rst
-#     src/pyRawMasterFile.rst
-# )


 foreach(filename ${SPHINX_SOURCE_FILES})
New file: docs/src/ClusterFinderMT.rst (+7 lines)

ClusterFinderMT
==================


.. doxygenclass:: aare::ClusterFinderMT
   :members:
   :undoc-members:
docs/src/index.rst:
@@ -30,10 +30,13 @@ AARE
    pyFile
    pyCtbRawFile
    pyClusterFile
+   pyClusterVector
    pyRawFile
    pyRawMasterFile
    pyVarClusterFinder

+   pyFit
+

 .. toctree::
    :caption: C++ API
@@ -45,6 +48,7 @@ AARE
    File
    Dtype
    ClusterFinder
+   ClusterFinderMT
    ClusterFile
    ClusterVector
    Pedestal
New file: docs/src/pyClusterVector.rst (+33 lines)

ClusterVector
================

The ClusterVector holds clusters from the ClusterFinder. Since it is templated
in C++, we use a suffix indicating the data type in Python. The suffix is
``_i`` for integer, ``_f`` for float, and ``_d`` for double.

At the moment the functionality from Python is limited: it is not supported
to push_back clusters to the vector. The intended use case is to pass it to
C++ functions that support the ClusterVector, or to view it as a numpy array.

**View ClusterVector as numpy array**

.. code:: python

    from aare import ClusterFile
    with ClusterFile("path/to/file") as f:
        cluster_vector = f.read_frame()

    # Create a copy of the cluster data in a numpy array
    clusters = np.array(cluster_vector)

    # Avoid copying the data by passing copy=False
    clusters = np.array(cluster_vector, copy = False)


.. py:currentmodule:: aare

.. autoclass:: ClusterVector_i
    :members:
    :undoc-members:
    :show-inheritance:
    :inherited-members:
New file: docs/src/pyFit.rst (+19 lines)

Fit
========

.. py:currentmodule:: aare


**Functions**

.. autofunction:: gaus

.. autofunction:: pol1


**Fitting**

.. autofunction:: fit_gaus

.. autofunction:: fit_pol1
New file: include/aare/CircularFifo.hpp (+97 lines)

#pragma once

#include <chrono>
#include <fmt/color.h>
#include <fmt/format.h>
#include <memory>
#include <thread>

#include "aare/ProducerConsumerQueue.hpp"

namespace aare {

template <class ItemType> class CircularFifo {
    uint32_t fifo_size;
    aare::ProducerConsumerQueue<ItemType> free_slots;
    aare::ProducerConsumerQueue<ItemType> filled_slots;

  public:
    CircularFifo() : CircularFifo(100){};
    CircularFifo(uint32_t size) : fifo_size(size), free_slots(size + 1), filled_slots(size + 1) {

        // TODO! how do we deal with alignment for writing? alignas???
        // Do we give the user a chance to provide memory locations?
        // Templated allocator?
        for (size_t i = 0; i < fifo_size; ++i) {
            free_slots.write(ItemType{});
        }
    }

    bool next() {
        // TODO! avoid default constructing ItemType
        ItemType it;
        if (!filled_slots.read(it))
            return false;
        if (!free_slots.write(std::move(it)))
            return false;
        return true;
    }

    ~CircularFifo() {}

    using value_type = ItemType;

    auto numFilledSlots() const noexcept { return filled_slots.sizeGuess(); }
    auto numFreeSlots() const noexcept { return free_slots.sizeGuess(); }
    auto isFull() const noexcept { return filled_slots.isFull(); }

    ItemType pop_free() {
        ItemType v;
        while (!free_slots.read(v))
            ;
        return std::move(v);
        // return v;
    }

    bool try_pop_free(ItemType &v) { return free_slots.read(v); }

    ItemType pop_value(std::chrono::nanoseconds wait, std::atomic<bool> &stopped) {
        ItemType v;
        while (!filled_slots.read(v) && !stopped) {
            std::this_thread::sleep_for(wait);
        }
        return std::move(v);
    }

    ItemType pop_value() {
        ItemType v;
        while (!filled_slots.read(v))
            ;
        return std::move(v);
    }

    ItemType *frontPtr() { return filled_slots.frontPtr(); }

    // TODO! Add function to move item from filled to free to be used
    // with the frontPtr function

    template <class... Args> void push_value(Args &&...recordArgs) {
        while (!filled_slots.write(std::forward<Args>(recordArgs)...))
            ;
    }

    template <class... Args> bool try_push_value(Args &&...recordArgs) {
        return filled_slots.write(std::forward<Args>(recordArgs)...);
    }

    template <class... Args> void push_free(Args &&...recordArgs) {
        while (!free_slots.write(std::forward<Args>(recordArgs)...))
            ;
    }

    template <class... Args> bool try_push_free(Args &&...recordArgs) {
        return free_slots.write(std::forward<Args>(recordArgs)...);
    }
};

} // namespace aare
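The CircularFifo above pairs a free-slot queue with a filled-slot queue so buffers can be recycled between a producer and a consumer without reallocating. A minimal usage sketch, using only the member functions shown in the header (pop_free, push_value, pop_value, push_free); the Item type is a placeholder:

#include "aare/CircularFifo.hpp"
#include <cstdio>
#include <utility>

struct Item { int payload{}; }; // placeholder item type

int main() {
    aare::CircularFifo<Item> fifo(8); // 8 pre-allocated free slots

    // Producer side: take a free slot, fill it, publish it as a filled slot.
    Item buf = fifo.pop_free();
    buf.payload = 42;
    fifo.push_value(std::move(buf));

    // Consumer side: take a filled slot, use it, return it to the free queue.
    Item out = fifo.pop_value();
    std::printf("payload = %d\n", out.payload);
    fifo.push_free(std::move(out));
    return 0;
}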
New file: include/aare/ClusterCollector.hpp (+52 lines)

#pragma once
#include <atomic>
#include <thread>

#include "aare/ProducerConsumerQueue.hpp"
#include "aare/ClusterVector.hpp"
#include "aare/ClusterFinderMT.hpp"

namespace aare {

class ClusterCollector{
    ProducerConsumerQueue<ClusterVector<int>>* m_source;
    std::atomic<bool> m_stop_requested{false};
    std::atomic<bool> m_stopped{true};
    std::chrono::milliseconds m_default_wait{1};
    std::thread m_thread;
    std::vector<ClusterVector<int>> m_clusters;

    void process(){
        m_stopped = false;
        fmt::print("ClusterCollector started\n");
        while (!m_stop_requested || !m_source->isEmpty()) {
            if (ClusterVector<int> *clusters = m_source->frontPtr();
                clusters != nullptr) {
                m_clusters.push_back(std::move(*clusters));
                m_source->popFront();
            }else{
                std::this_thread::sleep_for(m_default_wait);
            }
        }
        fmt::print("ClusterCollector stopped\n");
        m_stopped = true;
    }

  public:
    ClusterCollector(ClusterFinderMT<uint16_t, double, int32_t>* source){
        m_source = source->sink();
        m_thread = std::thread(&ClusterCollector::process, this);
    }
    void stop(){
        m_stop_requested = true;
        m_thread.join();
    }
    std::vector<ClusterVector<int>> steal_clusters(){
        if(!m_stopped){
            throw std::runtime_error("ClusterCollector is still running");
        }
        return std::move(m_clusters);
    }
};

} // namespace aare
include/aare/ClusterFile.hpp:
@@ -33,6 +33,12 @@ typedef enum {
     pTopRight = 8
 } pixel;

+struct Eta2 {
+    double x;
+    double y;
+    corner c;
+};
+
 struct ClusterAnalysis {
     uint32_t c;
     int32_t tot;
@@ -49,6 +55,19 @@ int32_t frame_number
 uint32_t number_of_clusters
 ....
 */

+/**
+ * @brief Class to read and write cluster files
+ * Expects data to be laid out as:
+ *
+ *
+ *       int32_t frame_number
+ *       uint32_t number_of_clusters
+ *       int16_t x, int16_t y, int32_t data[9] x number_of_clusters
+ *       int32_t frame_number
+ *       uint32_t number_of_clusters
+ *       etc.
+ */
 class ClusterFile {
     FILE *fp{};
     uint32_t m_num_left{};
@@ -56,17 +75,52 @@ class ClusterFile {
     const std::string m_mode;

   public:
+    /**
+     * @brief Construct a new Cluster File object
+     * @param fname path to the file
+     * @param chunk_size number of clusters to read at a time when iterating
+     * over the file
+     * @param mode mode to open the file in. "r" for reading, "w" for writing,
+     * "a" for appending
+     * @throws std::runtime_error if the file could not be opened
+     */
     ClusterFile(const std::filesystem::path &fname, size_t chunk_size = 1000,
                 const std::string &mode = "r");
-    ~ClusterFile();
-    std::vector<Cluster3x3> read_clusters(size_t n_clusters);
-    std::vector<Cluster3x3> read_frame(int32_t &out_fnum);
-    void write_frame(int32_t frame_number,
-                     const ClusterVector<int32_t> &clusters);
-    std::vector<Cluster3x3>
-    read_cluster_with_cut(size_t n_clusters, double *noise_map, int nx, int ny);
+
+
+    ~ClusterFile();
+
+    /**
+     * @brief Read n_clusters clusters from the file discarding frame numbers.
+     * If EOF is reached the returned vector will have less than n_clusters
+     * clusters
+     */
+    ClusterVector<int32_t> read_clusters(size_t n_clusters);
+
+    /**
+     * @brief Read a single frame from the file and return the clusters. The
+     * cluster vector will have the frame number set.
+     * @throws std::runtime_error if the file is not opened for reading or the file pointer not
+     * at the beginning of a frame
+     */
+    ClusterVector<int32_t> read_frame();
+
+
+    void write_frame(const ClusterVector<int32_t> &clusters);
+
+    // Need to be migrated to support NDArray and return a ClusterVector
+    // std::vector<Cluster3x3>
+    // read_cluster_with_cut(size_t n_clusters, double *noise_map, int nx, int ny);
+
+    /**
+     * @brief Return the chunk size
+     */
     size_t chunk_size() const { return m_chunk_size; }
+
+    /**
+     * @brief Close the file. If not closed the file will be closed in the destructor
+     */
     void close();
 };

@@ -76,6 +130,6 @@ int analyze_cluster(Cluster3x3& cl, int32_t *t2, int32_t *t3, char *quad,
                     double *eta2x, double *eta2y, double *eta3x, double *eta3y);

 NDArray<double, 2> calculate_eta2(ClusterVector<int> &clusters);
-std::array<double,2> calculate_eta2( Cluster3x3& cl);
+Eta2 calculate_eta2(Cluster3x3 &cl);

 } // namespace aare
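With the new interface, read_clusters and read_frame return a ClusterVector<int32_t> instead of std::vector<Cluster3x3>. A minimal reading sketch, using only the declarations in the header above (the file name is a placeholder):

#include "aare/ClusterFile.hpp"
#include <fmt/core.h>

int main() {
    // Placeholder path; default chunk_size = 1000, mode "r"
    aare::ClusterFile f("clusters.clust");

    // Read one frame worth of clusters; the frame number is carried by the vector.
    aare::ClusterVector<int32_t> cv = f.read_frame();
    fmt::print("frame {} holds {} clusters\n", cv.frame_number(), cv.size());

    auto totals  = cv.sum();     // per-cluster pixel sums
    auto best2x2 = cv.sum_2x2(); // best 2x2 sub-sum (3x3 clusters only)
    fmt::print("got {} sums and {} 2x2 sums\n", totals.size(), best2x2.size());

    f.close();
    return 0;
}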
New file: include/aare/ClusterFileSink.hpp (+56 lines)

#pragma once
#include <atomic>
#include <filesystem>
#include <thread>

#include "aare/ProducerConsumerQueue.hpp"
#include "aare/ClusterVector.hpp"
#include "aare/ClusterFinderMT.hpp"

namespace aare{

class ClusterFileSink{
    ProducerConsumerQueue<ClusterVector<int>>* m_source;
    std::atomic<bool> m_stop_requested{false};
    std::atomic<bool> m_stopped{true};
    std::chrono::milliseconds m_default_wait{1};
    std::thread m_thread;
    std::ofstream m_file;


    void process(){
        m_stopped = false;
        fmt::print("ClusterFileSink started\n");
        while (!m_stop_requested || !m_source->isEmpty()) {
            if (ClusterVector<int> *clusters = m_source->frontPtr();
                clusters != nullptr) {
                // Write clusters to file
                int32_t frame_number = clusters->frame_number(); //TODO! Should we store frame number already as int?
                uint32_t num_clusters = clusters->size();
                m_file.write(reinterpret_cast<const char*>(&frame_number), sizeof(frame_number));
                m_file.write(reinterpret_cast<const char*>(&num_clusters), sizeof(num_clusters));
                m_file.write(reinterpret_cast<const char*>(clusters->data()), clusters->size() * clusters->item_size());
                m_source->popFront();
            }else{
                std::this_thread::sleep_for(m_default_wait);
            }
        }
        fmt::print("ClusterFileSink stopped\n");
        m_stopped = true;
    }

  public:
    ClusterFileSink(ClusterFinderMT<uint16_t, double, int32_t>* source, const std::filesystem::path& fname){
        m_source = source->sink();
        m_thread = std::thread(&ClusterFileSink::process, this);
        m_file.open(fname, std::ios::binary);
    }
    void stop(){
        m_stop_requested = true;
        m_thread.join();
        m_file.close();
    }
};


} // namespace aare
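ClusterFileSink writes the same flat layout that the ClusterFile header documents: int32_t frame_number, uint32_t number_of_clusters, then number_of_clusters records of item_size() bytes each. A hedged sketch of reading one such block back with plain iostreams, assuming the 3x3 int32_t default cluster (record layout int16_t x, int16_t y, int32_t data[9], which packs to 40 bytes without padding):

#include <cstdint>
#include <fstream>
#include <vector>

struct Cluster3x3Record {   // assumed record layout matching the documented file format
    int16_t x;
    int16_t y;
    int32_t data[9];
};

int main() {
    std::ifstream in("clusters.clust", std::ios::binary); // placeholder path
    int32_t frame_number{};
    uint32_t n_clusters{};
    in.read(reinterpret_cast<char *>(&frame_number), sizeof(frame_number));
    in.read(reinterpret_cast<char *>(&n_clusters), sizeof(n_clusters));

    std::vector<Cluster3x3Record> clusters(n_clusters);
    in.read(reinterpret_cast<char *>(clusters.data()),
            n_clusters * sizeof(Cluster3x3Record));
    return 0;
}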
include/aare/ClusterFinder.hpp:
@@ -10,26 +10,12 @@

 namespace aare {

-/** enum to define the event types */
-enum class eventType {
-    PEDESTAL,   /** pedestal */
-    NEIGHBOUR,  /** neighbour i.e. below threshold, but in the cluster of a
-                   photon */
-    PHOTON,     /** photon i.e. above threshold */
-    PHOTON_MAX, /** maximum of a cluster satisfying the photon conditions */
-    NEGATIVE_PEDESTAL, /** negative value, will not be accounted for as pedestal
-                          in order to avoid drift of the pedestal towards
-                          negative values */
-    UNDEFINED_EVENT = -1 /** undefined */
-};
-
 template <typename FRAME_TYPE = uint16_t, typename PEDESTAL_TYPE = double,
           typename CT = int32_t>
 class ClusterFinder {
     Shape<2> m_image_size;
     const int m_cluster_sizeX;
     const int m_cluster_sizeY;
-    // const PEDESTAL_TYPE m_threshold;
     const PEDESTAL_TYPE m_nSigma;
     const PEDESTAL_TYPE c2;
     const PEDESTAL_TYPE c3;
@@ -61,6 +47,7 @@ class ClusterFinder {

     NDArray<PEDESTAL_TYPE, 2> pedestal() { return m_pedestal.mean(); }
     NDArray<PEDESTAL_TYPE, 2> noise() { return m_pedestal.std(); }
+    void clear_pedestal() { m_pedestal.clear(); }

     /**
      * @brief Move the clusters from the ClusterVector in the ClusterFinder to a
@@ -78,13 +65,13 @@ class ClusterFinder {
         m_clusters = ClusterVector<CT>(m_cluster_sizeX, m_cluster_sizeY);
         return tmp;
     }
-    void find_clusters(NDView<FRAME_TYPE, 2> frame) {
+    void find_clusters(NDView<FRAME_TYPE, 2> frame, uint64_t frame_number = 0) {
         // // TODO! deal with even size clusters
         // // currently 3,3 -> +/- 1
         // // 4,4 -> +/- 2
         int dy = m_cluster_sizeY / 2;
         int dx = m_cluster_sizeX / 2;
+        m_clusters.set_frame_number(frame_number);
         std::vector<CT> cluster_data(m_cluster_sizeX * m_cluster_sizeY);
         for (int iy = 0; iy < frame.shape(0); iy++) {
             for (int ix = 0; ix < frame.shape(1); ix++) {
@@ -121,8 +108,8 @@ class ClusterFinder {
                 } else if (total > c3 * m_nSigma * rms) {
                     // pass
                 } else {
-                    // m_pedestal.push(iy, ix, frame(iy, ix));
-                    m_pedestal.push_fast(iy, ix, frame(iy, ix));
+                    // m_pedestal.push(iy, ix, frame(iy, ix)); // Safe option
+                    m_pedestal.push_fast(iy, ix, frame(iy, ix)); // Assume we have reached n_samples in the pedestal, slight performance improvement
                     continue; // It was a pedestal value nothing to store
                 }

@@ -157,114 +144,6 @@ class ClusterFinder {
             }
         }
     }
-
-    //    // template <typename FRAME_TYPE, typename PEDESTAL_TYPE>
-    //    std::vector<DynamicCluster>
-    //    find_clusters_with_threshold(NDView<FRAME_TYPE, 2> frame,
-    //                                 Pedestal<PEDESTAL_TYPE> &pedestal) {
-    //        assert(m_threshold > 0);
-    //        std::vector<DynamicCluster> clusters;
-    //        std::vector<std::vector<eventType>> eventMask;
-    //        ...
-    //        return clusters;
-    //    }
 };

 } // namespace aare

(The body of this hunk deletes the full commented-out, threshold-based find_clusters_with_threshold implementation, roughly 100 further lines of dead code: per-pixel photon conversion, event masks, threshold handling via tthr/tthr1/tthr2, and DynamicCluster assembly.)
New file: include/aare/ClusterFinderMT.hpp (+268 lines)

#pragma once
#include <atomic>
#include <cstdint>
#include <memory>
#include <thread>
#include <vector>

#include "aare/ClusterFinder.hpp"
#include "aare/NDArray.hpp"
#include "aare/ProducerConsumerQueue.hpp"

namespace aare {

enum class FrameType {
    DATA,
    PEDESTAL,
};

struct FrameWrapper {
    FrameType type;
    uint64_t frame_number;
    NDArray<uint16_t, 2> data;
};

/**
 * @brief ClusterFinderMT is a multi-threaded version of ClusterFinder. It uses
 * a producer-consumer queue to distribute the frames to the threads. The
 * clusters are collected in a single output queue.
 * @tparam FRAME_TYPE type of the frame data
 * @tparam PEDESTAL_TYPE type of the pedestal data
 * @tparam CT type of the cluster data
 */
template <typename FRAME_TYPE = uint16_t, typename PEDESTAL_TYPE = double,
          typename CT = int32_t>
class ClusterFinderMT {
    size_t m_current_thread{0};
    size_t m_n_threads{0};
    using Finder = ClusterFinder<FRAME_TYPE, PEDESTAL_TYPE, CT>;
    using InputQueue = ProducerConsumerQueue<FrameWrapper>;
    using OutputQueue = ProducerConsumerQueue<ClusterVector<int>>;
    std::vector<std::unique_ptr<InputQueue>> m_input_queues;
    std::vector<std::unique_ptr<OutputQueue>> m_output_queues;

    OutputQueue m_sink{1000}; // All clusters go into this queue

    std::vector<std::unique_ptr<Finder>> m_cluster_finders;
    std::vector<std::thread> m_threads;
    std::thread m_collect_thread;
    std::chrono::milliseconds m_default_wait{1};

    std::atomic<bool> m_stop_requested{false};
    std::atomic<bool> m_processing_threads_stopped{true};

    /**
     * @brief Function called by the processing threads. It reads the frames
     * from the input queue and processes them.
     */
    void process(int thread_id) {
        auto cf = m_cluster_finders[thread_id].get();
        auto q = m_input_queues[thread_id].get();
        bool realloc_same_capacity = true;

        while (!m_stop_requested || !q->isEmpty()) {
            if (FrameWrapper *frame = q->frontPtr(); frame != nullptr) {

                switch (frame->type) {
                case FrameType::DATA:
                    cf->find_clusters(frame->data.view(), frame->frame_number);
                    m_output_queues[thread_id]->write(cf->steal_clusters(realloc_same_capacity));
                    break;

                case FrameType::PEDESTAL:
                    m_cluster_finders[thread_id]->push_pedestal_frame(
                        frame->data.view());
                    break;
                }

                // frame is processed now discard it
                m_input_queues[thread_id]->popFront();
            } else {
                std::this_thread::sleep_for(m_default_wait);
            }
        }
    }

    /**
     * @brief Collect all the clusters from the output queues and write them to
     * the sink
     */
    void collect() {
        bool empty = true;
        while (!m_stop_requested || !empty || !m_processing_threads_stopped) {
            empty = true;
            for (auto &queue : m_output_queues) {
                if (!queue->isEmpty()) {

                    while (!m_sink.write(std::move(*queue->frontPtr()))) {
                        std::this_thread::sleep_for(m_default_wait);
                    }
                    queue->popFront();
                    empty = false;
                }
            }
        }
    }

  public:
    /**
     * @brief Construct a new ClusterFinderMT object
     * @param image_size size of the image
     * @param cluster_size size of the cluster
     * @param nSigma number of sigma above the pedestal to consider a photon
     * @param capacity initial capacity of the cluster vector. Should match
     * expected number of clusters in a frame per frame.
     * @param n_threads number of threads to use
     */
    ClusterFinderMT(Shape<2> image_size, Shape<2> cluster_size,
                    PEDESTAL_TYPE nSigma = 5.0, size_t capacity = 2000,
                    size_t n_threads = 3)
        : m_n_threads(n_threads) {
        for (size_t i = 0; i < n_threads; i++) {
            m_cluster_finders.push_back(
                std::make_unique<ClusterFinder<FRAME_TYPE, PEDESTAL_TYPE, CT>>(
                    image_size, cluster_size, nSigma, capacity));
        }
        for (size_t i = 0; i < n_threads; i++) {
            m_input_queues.emplace_back(std::make_unique<InputQueue>(200));
            m_output_queues.emplace_back(std::make_unique<OutputQueue>(200));
        }
        //TODO! Should we start automatically?
        start();
    }

    /**
     * @brief Return the sink queue where all the clusters are collected
     * @warning You need to empty this queue otherwise the cluster finder will wait forever
     */
    ProducerConsumerQueue<ClusterVector<int>> *sink() { return &m_sink; }

    /**
     * @brief Start all processing threads
     */
    void start() {
        m_processing_threads_stopped = false;
        m_stop_requested = false;

        for (size_t i = 0; i < m_n_threads; i++) {
            m_threads.push_back(
                std::thread(&ClusterFinderMT::process, this, i));
        }

        m_collect_thread = std::thread(&ClusterFinderMT::collect, this);
    }

    /**
     * @brief Stop all processing threads
     */
    void stop() {
        m_stop_requested = true;

        for (auto &thread : m_threads) {
            thread.join();
        }
        m_threads.clear();

        m_processing_threads_stopped = true;
        m_collect_thread.join();
    }

    /**
     * @brief Wait for all the queues to be empty. Mostly used for timing tests.
     */
    void sync() {
        for (auto &q : m_input_queues) {
            while (!q->isEmpty()) {
                std::this_thread::sleep_for(m_default_wait);
            }
        }
        for (auto &q : m_output_queues) {
            while (!q->isEmpty()) {
                std::this_thread::sleep_for(m_default_wait);
            }
        }
        while (!m_sink.isEmpty()) {
            std::this_thread::sleep_for(m_default_wait);
        }
    }

    /**
     * @brief Push a pedestal frame to all the cluster finders. The frames is
     * expected to be dark. No photon finding is done. Just pedestal update.
     */
    void push_pedestal_frame(NDView<FRAME_TYPE, 2> frame) {
        FrameWrapper fw{FrameType::PEDESTAL, 0,
                        NDArray(frame)}; // TODO! copies the data!

        for (auto &queue : m_input_queues) {
            while (!queue->write(fw)) {
                std::this_thread::sleep_for(m_default_wait);
            }
        }
    }

    /**
     * @brief Push the frame to the queue of the next available thread. Function
     * returns once the frame is in a queue.
     * @note Spin locks with a default wait if the queue is full.
     */
    void find_clusters(NDView<FRAME_TYPE, 2> frame, uint64_t frame_number = 0) {
        FrameWrapper fw{FrameType::DATA, frame_number,
                        NDArray(frame)}; // TODO! copies the data!
        while (!m_input_queues[m_current_thread % m_n_threads]->write(fw)) {
            std::this_thread::sleep_for(m_default_wait);
        }
        m_current_thread++;
    }

    void clear_pedestal() {
        if (!m_processing_threads_stopped) {
            throw std::runtime_error("ClusterFinderMT is still running");
        }
        for (auto &cf : m_cluster_finders) {
            cf->clear_pedestal();
        }
    }

    /**
     * @brief Return the pedestal currently used by the cluster finder
     * @param thread_index index of the thread
     */
    auto pedestal(size_t thread_index = 0) {
        if (m_cluster_finders.empty()) {
            throw std::runtime_error("No cluster finders available");
        }
        if (!m_processing_threads_stopped) {
            throw std::runtime_error("ClusterFinderMT is still running");
        }
        if (thread_index >= m_cluster_finders.size()) {
            throw std::runtime_error("Thread index out of range");
        }
        return m_cluster_finders[thread_index]->pedestal();
    }

    /**
     * @brief Return the noise currently used by the cluster finder
     * @param thread_index index of the thread
     */
    auto noise(size_t thread_index = 0) {
        if (m_cluster_finders.empty()) {
            throw std::runtime_error("No cluster finders available");
        }
        if (!m_processing_threads_stopped) {
            throw std::runtime_error("ClusterFinderMT is still running");
        }
        if (thread_index >= m_cluster_finders.size()) {
            throw std::runtime_error("Thread index out of range");
        }
        return m_cluster_finders[thread_index]->noise();
    }

    // void push(FrameWrapper&& frame) {
    //     //TODO! need to loop until we are successful
    //     auto rc = m_input_queue.write(std::move(frame));
    //     fmt::print("pushed frame {}\n", rc);
    // }
};

} // namespace aare
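Putting the new pieces together, a hedged end-to-end sketch: pedestal frames go in via push_pedestal_frame, data frames via find_clusters, and a ClusterCollector drains the sink on its own thread. Only constructors and methods shown in this commit are used; brace-initialising Shape<2> and constructing an NDArray<uint16_t, 2> from a shape are assumptions here.

#include "aare/ClusterCollector.hpp"
#include "aare/ClusterFinderMT.hpp"
#include "aare/NDArray.hpp"

int main() {
    using namespace aare;

    // 512x1024 image, 3x3 clusters, 5 sigma, default capacity, 3 worker threads.
    ClusterFinderMT<uint16_t, double, int32_t> finder({512, 1024}, {3, 3}, 5.0, 2000, 3);
    ClusterCollector collector(&finder); // drains finder.sink() in the background

    NDArray<uint16_t, 2> frame({512, 1024}); // assumed NDArray(Shape<2>) constructor
    // ... fill `frame` with dark data, then:
    finder.push_pedestal_frame(frame.view());

    // ... fill `frame` with detector data, then:
    finder.find_clusters(frame.view(), /*frame_number=*/1);

    finder.stop();    // join the processing and collect threads inside the finder
    collector.stop(); // join the collector thread
    auto clusters = collector.steal_clusters(); // one ClusterVector<int> per processed frame
    return 0;
}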
@ -1,4 +1,6 @@
|
|||||||
#pragma once
|
#pragma once
|
||||||
|
#include <algorithm>
|
||||||
|
#include <array>
|
||||||
#include <cstddef>
|
#include <cstddef>
|
||||||
#include <cstdint>
|
#include <cstdint>
|
||||||
#include <numeric>
|
#include <numeric>
|
||||||
@ -10,10 +12,14 @@ namespace aare {
|
|||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief ClusterVector is a container for clusters of various sizes. It uses a
|
* @brief ClusterVector is a container for clusters of various sizes. It uses a
|
||||||
* contiguous memory buffer to store the clusters.
|
* contiguous memory buffer to store the clusters. It is templated on the data
|
||||||
|
* type and the coordinate type of the clusters.
|
||||||
* @note push_back can invalidate pointers to elements in the container
|
* @note push_back can invalidate pointers to elements in the container
|
||||||
|
* @warning ClusterVector is currently move only to catch unintended copies, but
|
||||||
|
* this might change since there are probably use cases where copying is needed.
|
||||||
* @tparam T data type of the pixels in the cluster
|
* @tparam T data type of the pixels in the cluster
|
||||||
* @tparam CoordType data type of the x and y coordinates of the cluster (normally int16_t)
|
* @tparam CoordType data type of the x and y coordinates of the cluster
|
||||||
|
* (normally int16_t)
|
||||||
*/
|
*/
|
||||||
template <typename T, typename CoordType = int16_t> class ClusterVector {
|
template <typename T, typename CoordType = int16_t> class ClusterVector {
|
||||||
using value_type = T;
|
using value_type = T;
|
||||||
@ -22,6 +28,7 @@ template <typename T, typename CoordType=int16_t> class ClusterVector {
|
|||||||
std::byte *m_data{};
|
std::byte *m_data{};
|
||||||
size_t m_size{0};
|
size_t m_size{0};
|
||||||
size_t m_capacity;
|
size_t m_capacity;
|
||||||
|
uint64_t m_frame_number{0}; // TODO! Check frame number size and type
|
||||||
/*
|
/*
|
||||||
Format string used in the python bindings to create a numpy
|
Format string used in the python bindings to create a numpy
|
||||||
array from the buffer
|
array from the buffer
|
||||||
@ -38,23 +45,24 @@ template <typename T, typename CoordType=int16_t> class ClusterVector {
|
|||||||
* @param cluster_size_x size of the cluster in x direction
|
* @param cluster_size_x size of the cluster in x direction
|
||||||
* @param cluster_size_y size of the cluster in y direction
|
* @param cluster_size_y size of the cluster in y direction
|
||||||
* @param capacity initial capacity of the buffer in number of clusters
|
* @param capacity initial capacity of the buffer in number of clusters
|
||||||
|
* @param frame_number frame number of the clusters. Default is 0, which is
|
||||||
|
* also used to indicate that the clusters come from many frames
|
||||||
*/
|
*/
|
||||||
ClusterVector(size_t cluster_size_x, size_t cluster_size_y,
|
ClusterVector(size_t cluster_size_x = 3, size_t cluster_size_y = 3,
|
||||||
size_t capacity = 1024)
|
size_t capacity = 1024, uint64_t frame_number = 0)
|
||||||
: m_cluster_size_x(cluster_size_x), m_cluster_size_y(cluster_size_y),
|
: m_cluster_size_x(cluster_size_x), m_cluster_size_y(cluster_size_y),
|
||||||
m_capacity(capacity) {
|
m_capacity(capacity), m_frame_number(frame_number) {
|
||||||
allocate_buffer(capacity);
|
allocate_buffer(capacity);
|
||||||
}
|
}
|
||||||
~ClusterVector() {
|
|
||||||
delete[] m_data;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
~ClusterVector() { delete[] m_data; }
|
||||||
|
|
||||||
// Move constructor
|
// Move constructor
|
||||||
ClusterVector(ClusterVector &&other) noexcept
|
ClusterVector(ClusterVector &&other) noexcept
|
||||||
: m_cluster_size_x(other.m_cluster_size_x),
|
: m_cluster_size_x(other.m_cluster_size_x),
|
||||||
m_cluster_size_y(other.m_cluster_size_y), m_data(other.m_data),
|
m_cluster_size_y(other.m_cluster_size_y), m_data(other.m_data),
|
||||||
m_size(other.m_size), m_capacity(other.m_capacity) {
|
m_size(other.m_size), m_capacity(other.m_capacity),
|
||||||
|
m_frame_number(other.m_frame_number) {
|
||||||
other.m_data = nullptr;
|
other.m_data = nullptr;
|
||||||
other.m_size = 0;
|
other.m_size = 0;
|
||||||
other.m_capacity = 0;
|
other.m_capacity = 0;
|
||||||
@ -69,9 +77,11 @@ template <typename T, typename CoordType=int16_t> class ClusterVector {
|
|||||||
m_data = other.m_data;
|
m_data = other.m_data;
|
||||||
m_size = other.m_size;
|
m_size = other.m_size;
|
||||||
m_capacity = other.m_capacity;
|
m_capacity = other.m_capacity;
|
||||||
|
m_frame_number = other.m_frame_number;
|
||||||
other.m_data = nullptr;
|
other.m_data = nullptr;
|
||||||
other.m_size = 0;
|
other.m_size = 0;
|
||||||
other.m_capacity = 0;
|
other.m_capacity = 0;
|
||||||
|
other.m_frame_number = 0;
|
||||||
}
|
}
|
||||||
return *this;
|
return *this;
|
||||||
}
|
}
|
||||||
@ -79,7 +89,8 @@ template <typename T, typename CoordType=int16_t> class ClusterVector {
|
|||||||
/**
|
/**
|
||||||
* @brief Reserve space for at least capacity clusters
|
* @brief Reserve space for at least capacity clusters
|
||||||
* @param capacity number of clusters to reserve space for
|
* @param capacity number of clusters to reserve space for
|
||||||
* @note If capacity is less than the current capacity, the function does nothing.
|
* @note If capacity is less than the current capacity, the function does
|
||||||
|
* nothing.
|
||||||
*/
|
*/
|
||||||
void reserve(size_t capacity) {
|
void reserve(size_t capacity) {
|
||||||
if (capacity > m_capacity) {
|
if (capacity > m_capacity) {
|
||||||
@ -92,7 +103,8 @@ template <typename T, typename CoordType=int16_t> class ClusterVector {
|
|||||||
* @param x x-coordinate of the cluster
|
* @param x x-coordinate of the cluster
|
||||||
* @param y y-coordinate of the cluster
|
* @param y y-coordinate of the cluster
|
||||||
* @param data pointer to the data of the cluster
|
* @param data pointer to the data of the cluster
|
||||||
* @warning The data pointer must point to a buffer of size cluster_size_x * cluster_size_y * sizeof(T)
|
* @warning The data pointer must point to a buffer of size cluster_size_x *
|
||||||
|
* cluster_size_y * sizeof(T)
|
||||||
*/
|
*/
|
||||||
void push_back(CoordType x, CoordType y, const std::byte *data) {
|
void push_back(CoordType x, CoordType y, const std::byte *data) {
|
||||||
if (m_size == m_capacity) {
|
if (m_size == m_capacity) {
|
||||||
@ -108,7 +120,15 @@ template <typename T, typename CoordType=int16_t> class ClusterVector {
|
|||||||
ptr);
|
ptr);
|
||||||
m_size++;
|
m_size++;
|
||||||
}
|
}
|
||||||
|
ClusterVector &operator+=(const ClusterVector &other) {
|
||||||
|
if (m_size + other.m_size > m_capacity) {
|
||||||
|
allocate_buffer(m_capacity + other.m_size);
|
||||||
|
}
|
||||||
|
std::copy(other.m_data, other.m_data + other.m_size * item_size(),
|
||||||
|
m_data + m_size * item_size());
|
||||||
|
m_size += other.m_size;
|
||||||
|
return *this;
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief Sum the pixels in each cluster
|
* @brief Sum the pixels in each cluster
|
||||||
@ -116,7 +136,7 @@ template <typename T, typename CoordType=int16_t> class ClusterVector {
|
|||||||
*/
|
*/
|
||||||
std::vector<T> sum() {
|
std::vector<T> sum() {
|
||||||
std::vector<T> sums(m_size);
|
std::vector<T> sums(m_size);
|
||||||
const size_t stride = element_offset();
|
const size_t stride = item_size();
|
||||||
const size_t n_pixels = m_cluster_size_x * m_cluster_size_y;
|
const size_t n_pixels = m_cluster_size_x * m_cluster_size_y;
|
||||||
std::byte *ptr = m_data + 2 * sizeof(CoordType); // skip x and y
|
std::byte *ptr = m_data + 2 * sizeof(CoordType); // skip x and y
|
||||||
|
|
||||||
@ -129,26 +149,73 @@ template <typename T, typename CoordType=int16_t> class ClusterVector {
|
|||||||
return sums;
|
return sums;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief Return the maximum sum of the 2x2 subclusters in each cluster
|
||||||
|
* @return std::vector<T> vector of sums for each cluster
|
||||||
|
* @throws std::runtime_error if the cluster size is not 3x3
|
||||||
|
* @warning Only 3x3 clusters are supported for the 2x2 sum.
|
||||||
|
*/
|
||||||
|
std::vector<T> sum_2x2() {
|
||||||
|
std::vector<T> sums(m_size);
|
||||||
|
const size_t stride = item_size();
|
||||||
|
|
||||||
|
if (m_cluster_size_x != 3 || m_cluster_size_y != 3) {
|
||||||
|
throw std::runtime_error(
|
||||||
|
"Only 3x3 clusters are supported for the 2x2 sum.");
|
||||||
|
}
|
||||||
|
std::byte *ptr = m_data + 2 * sizeof(CoordType); // skip x and y
|
||||||
|
|
||||||
|
for (size_t i = 0; i < m_size; i++) {
|
||||||
|
std::array<T, 4> total;
|
||||||
|
auto T_ptr = reinterpret_cast<T *>(ptr);
|
||||||
|
total[0] = T_ptr[0] + T_ptr[1] + T_ptr[3] + T_ptr[4];
|
||||||
|
total[1] = T_ptr[1] + T_ptr[2] + T_ptr[4] + T_ptr[5];
|
||||||
|
total[2] = T_ptr[3] + T_ptr[4] + T_ptr[6] + T_ptr[7];
|
||||||
|
total[3] = T_ptr[4] + T_ptr[5] + T_ptr[7] + T_ptr[8];
|
||||||
|
|
||||||
|
sums[i] = *std::max_element(total.begin(), total.end());
|
||||||
|
ptr += stride;
|
||||||
|
}
|
||||||
|
|
||||||
|
return sums;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief Return the number of clusters in the vector
|
||||||
|
*/
|
||||||
size_t size() const { return m_size; }
|
size_t size() const { return m_size; }
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief Return the capacity of the buffer in number of clusters. This is
|
||||||
|
     * the number of clusters that can be stored in the current buffer without
     * reallocation.
     */
    size_t capacity() const { return m_capacity; }

    /**
-    * @brief Return the offset in bytes for a single cluster
+    * @brief Return the size in bytes of a single cluster
     */
-   size_t element_offset() const {
+   size_t item_size() const {
        return 2 * sizeof(CoordType) +
               m_cluster_size_x * m_cluster_size_y * sizeof(T);
    }

    /**
     * @brief Return the offset in bytes for the i-th cluster
     */
-   size_t element_offset(size_t i) const { return element_offset() * i; }
+   size_t element_offset(size_t i) const { return item_size() * i; }

    /**
     * @brief Return a pointer to the i-th cluster
     */
    std::byte *element_ptr(size_t i) { return m_data + element_offset(i); }
-   const std::byte * element_ptr(size_t i) const { return m_data + element_offset(i); }
+
+   /**
+    * @brief Return a pointer to the i-th cluster
+    */
+   const std::byte *element_ptr(size_t i) const {
+       return m_data + element_offset(i);
+   }

    size_t cluster_size_x() const { return m_cluster_size_x; }
    size_t cluster_size_y() const { return m_cluster_size_y; }

@ -156,8 +223,11 @@ template <typename T, typename CoordType=int16_t> class ClusterVector {
    std::byte *data() { return m_data; }
    std::byte const *data() const { return m_data; }

-   template<typename V>
-   V& at(size_t i) {
+   /**
+    * @brief Return a reference to the i-th cluster casted to type V
+    * @tparam V type of the cluster
+    */
+   template <typename V> V &at(size_t i) {
        return *reinterpret_cast<V *>(element_ptr(i));
    }

@ -166,11 +236,36 @@ template <typename T, typename CoordType=int16_t> class ClusterVector {
        return m_fmt_base;
    }

+   /**
+    * @brief Return the frame number of the clusters. 0 is used to indicate
+    * that the clusters come from many frames
+    */
+   uint64_t frame_number() const { return m_frame_number; }
+
+   void set_frame_number(uint64_t frame_number) {
+       m_frame_number = frame_number;
+   }
+
+   /**
+    * @brief Resize the vector to contain new_size clusters. If new_size is
+    * greater than the current capacity, a new buffer is allocated. If the size
+    * is smaller no memory is freed, size is just updated.
+    * @param new_size new size of the vector
+    * @warning The additional clusters are not initialized
+    */
+   void resize(size_t new_size) {
+       // TODO! Should we initialize the new clusters?
+       if (new_size > m_capacity) {
+           allocate_buffer(new_size);
+       }
+       m_size = new_size;
+   }

  private:
    void allocate_buffer(size_t new_capacity) {
-       size_t num_bytes = element_offset() * new_capacity;
+       size_t num_bytes = item_size() * new_capacity;
        std::byte *new_data = new std::byte[num_bytes]{};
-       std::copy(m_data, m_data + element_offset() * m_size, new_data);
+       std::copy(m_data, m_data + item_size() * m_size, new_data);
        delete[] m_data;
        m_data = new_data;
        m_capacity = new_capacity;
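A minimal usage sketch of the accessors above (not part of the commit; it assumes a ClusterVector<int32_t> holding 3x3 clusters with the default int16_t coordinates):

    #include "aare/ClusterVector.hpp"
    #include <cstdint>
    #include <fmt/core.h>

    // Walk the packed buffer cluster by cluster: each element is two int16_t
    // coordinates followed by cluster_size_x * cluster_size_y int32_t values.
    void print_cluster_positions(const aare::ClusterVector<int32_t> &cv) {
        for (size_t i = 0; i < cv.size(); ++i) {
            const std::byte *p = cv.element_ptr(i);
            auto x = *reinterpret_cast<const int16_t *>(p);
            auto y = *reinterpret_cast<const int16_t *>(p + sizeof(int16_t));
            fmt::print("cluster {}: x={} y={} ({} bytes per cluster)\n", i, x, y,
                       cv.item_size());
        }
    }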
@ -37,6 +37,8 @@ class File {
    File& operator=(File &&other) noexcept;
    ~File() = default;

+   // void close(); //!< close the file
+
    Frame read_frame(); //!< read one frame from the file at the current position
    Frame read_frame(size_t frame_index); //!< read one frame at the position given by frame number
    std::vector<Frame> read_n(size_t n_frames); //!< read n_frames from the file at the current position
76  include/aare/Fit.hpp  Normal file
@ -0,0 +1,76 @@
#pragma once

#include <cmath>
#include <fmt/core.h>
#include <vector>

#include "aare/NDArray.hpp"

namespace aare {

namespace func {
double gaus(const double x, const double *par);
NDArray<double, 1> gaus(NDView<double, 1> x, NDView<double, 1> par);

double pol1(const double x, const double *par);
NDArray<double, 1> pol1(NDView<double, 1> x, NDView<double, 1> par);

} // namespace func

static constexpr int DEFAULT_NUM_THREADS = 4;

/**
 * @brief Fit a 1D Gaussian to data.
 * @param data data to fit
 * @param x x values
 */
NDArray<double, 1> fit_gaus(NDView<double, 1> x, NDView<double, 1> y);

/**
 * @brief Fit a 1D Gaussian to each pixel. Data layout [row, col, values]
 * @param x x values
 * @param y y values, layout [row, col, values]
 * @param n_threads number of threads to use
 */
NDArray<double, 3> fit_gaus(NDView<double, 1> x, NDView<double, 3> y, int n_threads = DEFAULT_NUM_THREADS);

/**
 * @brief Fit a 1D Gaussian with error estimates
 * @param x x values
 * @param y y values
 * @param y_err error in y
 * @param par_out output parameters
 * @param par_err_out output error parameters
 */
void fit_gaus(NDView<double, 1> x, NDView<double, 1> y, NDView<double, 1> y_err,
              NDView<double, 1> par_out, NDView<double, 1> par_err_out);

/**
 * @brief Fit a 1D Gaussian to each pixel with error estimates. Data layout [row, col, values]
 * @param x x values
 * @param y y values, layout [row, col, values]
 * @param y_err error in y, layout [row, col, values]
 * @param par_out output parameters, layout [row, col, values]
 * @param par_err_out output parameter errors, layout [row, col, values]
 * @param n_threads number of threads to use
 */
void fit_gaus(NDView<double, 1> x, NDView<double, 3> y, NDView<double, 3> y_err,
              NDView<double, 3> par_out, NDView<double, 3> par_err_out, int n_threads = DEFAULT_NUM_THREADS);


NDArray<double, 1> fit_pol1(NDView<double, 1> x, NDView<double, 1> y);

NDArray<double, 3> fit_pol1(NDView<double, 1> x, NDView<double, 3> y, int n_threads = DEFAULT_NUM_THREADS);

void fit_pol1(NDView<double, 1> x, NDView<double, 1> y,
              NDView<double, 1> y_err, NDView<double, 1> par_out,
              NDView<double, 1> par_err_out);

//TODO! not sure we need to offer the different version in C++
void fit_pol1(NDView<double, 1> x, NDView<double, 3> y,
              NDView<double, 3> y_err, NDView<double, 3> par_out,
              NDView<double, 3> par_err_out, int n_threads = DEFAULT_NUM_THREADS);

} // namespace aare
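A short sketch of how the 1D overloads are meant to be called from C++ (not part of the commit; the parameter order amplitude, mean, sigma follows the Python docstrings added later in this commit):

    #include "aare/Fit.hpp"
    #include "aare/NDArray.hpp"

    // Fit a Gaussian to a single spectrum and evaluate the model back on x.
    aare::NDArray<double, 1> fit_and_eval(aare::NDView<double, 1> x,
                                          aare::NDView<double, 1> y) {
        aare::NDArray<double, 1> par = aare::fit_gaus(x, y); // [amp, mean, sigma]
        return aare::func::gaus(x, par.view());              // model evaluated at x
    }

The per-pixel overloads take y with layout [row, col, values] and fan the work out over n_threads (default DEFAULT_NUM_THREADS = 4).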
@ -89,6 +89,7 @@ template <typename SUM_TYPE = double> class Pedestal {
        m_sum = 0;
        m_sum2 = 0;
        m_cur_samples = 0;
+       m_mean = 0;
    }


@ -97,6 +98,7 @@ template <typename SUM_TYPE = double> class Pedestal {
        m_sum(row, col) = 0;
        m_sum2(row, col) = 0;
        m_cur_samples(row, col) = 0;
+       m_mean(row, col) = 0;
    }


@ -119,7 +121,7 @@ template <typename SUM_TYPE = double> class Pedestal {

    /**
     * Push but don't update the cached mean. Speeds up the process
-    * when intitializing the pedestal.
+    * when initializing the pedestal.
     *
     */
    template <typename T> void push_no_update(NDView<T, 2> frame) {

@ -165,8 +167,8 @@ template <typename SUM_TYPE = double> class Pedestal {
            m_sum2(row, col) += val * val;
            m_cur_samples(row, col)++;
        } else {
-           m_sum(row, col) += val - m_sum(row, col) / m_cur_samples(row, col);
-           m_sum2(row, col) += val * val - m_sum2(row, col) / m_cur_samples(row, col);
+           m_sum(row, col) += val - m_sum(row, col) / m_samples;
+           m_sum2(row, col) += val * val - m_sum2(row, col) / m_samples;
        }
        //Since we just did a push we know that m_cur_samples(row, col) is at least 1
        m_mean(row, col) = m_sum(row, col) / m_cur_samples(row, col);
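For the changed else-branch above (a reading of the diff, not an authoritative description): once the target number of samples has been reached, the accumulator is kept as a running average, sum += val - sum/n. With n = 100, sum = 5000 and a new sample of 60, the sum becomes 5000 + 60 - 50 = 5010 and the cached mean moves from 50.0 to 50.1; dividing by the fixed m_samples instead of m_cur_samples means old samples are forgotten at a constant rate.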
203  include/aare/ProducerConsumerQueue.hpp  Normal file
@ -0,0 +1,203 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// @author Bo Hu (bhu@fb.com)
// @author Jordan DeLong (delong.j@fb.com)

// Changes made by PSD Detector Group:
// Copied: Line 34 constexpr std::size_t hardware_destructive_interference_size = 128; from folly/lang/Align.h
// Changed extension to .hpp
// Changed namespace to aare

#pragma once

#include <atomic>
#include <cassert>
#include <cstdlib>
#include <memory>
#include <stdexcept>
#include <type_traits>
#include <utility>

constexpr std::size_t hardware_destructive_interference_size = 128;
namespace aare {

/*
 * ProducerConsumerQueue is a one producer and one consumer queue
 * without locks.
 */
template <class T> struct ProducerConsumerQueue {
    typedef T value_type;

    ProducerConsumerQueue(const ProducerConsumerQueue &) = delete;
    ProducerConsumerQueue &operator=(const ProducerConsumerQueue &) = delete;


    ProducerConsumerQueue(ProducerConsumerQueue &&other){
        size_ = other.size_;
        records_ = other.records_;
        other.records_ = nullptr;
        readIndex_ = other.readIndex_.load(std::memory_order_acquire);
        writeIndex_ = other.writeIndex_.load(std::memory_order_acquire);
    }
    ProducerConsumerQueue &operator=(ProducerConsumerQueue &&other){
        size_ = other.size_;
        records_ = other.records_;
        other.records_ = nullptr;
        readIndex_ = other.readIndex_.load(std::memory_order_acquire);
        writeIndex_ = other.writeIndex_.load(std::memory_order_acquire);
        return *this;
    }


    ProducerConsumerQueue():ProducerConsumerQueue(2){};
    // size must be >= 2.
    //
    // Also, note that the number of usable slots in the queue at any
    // given time is actually (size-1), so if you start with an empty queue,
    // isFull() will return true after size-1 insertions.
    explicit ProducerConsumerQueue(uint32_t size)
        : size_(size), records_(static_cast<T *>(std::malloc(sizeof(T) * size))), readIndex_(0), writeIndex_(0) {
        assert(size >= 2);
        if (!records_) {
            throw std::bad_alloc();
        }
    }

    ~ProducerConsumerQueue() {
        // We need to destruct anything that may still exist in our queue.
        // (No real synchronization needed at destructor time: only one
        // thread can be doing this.)
        if (!std::is_trivially_destructible<T>::value) {
            size_t readIndex = readIndex_;
            size_t endIndex = writeIndex_;
            while (readIndex != endIndex) {
                records_[readIndex].~T();
                if (++readIndex == size_) {
                    readIndex = 0;
                }
            }
        }

        std::free(records_);
    }

    template <class... Args> bool write(Args &&...recordArgs) {
        auto const currentWrite = writeIndex_.load(std::memory_order_relaxed);
        auto nextRecord = currentWrite + 1;
        if (nextRecord == size_) {
            nextRecord = 0;
        }
        if (nextRecord != readIndex_.load(std::memory_order_acquire)) {
            new (&records_[currentWrite]) T(std::forward<Args>(recordArgs)...);
            writeIndex_.store(nextRecord, std::memory_order_release);
            return true;
        }

        // queue is full
        return false;
    }

    // move (or copy) the value at the front of the queue to given variable
    bool read(T &record) {
        auto const currentRead = readIndex_.load(std::memory_order_relaxed);
        if (currentRead == writeIndex_.load(std::memory_order_acquire)) {
            // queue is empty
            return false;
        }

        auto nextRecord = currentRead + 1;
        if (nextRecord == size_) {
            nextRecord = 0;
        }
        record = std::move(records_[currentRead]);
        records_[currentRead].~T();
        readIndex_.store(nextRecord, std::memory_order_release);
        return true;
    }

    // pointer to the value at the front of the queue (for use in-place) or
    // nullptr if empty.
    T *frontPtr() {
        auto const currentRead = readIndex_.load(std::memory_order_relaxed);
        if (currentRead == writeIndex_.load(std::memory_order_acquire)) {
            // queue is empty
            return nullptr;
        }
        return &records_[currentRead];
    }

    // queue must not be empty
    void popFront() {
        auto const currentRead = readIndex_.load(std::memory_order_relaxed);
        assert(currentRead != writeIndex_.load(std::memory_order_acquire));

        auto nextRecord = currentRead + 1;
        if (nextRecord == size_) {
            nextRecord = 0;
        }
        records_[currentRead].~T();
        readIndex_.store(nextRecord, std::memory_order_release);
    }

    bool isEmpty() const {
        return readIndex_.load(std::memory_order_acquire) == writeIndex_.load(std::memory_order_acquire);
    }

    bool isFull() const {
        auto nextRecord = writeIndex_.load(std::memory_order_acquire) + 1;
        if (nextRecord == size_) {
            nextRecord = 0;
        }
        if (nextRecord != readIndex_.load(std::memory_order_acquire)) {
            return false;
        }
        // queue is full
        return true;
    }

    // * If called by consumer, then true size may be more (because producer may
    //   be adding items concurrently).
    // * If called by producer, then true size may be less (because consumer may
    //   be removing items concurrently).
    // * It is undefined to call this from any other thread.
    size_t sizeGuess() const {
        int ret = writeIndex_.load(std::memory_order_acquire) - readIndex_.load(std::memory_order_acquire);
        if (ret < 0) {
            ret += size_;
        }
        return ret;
    }

    // maximum number of items in the queue.
    size_t capacity() const { return size_ - 1; }

  private:
    using AtomicIndex = std::atomic<unsigned int>;

    char pad0_[hardware_destructive_interference_size];
    // const uint32_t size_;
    uint32_t size_;
    // T *const records_;
    T* records_;

    alignas(hardware_destructive_interference_size) AtomicIndex readIndex_;
    alignas(hardware_destructive_interference_size) AtomicIndex writeIndex_;

    char pad1_[hardware_destructive_interference_size - sizeof(AtomicIndex)];
};

} // namespace aare
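A minimal single-producer/single-consumer sketch using the queue above (not part of the commit):

    #include "aare/ProducerConsumerQueue.hpp"
    #include <cstdio>
    #include <thread>

    int main() {
        // size 1024 gives 1023 usable slots (see the comment on the constructor)
        aare::ProducerConsumerQueue<int> q(1024);

        std::thread producer([&] {
            for (int i = 0; i < 100000; ++i) {
                while (!q.write(i)) {
                    // queue full: spin (a real caller would back off or sleep)
                }
            }
        });

        std::thread consumer([&] {
            int value{};
            for (int i = 0; i < 100000; ++i) {
                while (!q.read(value)) {
                    // queue empty: spin
                }
            }
            std::printf("last value read: %d\n", value);
        });

        producer.join();
        consumer.join();
    }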
@ -34,15 +34,19 @@ class RawFile : public FileInterface {
    size_t n_subfile_parts{}; // d0,d1...dn
    //TODO! move to vector of SubFile instead of pointers
    std::vector<std::vector<RawSubFile *>> subfiles; //subfiles[f0,f1...fn][d0,d1...dn]
-   std::vector<xy> positions;
-   std::vector<ModuleGeometry> m_module_pixel_0;
+   // std::vector<xy> positions;
    ModuleConfig cfg{0, 0};

    RawMasterFile m_master;

    size_t m_current_frame{};
-   size_t m_rows{};
-   size_t m_cols{};
+   // std::vector<ModuleGeometry> m_module_pixel_0;
+   // size_t m_rows{};
+   // size_t m_cols{};
+
+   DetectorGeometry m_geometry;

  public:
    /**
@ -111,11 +115,12 @@ class RawFile : public FileInterface {
     */
    static DetectorHeader read_header(const std::filesystem::path &fname);

-   void update_geometry_with_roi();
+   // void update_geometry_with_roi();
    int find_number_of_subfiles();

    void open_subfiles();
    void find_geometry();
};


} // namespace aare
@ -62,17 +62,6 @@ class ScanParameters {
 };


-struct ROI{
-    int64_t xmin{};
-    int64_t xmax{};
-    int64_t ymin{};
-    int64_t ymax{};
-
-    int64_t height() const { return ymax - ymin; }
-    int64_t width() const { return xmax - xmin; }
-};
-

 /**
  * @brief Class for parsing a master file either in our .json format or the old
  * .raw format
@ -66,6 +66,9 @@ class RawSubFile {
    size_t pixels_per_frame() const { return m_rows * m_cols; }
    size_t bytes_per_pixel() const { return m_bitdepth / 8; }

+  private:
+   template <typename T>
+   void read_with_map(std::byte *image_buf);

};
13  include/aare/decode.hpp  Normal file
@ -0,0 +1,13 @@
#pragma once

#include <cstdint>
#include <aare/NDView.hpp>
namespace aare {


uint16_t adc_sar_05_decode64to16(uint64_t input);
uint16_t adc_sar_04_decode64to16(uint64_t input);
void adc_sar_05_decode64to16(NDView<uint64_t, 2> input, NDView<uint16_t,2> output);
void adc_sar_04_decode64to16(NDView<uint64_t, 2> input, NDView<uint16_t,2> output);

} // namespace aare
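A small sketch of the NDView overloads above (not part of the commit; the buffer shapes are made up for illustration):

    #include "aare/decode.hpp"
    #include "aare/NDView.hpp"
    #include <cstdint>
    #include <vector>

    // Decode a rows x cols block of packed 64-bit ADC words into 16-bit samples.
    void decode_block(std::vector<uint64_t> &raw, std::vector<uint16_t> &out,
                      int64_t rows, int64_t cols) {
        aare::NDView<uint64_t, 2> in(raw.data(), {rows, cols});
        aare::NDView<uint16_t, 2> o(out.data(), {rows, cols});
        aare::adc_sar_05_decode64to16(in, o); // one uint16_t per input word
    }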
@ -179,11 +179,40 @@ template <typename T> struct t_xy {
 using xy = t_xy<uint32_t>;


+/**
+ * @brief Class to hold the geometry of a module. Where pixel 0 is located and the size of the module
+ */
 struct ModuleGeometry{
-    int x{};
-    int y{};
+    int origin_x{};
+    int origin_y{};
     int height{};
     int width{};
+    int row_index{};
+    int col_index{};
+};
+
+/**
+ * @brief Class to hold the geometry of a detector. Number of modules, their size and where pixel 0
+ * for each module is located
+ */
+struct DetectorGeometry{
+    int modules_x{};
+    int modules_y{};
+    int pixels_x{};
+    int pixels_y{};
+    int module_gap_row{};
+    int module_gap_col{};
+    std::vector<ModuleGeometry> module_pixel_0;
+};
+
+struct ROI{
+    int64_t xmin{};
+    int64_t xmax{};
+    int64_t ymin{};
+    int64_t ymax{};
+
+    int64_t height() const { return ymax - ymin; }
+    int64_t width() const { return xmax - xmin; }
 };
16  include/aare/geo_helpers.hpp  Normal file
@ -0,0 +1,16 @@
#pragma once
#include "aare/defs.hpp"
#include "aare/RawMasterFile.hpp" //ROI refactor away
namespace aare{

/**
 * @brief Update the detector geometry given a region of interest
 *
 * @param geo
 * @param roi
 * @return DetectorGeometry
 */
DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, ROI roi);


} // namespace aare
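A short sketch of the helper above (not part of the commit; the ROI values are made up):

    #include "aare/geo_helpers.hpp"

    // Shrink a full detector geometry to a region of interest before reading.
    aare::DetectorGeometry apply_roi(aare::DetectorGeometry full) {
        aare::ROI roi;
        roi.xmin = 0;
        roi.xmax = 256;   // roi.width()  == 256
        roi.ymin = 100;
        roi.ymax = 200;   // roi.height() == 100
        return aare::update_geometry_with_roi(full, roi);
    }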
8  include/aare/utils/task.hpp  Normal file
@ -0,0 +1,8 @@

#include <utility>
#include <vector>

namespace aare {
std::vector<std::pair<int, int>> split_task(int first, int last, int n_threads);

} // namespace aare
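A sketch of how such a helper can drive the per-pixel fits (not part of the commit; that split_task returns contiguous (first, last) chunks covering the input range is an assumption based on the declaration, not on its implementation):

    #include "aare/utils/task.hpp"
    #include <thread>
    #include <vector>

    void parallel_over_rows(int n_rows, int n_threads) {
        std::vector<std::thread> workers;
        for (auto chunk : aare::split_task(0, n_rows, n_threads)) {
            workers.emplace_back([chunk] {
                for (int row = chunk.first; row < chunk.second; ++row) {
                    // process one row of the image
                }
            });
        }
        for (auto &w : workers)
            w.join();
    }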
13  patches/lmfit.patch  Normal file
@ -0,0 +1,13 @@
diff --git a/lib/CMakeLists.txt b/lib/CMakeLists.txt
index 4efb7ed..6533660 100644
--- a/lib/CMakeLists.txt
+++ b/lib/CMakeLists.txt
@@ -11,7 +11,7 @@ target_compile_definitions(${lib} PRIVATE "LMFIT_EXPORT") # for Windows DLL expo

 target_include_directories(${lib}
    PUBLIC
-        $<BUILD_INTERFACE:${CMAKE_SOURCE_DIR}/>
+        $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/>
        $<INSTALL_INTERFACE:include/>
    )
@ -4,7 +4,7 @@ build-backend = "scikit_build_core.build"

[project]
name = "aare"
-version = "2024.12.16.dev0"
+version = "2025.2.12"

[tool.scikit-build]
cmake.verbose = true
@ -28,6 +28,7 @@ target_link_libraries(_aare PRIVATE aare_core aare_compiler_flags)
set( PYTHON_FILES
    aare/__init__.py
    aare/CtbRawFile.py
+   aare/func.py
    aare/RawFile.py
    aare/transform.py
    aare/ScanParameters.py
@ -43,10 +44,17 @@ set_target_properties(_aare PROPERTIES
    LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/aare
)

+set(PYTHON_EXAMPLES
+    examples/play.py
+    examples/fits.py
+)


-# Copy the examples/scripts to the build directory
-configure_file(examples/play.py ${CMAKE_BINARY_DIR}/play.py)
+# Copy the python examples to the build directory
+foreach(FILE ${PYTHON_EXAMPLES})
+    configure_file(${FILE} ${CMAKE_BINARY_DIR}/${FILE} )
+endforeach(FILE ${PYTHON_EXAMPLES})


if(AARE_INSTALL_PYTHONEXT)
@ -8,8 +8,16 @@ from ._aare import DetectorType
from ._aare import ClusterFile
from ._aare import hitmap

+from ._aare import ClusterFinderMT, ClusterCollector, ClusterFileSink, ClusterVector_i
+
+from ._aare import fit_gaus, fit_pol1
+
from .CtbRawFile import CtbRawFile
from .RawFile import RawFile
from .ScanParameters import ScanParameters

-from .utils import random_pixels, random_pixel
+from .utils import random_pixels, random_pixel, flat_list
+
+
+#make functions available in the top level API
+from .func import *
1  python/aare/func.py  Normal file
@ -0,0 +1 @@
from ._aare import gaus, pol1
@ -2,6 +2,14 @@ import numpy as np
from . import _aare


+class AdcSar04Transform64to16:
+    def __call__(self, data):
+        return _aare.adc_sar_04_decode64to16(data)
+
+class AdcSar05Transform64to16:
+    def __call__(self, data):
+        return _aare.adc_sar_05_decode64to16(data)
+
class Moench05Transform:
    #Could be moved to C++ without changing the interface
    def __init__(self):
@ -46,3 +54,5 @@ moench05 = Moench05Transform()
moench05_1g = Moench05Transform1g()
moench05_old = Moench05TransformOld()
matterhorn02 = Matterhorn02Transform()
+adc_sar_04_64to16 = AdcSar04Transform64to16()
+adc_sar_05_64to16 = AdcSar05Transform64to16()
@ -21,3 +21,7 @@ def random_pixel(xmin=0, xmax=512, ymin=0, ymax=1024):
        tuple: (row, col)
    """
    return random_pixels(1, xmin, xmax, ymin, ymax)[0]
+
+def flat_list(xss):
+    """Flatten a list of lists."""
+    return [x for xs in xss for x in xs]
79  python/examples/fits.py  Normal file
@ -0,0 +1,79 @@
import matplotlib.pyplot as plt
import numpy as np
from aare import fit_gaus, fit_pol1
from aare import gaus, pol1

textpm = f"±" #
textmu = f"μ" #
textsigma = f"σ" #



# ================================= Gauss fit =================================
# Parameters
mu = np.random.uniform(1, 100) # Mean of Gaussian
sigma = np.random.uniform(4, 20) # Standard deviation
num_points = 10000 # Number of points for smooth distribution
noise_sigma = 100

# Generate Gaussian distribution
data = np.random.normal(mu, sigma, num_points)

# Generate errors for each point
errors = np.abs(np.random.normal(0, sigma, num_points)) # Errors with mean 0, std 0.5

# Create subplot
fig0, ax0 = plt.subplots(1, 1, num=0, figsize=(12, 8))

x = np.histogram(data, bins=30)[1][:-1] + 0.05
y = np.histogram(data, bins=30)[0]
yerr = errors[:30]


# Add the errors as error bars in the step plot
ax0.errorbar(x, y, yerr=yerr, fmt=". ", capsize=5)
ax0.grid()

par, err = fit_gaus(x, y, yerr)
print(par, err)

x = np.linspace(x[0], x[-1], 1000)
ax0.plot(x, gaus(x, par), marker="")
ax0.set(xlabel="x", ylabel="Counts", title=f"A0 = {par[0]:0.2f}{textpm}{err[0]:0.2f}\n"
                                           f"{textmu} = {par[1]:0.2f}{textpm}{err[1]:0.2f}\n"
                                           f"{textsigma} = {par[2]:0.2f}{textpm}{err[2]:0.2f}\n"
                                           f"(init: {textmu}: {mu:0.2f}, {textsigma}: {sigma:0.2f})")
fig0.tight_layout()



# ================================= pol1 fit =================================
# Parameters
n_points = 40

# Generate random slope and intercept (origin)
slope = np.random.uniform(-10, 10) # Random slope between 0.5 and 2.0
intercept = np.random.uniform(-10, 10) # Random intercept between -10 and 10

# Generate random x values
x_values = np.random.uniform(-10, 10, n_points)

# Calculate y values based on the linear function y = mx + b + error
errors = np.abs(np.random.normal(0, np.random.uniform(1, 5), n_points))
var_points = np.random.normal(0, np.random.uniform(0.1, 2), n_points)
y_values = slope * x_values + intercept + var_points

fig1, ax1 = plt.subplots(1, 1, num=1, figsize=(12, 8))
ax1.errorbar(x_values, y_values, yerr=errors, fmt=". ", capsize=5)
par, err = fit_pol1(x_values, y_values, errors)


x = np.linspace(np.min(x_values), np.max(x_values), 1000)
ax1.plot(x, pol1(x, par), marker="")
ax1.set(xlabel="x", ylabel="y", title=f"a = {par[0]:0.2f}{textpm}{err[0]:0.2f}\n"
                                      f"b = {par[1]:0.2f}{textpm}{err[1]:0.2f}\n"
                                      f"(init: {slope:0.2f}, {intercept:0.2f})")
fig1.tight_layout()

plt.show()
@ -8,51 +8,61 @@ import numpy as np
 import boost_histogram as bh
 import time

-from aare import File, ClusterFinder, VarClusterFinder
+<<<<<<< HEAD
+from aare import File, ClusterFinder, VarClusterFinder, ClusterFile, CtbRawFile
+from aare import gaus, fit_gaus

-base = Path('/mnt/sls_det_storage/matterhorn_data/aare_test_data/')
-f = File(base/'Moench03new/cu_half_speed_master_4.json')
-cf = ClusterFinder((400,400), (3,3))
-for i in range(1000):
-    cf.push_pedestal_frame(f.read_frame())
-
-fig, ax = plt.subplots()
-im = ax.imshow(cf.pedestal())
-cf.pedestal()
-cf.noise()
-
-N = 500
-t0 = time.perf_counter()
-hist1 = bh.Histogram(bh.axis.Regular(40, -2, 4000))
-f.seek(0)
+base = Path('/mnt/sls_det_storage/moench_data/Julian/MOENCH05/20250113_first_xrays_redo/raw_files/')
+cluster_file = Path('/home/l_msdetect/erik/tmp/Cu.clust')

 t0 = time.perf_counter()
-data = f.read_n(N)
+offset= -0.5
+hist3d = bh.Histogram(
+        bh.axis.Regular(160, 0+offset, 160+offset),  #x
+        bh.axis.Regular(150, 0+offset, 150+offset),  #y
+        bh.axis.Regular(200, 0, 6000), #ADU
+    )
+
+total_clusters = 0
+with ClusterFile(cluster_file, chunk_size = 1000) as f:
+    for i, clusters in enumerate(f):
+        arr = np.array(clusters)
+        total_clusters += clusters.size
+        hist3d.fill(arr['y'],arr['x'], clusters.sum_2x2()) #python talks [row, col] cluster finder [x,y]
+=======
+from aare import RawFile
+
+f = RawFile('/mnt/sls_det_storage/jungfrau_data1/vadym_tests/jf12_M431/laser_scan/laserScan_pedestal_G0_master_0.json')
+
+print(f'{f.frame_number(1)}')
+
+for i in range(10):
+    header, img = f.read_frame()
+    print(header['frameNumber'], img.shape)
+>>>>>>> developer
+
 t_elapsed = time.perf_counter()-t0
+print(f'Histogram filling took: {t_elapsed:.3f}s {total_clusters/t_elapsed/1e6:.3f}M clusters/s')
+
+histogram_data = hist3d.counts()
+x = hist3d.axes[2].edges[:-1]

-n_bytes = data.itemsize*data.size
-
-print(f'Reading {N} frames took {t_elapsed:.3f}s {N/t_elapsed:.0f} FPS, {n_bytes/1024**2:.4f} GB/s')
-
-for frame in data:
-    a = cf.find_clusters(frame)
-
-clusters = cf.steal_clusters()
-
-# t_elapsed = time.perf_counter()-t0
-# print(f'Clustering {N} frames took {t_elapsed:.2f}s {N/t_elapsed:.0f} FPS')
-
-# t0 = time.perf_counter()
-# total_clusters = clusters.size
-
-# hist1.fill(clusters.sum())
-
-# t_elapsed = time.perf_counter()-t0
-# print(f'Filling histogram with the sum of {total_clusters} clusters took: {t_elapsed:.3f}s, {total_clusters/t_elapsed:.3g} clust/s')
-# print(f'Average number of clusters per frame {total_clusters/N:.3f}')
+y = histogram_data[100,100,:]
+xx = np.linspace(x[0], x[-1])
+# fig, ax = plt.subplots()
+# ax.step(x, y, where = 'post')
+
+y_err = np.sqrt(y)
+y_err = np.zeros(y.size)
+y_err += 1
+
+# par = fit_gaus2(y,x, y_err)
+# ax.plot(xx, gaus(xx,par))
+# print(par)
+
+res = fit_gaus(y,x)
+res2 = fit_gaus(y,x, y_err)
+print(res)
+print(res2)
@ -1,4 +1,7 @@
+#include "aare/ClusterCollector.hpp"
+#include "aare/ClusterFileSink.hpp"
 #include "aare/ClusterFinder.hpp"
+#include "aare/ClusterFinderMT.hpp"
 #include "aare/ClusterVector.hpp"
 #include "aare/NDView.hpp"
 #include "aare/Pedestal.hpp"
@ -8,9 +11,10 @@
 #include <filesystem>
 #include <pybind11/pybind11.h>
 #include <pybind11/stl.h>
+#include <pybind11/stl_bind.h>

 namespace py = pybind11;
-using pd_type = float;
+using pd_type = double;

 template <typename T>
 void define_cluster_vector(py::module &m, const std::string &typestr) {
@ -18,70 +22,144 @@ void define_cluster_vector(py::module &m, const std::string &typestr) {
     py::class_<ClusterVector<T>>(m, class_name.c_str(), py::buffer_protocol())
         .def(py::init<int, int>())
         .def_property_readonly("size", &ClusterVector<T>::size)
-        .def("element_offset",
-             py::overload_cast<>(&ClusterVector<T>::element_offset, py::const_))
+        .def("item_size", &ClusterVector<T>::item_size)
         .def_property_readonly("fmt",
             [typestr](ClusterVector<T> &self) {
                 return fmt::format(
                     self.fmt_base(), self.cluster_size_x(),
                     self.cluster_size_y(), typestr);
             })
-        .def("sum", [](ClusterVector<T> &self) {
+        .def("sum",
+             [](ClusterVector<T> &self) {
                 auto *vec = new std::vector<T>(self.sum());
                 return return_vector(vec);
             })
+        .def("sum_2x2", [](ClusterVector<T> &self) {
+            auto *vec = new std::vector<T>(self.sum_2x2());
+            return return_vector(vec);
+        })
+        .def_property_readonly("capacity", &ClusterVector<T>::capacity)
+        .def_property("frame_number", &ClusterVector<T>::frame_number,
+                      &ClusterVector<T>::set_frame_number)
         .def_buffer([typestr](ClusterVector<T> &self) -> py::buffer_info {
             return py::buffer_info(
                 self.data(),                /* Pointer to buffer */
-                self.element_offset(),      /* Size of one scalar */
+                self.item_size(),           /* Size of one scalar */
                 fmt::format(self.fmt_base(), self.cluster_size_x(),
                             self.cluster_size_y(),
                             typestr),       /* Format descriptor */
                 1,                          /* Number of dimensions */
                 {self.size()},              /* Buffer dimensions */
-                {self.element_offset()}     /* Strides (in bytes) for each index */
+                {self.item_size()}          /* Strides (in bytes) for each index */
             );
         });
 }

+void define_cluster_finder_mt_bindings(py::module &m) {
+    py::class_<ClusterFinderMT<uint16_t, pd_type>>(m, "ClusterFinderMT")
+        .def(py::init<Shape<2>, Shape<2>, pd_type, size_t, size_t>(),
+             py::arg("image_size"), py::arg("cluster_size"),
+             py::arg("n_sigma") = 5.0, py::arg("capacity") = 2048,
+             py::arg("n_threads") = 3)
+        .def("push_pedestal_frame",
+             [](ClusterFinderMT<uint16_t, pd_type> &self,
+                py::array_t<uint16_t> frame) {
+                 auto view = make_view_2d(frame);
+                 self.push_pedestal_frame(view);
+             })
+        .def(
+            "find_clusters",
+            [](ClusterFinderMT<uint16_t, pd_type> &self,
+               py::array_t<uint16_t> frame, uint64_t frame_number) {
+                auto view = make_view_2d(frame);
+                self.find_clusters(view, frame_number);
+                return;
+            },
+            py::arg(), py::arg("frame_number") = 0)
+        .def("clear_pedestal", &ClusterFinderMT<uint16_t, pd_type>::clear_pedestal)
+        .def("sync", &ClusterFinderMT<uint16_t, pd_type>::sync)
+        .def("stop", &ClusterFinderMT<uint16_t, pd_type>::stop)
+        .def("start", &ClusterFinderMT<uint16_t, pd_type>::start)
+        .def("pedestal",
+             [](ClusterFinderMT<uint16_t, pd_type> &self, size_t thread_index) {
+                 auto pd = new NDArray<pd_type, 2>{};
+                 *pd = self.pedestal(thread_index);
+                 return return_image_data(pd);
+             },py::arg("thread_index") = 0)
+        .def("noise",
+             [](ClusterFinderMT<uint16_t, pd_type> &self, size_t thread_index) {
+                 auto arr = new NDArray<pd_type, 2>{};
+                 *arr = self.noise(thread_index);
+                 return return_image_data(arr);
+             },py::arg("thread_index") = 0);
+}
+
+void define_cluster_collector_bindings(py::module &m) {
+    py::class_<ClusterCollector>(m, "ClusterCollector")
+        .def(py::init<ClusterFinderMT<uint16_t, double, int32_t> *>())
+        .def("stop", &ClusterCollector::stop)
+        .def(
+            "steal_clusters",
+            [](ClusterCollector &self) {
+                auto v =
+                    new std::vector<ClusterVector<int>>(self.steal_clusters());
+                return v;
+            },
+            py::return_value_policy::take_ownership);
+}
+
+void define_cluster_file_sink_bindings(py::module &m) {
+    py::class_<ClusterFileSink>(m, "ClusterFileSink")
+        .def(py::init<ClusterFinderMT<uint16_t, double, int32_t> *,
+                      const std::filesystem::path &>())
+        .def("stop", &ClusterFileSink::stop);
+}
+
 void define_cluster_finder_bindings(py::module &m) {
     py::class_<ClusterFinder<uint16_t, pd_type>>(m, "ClusterFinder")
-        .def(py::init<Shape<2>, Shape<2>, pd_type, size_t>(), py::arg("image_size"),
-             py::arg("cluster_size"), py::arg("n_sigma") = 5.0,
-             py::arg("capacity") = 1'000'000)
+        .def(py::init<Shape<2>, Shape<2>, pd_type, size_t>(),
+             py::arg("image_size"), py::arg("cluster_size"),
+             py::arg("n_sigma") = 5.0, py::arg("capacity") = 1'000'000)
         .def("push_pedestal_frame",
             [](ClusterFinder<uint16_t, pd_type> &self,
               py::array_t<uint16_t> frame) {
                 auto view = make_view_2d(frame);
                 self.push_pedestal_frame(view);
             })
-        .def("pedestal",
+        .def("clear_pedestal", &ClusterFinder<uint16_t, pd_type>::clear_pedestal)
+        .def_property_readonly("pedestal",
            [](ClusterFinder<uint16_t, pd_type> &self) {
                auto pd = new NDArray<pd_type, 2>{};
                *pd = self.pedestal();
                return return_image_data(pd);
            })
-        .def("noise",
+        .def_property_readonly("noise",
            [](ClusterFinder<uint16_t, pd_type> &self) {
                auto arr = new NDArray<pd_type, 2>{};
                *arr = self.noise();
                return return_image_data(arr);
            })
-        .def("steal_clusters",
-            [](ClusterFinder<uint16_t, pd_type> &self, bool realloc_same_capacity) {
-                auto v = new ClusterVector<int>(self.steal_clusters(realloc_same_capacity));
-                return v;
-            }, py::arg("realloc_same_capacity") = false)
-        .def("find_clusters",
-            [](ClusterFinder<uint16_t, pd_type> &self,
-               py::array_t<uint16_t> frame) {
-                auto view = make_view_2d(frame);
-                self.find_clusters(view);
-                return;
-            });
+        .def(
+            "steal_clusters",
+            [](ClusterFinder<uint16_t, pd_type> &self,
+               bool realloc_same_capacity) {
+                auto v = new ClusterVector<int>(
+                    self.steal_clusters(realloc_same_capacity));
+                return v;
+            },
+            py::arg("realloc_same_capacity") = false)
+        .def(
+            "find_clusters",
+            [](ClusterFinder<uint16_t, pd_type> &self,
+               py::array_t<uint16_t> frame, uint64_t frame_number) {
+                auto view = make_view_2d(frame);
+                self.find_clusters(view, frame_number);
+                return;
+            },
+            py::arg(), py::arg("frame_number") = 0);

-    m.def("hitmap", [](std::array<size_t, 2> image_size, ClusterVector<int32_t>& cv){
+    m.def("hitmap",
+          [](std::array<size_t, 2> image_size, ClusterVector<int32_t> &cv) {
              py::array_t<int32_t> hitmap(image_size);
              auto r = hitmap.mutable_unchecked<2>();

@ -90,7 +168,7 @@ void define_cluster_finder_bindings(py::module &m) {
                 for (py::ssize_t j = 0; j < r.shape(1); j++)
                     r(i, j) = 0;

-            size_t stride = cv.element_offset();
+            size_t stride = cv.item_size();
             auto ptr = cv.data();
             for (size_t i = 0; i < cv.size(); i++) {
                 auto x = *reinterpret_cast<int16_t *>(ptr);
@ -104,7 +182,6 @@ void define_cluster_finder_bindings(py::module &m) {
     define_cluster_vector<double>(m, "d");
     define_cluster_vector<float>(m, "f");

-
     py::class_<DynamicCluster>(m, "DynamicCluster", py::buffer_protocol())
         .def(py::init<int, int, Dtype>())
         .def("size", &DynamicCluster::size)
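For orientation, a hedged C++ sketch of the pipeline these bindings wrap (the constructor arguments mirror the py::init signatures above; the C++-side template defaults and the exact shutdown order are assumptions):

    #include "aare/ClusterCollector.hpp"
    #include "aare/ClusterFinderMT.hpp"

    void run_pipeline(aare::NDView<uint16_t, 2> pedestal_frame,
                      aare::NDView<uint16_t, 2> data_frame) {
        // image_size, cluster_size, n_sigma, capacity, n_threads
        aare::ClusterFinderMT<uint16_t, double> finder({400, 400}, {3, 3}, 5.0,
                                                       2048, 3);
        aare::ClusterCollector collector(&finder);

        finder.push_pedestal_frame(pedestal_frame);
        finder.find_clusters(data_frame, 1); // frame_number = 1
        finder.stop();                       // drain the worker threads
        collector.stop();                    // stop collecting
        auto clusters = collector.steal_clusters(); // vector<ClusterVector<int>>
    }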
@ -28,27 +28,24 @@ void define_cluster_file_io_bindings(py::module &m) {
         py::arg(), py::arg("chunk_size") = 1000, py::arg("mode") = "r")
         .def("read_clusters",
              [](ClusterFile &self, size_t n_clusters) {
-                auto *vec =
-                    new std::vector<Cluster3x3>(self.read_clusters(n_clusters));
-                return return_vector(vec);
-            })
+                auto v = new ClusterVector<int32_t>(self.read_clusters(n_clusters));
+                return v;
+            },py::return_value_policy::take_ownership)
         .def("read_frame",
              [](ClusterFile &self) {
-                int32_t frame_number;
-                auto *vec =
-                    new std::vector<Cluster3x3>(self.read_frame(frame_number));
-                return py::make_tuple(frame_number, return_vector(vec));
+                auto v = new ClusterVector<int32_t>(self.read_frame());
+                return v;
             })
         .def("write_frame", &ClusterFile::write_frame)
-        .def("read_cluster_with_cut",
-             [](ClusterFile &self, size_t n_clusters,
-                py::array_t<double> noise_map, int nx, int ny) {
-                auto view = make_view_2d(noise_map);
-                auto *vec =
-                    new std::vector<Cluster3x3>(self.read_cluster_with_cut(
-                        n_clusters, view.data(), nx, ny));
-                return return_vector(vec);
-            })
+        // .def("read_cluster_with_cut",
+        //      [](ClusterFile &self, size_t n_clusters,
+        //         py::array_t<double> noise_map, int nx, int ny) {
+        //         auto view = make_view_2d(noise_map);
+        //         auto *vec =
+        //             new std::vector<Cluster3x3>(self.read_cluster_with_cut(
+        //                 n_clusters, view.data(), nx, ny));
+        //         return return_vector(vec);
+        //     })
         .def("__enter__", [](ClusterFile &self) { return &self; })
         .def("__exit__",
              [](ClusterFile &self,
@ -59,12 +56,11 @@ void define_cluster_file_io_bindings(py::module &m) {
             })
         .def("__iter__", [](ClusterFile &self) { return &self; })
         .def("__next__", [](ClusterFile &self) {
-            auto vec =
-                new std::vector<Cluster3x3>(self.read_clusters(self.chunk_size()));
-            if (vec->size() == 0) {
+            auto v = new ClusterVector<int32_t>(self.read_clusters(self.chunk_size()));
+            if (v->size() == 0) {
                 throw py::stop_iteration();
             }
-            return return_vector(vec);
+            return v;
         });

     m.def("calculate_eta2", []( aare::ClusterVector<int32_t> &clusters) {
@ -7,6 +7,7 @@
 #include "aare/RawSubFile.hpp"

 #include "aare/defs.hpp"
+#include "aare/decode.hpp"
 // #include "aare/fClusterFileV2.hpp"

 #include <cstdint>
@ -23,6 +24,47 @@ using namespace ::aare;

 void define_ctb_raw_file_io_bindings(py::module &m) {

+    m.def("adc_sar_05_decode64to16", [](py::array_t<uint8_t> input) {
+
+        if(input.ndim() != 2){
+            throw std::runtime_error("Only 2D arrays are supported at this moment");
+        }
+
+        //Create a 2D output array with the same shape as the input
+        std::vector<ssize_t> shape{input.shape(0), input.shape(1)/8};
+        py::array_t<uint16_t> output(shape);
+
+        //Create a view of the input and output arrays
+        NDView<uint64_t, 2> input_view(reinterpret_cast<uint64_t*>(input.mutable_data()), {output.shape(0), output.shape(1)});
+        NDView<uint16_t, 2> output_view(output.mutable_data(), {output.shape(0), output.shape(1)});
+
+        adc_sar_05_decode64to16(input_view, output_view);
+
+        return output;
+    });
+
+    m.def("adc_sar_04_decode64to16", [](py::array_t<uint8_t> input) {
+
+        if(input.ndim() != 2){
+            throw std::runtime_error("Only 2D arrays are supported at this moment");
+        }
+
+        //Create a 2D output array with the same shape as the input
+        std::vector<ssize_t> shape{input.shape(0), input.shape(1)/8};
+        py::array_t<uint16_t> output(shape);
+
+        //Create a view of the input and output arrays
+        NDView<uint64_t, 2> input_view(reinterpret_cast<uint64_t*>(input.mutable_data()), {output.shape(0), output.shape(1)});
+        NDView<uint16_t, 2> output_view(output.mutable_data(), {output.shape(0), output.shape(1)});
+
+        adc_sar_04_decode64to16(input_view, output_view);
+
+        return output;
+    });
+
     py::class_<CtbRawFile>(m, "CtbRawFile")
         .def(py::init<const std::filesystem::path &>())
         .def("read_frame",
@ -20,6 +20,11 @@
 namespace py = pybind11;
 using namespace ::aare;

+//Disable warnings for unused parameters, as we ignore some
+//in the __exit__ method
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+
 void define_file_io_bindings(py::module &m) {


@ -124,8 +129,41 @@ void define_file_io_bindings(py::module &m) {
                 self.read_into(reinterpret_cast<std::byte *>(image.mutable_data()),
                                n_frames);
                 return image;
+            })
+        .def("__enter__", [](File &self) { return &self; })
+        .def("__exit__",
+             [](File &self,
+                const std::optional<pybind11::type> &exc_type,
+                const std::optional<pybind11::object> &exc_value,
+                const std::optional<pybind11::object> &traceback) {
+                 // self.close();
+             })
+        .def("__iter__", [](File &self) { return &self; })
+        .def("__next__", [](File &self) {
+
+            try{
+                const uint8_t item_size = self.bytes_per_pixel();
+                py::array image;
+                std::vector<ssize_t> shape;
+                shape.reserve(2);
+                shape.push_back(self.rows());
+                shape.push_back(self.cols());
+                if (item_size == 1) {
+                    image = py::array_t<uint8_t>(shape);
+                } else if (item_size == 2) {
+                    image = py::array_t<uint16_t>(shape);
+                } else if (item_size == 4) {
+                    image = py::array_t<uint32_t>(shape);
+                }
+                self.read_into(
+                    reinterpret_cast<std::byte *>(image.mutable_data()));
+                return image;
+            }catch(std::runtime_error &e){
+                throw py::stop_iteration();
+            }
         });


     py::class_<FileConfig>(m, "FileConfig")
         .def(py::init<>())
         .def_readwrite("rows", &FileConfig::rows)
@ -205,7 +243,7 @@ void define_file_io_bindings(py::module &m) {
         return image;
     });

-
+#pragma GCC diagnostic pop
     // py::class_<ClusterHeader>(m, "ClusterHeader")
     //     .def(py::init<>())
     //     .def_readwrite("frame_number", &ClusterHeader::frame_number)
223
python/src/fit.hpp
Normal file
223
python/src/fit.hpp
Normal file
@ -0,0 +1,223 @@
|
|||||||
|
#include <cstdint>
|
||||||
|
#include <filesystem>
|
||||||
|
#include <pybind11/pybind11.h>
|
||||||
|
#include <pybind11/stl.h>
|
||||||
|
#include <pybind11/stl_bind.h>
|
||||||
|
|
||||||
|
#include "aare/Fit.hpp"
|
||||||
|
|
||||||
|
namespace py = pybind11;
|
||||||
|
|
||||||
|
void define_fit_bindings(py::module &m) {
|
||||||
|
|
||||||
|
// TODO! Evaluate without converting to double
|
||||||
|
m.def(
|
||||||
|
"gaus",
|
||||||
|
[](py::array_t<double, py::array::c_style | py::array::forcecast> x,
|
||||||
|
py::array_t<double, py::array::c_style | py::array::forcecast> par) {
|
||||||
|
auto x_view = make_view_1d(x);
|
||||||
|
auto par_view = make_view_1d(par);
|
||||||
|
auto y = new NDArray<double, 1>{aare::func::gaus(x_view, par_view)};
|
||||||
|
return return_image_data(y);
|
||||||
|
},
|
||||||
|
R"(
|
||||||
|
Evaluate a 1D Gaussian function for all points in x using parameters par.
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
x : array_like
|
||||||
|
The points at which to evaluate the Gaussian function.
|
||||||
|
par : array_like
|
||||||
|
The parameters of the Gaussian function. The first element is the amplitude, the second element is the mean, and the third element is the standard deviation.
|
||||||
|
)", py::arg("x"), py::arg("par"));
|
||||||
|
|
||||||
|
m.def(
|
||||||
|
"pol1",
|
||||||
|
[](py::array_t<double, py::array::c_style | py::array::forcecast> x,
|
||||||
|
py::array_t<double, py::array::c_style | py::array::forcecast> par) {
|
||||||
|
auto x_view = make_view_1d(x);
|
||||||
|
auto par_view = make_view_1d(par);
|
||||||
|
auto y = new NDArray<double, 1>{aare::func::pol1(x_view, par_view)};
|
||||||
|
return return_image_data(y);
|
||||||
|
},
|
||||||
|
R"(
|
||||||
|
Evaluate a 1D polynomial function for all points in x using parameters par. (p0+p1*x)
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
x : array_like
|
||||||
|
The points at which to evaluate the polynomial function.
|
||||||
|
par : array_like
|
||||||
|
The parameters of the polynomial function. The first element is the intercept, and the second element is the slope.
|
||||||
|
)", py::arg("x"), py::arg("par"));
|
||||||
|
|
||||||
|
m.def(
|
||||||
|
"fit_gaus",
|
||||||
|
[](py::array_t<double, py::array::c_style | py::array::forcecast> x,
|
||||||
|
py::array_t<double, py::array::c_style | py::array::forcecast> y,
|
||||||
|
int n_threads) {
|
||||||
|
if (y.ndim() == 3) {
|
||||||
|
auto par = new NDArray<double, 3>{};
|
||||||
|
auto y_view = make_view_3d(y);
|
||||||
|
auto x_view = make_view_1d(x);
|
||||||
|
*par = aare::fit_gaus(x_view, y_view, n_threads);
|
||||||
|
return return_image_data(par);
|
||||||
|
} else if (y.ndim() == 1) {
|
||||||
|
auto par = new NDArray<double, 1>{};
|
||||||
|
auto y_view = make_view_1d(y);
|
||||||
|
auto x_view = make_view_1d(x);
|
||||||
|
*par = aare::fit_gaus(x_view, y_view);
|
||||||
|
return return_image_data(par);
|
||||||
|
} else {
|
||||||
|
throw std::runtime_error("Data must be 1D or 3D");
|
||||||
|
}
|
||||||
|
},
|
||||||
|
R"(
|
||||||
|
Fit a 1D Gaussian to data.
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
x : array_like
|
||||||
|
The x values.
|
||||||
|
y : array_like
|
||||||
|
The y values.
|
||||||
|
n_threads : int, optional
|
||||||
|
The number of threads to use. Default is 4.
|
||||||
|
)",
|
||||||
|
py::arg("x"), py::arg("y"), py::arg("n_threads") = 4);
|
||||||
|
|
||||||
|
    m.def(
        "fit_gaus",
        [](py::array_t<double, py::array::c_style | py::array::forcecast> x,
           py::array_t<double, py::array::c_style | py::array::forcecast> y,
           py::array_t<double, py::array::c_style | py::array::forcecast> y_err,
           int n_threads) {
            if (y.ndim() == 3) {
                // Allocate memory for the output
                // Need to have pointers to allow python to manage the memory
                auto par = new NDArray<double, 3>({y.shape(0), y.shape(1), 3});
                auto par_err = new NDArray<double, 3>({y.shape(0), y.shape(1), 3});

                auto y_view = make_view_3d(y);
                auto y_view_err = make_view_3d(y_err);
                auto x_view = make_view_1d(x);
                aare::fit_gaus(x_view, y_view, y_view_err, par->view(),
                               par_err->view(), n_threads);
                // return return_image_data(par);
                return py::make_tuple(return_image_data(par),
                                      return_image_data(par_err));
            } else if (y.ndim() == 1) {
                // Allocate memory for the output
                // Need to have pointers to allow python to manage the memory
                auto par = new NDArray<double, 1>({3});
                auto par_err = new NDArray<double, 1>({3});

                // Decode the numpy arrays
                auto y_view = make_view_1d(y);
                auto y_view_err = make_view_1d(y_err);
                auto x_view = make_view_1d(x);

                aare::fit_gaus(x_view, y_view, y_view_err, par->view(),
                               par_err->view());
                return py::make_tuple(return_image_data(par),
                                      return_image_data(par_err));
            } else {
                throw std::runtime_error("Data must be 1D or 3D");
            }
        },
        R"(
Fit a 1D Gaussian to data with error estimates.

Parameters
----------
x : array_like
    The x values.
y : array_like
    The y values.
y_err : array_like
    The error in the y values.
n_threads : int, optional
    The number of threads to use. Default is 4.
)",
        py::arg("x"), py::arg("y"), py::arg("y_err"), py::arg("n_threads") = 4);

    m.def(
        "fit_pol1",
        [](py::array_t<double, py::array::c_style | py::array::forcecast> x,
           py::array_t<double, py::array::c_style | py::array::forcecast> y,
           int n_threads) {
            if (y.ndim() == 3) {
                auto par = new NDArray<double, 3>{};

                auto x_view = make_view_1d(x);
                auto y_view = make_view_3d(y);
                *par = aare::fit_pol1(x_view, y_view, n_threads);
                return return_image_data(par);
            } else if (y.ndim() == 1) {
                auto par = new NDArray<double, 1>{};
                auto x_view = make_view_1d(x);
                auto y_view = make_view_1d(y);
                *par = aare::fit_pol1(x_view, y_view);
                return return_image_data(par);
            } else {
                throw std::runtime_error("Data must be 1D or 3D");
            }
        },
        py::arg("x"), py::arg("y"), py::arg("n_threads") = 4);

    m.def(
        "fit_pol1",
        [](py::array_t<double, py::array::c_style | py::array::forcecast> x,
           py::array_t<double, py::array::c_style | py::array::forcecast> y,
           py::array_t<double, py::array::c_style | py::array::forcecast> y_err,
           int n_threads) {
            if (y.ndim() == 3) {
                auto par = new NDArray<double, 3>({y.shape(0), y.shape(1), 2});
                auto par_err = new NDArray<double, 3>({y.shape(0), y.shape(1), 2});

                auto y_view = make_view_3d(y);
                auto y_view_err = make_view_3d(y_err);
                auto x_view = make_view_1d(x);

                aare::fit_pol1(x_view, y_view, y_view_err, par->view(),
                               par_err->view(), n_threads);
                return py::make_tuple(return_image_data(par),
                                      return_image_data(par_err));

            } else if (y.ndim() == 1) {
                auto par = new NDArray<double, 1>({2});
                auto par_err = new NDArray<double, 1>({2});

                auto y_view = make_view_1d(y);
                auto y_view_err = make_view_1d(y_err);
                auto x_view = make_view_1d(x);

                aare::fit_pol1(x_view, y_view, y_view_err, par->view(),
                               par_err->view());
                return py::make_tuple(return_image_data(par),
                                      return_image_data(par_err));
            } else {
                throw std::runtime_error("Data must be 1D or 3D");
            }
        },
        R"(
Fit a 1D polynomial to data with error estimates.

Parameters
----------
x : array_like
    The x values.
y : array_like
    The y values.
y_err : array_like
    The error in the y values.
n_threads : int, optional
    The number of threads to use. Default is 4.
)",
        py::arg("x"), py::arg("y"), py::arg("y_err"), py::arg("n_threads") = 4);
}
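Taken together, these bindings expose the fitting routines and the polynomial evaluator to Python. The sketch below shows the intended call pattern; it is a minimal illustration, assuming the compiled module is re-exported from an aare package, that the evaluator is registered as pol1, and using made-up test data.

    # Hedged usage sketch of the bindings above (import path and data are assumptions).
    import numpy as np
    import aare

    x = np.arange(-5.0, 5.0, 0.1)
    y = 10.0 * np.exp(-(x - 0.5)**2 / (2 * 1.2**2)) + np.random.normal(0, 0.1, x.size)

    par = aare.fit_gaus(x, y)                          # 1D input -> [amplitude, mean, sigma]
    par_w, par_err = aare.fit_gaus(x, y, np.full_like(y, 0.1))   # with errors -> (par, par_err)

    line = aare.fit_pol1(x, 0.5 * x + 2.0)             # [slope, intercept]
    y_eval = aare.pol1(x, line)                        # evaluates p0*x + p1 on the grid

For 3D input (a per-pixel stack of curves) the same functions return per-pixel parameter maps and run on n_threads threads, defaulting to 4.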
@ -8,6 +8,7 @@
 #include "pedestal.hpp"
 #include "cluster.hpp"
 #include "cluster_file.hpp"
+#include "fit.hpp"

 //Pybind stuff
 #include <pybind11/pybind11.h>
@ -25,5 +26,10 @@ PYBIND11_MODULE(_aare, m) {
     define_pedestal_bindings<double>(m, "Pedestal_d");
     define_pedestal_bindings<float>(m, "Pedestal_f");
     define_cluster_finder_bindings(m);
+    define_cluster_finder_mt_bindings(m);
     define_cluster_file_io_bindings(m);
+    define_cluster_collector_bindings(m);
+    define_cluster_file_sink_bindings(m);
+    define_fit_bindings(m);

 }
@ -39,65 +39,6 @@ template <typename T> py::array return_vector(std::vector<T> *vec) {
                           free_when_done); // numpy array references this parent
 }

-// template <typename Reader> py::array do_read(Reader &r, size_t n_frames) {
-//     py::array image;
-//     if (n_frames == 0)
-//         n_frames = r.total_frames();
-
-//     std::array<ssize_t, 3> shape{static_cast<ssize_t>(n_frames), r.rows(),
-//                                  r.cols()};
-//     const uint8_t item_size = r.bytes_per_pixel();
-//     if (item_size == 1) {
-//         image = py::array_t<uint8_t, py::array::c_style | py::array::forcecast>(
-//             shape);
-//     } else if (item_size == 2) {
-//         image =
-//             py::array_t<uint16_t, py::array::c_style | py::array::forcecast>(
-//                 shape);
-//     } else if (item_size == 4) {
-//         image =
-//             py::array_t<uint32_t, py::array::c_style | py::array::forcecast>(
-//                 shape);
-//     }
-//     r.read_into(reinterpret_cast<std::byte *>(image.mutable_data()), n_frames);
-//     return image;
-// }
-
-// py::array return_frame(pl::Frame *ptr) {
-//     py::capsule free_when_done(ptr, [](void *f) {
-//         pl::Frame *foo = reinterpret_cast<pl::Frame *>(f);
-//         delete foo;
-//     });
-
-//     const uint8_t item_size = ptr->bytes_per_pixel();
-//     std::vector<ssize_t> shape;
-//     for (auto val : ptr->shape())
-//         if (val > 1)
-//             shape.push_back(val);
-
-//     std::vector<ssize_t> strides;
-//     if (shape.size() == 1)
-//         strides.push_back(item_size);
-//     else if (shape.size() == 2) {
-//         strides.push_back(item_size * shape[1]);
-//         strides.push_back(item_size);
-//     }
-
-//     if (item_size == 1)
-//         return py::array_t<uint8_t>(
-//             shape, strides,
-//             reinterpret_cast<uint8_t *>(ptr->data()), free_when_done);
-//     else if (item_size == 2)
-//         return py::array_t<uint16_t>(shape, strides,
-//                                      reinterpret_cast<uint16_t *>(ptr->data()),
-//                                      free_when_done);
-//     else if (item_size == 4)
-//         return py::array_t<uint32_t>(shape, strides,
-//                                      reinterpret_cast<uint32_t *>(ptr->data()),
-//                                      free_when_done);
-//     return {};
-// }
-
 // todo rewrite generic
 template <class T, int Flags> auto get_shape_3d(py::array_t<T, Flags> arr) {
     return aare::Shape<3>{arr.shape(0), arr.shape(1), arr.shape(2)};
@ -111,6 +52,13 @@ template <class T, int Flags> auto get_shape_2d(py::array_t<T, Flags> arr) {
     return aare::Shape<2>{arr.shape(0), arr.shape(1)};
 }

+template <class T, int Flags> auto get_shape_1d(py::array_t<T, Flags> arr) {
+    return aare::Shape<1>{arr.shape(0)};
+}
+
 template <class T, int Flags> auto make_view_2d(py::array_t<T, Flags> arr) {
     return aare::NDView<T, 2>(arr.mutable_data(), get_shape_2d<T, Flags>(arr));
 }
+
+template <class T, int Flags> auto make_view_1d(py::array_t<T, Flags> arr) {
+    return aare::NDView<T, 1>(arr.mutable_data(), get_shape_1d<T, Flags>(arr));
+}
@ -20,6 +20,12 @@ ClusterFile::ClusterFile(const std::filesystem::path &fname, size_t chunk_size,
             throw std::runtime_error("Could not open file for writing: " +
                                      fname.string());
         }
+    } else if (mode == "a") {
+        fp = fopen(fname.c_str(), "ab");
+        if (!fp) {
+            throw std::runtime_error("Could not open file for appending: " +
+                                     fname.string());
+        }
     } else {
         throw std::runtime_error("Unsupported mode: " + mode);
     }
@ -34,35 +40,35 @@ void ClusterFile::close() {
     }
 }

-void ClusterFile::write_frame(int32_t frame_number,
-                              const ClusterVector<int32_t> &clusters) {
-    if (m_mode != "w") {
+void ClusterFile::write_frame(const ClusterVector<int32_t> &clusters) {
+    if (m_mode != "w" && m_mode != "a") {
         throw std::runtime_error("File not opened for writing");
     }
     if (!(clusters.cluster_size_x() == 3) &&
         !(clusters.cluster_size_y() == 3)) {
         throw std::runtime_error("Only 3x3 clusters are supported");
     }
+    int32_t frame_number = clusters.frame_number();
     fwrite(&frame_number, sizeof(frame_number), 1, fp);
     uint32_t n_clusters = clusters.size();
     fwrite(&n_clusters, sizeof(n_clusters), 1, fp);
-    fwrite(clusters.data(), clusters.element_offset(), clusters.size(), fp);
-    // write clusters
-    // fwrite(clusters.data(), sizeof(Cluster), clusters.size(), fp);
+    fwrite(clusters.data(), clusters.item_size(), clusters.size(), fp);
 }

-std::vector<Cluster3x3> ClusterFile::read_clusters(size_t n_clusters) {
+ClusterVector<int32_t> ClusterFile::read_clusters(size_t n_clusters) {
     if (m_mode != "r") {
         throw std::runtime_error("File not opened for reading");
     }
-    std::vector<Cluster3x3> clusters(n_clusters);
+
+    ClusterVector<int32_t> clusters(3,3, n_clusters);

     int32_t iframe = 0; // frame number needs to be 4 bytes!
     size_t nph_read = 0;
     uint32_t nn = m_num_left;
     uint32_t nph = m_num_left; // number of clusters in frame needs to be 4

-    auto buf = reinterpret_cast<Cluster3x3 *>(clusters.data());
+    // auto buf = reinterpret_cast<Cluster3x3 *>(clusters.data());
+    auto buf = clusters.data();
     // if there are photons left from previous frame read them first
     if (nph) {
         if (nph > n_clusters) {
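The on-disk layout established by write_frame above is a 4-byte frame number, a 4-byte cluster count, and then size() records of item_size() bytes each. A hedged Python sketch of reading one frame back follows; the 40-byte record (2 x int16 coordinates plus 9 x int32 samples) matches the 3x3 int32 clusters enforced above, while the file name is a placeholder.

    # Hedged sketch of the cluster-file frame layout written by write_frame above.
    # Assumes 3x3 int32 clusters (item_size = 2*2 + 9*4 = 40 bytes); the path is a placeholder.
    import numpy as np

    cluster_dt = np.dtype([("x", np.int16), ("y", np.int16), ("data", np.int32, (9,))])

    with open("clusters.bin", "rb") as f:
        frame_number = np.fromfile(f, dtype=np.int32, count=1)[0]
        n_clusters = np.fromfile(f, dtype=np.uint32, count=1)[0]
        clusters = np.fromfile(f, dtype=cluster_dt, count=n_clusters)

    print(frame_number, n_clusters, clusters["data"].sum(axis=1)[:5])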
@ -72,8 +78,8 @@ std::vector<Cluster3x3> ClusterFile::read_clusters(size_t n_clusters) {
         } else {
             nn = nph;
         }
-        nph_read += fread(reinterpret_cast<void *>(buf + nph_read),
-                          sizeof(Cluster3x3), nn, fp);
+        nph_read += fread((buf + nph_read*clusters.item_size()),
+                          clusters.item_size(), nn, fp);
         m_num_left = nph - nn; // write back the number of photons left
     }

@ -87,8 +93,8 @@ std::vector<Cluster3x3> ClusterFile::read_clusters(size_t n_clusters) {
             else
                 nn = nph;

-            nph_read += fread(reinterpret_cast<void *>(buf + nph_read),
-                              sizeof(Cluster3x3), nn, fp);
+            nph_read += fread((buf + nph_read*clusters.item_size()),
+                              clusters.item_size(), nn, fp);
             m_num_left = nph - nn;
         }
         if (nph_read >= n_clusters)
@ -102,7 +108,7 @@ std::vector<Cluster3x3> ClusterFile::read_clusters(size_t n_clusters) {
     return clusters;
 }

-std::vector<Cluster3x3> ClusterFile::read_frame(int32_t &out_fnum) {
+ClusterVector<int32_t> ClusterFile::read_frame() {
     if (m_mode != "r") {
         throw std::runtime_error("File not opened for reading");
     }
@ -110,8 +116,8 @@ std::vector<Cluster3x3> ClusterFile::read_frame(int32_t &out_fnum) {
         throw std::runtime_error(
             "There are still photons left in the last frame");
     }
+    int32_t frame_number;
-    if (fread(&out_fnum, sizeof(out_fnum), 1, fp) != 1) {
+    if (fread(&frame_number, sizeof(frame_number), 1, fp) != 1) {
         throw std::runtime_error("Could not read frame number");
     }

@ -119,158 +125,163 @@ std::vector<Cluster3x3> ClusterFile::read_frame(int32_t &out_fnum) {
     if (fread(&n_clusters, sizeof(n_clusters), 1, fp) != 1) {
         throw std::runtime_error("Could not read number of clusters");
     }
-    std::vector<Cluster3x3> clusters(n_clusters);
+    // std::vector<Cluster3x3> clusters(n_clusters);
+    ClusterVector<int32_t> clusters(3, 3, n_clusters);
+    clusters.set_frame_number(frame_number);

-    if (fread(clusters.data(), sizeof(Cluster3x3), n_clusters, fp) !=
+    if (fread(clusters.data(), clusters.item_size(), n_clusters, fp) !=
         static_cast<size_t>(n_clusters)) {
         throw std::runtime_error("Could not read clusters");
     }
+    clusters.resize(n_clusters);
     return clusters;
 }

+// std::vector<Cluster3x3> ClusterFile::read_cluster_with_cut(size_t n_clusters,
+//                                                            double *noise_map,
+//                                                            int nx, int ny) {
+//     if (m_mode != "r") {
+//         throw std::runtime_error("File not opened for reading");
+//     }
+//     std::vector<Cluster3x3> clusters(n_clusters);
+//     // size_t read_clusters_with_cut(FILE *fp, size_t n_clusters, Cluster *buf,
+//     //                               uint32_t *n_left, double *noise_map, int
+//     //                               nx, int ny) {
+//     int iframe = 0;
+//     // uint32_t nph = *n_left;
+//     uint32_t nph = m_num_left;
+//     // uint32_t nn = *n_left;
+//     uint32_t nn = m_num_left;
+//     size_t nph_read = 0;
+
+//     int32_t t2max, tot1;
+//     int32_t tot3;
+//     // Cluster *ptr = buf;
+//     Cluster3x3 *ptr = clusters.data();
+//     int good = 1;
+//     double noise;
+//     // read photons left from previous frame
+//     if (noise_map)
+//         printf("Using noise map\n");
+
+//     if (nph) {
+//         if (nph > n_clusters) {
+//             // if we have more photons left in the frame then photons to
+//             // read we read directly the requested number
+//             nn = n_clusters;
+//         } else {
+//             nn = nph;
+//         }
+//         for (size_t iph = 0; iph < nn; iph++) {
+//             // read photons 1 by 1
+//             size_t n_read =
+//                 fread(reinterpret_cast<void *>(ptr), sizeof(Cluster3x3), 1, fp);
+//             if (n_read != 1) {
+//                 clusters.resize(nph_read);
+//                 return clusters;
+//             }
+//             // TODO! error handling on read
+//             good = 1;
+//             if (noise_map) {
+//                 if (ptr->x >= 0 && ptr->x < nx && ptr->y >= 0 && ptr->y < ny) {
+//                     tot1 = ptr->data[4];
+//                     analyze_cluster(*ptr, &t2max, &tot3, NULL, NULL, NULL, NULL,
+//                                     NULL);
+//                     noise = noise_map[ptr->y * nx + ptr->x];
+//                     if (tot1 > noise || t2max > 2 * noise || tot3 > 3 * noise) {
+//                         ;
+//                     } else {
+//                         good = 0;
+//                         printf("%d %d %f %d %d %d\n", ptr->x, ptr->y, noise,
+//                                tot1, t2max, tot3);
+//                     }
+//                 } else {
+//                     printf("Bad pixel number %d %d\n", ptr->x, ptr->y);
+//                     good = 0;
+//                 }
+//             }
+//             if (good) {
+//                 ptr++;
+//                 nph_read++;
+//             }
+//             (m_num_left)--;
+//             if (nph_read >= n_clusters)
+//                 break;
+//         }
+//     }
+//     if (nph_read < n_clusters) {
+//         // // keep on reading frames and photons until reaching
+//         // n_clusters
+//         while (fread(&iframe, sizeof(iframe), 1, fp)) {
+//             // // printf("%d\n",nph_read);
+
+//             if (fread(&nph, sizeof(nph), 1, fp)) {
+//                 // // printf("** %d\n",nph);
+//                 m_num_left = nph;
+//                 for (size_t iph = 0; iph < nph; iph++) {
+//                     // // read photons 1 by 1
+//                     size_t n_read = fread(reinterpret_cast<void *>(ptr),
+//                                           sizeof(Cluster3x3), 1, fp);
+//                     if (n_read != 1) {
+//                         clusters.resize(nph_read);
+//                         return clusters;
+//                         // return nph_read;
+//                     }
+//                     good = 1;
+//                     if (noise_map) {
+//                         if (ptr->x >= 0 && ptr->x < nx && ptr->y >= 0 &&
+//                             ptr->y < ny) {
+//                             tot1 = ptr->data[4];
+//                             analyze_cluster(*ptr, &t2max, &tot3, NULL, NULL,
+//                                             NULL, NULL, NULL);
+//                             // noise = noise_map[ptr->y * nx + ptr->x];
+//                             noise = noise_map[ptr->y + ny * ptr->x];
+//                             if (tot1 > noise || t2max > 2 * noise ||
+//                                 tot3 > 3 * noise) {
+//                                 ;
+//                             } else
+//                                 good = 0;
+//                         } else {
+//                             printf("Bad pixel number %d %d\n", ptr->x, ptr->y);
+//                             good = 0;
+//                         }
+//                     }
+//                     if (good) {
+//                         ptr++;
+//                         nph_read++;
+//                     }
+//                     (m_num_left)--;
+//                     if (nph_read >= n_clusters)
+//                         break;
+//                 }
+//             }
+//             if (nph_read >= n_clusters)
+//                 break;
+//         }
+//     }
+//     // printf("%d\n",nph_read);
+//     clusters.resize(nph_read);
+//     return clusters;
+// }

 NDArray<double, 2> calculate_eta2(ClusterVector<int> &clusters) {
-    NDArray<double, 2> eta2({clusters.size(), 2});
+    //TODO! make work with 2x2 clusters
+    NDArray<double, 2> eta2({static_cast<int64_t>(clusters.size()), 2});
     for (size_t i = 0; i < clusters.size(); i++) {
-        // int32_t t2;
-        // auto* ptr = reinterpret_cast<int32_t*> (clusters.element_ptr(i) + 2 *
-        // sizeof(int16_t)); analyze_cluster(clusters.at<Cluster3x3>(i), &t2,
-        // nullptr, nullptr, &eta2(i,0), &eta2(i,1) , nullptr, nullptr);
-        auto [x, y] = calculate_eta2(clusters.at<Cluster3x3>(i));
-        eta2(i, 0) = x;
-        eta2(i, 1) = y;
+        auto e = calculate_eta2(clusters.at<Cluster3x3>(i));
+        eta2(i, 0) = e.x;
+        eta2(i, 1) = e.y;
     }
     return eta2;
 }

-std::array<double, 2> calculate_eta2(Cluster3x3 &cl) {
-    std::array<double, 2> eta2{};
+/**
+ * @brief Calculate the eta2 values for a 3x3 cluster and return them in an Eta2 struct
+ * containing etay, etax and the corner of the cluster.
+ */
+Eta2 calculate_eta2(Cluster3x3 &cl) {
+    Eta2 eta{};

     std::array<int32_t, 4> tot2;
     tot2[0] = cl.data[0] + cl.data[1] + cl.data[3] + cl.data[4];
@ -283,39 +294,43 @@ std::array<double, 2> calculate_eta2(Cluster3x3 &cl) {
     switch (c) {
     case cBottomLeft:
         if ((cl.data[3] + cl.data[4]) != 0)
-            eta2[0] =
+            eta.x =
                 static_cast<double>(cl.data[4]) / (cl.data[3] + cl.data[4]);
         if ((cl.data[1] + cl.data[4]) != 0)
-            eta2[1] =
+            eta.y =
                 static_cast<double>(cl.data[4]) / (cl.data[1] + cl.data[4]);
+        eta.c = cBottomLeft;
         break;
     case cBottomRight:
         if ((cl.data[2] + cl.data[5]) != 0)
-            eta2[0] =
+            eta.x =
                 static_cast<double>(cl.data[5]) / (cl.data[4] + cl.data[5]);
         if ((cl.data[1] + cl.data[4]) != 0)
-            eta2[1] =
+            eta.y =
                 static_cast<double>(cl.data[4]) / (cl.data[1] + cl.data[4]);
+        eta.c = cBottomRight;
         break;
     case cTopLeft:
         if ((cl.data[7] + cl.data[4]) != 0)
-            eta2[0] =
+            eta.x =
                 static_cast<double>(cl.data[4]) / (cl.data[3] + cl.data[4]);
         if ((cl.data[7] + cl.data[4]) != 0)
-            eta2[1] =
+            eta.y =
                 static_cast<double>(cl.data[7]) / (cl.data[7] + cl.data[4]);
+        eta.c = cTopLeft;
         break;
     case cTopRight:
        if ((cl.data[5] + cl.data[4]) != 0)
-            eta2[0] =
+            eta.x =
                 static_cast<double>(cl.data[5]) / (cl.data[5] + cl.data[4]);
         if ((cl.data[7] + cl.data[4]) != 0)
-            eta2[1] =
+            eta.y =
                 static_cast<double>(cl.data[7]) / (cl.data[7] + cl.data[4]);
+        eta.c = cTopRight;
         break;
-        // default:;
+        // no default to allow compiler to warn about missing cases
     }
-    return eta2;
+    return eta;
 }

 int analyze_cluster(Cluster3x3 &cl, int32_t *t2, int32_t *t3, char *quad,
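The eta2 computation picks the 2x2 quadrant with the largest sum and forms charge-sharing ratios against the centre pixel. A hedged Python sketch of the bottom-left branch above, with made-up 3x3 samples; mapping the quadrant index to cBottomLeft is an assumption here, since the corner selection code sits outside this hunk.

    # Hedged sketch of the eta2 ratios for the bottom-left corner case, made-up samples.
    data = [1, 8, 0,
            6, 30, 2,
            0, 3, 1]          # row-major 3x3 cluster, index 4 is the centre pixel

    tot2 = [data[0] + data[1] + data[3] + data[4],   # quadrant sums as in tot2[0]..tot2[3]
            data[1] + data[2] + data[4] + data[5],
            data[3] + data[4] + data[6] + data[7],
            data[4] + data[5] + data[7] + data[8]]
    corner = tot2.index(max(tot2))                   # 0 here, assumed to be cBottomLeft

    eta_x = data[4] / (data[3] + data[4])            # 30 / 36 ~ 0.83
    eta_y = data[4] / (data[1] + data[4])            # 30 / 38 ~ 0.79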
@ -6,20 +6,22 @@

 using aare::ClusterVector;

-TEST_CASE("ClusterVector 2x2 int32_t capacity 4, push back then read") {
-
 struct Cluster_i2x2 {
     int16_t x;
     int16_t y;
     int32_t data[4];
 };

+TEST_CASE("ClusterVector 2x2 int32_t capacity 4, push back then read") {
+
     ClusterVector<int32_t> cv(2, 2, 4);
     REQUIRE(cv.capacity() == 4);
     REQUIRE(cv.size() == 0);
     REQUIRE(cv.cluster_size_x() == 2);
     REQUIRE(cv.cluster_size_y() == 2);
     // int16_t, int16_t, 2x2 int32_t = 20 bytes
-    REQUIRE(cv.element_offset() == 20);
+    REQUIRE(cv.item_size() == 20);

     //Create a cluster and push back into the vector
     Cluster_i2x2 c1 = {1, 2, {3, 4, 5, 6}};
@ -30,7 +32,7 @@ TEST_CASE("ClusterVector 2x2 int32_t capacity 4, push back then read") {
     //Read the cluster back out using copy. TODO! Can we improve the API?
     Cluster_i2x2 c2;
     std::byte *ptr = cv.element_ptr(0);
-    std::copy(ptr, ptr + cv.element_offset(), reinterpret_cast<std::byte*>(&c2));
+    std::copy(ptr, ptr + cv.item_size(), reinterpret_cast<std::byte*>(&c2));

     //Check that the data is the same
     REQUIRE(c1.x == c2.x);
@ -83,8 +85,8 @@ TEST_CASE("Storing floats"){
         float data[8];
     };

-    ClusterVector<float> cv(2, 4, 2);
-    REQUIRE(cv.capacity() == 2);
+    ClusterVector<float> cv(2, 4, 10);
+    REQUIRE(cv.capacity() == 10);
     REQUIRE(cv.size() == 0);
     REQUIRE(cv.cluster_size_x() == 2);
     REQUIRE(cv.cluster_size_y() == 4);
@ -92,13 +94,13 @@ TEST_CASE("Storing floats"){
     //Create a cluster and push back into the vector
     Cluster_f4x2 c1 = {1, 2, {3.0, 4.0, 5.0, 6.0,3.0, 4.0, 5.0, 6.0}};
     cv.push_back(c1.x, c1.y, reinterpret_cast<std::byte*>(&c1.data[0]));
-    REQUIRE(cv.capacity() == 2);
+    REQUIRE(cv.capacity() == 10);
     REQUIRE(cv.size() == 1);

     Cluster_f4x2 c2 = {6, 7, {8.0, 9.0, 10.0, 11.0,8.0, 9.0, 10.0, 11.0}};
     cv.push_back(c2.x, c2.y, reinterpret_cast<std::byte*>(&c2.data[0]));
-    REQUIRE(cv.capacity() == 2);
+    REQUIRE(cv.capacity() == 10);
     REQUIRE(cv.size() == 2);

     auto sums = cv.sum();
@ -106,3 +108,91 @@ TEST_CASE("Storing floats"){
     REQUIRE_THAT(sums[0], Catch::Matchers::WithinAbs(36.0, 1e-6));
     REQUIRE_THAT(sums[1], Catch::Matchers::WithinAbs(76.0, 1e-6));
 }

+TEST_CASE("Push back more than initial capacity"){
+
+    ClusterVector<int32_t> cv(2, 2, 2);
+    auto initial_data = cv.data();
+    Cluster_i2x2 c1 = {1, 2, {3, 4, 5, 6}};
+    cv.push_back(c1.x, c1.y, reinterpret_cast<std::byte*>(&c1.data[0]));
+    REQUIRE(cv.size() == 1);
+    REQUIRE(cv.capacity() == 2);
+
+    Cluster_i2x2 c2 = {6, 7, {8, 9, 10, 11}};
+    cv.push_back(c2.x, c2.y, reinterpret_cast<std::byte*>(&c2.data[0]));
+    REQUIRE(cv.size() == 2);
+    REQUIRE(cv.capacity() == 2);
+
+    Cluster_i2x2 c3 = {11, 12, {13, 14, 15, 16}};
+    cv.push_back(c3.x, c3.y, reinterpret_cast<std::byte*>(&c3.data[0]));
+    REQUIRE(cv.size() == 3);
+    REQUIRE(cv.capacity() == 4);
+
+    Cluster_i2x2* ptr = reinterpret_cast<Cluster_i2x2*>(cv.data());
+    REQUIRE(ptr[0].x == 1);
+    REQUIRE(ptr[0].y == 2);
+    REQUIRE(ptr[1].x == 6);
+    REQUIRE(ptr[1].y == 7);
+    REQUIRE(ptr[2].x == 11);
+    REQUIRE(ptr[2].y == 12);
+
+    //We should have allocated a new buffer, since we outgrew the initial capacity
+    REQUIRE(initial_data != cv.data());
+
+}
+
+TEST_CASE("Concatenate two cluster vectors where the first has enough capacity"){
+    ClusterVector<int32_t> cv1(2, 2, 12);
+    Cluster_i2x2 c1 = {1, 2, {3, 4, 5, 6}};
+    cv1.push_back(c1.x, c1.y, reinterpret_cast<std::byte*>(&c1.data[0]));
+    Cluster_i2x2 c2 = {6, 7, {8, 9, 10, 11}};
+    cv1.push_back(c2.x, c2.y, reinterpret_cast<std::byte*>(&c2.data[0]));
+
+    ClusterVector<int32_t> cv2(2, 2, 2);
+    Cluster_i2x2 c3 = {11, 12, {13, 14, 15, 16}};
+    cv2.push_back(c3.x, c3.y, reinterpret_cast<std::byte*>(&c3.data[0]));
+    Cluster_i2x2 c4 = {16, 17, {18, 19, 20, 21}};
+    cv2.push_back(c4.x, c4.y, reinterpret_cast<std::byte*>(&c4.data[0]));
+
+    cv1 += cv2;
+    REQUIRE(cv1.size() == 4);
+    REQUIRE(cv1.capacity() == 12);
+
+    Cluster_i2x2* ptr = reinterpret_cast<Cluster_i2x2*>(cv1.data());
+    REQUIRE(ptr[0].x == 1);
+    REQUIRE(ptr[0].y == 2);
+    REQUIRE(ptr[1].x == 6);
+    REQUIRE(ptr[1].y == 7);
+    REQUIRE(ptr[2].x == 11);
+    REQUIRE(ptr[2].y == 12);
+    REQUIRE(ptr[3].x == 16);
+    REQUIRE(ptr[3].y == 17);
+}
+
+TEST_CASE("Concatenate two cluster vectors where we need to allocate"){
+    ClusterVector<int32_t> cv1(2, 2, 2);
+    Cluster_i2x2 c1 = {1, 2, {3, 4, 5, 6}};
+    cv1.push_back(c1.x, c1.y, reinterpret_cast<std::byte*>(&c1.data[0]));
+    Cluster_i2x2 c2 = {6, 7, {8, 9, 10, 11}};
+    cv1.push_back(c2.x, c2.y, reinterpret_cast<std::byte*>(&c2.data[0]));
+
+    ClusterVector<int32_t> cv2(2, 2, 2);
+    Cluster_i2x2 c3 = {11, 12, {13, 14, 15, 16}};
+    cv2.push_back(c3.x, c3.y, reinterpret_cast<std::byte*>(&c3.data[0]));
+    Cluster_i2x2 c4 = {16, 17, {18, 19, 20, 21}};
+    cv2.push_back(c4.x, c4.y, reinterpret_cast<std::byte*>(&c4.data[0]));
+
+    cv1 += cv2;
+    REQUIRE(cv1.size() == 4);
+    REQUIRE(cv1.capacity() == 4);
+
+    Cluster_i2x2* ptr = reinterpret_cast<Cluster_i2x2*>(cv1.data());
+    REQUIRE(ptr[0].x == 1);
+    REQUIRE(ptr[0].y == 2);
+    REQUIRE(ptr[1].x == 6);
+    REQUIRE(ptr[1].y == 7);
+    REQUIRE(ptr[2].x == 11);
+    REQUIRE(ptr[2].y == 12);
+    REQUIRE(ptr[3].x == 16);
+    REQUIRE(ptr[3].y == 17);
+}
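These tests also pin down the packed record layout and the growth behaviour when the initial capacity is exceeded. A hedged Python check of the same arithmetic; the capacity doubling is inferred from the 2 to 4 expectation in the test above, not from the ClusterVector implementation itself.

    # Hedged sketch: record size and growth arithmetic exercised by the tests above.
    import numpy as np

    cluster_2x2_i32 = np.dtype([("x", np.int16), ("y", np.int16), ("data", np.int32, (4,))])
    assert cluster_2x2_i32.itemsize == 20        # matches REQUIRE(cv.item_size() == 20)

    capacity, size = 2, 0
    for _ in range(3):                           # the third push_back outgrows the buffer
        if size == capacity:
            capacity *= 2                        # assumed doubling: 2 -> 4, new allocation
        size += 1
    print(size, capacity)                        # 3 4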
@ -45,6 +45,8 @@ File& File::operator=(File &&other) noexcept {
     return *this;
 }

+// void File::close() { file_impl->close(); }
+
 Frame File::read_frame() { return file_impl->read_frame(); }
 Frame File::read_frame(size_t frame_index) {
     return file_impl->read_frame(frame_index);

300  src/Fit.cpp  Normal file
@ -0,0 +1,300 @@
#include "aare/Fit.hpp"
|
||||||
|
#include "aare/utils/task.hpp"
|
||||||
|
|
||||||
|
#include <lmcurve2.h>
|
||||||
|
#include <lmfit.hpp>
|
||||||
|
|
||||||
|
#include <thread>
|
||||||
|
|
||||||
|
namespace aare {
|
||||||
|
|
||||||
|
namespace func {
|
||||||
|
|
||||||
|
double gaus(const double x, const double *par) {
|
||||||
|
return par[0] * exp(-pow(x - par[1], 2) / (2 * pow(par[2], 2)));
|
||||||
|
}
|
||||||
|
|
||||||
|
NDArray<double, 1> gaus(NDView<double, 1> x, NDView<double, 1> par) {
|
||||||
|
NDArray<double, 1> y({x.shape(0)}, 0);
|
||||||
|
for (size_t i = 0; i < x.size(); i++) {
|
||||||
|
y(i) = gaus(x(i), par.data());
|
||||||
|
}
|
||||||
|
return y;
|
||||||
|
}
|
||||||
|
|
||||||
|
double pol1(const double x, const double *par) { return par[0] * x + par[1]; }
|
||||||
|
|
||||||
|
NDArray<double, 1> pol1(NDView<double, 1> x, NDView<double, 1> par) {
|
||||||
|
NDArray<double, 1> y({x.shape()}, 0);
|
||||||
|
for (size_t i = 0; i < x.size(); i++) {
|
||||||
|
y(i) = pol1(x(i), par.data());
|
||||||
|
}
|
||||||
|
return y;
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace func
|
||||||
|
|
||||||
|
NDArray<double, 1> fit_gaus(NDView<double, 1> x, NDView<double, 1> y) {
|
||||||
|
NDArray<double, 1> result({3}, 0);
|
||||||
|
lm_control_struct control = lm_control_double;
|
||||||
|
|
||||||
|
// Estimate the initial parameters for the fit
|
||||||
|
std::vector<double> start_par{0, 0, 0};
|
||||||
|
auto e = std::max_element(y.begin(), y.end());
|
||||||
|
auto idx = std::distance(y.begin(), e);
|
||||||
|
|
||||||
|
start_par[0] = *e; // For amplitude we use the maximum value
|
||||||
|
start_par[1] =
|
||||||
|
x[idx]; // For the mean we use the x value of the maximum value
|
||||||
|
|
||||||
|
// For sigma we estimate the fwhm and divide by 2.35
|
||||||
|
// assuming equally spaced x values
|
||||||
|
auto delta = x[1] - x[0];
|
||||||
|
start_par[2] =
|
||||||
|
std::count_if(y.begin(), y.end(),
|
||||||
|
[e, delta](double val) { return val > *e / 2; }) *
|
||||||
|
delta / 2.35;
|
||||||
|
|
||||||
|
lmfit::result_t res(start_par);
|
||||||
|
lmcurve(res.par.size(), res.par.data(), x.size(), x.data(), y.data(),
|
||||||
|
aare::func::gaus, &control, &res.status);
|
||||||
|
|
||||||
|
result(0) = res.par[0];
|
||||||
|
result(1) = res.par[1];
|
||||||
|
result(2) = res.par[2];
|
||||||
|
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
NDArray<double, 3> fit_gaus(NDView<double, 1> x, NDView<double, 3> y,
|
||||||
|
int n_threads) {
|
||||||
|
NDArray<double, 3> result({y.shape(0), y.shape(1), 3}, 0);
|
||||||
|
|
||||||
|
auto process = [&x, &y, &result](ssize_t first_row, ssize_t last_row) {
|
||||||
|
for (ssize_t row = first_row; row < last_row; row++) {
|
||||||
|
for (ssize_t col = 0; col < y.shape(1); col++) {
|
||||||
|
NDView<double, 1> values(&y(row, col, 0), {y.shape(2)});
|
||||||
|
auto res = fit_gaus(x, values);
|
||||||
|
result(row, col, 0) = res(0);
|
||||||
|
result(row, col, 1) = res(1);
|
||||||
|
result(row, col, 2) = res(2);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
auto tasks = split_task(0, y.shape(0), n_threads);
|
||||||
|
std::vector<std::thread> threads;
|
||||||
|
for (auto &task : tasks) {
|
||||||
|
threads.push_back(std::thread(process, task.first, task.second));
|
||||||
|
}
|
||||||
|
for (auto &thread : threads) {
|
||||||
|
thread.join();
|
||||||
|
}
|
||||||
|
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
void fit_gaus(NDView<double, 1> x, NDView<double, 3> y, NDView<double, 3> y_err,
|
||||||
|
NDView<double, 3> par_out, NDView<double, 3> par_err_out,
|
||||||
|
int n_threads) {
|
||||||
|
|
||||||
|
auto process = [&](ssize_t first_row, ssize_t last_row) {
|
||||||
|
for (ssize_t row = first_row; row < last_row; row++) {
|
||||||
|
for (ssize_t col = 0; col < y.shape(1); col++) {
|
||||||
|
NDView<double, 1> y_view(&y(row, col, 0), {y.shape(2)});
|
||||||
|
NDView<double, 1> y_err_view(&y_err(row, col, 0),
|
||||||
|
{y_err.shape(2)});
|
||||||
|
NDView<double, 1> par_out_view(&par_out(row, col, 0),
|
||||||
|
{par_out.shape(2)});
|
||||||
|
NDView<double, 1> par_err_out_view(&par_err_out(row, col, 0),
|
||||||
|
{par_err_out.shape(2)});
|
||||||
|
fit_gaus(x, y_view, y_err_view, par_out_view, par_err_out_view);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
auto tasks = split_task(0, y.shape(0), n_threads);
|
||||||
|
std::vector<std::thread> threads;
|
||||||
|
for (auto &task : tasks) {
|
||||||
|
threads.push_back(std::thread(process, task.first, task.second));
|
||||||
|
}
|
||||||
|
for (auto &thread : threads) {
|
||||||
|
thread.join();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void fit_gaus(NDView<double, 1> x, NDView<double, 1> y, NDView<double, 1> y_err,
              NDView<double, 1> par_out, NDView<double, 1> par_err_out) {
    // Check that we have the correct sizes
    if (y.size() != x.size() || y.size() != y_err.size() ||
        par_out.size() != 3 || par_err_out.size() != 3) {
        throw std::runtime_error("Data, x, data_err must have the same size "
                                 "and par_out, par_err_out must have size 3");
    }

    lm_control_struct control = lm_control_double;

    // Estimate the initial parameters for the fit
    std::vector<double> start_par{0, 0, 0};
    std::vector<double> start_par_err{0, 0, 0};
    std::vector<double> start_cov{0, 0, 0, 0, 0, 0, 0, 0, 0};

    auto e = std::max_element(y.begin(), y.end());
    auto idx = std::distance(y.begin(), e);
    start_par[0] = *e;     // For amplitude we use the maximum value
    start_par[1] = x[idx]; // For the mean we use the x value of the maximum value

    // For sigma we estimate the fwhm and divide by 2.35
    // assuming equally spaced x values
    auto delta = x[1] - x[0];
    start_par[2] =
        std::count_if(y.begin(), y.end(),
                      [e, delta](double val) { return val > *e / 2; }) *
        delta / 2.35;

    lmfit::result_t res(start_par);
    lmfit::result_t res_err(start_par_err);
    lmfit::result_t cov(start_cov);

    // TODO can we make lmcurve write the result directly where it should be?
    lmcurve2(res.par.size(), res.par.data(), res_err.par.data(), cov.par.data(),
             x.size(), x.data(), y.data(), y_err.data(), aare::func::gaus,
             &control, &res.status);

    par_out(0) = res.par[0];
    par_out(1) = res.par[1];
    par_out(2) = res.par[2];
    par_err_out(0) = res_err.par[0];
    par_err_out(1) = res_err.par[1];
    par_err_out(2) = res_err.par[2];
}

void fit_pol1(NDView<double, 1> x, NDView<double, 1> y, NDView<double, 1> y_err,
              NDView<double, 1> par_out, NDView<double, 1> par_err_out) {
    // Check that we have the correct sizes
    if (y.size() != x.size() || y.size() != y_err.size() ||
        par_out.size() != 2 || par_err_out.size() != 2) {
        throw std::runtime_error("Data, x, data_err must have the same size "
                                 "and par_out, par_err_out must have size 2");
    }

    lm_control_struct control = lm_control_double;

    // Estimate the initial parameters for the fit
    std::vector<double> start_par{0, 0};
    std::vector<double> start_par_err{0, 0};
    std::vector<double> start_cov{0, 0, 0, 0};

    auto y2 = std::max_element(y.begin(), y.end());
    auto x2 = x[std::distance(y.begin(), y2)];
    auto y1 = std::min_element(y.begin(), y.end());
    auto x1 = x[std::distance(y.begin(), y1)];

    // For the slope we use the line through the min and max y values
    start_par[0] = (*y2 - *y1) / (x2 - x1);
    // For the intercept we extrapolate that line back to x = 0
    start_par[1] = *y1 - ((*y2 - *y1) / (x2 - x1)) * x1;

    lmfit::result_t res(start_par);
    lmfit::result_t res_err(start_par_err);
    lmfit::result_t cov(start_cov);

    lmcurve2(res.par.size(), res.par.data(), res_err.par.data(), cov.par.data(),
             x.size(), x.data(), y.data(), y_err.data(), aare::func::pol1,
             &control, &res.status);

    par_out(0) = res.par[0];
    par_out(1) = res.par[1];
    par_err_out(0) = res_err.par[0];
    par_err_out(1) = res_err.par[1];
}

void fit_pol1(NDView<double, 1> x, NDView<double, 3> y, NDView<double, 3> y_err,
              NDView<double, 3> par_out, NDView<double, 3> par_err_out,
              int n_threads) {

    auto process = [&](ssize_t first_row, ssize_t last_row) {
        for (ssize_t row = first_row; row < last_row; row++) {
            for (ssize_t col = 0; col < y.shape(1); col++) {
                NDView<double, 1> y_view(&y(row, col, 0), {y.shape(2)});
                NDView<double, 1> y_err_view(&y_err(row, col, 0),
                                             {y_err.shape(2)});
                NDView<double, 1> par_out_view(&par_out(row, col, 0),
                                               {par_out.shape(2)});
                NDView<double, 1> par_err_out_view(&par_err_out(row, col, 0),
                                                   {par_err_out.shape(2)});
                fit_pol1(x, y_view, y_err_view, par_out_view, par_err_out_view);
            }
        }
    };

    auto tasks = split_task(0, y.shape(0), n_threads);
    std::vector<std::thread> threads;
    for (auto &task : tasks) {
        threads.push_back(std::thread(process, task.first, task.second));
    }
    for (auto &thread : threads) {
        thread.join();
    }
}

NDArray<double, 1> fit_pol1(NDView<double, 1> x, NDView<double, 1> y) {
    // // Check that we have the correct sizes
    // if (y.size() != x.size() || y.size() != y_err.size() ||
    //     par_out.size() != 2 || par_err_out.size() != 2) {
    //     throw std::runtime_error("Data, x, data_err must have the same size "
    //                              "and par_out, par_err_out must have size 2");
    // }
    NDArray<double, 1> par({2}, 0);

    lm_control_struct control = lm_control_double;

    // Estimate the initial parameters for the fit
    std::vector<double> start_par{0, 0};

    auto y2 = std::max_element(y.begin(), y.end());
    auto x2 = x[std::distance(y.begin(), y2)];
    auto y1 = std::min_element(y.begin(), y.end());
    auto x1 = x[std::distance(y.begin(), y1)];

    start_par[0] = (*y2 - *y1) / (x2 - x1);
    start_par[1] = *y1 - ((*y2 - *y1) / (x2 - x1)) * x1;

    lmfit::result_t res(start_par);

    lmcurve(res.par.size(), res.par.data(), x.size(), x.data(), y.data(),
            aare::func::pol1, &control, &res.status);

    par(0) = res.par[0];
    par(1) = res.par[1];
    return par;
}

NDArray<double, 3> fit_pol1(NDView<double, 1> x, NDView<double, 3> y,
                            int n_threads) {
    NDArray<double, 3> result({y.shape(0), y.shape(1), 2}, 0);

    auto process = [&](ssize_t first_row, ssize_t last_row) {
        for (ssize_t row = first_row; row < last_row; row++) {
            for (ssize_t col = 0; col < y.shape(1); col++) {
                NDView<double, 1> values(&y(row, col, 0), {y.shape(2)});
                auto res = fit_pol1(x, values);
                result(row, col, 0) = res(0);
                result(row, col, 1) = res(1);
            }
        }
    };

    auto tasks = split_task(0, y.shape(0), n_threads);
    std::vector<std::thread> threads;
    for (auto &task : tasks) {
        threads.push_back(std::thread(process, task.first, task.second));
    }
    for (auto &thread : threads) {
        thread.join();
    }
    return result;
}

} // namespace aare
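The Gaussian fit seeds its start parameters from the data: amplitude from the maximum, mean from the position of the maximum, and sigma from the FWHM (samples above half maximum times the x spacing) divided by 2.35. A small Python sketch of just that seeding step; the actual minimisation is done by lmfit/lmcurve in the C++ above, and the test curve here is made up.

    # Hedged sketch of the start-parameter estimation used by fit_gaus above.
    import numpy as np

    x = np.linspace(-5, 5, 101)
    y = 10.0 * np.exp(-(x - 0.5)**2 / (2 * 1.2**2))

    amp = y.max()                                         # start_par[0]
    mean = x[y.argmax()]                                  # start_par[1]
    delta = x[1] - x[0]
    sigma = np.count_nonzero(y > amp / 2) * delta / 2.35  # FWHM-based start_par[2]

    print(amp, mean, sigma)   # roughly 10, 0.5, ~1.2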
157  src/RawFile.cpp
@ -1,6 +1,7 @@
 #include "aare/RawFile.hpp"
 #include "aare/PixelMap.hpp"
 #include "aare/defs.hpp"
+#include "aare/geo_helpers.hpp"

 #include <fmt/format.h>
 #include <nlohmann/json.hpp>
@ -21,7 +22,10 @@ RawFile::RawFile(const std::filesystem::path &fname, const std::string &mode)

         find_geometry();
-        update_geometry_with_roi();
+
+        if (m_master.roi()){
+            m_geometry = update_geometry_with_roi(m_geometry, m_master.roi().value());
+        }
+
         open_subfiles();
     } else {
@ -72,9 +76,13 @@ size_t RawFile::n_mod() const { return n_subfile_parts; }

 size_t RawFile::bytes_per_frame() {
-    return m_rows * m_cols * m_master.bitdepth() / 8;
+    // return m_rows * m_cols * m_master.bitdepth() / 8;
+    return m_geometry.pixels_x * m_geometry.pixels_y * m_master.bitdepth() / 8;
+}
+size_t RawFile::pixels_per_frame() {
+    // return m_rows * m_cols;
+    return m_geometry.pixels_x * m_geometry.pixels_y;
 }
-size_t RawFile::pixels_per_frame() { return m_rows * m_cols; }

 DetectorType RawFile::detector_type() const { return m_master.detector_type(); }
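bytes_per_frame and pixels_per_frame are now derived from the ROI-adjusted geometry rather than the raw module layout. A quick sanity check of the arithmetic, with illustrative numbers that are not taken from any real master file:

    # Hedged sketch: frame size arithmetic with made-up geometry values.
    pixels_x, pixels_y, bitdepth = 1024, 512, 16

    pixels_per_frame = pixels_x * pixels_y               # 524288
    bytes_per_frame = pixels_per_frame * bitdepth // 8   # 1048576 bytes (1 MiB)
    print(pixels_per_frame, bytes_per_frame)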
@ -92,8 +100,8 @@ void RawFile::seek(size_t frame_index) {
 size_t RawFile::tell() { return m_current_frame; };

 size_t RawFile::total_frames() const { return m_master.frames_in_file(); }
-size_t RawFile::rows() const { return m_rows; }
-size_t RawFile::cols() const { return m_cols; }
+size_t RawFile::rows() const { return m_geometry.pixels_y; }
+size_t RawFile::cols() const { return m_geometry.pixels_x; }
 size_t RawFile::bitdepth() const { return m_master.bitdepth(); }
 xy RawFile::geometry() { return m_master.geometry(); }

@ -102,11 +110,11 @@ void RawFile::open_subfiles() {
     for (size_t i = 0; i != n_subfiles; ++i) {
         auto v = std::vector<RawSubFile *>(n_subfile_parts);
         for (size_t j = 0; j != n_subfile_parts; ++j) {
-            auto pos = m_module_pixel_0[j];
+            auto pos = m_geometry.module_pixel_0[j];
             v[j] = new RawSubFile(m_master.data_fname(j, i),
                                   m_master.detector_type(), pos.height,
                                   pos.width, m_master.bitdepth(),
-                                  positions[j].row, positions[j].col);
+                                  pos.row_index, pos.col_index);

         }
         subfiles.push_back(v);
@ -149,112 +157,49 @@ int RawFile::find_number_of_subfiles() {

 RawMasterFile RawFile::master() const { return m_master; }

+/**
+ * @brief Find the geometry of the detector by opening all the subfiles and
+ * reading the headers.
+ */
 void RawFile::find_geometry() {
+
+    //Hold the maximal row and column number found
+    //Later used for calculating the total number of rows and columns
     uint16_t r{};
     uint16_t c{};
+
     for (size_t i = 0; i < n_subfile_parts; i++) {
-        auto h = this->read_header(m_master.data_fname(i, 0));
+        auto h = read_header(m_master.data_fname(i, 0));
         r = std::max(r, h.row);
         c = std::max(c, h.column);
-        positions.push_back({h.row, h.column});
+        // positions.push_back({h.row, h.column});

         ModuleGeometry g;
-        g.x = h.column * m_master.pixels_x();
-        g.y = h.row * m_master.pixels_y();
+        g.origin_x = h.column * m_master.pixels_x();
+        g.origin_y = h.row * m_master.pixels_y();
+        g.row_index = h.row;
+        g.col_index = h.column;
         g.width = m_master.pixels_x();
         g.height = m_master.pixels_y();
-        m_module_pixel_0.push_back(g);
+        m_geometry.module_pixel_0.push_back(g);

     }

     r++;
     c++;

-    m_rows = (r * m_master.pixels_y());
-    m_cols = (c * m_master.pixels_x());
-
-    m_rows += static_cast<size_t>((r - 1) * cfg.module_gap_row);
+    m_geometry.pixels_y = (r * m_master.pixels_y());
+    m_geometry.pixels_x = (c * m_master.pixels_x());
+    m_geometry.modules_x = c;
+    m_geometry.modules_y = r;
+    m_geometry.pixels_y += static_cast<size_t>((r - 1) * cfg.module_gap_row);
+}
-
-#ifdef AARE_VERBOSE
-    fmt::print("\nRawFile::find_geometry()\n");
-    for (size_t i = 0; i < m_module_pixel_0.size(); i++) {
-        fmt::print("Module {} at position: (r:{},c:{})\n", i,
-                   m_module_pixel_0[i].y, m_module_pixel_0[i].x);
-    }
-    fmt::print("Image size: {}x{}\n\n", m_rows, m_cols);
-#endif
-}
-
-void RawFile::update_geometry_with_roi() {
-    // TODO! implement this
-    if (m_master.roi()) {
-        auto roi = m_master.roi().value();
-
-        // TODO! can we do this cleaner?
-        int pos_y = 0;
-        int pos_y_increment = 0;
-        for (size_t row = 0; row < m_master.geometry().row; row++) {
-            int pos_x = 0;
-            for (size_t col = 0; col < m_master.geometry().col; col++) {
-                auto &m = m_module_pixel_0[row * m_master.geometry().col + col];
-                auto original_height = m.height;
-                auto original_width = m.width;
-
-                // module is to the left of the roi
-                if (m.x + m.width < roi.xmin) {
-                    m.width = 0;
-
-                // roi is in module
-                } else {
-                    // here we only arrive when the roi is in or to the left of
-                    // the module
-                    if (roi.xmin > m.x) {
-                        m.width -= roi.xmin - m.x;
-                    }
-                    if (roi.xmax < m.x + m.width) {
-                        m.width -= m.x + original_width - roi.xmax;
-                    }
-                    m.x = pos_x;
-                    pos_x += m.width;
-                }
-
-                if (m.y + m.height < roi.ymin) {
-                    m.height = 0;
-                } else {
-                    if ((roi.ymin > m.y) && (roi.ymin < m.y + m.height)) {
-                        m.height -= roi.ymin - m.y;
-                    }
-                    if (roi.ymax < m.y + m.height) {
-                        m.height -= m.y + original_height - roi.ymax;
-                    }
-                    m.y = pos_y;
-                    pos_y_increment = m.height;
-                }
-            }
-            // increment pos_y
-            pos_y += pos_y_increment;
-        }
-
-        m_rows = roi.height();
-        m_cols = roi.width();
-    }
-
-#ifdef AARE_VERBOSE
-    fmt::print("RawFile::update_geometry_with_roi()\n");
-    for (const auto &m : m_module_pixel_0) {
-        fmt::print("Module at position: (r:{}, c:{}, h:{}, w:{})\n", m.y, m.x,
-                   m.height, m.width);
-    }
-    fmt::print("Updated image size: {}x{}\n\n", m_rows, m_cols);
-    fmt::print("\n");
-#endif
-
-}
-
 Frame RawFile::get_frame(size_t frame_index) {
-    auto f = Frame(m_rows, m_cols, Dtype::from_bitdepth(m_master.bitdepth()));
+    auto f = Frame(m_geometry.pixels_y, m_geometry.pixels_x, Dtype::from_bitdepth(m_master.bitdepth()));
     std::byte *frame_buffer = f.data();
     get_frame_into(frame_index, frame_buffer);
     return f;
@ -278,6 +223,10 @@ void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, Detect
|
|||||||
if (n_subfile_parts != 1) {
|
if (n_subfile_parts != 1) {
|
||||||
for (size_t part_idx = 0; part_idx != n_subfile_parts; ++part_idx) {
|
for (size_t part_idx = 0; part_idx != n_subfile_parts; ++part_idx) {
|
||||||
auto subfile_id = frame_index / m_master.max_frames_per_file();
|
auto subfile_id = frame_index / m_master.max_frames_per_file();
|
||||||
|
if (subfile_id >= subfiles.size()) {
|
||||||
|
throw std::runtime_error(LOCATION +
|
||||||
|
" Subfile out of range. Possible missing data.");
|
||||||
|
}
|
||||||
frame_numbers[part_idx] =
|
frame_numbers[part_idx] =
|
||||||
subfiles[subfile_id][part_idx]->frame_number(
|
subfiles[subfile_id][part_idx]->frame_number(
|
||||||
frame_index % m_master.max_frames_per_file());
|
frame_index % m_master.max_frames_per_file());
|
||||||
@ -311,12 +260,16 @@ void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, Detect
|
|||||||
for (size_t part_idx = 0; part_idx != n_subfile_parts; ++part_idx) {
|
for (size_t part_idx = 0; part_idx != n_subfile_parts; ++part_idx) {
|
||||||
auto corrected_idx = frame_indices[part_idx];
|
auto corrected_idx = frame_indices[part_idx];
|
||||||
auto subfile_id = corrected_idx / m_master.max_frames_per_file();
|
auto subfile_id = corrected_idx / m_master.max_frames_per_file();
|
||||||
|
if (subfile_id >= subfiles.size()) {
|
||||||
|
throw std::runtime_error(LOCATION +
|
||||||
|
" Subfile out of range. Possible missing data.");
|
||||||
|
}
|
||||||
|
|
||||||
// This is where we start writing
|
// This is where we start writing
|
||||||
auto offset = (m_module_pixel_0[part_idx].y * m_cols +
|
auto offset = (m_geometry.module_pixel_0[part_idx].origin_y * m_geometry.pixels_x +
|
||||||
m_module_pixel_0[part_idx].x)*m_master.bitdepth()/8;
|
m_geometry.module_pixel_0[part_idx].origin_x)*m_master.bitdepth()/8;
|
||||||
|
|
||||||
if (m_module_pixel_0[part_idx].x!=0)
|
if (m_geometry.module_pixel_0[part_idx].origin_x!=0)
|
||||||
throw std::runtime_error(LOCATION + "Implementation error. x pos not 0.");
|
throw std::runtime_error(LOCATION + "Implementation error. x pos not 0.");
|
||||||
|
|
||||||
//TODO! Risk for out of range access
|
//TODO! Risk for out of range access
|
||||||
@ -340,9 +293,13 @@ void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, Detect
|
|||||||
// level
|
// level
|
||||||
|
|
||||||
for (size_t part_idx = 0; part_idx != n_subfile_parts; ++part_idx) {
|
for (size_t part_idx = 0; part_idx != n_subfile_parts; ++part_idx) {
|
||||||
auto pos = m_module_pixel_0[part_idx];
|
auto pos = m_geometry.module_pixel_0[part_idx];
|
||||||
auto corrected_idx = frame_indices[part_idx];
|
auto corrected_idx = frame_indices[part_idx];
|
||||||
auto subfile_id = corrected_idx / m_master.max_frames_per_file();
|
auto subfile_id = corrected_idx / m_master.max_frames_per_file();
|
||||||
|
if (subfile_id >= subfiles.size()) {
|
||||||
|
throw std::runtime_error(LOCATION +
|
||||||
|
" Subfile out of range. Possible missing data.");
|
||||||
|
}
|
||||||
|
|
||||||
subfiles[subfile_id][part_idx]->seek(corrected_idx % m_master.max_frames_per_file());
|
subfiles[subfile_id][part_idx]->seek(corrected_idx % m_master.max_frames_per_file());
|
||||||
subfiles[subfile_id][part_idx]->read_into(part_buffer, header);
|
subfiles[subfile_id][part_idx]->read_into(part_buffer, header);
|
||||||
@ -352,9 +309,9 @@ void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, Detect
|
|||||||
for (size_t cur_row = 0; cur_row < static_cast<size_t>(pos.height);
|
for (size_t cur_row = 0; cur_row < static_cast<size_t>(pos.height);
|
||||||
cur_row++) {
|
cur_row++) {
|
||||||
|
|
||||||
auto irow = (pos.y + cur_row);
|
auto irow = (pos.origin_y + cur_row);
|
||||||
auto icol = pos.x;
|
auto icol = pos.origin_x;
|
||||||
auto dest = (irow * this->m_cols + icol);
|
auto dest = (irow * this->m_geometry.pixels_x + icol);
|
||||||
dest = dest * m_master.bitdepth() / 8;
|
dest = dest * m_master.bitdepth() / 8;
|
||||||
memcpy(frame_buffer + dest,
|
memcpy(frame_buffer + dest,
|
||||||
part_buffer + cur_row * pos.width *
|
part_buffer + cur_row * pos.width *
|
||||||
@ -400,4 +357,8 @@ RawFile::~RawFile() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
} // namespace aare
|
} // namespace aare
|
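In get_frame_into() the byte offset for each module part follows directly from its pixel origin: offset = (origin_y * pixels_x + origin_x) * bitdepth / 8. As a worked example with illustrative numbers (not taken from a real master file): a part whose first pixel sits at origin (y=512, x=0) in a 1024-pixel-wide, 16-bit image starts at byte (512 * 1024 + 0) * 16 / 8 = 1 048 576.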
@ -1,10 +1,13 @@
#include "aare/File.hpp"
+#include "aare/RawMasterFile.hpp" //needed for ROI
+#include "aare/RawFile.hpp"

#include <catch2/catch_test_macros.hpp>
#include <filesystem>

#include "test_config.hpp"

using aare::File;

TEST_CASE("Read number of frames from a jungfrau raw file", "[.integration]") {
@ -148,3 +151,5 @@ TEST_CASE("Read file with unordered frames", "[.integration]") {
    File f(fpath);
    REQUIRE_THROWS((f.read_frame()));
}
@ -9,8 +9,10 @@ namespace aare {
RawSubFile::RawSubFile(const std::filesystem::path &fname,
                       DetectorType detector, size_t rows, size_t cols,
                       size_t bitdepth, uint32_t pos_row, uint32_t pos_col)
-   : m_detector_type(detector), m_bitdepth(bitdepth), m_fname(fname), m_rows(rows), m_cols(cols),
-     m_bytes_per_frame((m_bitdepth / 8) * m_rows * m_cols), m_pos_row(pos_row), m_pos_col(pos_col) {
+   : m_detector_type(detector), m_bitdepth(bitdepth), m_fname(fname),
+     m_rows(rows), m_cols(cols),
+     m_bytes_per_frame((m_bitdepth / 8) * m_rows * m_cols), m_pos_row(pos_row),
+     m_pos_col(pos_col) {
    if (m_detector_type == DetectorType::Moench03_old) {
        m_pixel_map = GenerateMoench03PixelMap();
    } else if (m_detector_type == DetectorType::Eiger && m_pos_row % 2 == 0) {
@ -42,7 +44,7 @@ RawSubFile::RawSubFile(const std::filesystem::path &fname,

void RawSubFile::seek(size_t frame_index) {
    if (frame_index >= n_frames) {
-       throw std::runtime_error("Frame number out of range");
+       throw std::runtime_error(LOCATION + fmt::format("Frame index {} out of range in a file with {} frames", frame_index, n_frames));
    }
    m_file.seekg((sizeof(DetectorHeader) + bytes_per_frame()) * frame_index);
}
@ -51,7 +53,6 @@ size_t RawSubFile::tell() {
    return m_file.tellg() / (sizeof(DetectorHeader) + bytes_per_frame());
}

void RawSubFile::read_into(std::byte *image_buf, DetectorHeader *header) {
    if (header) {
        m_file.read(reinterpret_cast<char *>(header), sizeof(DetectorHeader));
@ -63,25 +64,37 @@ void RawSubFile::read_into(std::byte *image_buf, DetectorHeader *header) {
    if (m_pixel_map) {
        // read into a temporary buffer and then copy the data to the buffer
        // in the correct order
-       // currently this only supports 16 bit data!
-       auto part_buffer = new std::byte[bytes_per_frame()];
-       m_file.read(reinterpret_cast<char *>(part_buffer), bytes_per_frame());
-       auto *data = reinterpret_cast<uint16_t *>(image_buf);
-       auto *part_data = reinterpret_cast<uint16_t *>(part_buffer);
-       for (size_t i = 0; i < pixels_per_frame(); i++) {
-           data[i] = part_data[(*m_pixel_map)(i)];
-       }
-       delete[] part_buffer;
+       // TODO! add 4 bit support
+       if(m_bitdepth == 8){
+           read_with_map<uint8_t>(image_buf);
+       }else if (m_bitdepth == 16) {
+           read_with_map<uint16_t>(image_buf);
+       } else if (m_bitdepth == 32) {
+           read_with_map<uint32_t>(image_buf);
+       }else{
+           throw std::runtime_error("Unsupported bitdepth for read with pixel map");
+       }
    } else {
        // read directly into the buffer
        m_file.read(reinterpret_cast<char *>(image_buf), bytes_per_frame());
    }
}

+template <typename T>
+void RawSubFile::read_with_map(std::byte *image_buf) {
+   auto part_buffer = new std::byte[bytes_per_frame()];
+   m_file.read(reinterpret_cast<char *>(part_buffer), bytes_per_frame());
+   auto *data = reinterpret_cast<T *>(image_buf);
+   auto *part_data = reinterpret_cast<T *>(part_buffer);
+   for (size_t i = 0; i < pixels_per_frame(); i++) {
+       data[i] = part_data[(*m_pixel_map)(i)];
+   }
+   delete[] part_buffer;
+}
size_t RawSubFile::rows() const { return m_rows; }
size_t RawSubFile::cols() const { return m_cols; }

void RawSubFile::get_part(std::byte *buffer, size_t frame_index) {
    seek(frame_index);
    read_into(buffer, nullptr);
@ -94,5 +107,4 @@ size_t RawSubFile::frame_number(size_t frame_index) {
    return h.frameNumber;
}

} // namespace aare
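The bitdepth switch above only chooses the element type; the reordering itself happens in the templated read_with_map(). A minimal standalone sketch of the same reorder-by-map idea, using plain vectors instead of the file stream and pixel-map object (names and data invented for illustration):

#include <cstdint>
#include <iostream>
#include <vector>

// out[i] = raw[map[i]], with T standing in for uint8_t/uint16_t/uint32_t,
// mirroring what RawSubFile::read_with_map<T>() does per frame.
template <typename T>
std::vector<T> apply_pixel_map(const std::vector<T> &raw,
                               const std::vector<size_t> &map) {
    std::vector<T> out(map.size());
    for (size_t i = 0; i < map.size(); ++i)
        out[i] = raw[map[i]];
    return out;
}

int main() {
    std::vector<uint16_t> raw = {10, 20, 30, 40}; // storage order on disk
    std::vector<size_t> map = {3, 2, 1, 0};       // detector readout order
    for (auto v : apply_pixel_map(raw, map))
        std::cout << v << ' ';                    // prints: 40 30 20 10
    std::cout << '\n';
}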
61 src/decode.cpp Normal file
@ -0,0 +1,61 @@
#include "aare/decode.hpp"

namespace aare {

uint16_t adc_sar_05_decode64to16(uint64_t input){

    //we want bits 29,19,28,18,31,21,27,20,24,23,25,22 and then pad to 16
    uint16_t output = 0;
    output |= ((input >> 22) & 1) << 11;
    output |= ((input >> 25) & 1) << 10;
    output |= ((input >> 23) & 1) << 9;
    output |= ((input >> 24) & 1) << 8;
    output |= ((input >> 20) & 1) << 7;
    output |= ((input >> 27) & 1) << 6;
    output |= ((input >> 21) & 1) << 5;
    output |= ((input >> 31) & 1) << 4;
    output |= ((input >> 18) & 1) << 3;
    output |= ((input >> 28) & 1) << 2;
    output |= ((input >> 19) & 1) << 1;
    output |= ((input >> 29) & 1) << 0;
    return output;
}

void adc_sar_05_decode64to16(NDView<uint64_t, 2> input, NDView<uint16_t,2> output){
    for(int64_t i = 0; i < input.shape(0); i++){
        for(int64_t j = 0; j < input.shape(1); j++){
            output(i,j) = adc_sar_05_decode64to16(input(i,j));
        }
    }
}

uint16_t adc_sar_04_decode64to16(uint64_t input){

    // bit_map = array([15,17,19,21,23,4,6,8,10,12,14,16] LSB->MSB
    uint16_t output = 0;
    output |= ((input >> 16) & 1) << 11;
    output |= ((input >> 14) & 1) << 10;
    output |= ((input >> 12) & 1) << 9;
    output |= ((input >> 10) & 1) << 8;
    output |= ((input >> 8) & 1) << 7;
    output |= ((input >> 6) & 1) << 6;
    output |= ((input >> 4) & 1) << 5;
    output |= ((input >> 23) & 1) << 4;
    output |= ((input >> 21) & 1) << 3;
    output |= ((input >> 19) & 1) << 2;
    output |= ((input >> 17) & 1) << 1;
    output |= ((input >> 15) & 1) << 0;
    return output;
}

void adc_sar_04_decode64to16(NDView<uint64_t, 2> input, NDView<uint16_t,2> output){
    for(int64_t i = 0; i < input.shape(0); i++){
        for(int64_t j = 0; j < input.shape(1); j++){
            output(i,j) = adc_sar_04_decode64to16(input(i,j));
        }
    }
}

} // namespace aare
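Each decoder collects 12 scattered bits of a 64-bit ADC word into bits 11..0 of the result. A small self-contained check of two single-bit inputs, derived only from the shift table above (the test values are chosen for illustration):

#include <cassert>
#include <cstdint>

#include "aare/decode.hpp" // assumed to declare the scalar adc_sar_05_decode64to16()

int main() {
    // Input bit 29 lands in output bit 0, input bit 22 in output bit 11.
    assert(aare::adc_sar_05_decode64to16(uint64_t{1} << 29) == 1);
    assert(aare::adc_sar_05_decode64to16(uint64_t{1} << 22) == (1u << 11));
    return 0;
}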
71 src/geo_helpers.cpp Normal file
@ -0,0 +1,71 @@
#include "aare/geo_helpers.hpp"
#include "fmt/core.h"

namespace aare{

DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI roi) {
#ifdef AARE_VERBOSE
    fmt::println("update_geometry_with_roi() called with ROI: {} {} {} {}",
                 roi.xmin, roi.xmax, roi.ymin, roi.ymax);
    fmt::println("Geometry: {} {} {} {} {} {}",
                 geo.modules_x, geo.modules_y, geo.pixels_x, geo.pixels_y, geo.module_gap_row, geo.module_gap_col);
#endif
    int pos_y = 0;
    int pos_y_increment = 0;
    for (int row = 0; row < geo.modules_y; row++) {
        int pos_x = 0;
        for (int col = 0; col < geo.modules_x; col++) {
            auto &m = geo.module_pixel_0[row * geo.modules_x + col];
            auto original_height = m.height;
            auto original_width = m.width;

            // module is to the left of the roi
            if (m.origin_x + m.width < roi.xmin) {
                m.width = 0;

            // roi is in module
            } else {
                // here we only arrive when the roi is in or to the left of
                // the module
                if (roi.xmin > m.origin_x) {
                    m.width -= roi.xmin - m.origin_x;
                }
                if (roi.xmax < m.origin_x + original_width) {
                    m.width -= m.origin_x + original_width - roi.xmax;
                }
                m.origin_x = pos_x;
                pos_x += m.width;
            }

            if (m.origin_y + m.height < roi.ymin) {
                m.height = 0;
            } else {
                if ((roi.ymin > m.origin_y) && (roi.ymin < m.origin_y + m.height)) {
                    m.height -= roi.ymin - m.origin_y;
                }
                if (roi.ymax < m.origin_y + original_height) {
                    m.height -= m.origin_y + original_height - roi.ymax;
                }
                m.origin_y = pos_y;
                pos_y_increment = m.height;
            }
#ifdef AARE_VERBOSE
            fmt::println("Module {} {} {} {}", m.origin_x, m.origin_y, m.width, m.height);
#endif
        }
        // increment pos_y
        pos_y += pos_y_increment;
    }

    // m_rows = roi.height();
    // m_cols = roi.width();
    geo.pixels_x = roi.width();
    geo.pixels_y = roi.height();

    return geo;
}

} // namespace aare
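To make the clipping arithmetic concrete: with two 1024-pixel-wide modules side by side and an ROI spanning xmin=800 to xmax=1300, the first module keeps 1024 - 800 = 224 columns, the second keeps 1300 - 1024 = 276, and the assembled image width becomes roi.width() = 500. The test cases that follow check exactly these numbers.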
230 src/geo_helpers.test.cpp Normal file
@ -0,0 +1,230 @@
#include "aare/File.hpp"
#include "aare/RawMasterFile.hpp" //needed for ROI
#include "aare/RawFile.hpp"

#include <catch2/catch_test_macros.hpp>
#include <filesystem>

#include "aare/geo_helpers.hpp"
#include "test_config.hpp"

TEST_CASE("Simple ROIs on one module"){
    // DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI roi)
    aare::DetectorGeometry geo;

    aare::ModuleGeometry mod;
    mod.origin_x = 0;
    mod.origin_y = 0;
    mod.width = 1024;
    mod.height = 512;

    geo.pixels_x = 1024;
    geo.pixels_y = 512;
    geo.modules_x = 1;
    geo.modules_y = 1;
    geo.module_pixel_0.push_back(mod);

    SECTION("ROI is the whole module"){
        aare::ROI roi;
        roi.xmin = 0;
        roi.xmax = 1024;
        roi.ymin = 0;
        roi.ymax = 512;
        auto updated_geo = aare::update_geometry_with_roi(geo, roi);

        REQUIRE(updated_geo.pixels_x == 1024);
        REQUIRE(updated_geo.pixels_y == 512);
        REQUIRE(updated_geo.modules_x == 1);
        REQUIRE(updated_geo.modules_y == 1);
        REQUIRE(updated_geo.module_pixel_0[0].height == 512);
        REQUIRE(updated_geo.module_pixel_0[0].width == 1024);
    }
    SECTION("ROI is the top left corner of the module"){
        aare::ROI roi;
        roi.xmin = 100;
        roi.xmax = 200;
        roi.ymin = 150;
        roi.ymax = 200;
        auto updated_geo = aare::update_geometry_with_roi(geo, roi);

        REQUIRE(updated_geo.pixels_x == 100);
        REQUIRE(updated_geo.pixels_y == 50);
        REQUIRE(updated_geo.modules_x == 1);
        REQUIRE(updated_geo.modules_y == 1);
        REQUIRE(updated_geo.module_pixel_0[0].height == 50);
        REQUIRE(updated_geo.module_pixel_0[0].width == 100);
    }

    SECTION("ROI is a small square"){
        aare::ROI roi;
        roi.xmin = 1000;
        roi.xmax = 1010;
        roi.ymin = 500;
        roi.ymax = 510;
        auto updated_geo = aare::update_geometry_with_roi(geo, roi);

        REQUIRE(updated_geo.pixels_x == 10);
        REQUIRE(updated_geo.pixels_y == 10);
        REQUIRE(updated_geo.modules_x == 1);
        REQUIRE(updated_geo.modules_y == 1);
        REQUIRE(updated_geo.module_pixel_0[0].height == 10);
        REQUIRE(updated_geo.module_pixel_0[0].width == 10);
    }
    SECTION("ROI is a few columns"){
        aare::ROI roi;
        roi.xmin = 750;
        roi.xmax = 800;
        roi.ymin = 0;
        roi.ymax = 512;
        auto updated_geo = aare::update_geometry_with_roi(geo, roi);

        REQUIRE(updated_geo.pixels_x == 50);
        REQUIRE(updated_geo.pixels_y == 512);
        REQUIRE(updated_geo.modules_x == 1);
        REQUIRE(updated_geo.modules_y == 1);
        REQUIRE(updated_geo.module_pixel_0[0].height == 512);
        REQUIRE(updated_geo.module_pixel_0[0].width == 50);
    }
}

TEST_CASE("Two modules side by side"){
    // DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI roi)
    aare::DetectorGeometry geo;

    aare::ModuleGeometry mod;
    mod.origin_x = 0;
    mod.origin_y = 0;
    mod.width = 1024;
    mod.height = 512;

    geo.pixels_x = 2048;
    geo.pixels_y = 512;
    geo.modules_x = 2;
    geo.modules_y = 1;

    geo.module_pixel_0.push_back(mod);
    mod.origin_x = 1024;
    geo.module_pixel_0.push_back(mod);

    SECTION("ROI is the whole image"){
        aare::ROI roi;
        roi.xmin = 0;
        roi.xmax = 2048;
        roi.ymin = 0;
        roi.ymax = 512;
        auto updated_geo = aare::update_geometry_with_roi(geo, roi);

        REQUIRE(updated_geo.pixels_x == 2048);
        REQUIRE(updated_geo.pixels_y == 512);
        REQUIRE(updated_geo.modules_x == 2);
        REQUIRE(updated_geo.modules_y == 1);
    }
    SECTION("rectangle on both modules"){
        aare::ROI roi;
        roi.xmin = 800;
        roi.xmax = 1300;
        roi.ymin = 200;
        roi.ymax = 499;
        auto updated_geo = aare::update_geometry_with_roi(geo, roi);

        REQUIRE(updated_geo.pixels_x == 500);
        REQUIRE(updated_geo.pixels_y == 299);
        REQUIRE(updated_geo.modules_x == 2);
        REQUIRE(updated_geo.modules_y == 1);
        REQUIRE(updated_geo.module_pixel_0[0].height == 299);
        REQUIRE(updated_geo.module_pixel_0[0].width == 224);
        REQUIRE(updated_geo.module_pixel_0[1].height == 299);
        REQUIRE(updated_geo.module_pixel_0[1].width == 276);
    }
}

TEST_CASE("Three modules side by side"){
    // DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI roi)
    aare::DetectorGeometry geo;
    aare::ROI roi;
    roi.xmin = 700;
    roi.xmax = 2500;
    roi.ymin = 0;
    roi.ymax = 123;

    aare::ModuleGeometry mod;
    mod.origin_x = 0;
    mod.origin_y = 0;
    mod.width = 1024;
    mod.height = 512;

    geo.pixels_x = 3072;
    geo.pixels_y = 512;
    geo.modules_x = 3;
    geo.modules_y = 1;

    geo.module_pixel_0.push_back(mod);
    mod.origin_x = 1024;
    geo.module_pixel_0.push_back(mod);
    mod.origin_x = 2048;
    geo.module_pixel_0.push_back(mod);

    auto updated_geo = aare::update_geometry_with_roi(geo, roi);

    REQUIRE(updated_geo.pixels_x == 1800);
    REQUIRE(updated_geo.pixels_y == 123);
    REQUIRE(updated_geo.modules_x == 3);
    REQUIRE(updated_geo.modules_y == 1);
    REQUIRE(updated_geo.module_pixel_0[0].height == 123);
    REQUIRE(updated_geo.module_pixel_0[0].width == 324);
    REQUIRE(updated_geo.module_pixel_0[1].height == 123);
    REQUIRE(updated_geo.module_pixel_0[1].width == 1024);
    REQUIRE(updated_geo.module_pixel_0[2].height == 123);
    REQUIRE(updated_geo.module_pixel_0[2].width == 452);
}

TEST_CASE("Four modules as a square"){
    // DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI roi)
    aare::DetectorGeometry geo;
    aare::ROI roi;
    roi.xmin = 500;
    roi.xmax = 2000;
    roi.ymin = 500;
    roi.ymax = 600;

    aare::ModuleGeometry mod;
    mod.origin_x = 0;
    mod.origin_y = 0;
    mod.width = 1024;
    mod.height = 512;

    geo.pixels_x = 2048;
    geo.pixels_y = 1024;
    geo.modules_x = 2;
    geo.modules_y = 2;

    geo.module_pixel_0.push_back(mod);
    mod.origin_x = 1024;
    geo.module_pixel_0.push_back(mod);
    mod.origin_x = 0;
    mod.origin_y = 512;
    geo.module_pixel_0.push_back(mod);
    mod.origin_x = 1024;
    geo.module_pixel_0.push_back(mod);

    auto updated_geo = aare::update_geometry_with_roi(geo, roi);

    REQUIRE(updated_geo.pixels_x == 1500);
    REQUIRE(updated_geo.pixels_y == 100);
    REQUIRE(updated_geo.modules_x == 2);
    REQUIRE(updated_geo.modules_y == 2);
    REQUIRE(updated_geo.module_pixel_0[0].height == 12);
    REQUIRE(updated_geo.module_pixel_0[0].width == 524);
    REQUIRE(updated_geo.module_pixel_0[1].height == 12);
    REQUIRE(updated_geo.module_pixel_0[1].width == 976);
    REQUIRE(updated_geo.module_pixel_0[2].height == 88);
    REQUIRE(updated_geo.module_pixel_0[2].width == 524);
    REQUIRE(updated_geo.module_pixel_0[3].height == 88);
    REQUIRE(updated_geo.module_pixel_0[3].width == 976);
}
30 src/utils/task.cpp Normal file
@ -0,0 +1,30 @@
#include "aare/utils/task.hpp"

namespace aare {

std::vector<std::pair<int, int>> split_task(int first, int last,
                                            int n_threads) {
    std::vector<std::pair<int, int>> vec;
    vec.reserve(n_threads);

    int n_frames = last - first;

    if (n_threads >= n_frames) {
        for (int i = 0; i != n_frames; ++i) {
            vec.push_back({i, i + 1});
        }
        return vec;
    }

    int step = (n_frames) / n_threads;
    for (int i = 0; i != n_threads; ++i) {
        int start = step * i;
        int stop = step * (i + 1);
        if (i == n_threads - 1)
            stop = last;
        vec.push_back({start, stop});
    }
    return vec;
}

} // namespace aare
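split_task() divides a frame range into at most n_threads contiguous [start, stop) chunks, with the last chunk absorbing the remainder. A hedged usage sketch of how such chunks could drive worker threads (the threading wrapper is illustrative and not part of this commit):

#include <cstdio>
#include <thread>
#include <vector>

#include "aare/utils/task.hpp" // declares aare::split_task(first, last, n_threads)

// Process frames [first, last) with one worker thread per chunk.
void process_frames_in_parallel(int first, int last, int n_threads) {
    auto chunks = aare::split_task(first, last, n_threads);
    std::vector<std::thread> workers;
    for (const auto &chunk : chunks) {
        workers.emplace_back([chunk] {
            for (int frame = chunk.first; frame != chunk.second; ++frame) {
                // placeholder for the per-frame work (e.g. fitting or clustering)
                std::printf("processing frame %d\n", frame);
            }
        });
    }
    for (auto &w : workers)
        w.join();
}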
32 src/utils/task.test.cpp Normal file
@ -0,0 +1,32 @@
#include "aare/utils/task.hpp"

#include <catch2/matchers/catch_matchers_floating_point.hpp>
#include <catch2/catch_test_macros.hpp>

TEST_CASE("Split a range into multiple tasks"){

    auto tasks = aare::split_task(0, 10, 3);
    REQUIRE(tasks.size() == 3);
    REQUIRE(tasks[0].first == 0);
    REQUIRE(tasks[0].second == 3);
    REQUIRE(tasks[1].first == 3);
    REQUIRE(tasks[1].second == 6);
    REQUIRE(tasks[2].first == 6);
    REQUIRE(tasks[2].second == 10);

    tasks = aare::split_task(0, 10, 1);
    REQUIRE(tasks.size() == 1);
    REQUIRE(tasks[0].first == 0);
    REQUIRE(tasks[0].second == 10);

    tasks = aare::split_task(0, 10, 10);
    REQUIRE(tasks.size() == 10);
    for (int i = 0; i < 10; i++){
        REQUIRE(tasks[i].first == i);
        REQUIRE(tasks[i].second == i+1);
    }
}