3 Commits

Author SHA1 Message Date
94ac58b09e For 2025.5.22 release (#181)
Co-authored-by: Patrick <patrick.sieberer@psi.ch>
Co-authored-by: JulianHeymes <julian.heymes@psi.ch>
Co-authored-by: Dhanya Thattil <dhanya.thattil@psi.ch>
Co-authored-by: Xiangyu Xie <45243914+xiangyuxie@users.noreply.github.com>
Co-authored-by: xiangyu.xie <xiangyu.xie@psi.ch>
Co-authored-by: AliceMazzoleni99 <alice.mazzoleni@psi.ch>
Co-authored-by: Mazzoleni Alice Francesca <mazzol_a@pc17378.psi.ch>
Co-authored-by: siebsi <sieb.patr@gmail.com>
2025-05-22 11:40:39 +02:00
fd0196f2fd Developer (#164)
- State before merging the new cluster vector API

---------

Co-authored-by: Patrick <patrick.sieberer@psi.ch>
Co-authored-by: JulianHeymes <julian.heymes@psi.ch>
Co-authored-by: Dhanya Thattil <dhanya.thattil@psi.ch>
Co-authored-by: Xiangyu Xie <45243914+xiangyuxie@users.noreply.github.com>
Co-authored-by: xiangyu.xie <xiangyu.xie@psi.ch>
Co-authored-by: siebsi <sieb.patr@gmail.com>
2025-04-22 16:41:48 +02:00
e1533282f1 Cluster cuts (#146)
Co-authored-by: Patrick <patrick.sieberer@psi.ch>
Co-authored-by: JulianHeymes <julian.heymes@psi.ch>
Co-authored-by: Dhanya Thattil <dhanya.thattil@psi.ch>
Co-authored-by: Xiangyu Xie <45243914+xiangyuxie@users.noreply.github.com>
Co-authored-by: xiangyu.xie <xiangyu.xie@psi.ch>
2025-04-01 15:15:54 +02:00
76 changed files with 2780 additions and 980 deletions


@ -1,18 +1,24 @@
name: Build on RHEL8
on:
push:
workflow_dispatch:
permissions:
contents: read
jobs:
buildh:
build:
runs-on: "ubuntu-latest"
container:
image: gitea.psi.ch/images/rhel8-developer-gitea-actions
steps:
- uses: actions/checkout@v4
# workaround until actions/checkout@v4 is available for RH8
# - uses: actions/checkout@v4
- name: Clone repository
run: |
echo Cloning ${{ github.ref_name }}
git clone https://${{secrets.GITHUB_TOKEN}}@gitea.psi.ch/${{ github.repository }}.git --branch=${{ github.ref_name }} .
- name: Install dependencies
@ -22,7 +28,7 @@ jobs:
- name: Build library
run: |
mkdir build && cd build
cmake .. -DAARE_PYTHON_BINDINGS=ON -DAARE_TESTS=ON
cmake .. -DAARE_PYTHON_BINDINGS=ON -DAARE_TESTS=ON -DPython_FIND_VIRTUALENV=FIRST
make -j 2
- name: C++ unit tests


@ -8,7 +8,7 @@ permissions:
contents: read
jobs:
buildh:
build:
runs-on: "ubuntu-latest"
container:
image: gitea.psi.ch/images/rhel9-developer-gitea-actions


@ -1,9 +1,9 @@
name: Build pkgs and deploy if on main
on:
push:
branches:
- main
release:
types:
- published
jobs:
build:
@ -24,13 +24,13 @@ jobs:
- uses: actions/checkout@v4
- name: Get conda
uses: conda-incubator/setup-miniconda@v3.0.4
uses: conda-incubator/setup-miniconda@v3
with:
python-version: ${{ matrix.python-version }}
environment-file: etc/dev-env.yml
miniforge-version: latest
channels: conda-forge
- name: Prepare
run: conda install conda-build=24.9 conda-verify pytest anaconda-client
conda-remove-defaults: "true"
- name: Enable upload
run: conda config --set anaconda_upload yes


@ -24,14 +24,15 @@ jobs:
- uses: actions/checkout@v4
- name: Get conda
uses: conda-incubator/setup-miniconda@v3.0.4
uses: conda-incubator/setup-miniconda@v3
with:
python-version: ${{ matrix.python-version }}
environment-file: etc/dev-env.yml
miniforge-version: latest
channels: conda-forge
conda-remove-defaults: "true"
- name: Prepare
run: conda install conda-build=24.9 conda-verify pytest anaconda-client
- name: Disable upload
run: conda config --set anaconda_upload no

.github/workflows/build_wheel.yml (new file)

@ -0,0 +1,64 @@
name: Build wheel
on:
workflow_dispatch:
pull_request:
push:
branches:
- main
release:
types:
- published
jobs:
build_wheels:
name: Build wheels on ${{ matrix.os }}
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-latest,]
steps:
- uses: actions/checkout@v4
- name: Build wheels
run: pipx run cibuildwheel==2.23.0
- uses: actions/upload-artifact@v4
with:
name: cibw-wheels-${{ matrix.os }}-${{ strategy.job-index }}
path: ./wheelhouse/*.whl
build_sdist:
name: Build source distribution
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Build sdist
run: pipx run build --sdist
- uses: actions/upload-artifact@v4
with:
name: cibw-sdist
path: dist/*.tar.gz
upload_pypi:
needs: [build_wheels, build_sdist]
runs-on: ubuntu-latest
environment: pypi
permissions:
id-token: write
if: github.event_name == 'release' && github.event.action == 'published'
# or, alternatively, upload to PyPI on every tag starting with 'v' (remove on: release above to use this)
# if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v')
steps:
- uses: actions/download-artifact@v4
with:
# unpacks all CIBW artifacts into dist/
pattern: cibw-*
path: dist
merge-multiple: true
- uses: pypa/gh-action-pypi-publish@release/v1

.gitignore

@ -17,7 +17,8 @@ Testing/
ctbDict.cpp
ctbDict.h
wheelhouse/
dist/
*.pyc
*/__pycache__/*


@ -1,16 +1,29 @@
cmake_minimum_required(VERSION 3.14)
cmake_minimum_required(VERSION 3.15)
project(aare
VERSION 1.0.0
DESCRIPTION "Data processing library for PSI detectors"
HOMEPAGE_URL "https://github.com/slsdetectorgroup/aare"
LANGUAGES C CXX
)
# Read VERSION file into project version
set(VERSION_FILE "${CMAKE_CURRENT_SOURCE_DIR}/VERSION")
file(READ "${VERSION_FILE}" VERSION_CONTENT)
string(STRIP "${VERSION_CONTENT}" PROJECT_VERSION_STRING)
set(PROJECT_VERSION ${PROJECT_VERSION_STRING})
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF)
execute_process(
COMMAND git log -1 --format=%h
WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
OUTPUT_VARIABLE GIT_HASH
OUTPUT_STRIP_TRAILING_WHITESPACE
)
message(STATUS "Building from git hash: ${GIT_HASH}")
if (${CMAKE_VERSION} VERSION_GREATER "3.24")
cmake_policy(SET CMP0135 NEW) #Fetch content download timestamp
endif()
@ -66,6 +79,9 @@ endif()
if(AARE_VERBOSE)
add_compile_definitions(AARE_VERBOSE)
add_compile_definitions(AARE_LOG_LEVEL=aare::logDEBUG5)
else()
add_compile_definitions(AARE_LOG_LEVEL=aare::logERROR)
endif()
if(AARE_CUSTOM_ASSERT)
@ -77,6 +93,7 @@ if(AARE_BENCHMARKS)
endif()
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
if(AARE_FETCH_LMFIT)
@ -382,6 +399,7 @@ set(SourceFiles
${CMAKE_CURRENT_SOURCE_DIR}/src/RawSubFile.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/RawMasterFile.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/utils/task.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/utils/ifstream_helpers.cpp
)
add_library(aare_core STATIC ${SourceFiles})
@ -390,6 +408,9 @@ target_include_directories(aare_core PUBLIC
"$<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>"
)
set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads REQUIRED)
target_link_libraries(
aare_core
PUBLIC
@ -398,6 +419,7 @@ target_link_libraries(
${STD_FS_LIB} # from helpers.cmake
PRIVATE
aare_compiler_flags
Threads::Threads
$<BUILD_INTERFACE:lmfit>
)
@ -415,6 +437,7 @@ if(AARE_TESTS)
set(TestSources
${CMAKE_CURRENT_SOURCE_DIR}/src/algorithm.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/defs.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/decode.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/Dtype.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/Frame.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/geo_helpers.test.cpp
@ -426,11 +449,13 @@ if(AARE_TESTS)
${CMAKE_CURRENT_SOURCE_DIR}/src/Cluster.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/CalculateEta.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFile.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFinderMT.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/Pedestal.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/JungfrauDataFile.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyHelpers.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/RawFile.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/RawSubFile.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/utils/task.test.cpp
)

VERSION (new file)

@ -0,0 +1 @@
2025.5.22


@ -1,28 +1,5 @@
python:
- 3.11
- 3.11
- 3.11
- 3.12
- 3.12
- 3.12
- 3.13
numpy:
- 1.26
- 2.0
- 2.1
- 1.26
- 2.0
- 2.1
- 2.1
zip_keys:
- python
- numpy
pin_run_as_build:
numpy: x.x
python: x.x


@ -1,10 +1,10 @@
source:
path: ../
{% set version = load_file_regex(load_file = 'VERSION', regex_pattern = '(\d+(?:\.\d+)*(?:[\+\w\.]+))').group(1) %}
package:
name: aare
version: 2025.4.1 #TODO! how to not duplicate this?
version: {{version}}
source:
path: ..
@ -12,44 +12,39 @@ source:
build:
number: 0
script:
- unset CMAKE_GENERATOR && {{ PYTHON }} -m pip install . -vv # [not win]
- {{ PYTHON }} -m pip install . -vv # [win]
- unset CMAKE_GENERATOR && {{ PYTHON }} -m pip install . -vv
requirements:
build:
- python {{python}}
- numpy {{ numpy }}
- {{ compiler('cxx') }}
host:
- cmake
- ninja
- python {{python}}
- numpy {{ numpy }}
host:
- python
- pip
- numpy=2.1
- scikit-build-core
- pybind11 >=2.13.0
- fmt
- zeromq
- nlohmann_json
- catch2
- matplotlib # needed in host to solve the environment for run
run:
- python {{python}}
- numpy {{ numpy }}
- python
- {{ pin_compatible('numpy') }}
- matplotlib
test:
imports:
- aare
# requires:
# - pytest
# source_files:
# - tests
# commands:
# - pytest tests
requires:
- pytest
- boost-histogram
source_files:
- python/tests
commands:
- python -m pytest python/tests
about:
summary: An example project built with pybind11 and scikit-build.
# license_file: LICENSE
summary: Data analysis library for hybrid pixel detectors from PSI


@ -3,13 +3,11 @@ channels:
- conda-forge
dependencies:
- anaconda-client
- conda-build
- doxygen
- sphinx=7.1.2
- breathe
- pybind11
- sphinx_rtd_theme
- furo
- nlohmann_json
- zeromq
- fmt
- numpy


@ -1,22 +1,24 @@
#pragma once
#include <cstdint> //int64_t
#include <cstddef> //size_t
#include <cstdint>
#include <cstddef>
#include <array>
#include <cassert>
#include "aare/defs.hpp"
namespace aare {
template <typename E, int64_t Ndim> class ArrayExpr {
template <typename E, ssize_t Ndim> class ArrayExpr {
public:
static constexpr bool is_leaf = false;
auto operator[](size_t i) const { return static_cast<E const &>(*this)[i]; }
auto operator()(size_t i) const { return static_cast<E const &>(*this)[i]; }
auto size() const { return static_cast<E const &>(*this).size(); }
std::array<int64_t, Ndim> shape() const { return static_cast<E const &>(*this).shape(); }
std::array<ssize_t, Ndim> shape() const { return static_cast<E const &>(*this).shape(); }
};
template <typename A, typename B, int64_t Ndim>
template <typename A, typename B, ssize_t Ndim>
class ArrayAdd : public ArrayExpr<ArrayAdd<A, B, Ndim>, Ndim> {
const A &arr1_;
const B &arr2_;
@ -27,10 +29,10 @@ class ArrayAdd : public ArrayExpr<ArrayAdd<A, B, Ndim>, Ndim> {
}
auto operator[](int i) const { return arr1_[i] + arr2_[i]; }
size_t size() const { return arr1_.size(); }
std::array<int64_t, Ndim> shape() const { return arr1_.shape(); }
std::array<ssize_t, Ndim> shape() const { return arr1_.shape(); }
};
template <typename A, typename B, int64_t Ndim>
template <typename A, typename B, ssize_t Ndim>
class ArraySub : public ArrayExpr<ArraySub<A, B, Ndim>, Ndim> {
const A &arr1_;
const B &arr2_;
@ -41,10 +43,10 @@ class ArraySub : public ArrayExpr<ArraySub<A, B, Ndim>, Ndim> {
}
auto operator[](int i) const { return arr1_[i] - arr2_[i]; }
size_t size() const { return arr1_.size(); }
std::array<int64_t, Ndim> shape() const { return arr1_.shape(); }
std::array<ssize_t, Ndim> shape() const { return arr1_.shape(); }
};
template <typename A, typename B, int64_t Ndim>
template <typename A, typename B, ssize_t Ndim>
class ArrayMul : public ArrayExpr<ArrayMul<A, B, Ndim>,Ndim> {
const A &arr1_;
const B &arr2_;
@ -55,10 +57,10 @@ class ArrayMul : public ArrayExpr<ArrayMul<A, B, Ndim>,Ndim> {
}
auto operator[](int i) const { return arr1_[i] * arr2_[i]; }
size_t size() const { return arr1_.size(); }
std::array<int64_t, Ndim> shape() const { return arr1_.shape(); }
std::array<ssize_t, Ndim> shape() const { return arr1_.shape(); }
};
template <typename A, typename B, int64_t Ndim>
template <typename A, typename B, ssize_t Ndim>
class ArrayDiv : public ArrayExpr<ArrayDiv<A, B, Ndim>, Ndim> {
const A &arr1_;
const B &arr2_;
@ -69,27 +71,27 @@ class ArrayDiv : public ArrayExpr<ArrayDiv<A, B, Ndim>, Ndim> {
}
auto operator[](int i) const { return arr1_[i] / arr2_[i]; }
size_t size() const { return arr1_.size(); }
std::array<int64_t, Ndim> shape() const { return arr1_.shape(); }
std::array<ssize_t, Ndim> shape() const { return arr1_.shape(); }
};
template <typename A, typename B, int64_t Ndim>
template <typename A, typename B, ssize_t Ndim>
auto operator+(const ArrayExpr<A, Ndim> &arr1, const ArrayExpr<B, Ndim> &arr2) {
return ArrayAdd<ArrayExpr<A, Ndim>, ArrayExpr<B, Ndim>, Ndim>(arr1, arr2);
}
template <typename A, typename B, int64_t Ndim>
template <typename A, typename B, ssize_t Ndim>
auto operator-(const ArrayExpr<A,Ndim> &arr1, const ArrayExpr<B, Ndim> &arr2) {
return ArraySub<ArrayExpr<A, Ndim>, ArrayExpr<B, Ndim>, Ndim>(arr1, arr2);
}
template <typename A, typename B, int64_t Ndim>
template <typename A, typename B, ssize_t Ndim>
auto operator*(const ArrayExpr<A, Ndim> &arr1, const ArrayExpr<B, Ndim> &arr2) {
return ArrayMul<ArrayExpr<A, Ndim>, ArrayExpr<B, Ndim>, Ndim>(arr1, arr2);
}
template <typename A, typename B, int64_t Ndim>
template <typename A, typename B, ssize_t Ndim>
auto operator/(const ArrayExpr<A, Ndim> &arr1, const ArrayExpr<B, Ndim> &arr2) {
return ArrayDiv<ArrayExpr<A, Ndim>, ArrayExpr<B, Ndim>, Ndim>(arr1, arr2);
}
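
For reference (not part of this changeset): ArrayExpr and its Add/Sub/Mul/Div nodes are CRTP expression templates, so operator+ returns a lightweight object that evaluates element by element on access instead of allocating a temporary array. A minimal standalone sketch of the same pattern, using hypothetical names Expr/Vec/VecAdd and no aare dependency:

#include <array>
#include <cstddef>
#include <iostream>

template <typename E> struct Expr { // CRTP base, mirrors ArrayExpr in 1D
    auto operator[](size_t i) const { return static_cast<E const &>(*this)[i]; }
    size_t size() const { return static_cast<E const &>(*this).size(); }
};

struct Vec : Expr<Vec> { // concrete leaf that owns the data
    std::array<double, 3> v{};
    double operator[](size_t i) const { return v[i]; }
    size_t size() const { return v.size(); }
};

template <typename A, typename B> struct VecAdd : Expr<VecAdd<A, B>> {
    const A &a;
    const B &b;
    VecAdd(const A &a_, const B &b_) : a(a_), b(b_) {}
    double operator[](size_t i) const { return a[i] + b[i]; } // evaluated lazily
    size_t size() const { return a.size(); }
};

template <typename A, typename B>
auto operator+(const Expr<A> &a, const Expr<B> &b) {
    return VecAdd<Expr<A>, Expr<B>>(a, b);
}

int main() {
    Vec x, y;
    x.v = {1, 2, 3};
    y.v = {4, 5, 6};
    auto sum = x + y;            // builds an expression object, no temporary array
    std::cout << sum[1] << '\n'; // prints 7
}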


@ -6,14 +6,14 @@
namespace aare {
typedef enum {
enum class corner : int {
cBottomLeft = 0,
cBottomRight = 1,
cTopLeft = 2,
cTopRight = 3
} corner;
};
typedef enum {
enum class pixel : int {
pBottomLeft = 0,
pBottom = 1,
pBottomRight = 2,
@ -23,7 +23,7 @@ typedef enum {
pTopLeft = 6,
pTop = 7,
pTopRight = 8
} pixel;
};
template <typename T> struct Eta2 {
double x;
@ -41,7 +41,7 @@ NDArray<double, 2> calculate_eta2(const ClusterVector<ClusterType> &clusters) {
NDArray<double, 2> eta2({static_cast<int64_t>(clusters.size()), 2});
for (size_t i = 0; i < clusters.size(); i++) {
auto e = calculate_eta2(clusters.at(i));
auto e = calculate_eta2(clusters[i]);
eta2(i, 0) = e.x;
eta2(i, 1) = e.y;
}
@ -64,31 +64,79 @@ calculate_eta2(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cl) {
eta.sum = max_sum.first;
auto c = max_sum.second;
size_t cluster_center_index =
(ClusterSizeX / 2) + (ClusterSizeY / 2) * ClusterSizeX;
size_t index_bottom_left_max_2x2_subcluster =
(int(c / (ClusterSizeX - 1))) * ClusterSizeX + c % (ClusterSizeX - 1);
if ((cl.data[index_bottom_left_max_2x2_subcluster] +
cl.data[index_bottom_left_max_2x2_subcluster + 1]) != 0)
eta.x = static_cast<double>(
cl.data[index_bottom_left_max_2x2_subcluster + 1]) /
static_cast<double>(
(cl.data[index_bottom_left_max_2x2_subcluster] +
cl.data[index_bottom_left_max_2x2_subcluster + 1]));
// check that cluster center is in max subcluster
if (cluster_center_index != index_bottom_left_max_2x2_subcluster &&
cluster_center_index != index_bottom_left_max_2x2_subcluster + 1 &&
cluster_center_index !=
index_bottom_left_max_2x2_subcluster + ClusterSizeX &&
cluster_center_index !=
index_bottom_left_max_2x2_subcluster + ClusterSizeX + 1)
throw std::runtime_error("Photon center is not in max 2x2_subcluster");
if ((cl.data[index_bottom_left_max_2x2_subcluster] +
cl.data[index_bottom_left_max_2x2_subcluster + ClusterSizeX]) != 0)
eta.y =
static_cast<double>(
cl.data[index_bottom_left_max_2x2_subcluster + ClusterSizeX]) /
static_cast<double>(
(cl.data[index_bottom_left_max_2x2_subcluster] +
cl.data[index_bottom_left_max_2x2_subcluster + ClusterSizeX]));
if ((cluster_center_index - index_bottom_left_max_2x2_subcluster) %
ClusterSizeX ==
0) {
if ((cl.data[cluster_center_index + 1] +
cl.data[cluster_center_index]) != 0)
eta.x = static_cast<double>(cl.data[cluster_center_index + 1]) /
static_cast<double>((cl.data[cluster_center_index + 1] +
cl.data[cluster_center_index]));
} else {
if ((cl.data[cluster_center_index] +
cl.data[cluster_center_index - 1]) != 0)
eta.x = static_cast<double>(cl.data[cluster_center_index]) /
static_cast<double>((cl.data[cluster_center_index - 1] +
cl.data[cluster_center_index]));
}
if ((cluster_center_index - index_bottom_left_max_2x2_subcluster) /
ClusterSizeX <
1) {
assert(cluster_center_index + ClusterSizeX <
ClusterSizeX * ClusterSizeY); // suppress warning
if ((cl.data[cluster_center_index] +
cl.data[cluster_center_index + ClusterSizeX]) != 0)
eta.y = static_cast<double>(
cl.data[cluster_center_index + ClusterSizeX]) /
static_cast<double>(
(cl.data[cluster_center_index] +
cl.data[cluster_center_index + ClusterSizeX]));
} else {
if ((cl.data[cluster_center_index] +
cl.data[cluster_center_index - ClusterSizeX]) != 0)
eta.y = static_cast<double>(cl.data[cluster_center_index]) /
static_cast<double>(
(cl.data[cluster_center_index] +
cl.data[cluster_center_index - ClusterSizeX]));
}
eta.c = c; // TODO only supported for 2x2 and 3x3 clusters -> at least no
// underlying enum class
return eta;
}
// TODO! Look up eta2 calculation - photon center should be top right corner
template <typename T>
Eta2<T> calculate_eta2(const Cluster<T, 2, 2, int16_t> &cl) {
Eta2<T> eta{};
if ((cl.data[0] + cl.data[1]) != 0)
eta.x = static_cast<double>(cl.data[1]) / (cl.data[0] + cl.data[1]);
if ((cl.data[0] + cl.data[2]) != 0)
eta.y = static_cast<double>(cl.data[2]) / (cl.data[0] + cl.data[2]);
eta.sum = cl.sum();
eta.c = static_cast<int>(corner::cBottomLeft); // TODO! This is not correct,
// but need to put something
return eta;
}
// calculates Eta3 for 3x3 cluster based on code from analyze_cluster
// TODO only supported for 3x3 Clusters
template <typename T> Eta2<T> calculate_eta3(const Cluster<T, 3, 3> &cl) {

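For reference (not part of this changeset): a minimal sketch of calling the reworked calculate_eta2 on a single 3x3 cluster, using only the API shown above; the include path is an assumption.

#include <cstdint>
#include <iostream>

#include "aare/CalculateEta.hpp" // assumed include path

int main() {
    aare::Cluster<int32_t, 3, 3> cl{};
    cl.x = 5;
    cl.y = 7;
    cl.data = {0, 1, 0,
               2, 10, 3,
               0, 4, 0}; // centre pixel holds the largest charge

    auto eta = aare::calculate_eta2(cl);
    // eta.x and eta.y are charge fractions computed from the two pixels of the
    // max 2x2 sub-cluster along each axis, eta.sum is that 2x2 sum, and eta.c
    // is the index of the sub-cluster that was used.
    std::cout << "eta_x=" << eta.x << " eta_y=" << eta.y << " sum=" << eta.sum
              << " corner=" << eta.c << '\n';
}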

@ -16,80 +16,61 @@
namespace aare {
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
typename CoordType = int16_t>
constexpr bool is_valid_cluster =
std::is_arithmetic_v<T> && std::is_integral_v<CoordType> &&
(ClusterSizeX > 0) && (ClusterSizeY > 0);
// requires clause c++20 maybe update
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
typename CoordType = int16_t,
typename Enable = std::enable_if_t<
is_valid_cluster<T, ClusterSizeX, ClusterSizeY, CoordType>>>
typename CoordType = int16_t>
struct Cluster {
static_assert(std::is_arithmetic_v<T>, "T needs to be an arithmetic type");
static_assert(std::is_integral_v<CoordType>,
"CoordType needs to be an integral type");
static_assert(ClusterSizeX > 0 && ClusterSizeY > 0,
"Cluster sizes must be bigger than zero");
CoordType x;
CoordType y;
T data[ClusterSizeX * ClusterSizeY];
std::array<T, ClusterSizeX * ClusterSizeY> data;
T sum() const {
return std::accumulate(data, data + ClusterSizeX * ClusterSizeY, 0);
}
static constexpr uint8_t cluster_size_x = ClusterSizeX;
static constexpr uint8_t cluster_size_y = ClusterSizeY;
using value_type = T;
using coord_type = CoordType;
T sum() const { return std::accumulate(data.begin(), data.end(), T{}); }
std::pair<T, int> max_sum_2x2() const {
constexpr size_t num_2x2_subclusters =
(ClusterSizeX - 1) * (ClusterSizeY - 1);
if constexpr (cluster_size_x == 3 && cluster_size_y == 3) {
std::array<T, 4> sum_2x2_subclusters;
sum_2x2_subclusters[0] = data[0] + data[1] + data[3] + data[4];
sum_2x2_subclusters[1] = data[1] + data[2] + data[4] + data[5];
sum_2x2_subclusters[2] = data[3] + data[4] + data[6] + data[7];
sum_2x2_subclusters[3] = data[4] + data[5] + data[7] + data[8];
int index = std::max_element(sum_2x2_subclusters.begin(),
sum_2x2_subclusters.end()) -
sum_2x2_subclusters.begin();
return std::make_pair(sum_2x2_subclusters[index], index);
} else if constexpr (cluster_size_x == 2 && cluster_size_y == 2) {
return std::make_pair(data[0] + data[1] + data[2] + data[3], 0);
} else {
constexpr size_t num_2x2_subclusters =
(ClusterSizeX - 1) * (ClusterSizeY - 1);
std::array<T, num_2x2_subclusters> sum_2x2_subcluster;
for (size_t i = 0; i < ClusterSizeY - 1; ++i) {
for (size_t j = 0; j < ClusterSizeX - 1; ++j)
sum_2x2_subcluster[i * (ClusterSizeX - 1) + j] =
data[i * ClusterSizeX + j] +
data[i * ClusterSizeX + j + 1] +
data[(i + 1) * ClusterSizeX + j] +
data[(i + 1) * ClusterSizeX + j + 1];
std::array<T, num_2x2_subclusters> sum_2x2_subcluster;
for (size_t i = 0; i < ClusterSizeY - 1; ++i) {
for (size_t j = 0; j < ClusterSizeX - 1; ++j)
sum_2x2_subcluster[i * (ClusterSizeX - 1) + j] =
data[i * ClusterSizeX + j] +
data[i * ClusterSizeX + j + 1] +
data[(i + 1) * ClusterSizeX + j] +
data[(i + 1) * ClusterSizeX + j + 1];
}
int index = std::max_element(sum_2x2_subcluster.begin(),
sum_2x2_subcluster.end()) -
sum_2x2_subcluster.begin();
return std::make_pair(sum_2x2_subcluster[index], index);
}
int index = std::max_element(sum_2x2_subcluster.begin(),
sum_2x2_subcluster.end()) -
sum_2x2_subcluster.begin();
return std::make_pair(sum_2x2_subcluster[index], index);
}
};
// Specialization for 2x2 clusters (only one sum exists)
template <typename T> struct Cluster<T, 2, 2, int16_t> {
int16_t x;
int16_t y;
T data[4];
T sum() const { return std::accumulate(data, data + 4, 0); }
std::pair<T, int> max_sum_2x2() const {
return std::make_pair(data[0] + data[1] + data[2] + data[3],
0); // Only one possible 2x2 sum
}
};
// Specialization for 3x3 clusters
template <typename T> struct Cluster<T, 3, 3, int16_t> {
int16_t x;
int16_t y;
T data[9];
T sum() const { return std::accumulate(data, data + 9, 0); }
std::pair<T, int> max_sum_2x2() const {
std::array<T, 4> sum_2x2_subclusters;
sum_2x2_subclusters[0] = data[0] + data[1] + data[3] + data[4];
sum_2x2_subclusters[1] = data[1] + data[2] + data[4] + data[5];
sum_2x2_subclusters[2] = data[3] + data[4] + data[6] + data[7];
sum_2x2_subclusters[3] = data[4] + data[5] + data[7] + data[8];
int index = std::max_element(sum_2x2_subclusters.begin(),
sum_2x2_subclusters.end()) -
sum_2x2_subclusters.begin();
return std::make_pair(sum_2x2_subclusters[index], index);
}
};
@ -102,20 +83,4 @@ struct is_cluster<Cluster<T, X, Y, CoordType>> : std::true_type {}; // Cluster
template <typename T> constexpr bool is_cluster_v = is_cluster<T>::value;
template <typename ClusterType,
typename = std::enable_if_t<is_cluster_v<ClusterType>>>
struct extract_template_arguments; // Forward declaration
// helper struct to extract template argument
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
typename CoordType>
struct extract_template_arguments<
Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> {
using value_type = T;
static constexpr int cluster_size_x = ClusterSizeX;
static constexpr int cluster_size_y = ClusterSizeY;
using coordtype = CoordType;
};
} // namespace aare
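
For reference (not part of this changeset): with data now a std::array and the sizes exposed as static members, a cluster can be used roughly as follows; the include path is an assumption.

#include <cstdint>
#include <iostream>

#include "aare/Cluster.hpp" // assumed include path

int main() {
    aare::Cluster<int32_t, 3, 3> cl{};
    cl.x = 10;
    cl.y = 20;
    cl.data = {1, 2, 3,
               4, 9, 4,
               3, 2, 1}; // std::array<int32_t, 9> after this change

    std::cout << "pixel sum: " << cl.sum() << '\n'; // accumulates data.begin()..end()

    auto [best, idx] = cl.max_sum_2x2(); // largest of the four overlapping 2x2 sums
    std::cout << "max 2x2 sum " << best << " at sub-cluster " << idx << '\n';

    static_assert(decltype(cl)::cluster_size_x == 3); // sizes are now static members
}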


@ -37,7 +37,11 @@ class ClusterCollector {
public:
ClusterCollector(ClusterFinderMT<ClusterType, uint16_t, double> *source) {
m_source = source->sink();
m_thread = std::thread(&ClusterCollector::process, this);
m_thread =
std::thread(&ClusterCollector::process,
this); // only one process does that so why isn't it
// automatically written to m_cluster in collect
// - instead of writing first to m_sink?
}
void stop() {
m_stop_requested = true;


@ -46,8 +46,8 @@ class ClusterFile {
std::optional<ROI> m_roi; /*Region of interest, will be applied if set*/
std::optional<NDArray<int32_t, 2>>
m_noise_map; /*Noise map to cut photons, will be applied if set*/
std::optional<GainMap> m_gain_map; /*Gain map to apply to the clusters, will
be applied if set*/
std::optional<InvertedGainMap> m_gain_map; /*Gain map to apply to the
clusters, will be applied if set*/
public:
/**
@ -60,26 +60,81 @@ class ClusterFile {
* @throws std::runtime_error if the file could not be opened
*/
ClusterFile(const std::filesystem::path &fname, size_t chunk_size = 1000,
const std::string &mode = "r");
const std::string &mode = "r")
~ClusterFile();
: m_filename(fname.string()), m_chunk_size(chunk_size), m_mode(mode) {
if (mode == "r") {
fp = fopen(m_filename.c_str(), "rb");
if (!fp) {
throw std::runtime_error("Could not open file for reading: " +
m_filename);
}
} else if (mode == "w") {
fp = fopen(m_filename.c_str(), "wb");
if (!fp) {
throw std::runtime_error("Could not open file for writing: " +
m_filename);
}
} else if (mode == "a") {
fp = fopen(m_filename.c_str(), "ab");
if (!fp) {
throw std::runtime_error("Could not open file for appending: " +
m_filename);
}
} else {
throw std::runtime_error("Unsupported mode: " + mode);
}
}
~ClusterFile() { close(); }
/**
* @brief Read n_clusters clusters from the file discarding frame numbers.
* If EOF is reached the returned vector will have less than n_clusters
* clusters
* @brief Read n_clusters clusters from the file discarding
* frame numbers. If EOF is reached the returned vector will
* have less than n_clusters clusters
*/
ClusterVector<ClusterType> read_clusters(size_t n_clusters);
ClusterVector<ClusterType> read_clusters(size_t n_clusters) {
if (m_mode != "r") {
throw std::runtime_error("File not opened for reading");
}
if (m_noise_map || m_roi) {
return read_clusters_with_cut(n_clusters);
} else {
return read_clusters_without_cut(n_clusters);
}
}
/**
* @brief Read a single frame from the file and return the clusters. The
* cluster vector will have the frame number set.
* @throws std::runtime_error if the file is not opened for reading or the
* file pointer not at the beginning of a frame
* @brief Read a single frame from the file and return the
* clusters. The cluster vector will have the frame number
* set.
* @throws std::runtime_error if the file is not opened for
* reading or the file pointer not at the beginning of a
* frame
*/
ClusterVector<ClusterType> read_frame();
ClusterVector<ClusterType> read_frame() {
if (m_mode != "r") {
throw std::runtime_error(LOCATION + "File not opened for reading");
}
if (m_noise_map || m_roi) {
return read_frame_with_cut();
} else {
return read_frame_without_cut();
}
}
void write_frame(const ClusterVector<ClusterType> &clusters);
void write_frame(const ClusterVector<ClusterType> &clusters) {
if (m_mode != "w" && m_mode != "a") {
throw std::runtime_error("File not opened for writing");
}
int32_t frame_number = clusters.frame_number();
fwrite(&frame_number, sizeof(frame_number), 1, fp);
uint32_t n_clusters = clusters.size();
fwrite(&n_clusters, sizeof(n_clusters), 1, fp);
fwrite(clusters.data(), clusters.item_size(), clusters.size(), fp);
}
/**
* @brief Return the chunk size
@ -87,39 +142,84 @@ class ClusterFile {
size_t chunk_size() const { return m_chunk_size; }
/**
* @brief Set the region of interest to use when reading clusters. If set
* only clusters within the ROI will be read.
* @brief Set the region of interest to use when reading
* clusters. If set only clusters within the ROI will be
* read.
*/
void set_roi(ROI roi);
void set_roi(ROI roi) { m_roi = roi; }
/**
* @brief Set the noise map to use when reading clusters. If set clusters
* below the noise level will be discarded. Selection criteria one of:
* Central pixel above noise, highest 2x2 sum above 2 * noise, total sum
* above 3 * noise.
* @brief Set the noise map to use when reading clusters. If
* set clusters below the noise level will be discarded.
* Selection criteria one of: Central pixel above noise,
* highest 2x2 sum above 2 * noise, total sum above 3 *
* noise.
*/
void set_noise_map(const NDView<int32_t, 2> noise_map);
void set_noise_map(const NDView<int32_t, 2> noise_map) {
m_noise_map = NDArray<int32_t, 2>(noise_map);
}
/**
* @brief Set the gain map to use when reading clusters. If set the gain map
* will be applied to the clusters that pass ROI and noise_map selection.
* The gain map is expected to be in ADU/energy.
*/
void set_gain_map(const NDView<double, 2> gain_map);
void set_gain_map(const NDView<double, 2> gain_map) {
m_gain_map = InvertedGainMap(gain_map);
}
void set_gain_map(const GainMap &gain_map);
void set_gain_map(const InvertedGainMap &gain_map) {
m_gain_map = gain_map;
}
void set_gain_map(const GainMap &&gain_map);
void set_gain_map(const InvertedGainMap &&gain_map) {
m_gain_map = gain_map;
}
/**
* @brief Close the file. If not closed the file will be closed in the
* destructor
* @brief Close the file. If not closed the file will be
* closed in the destructor
*/
void close();
void close() {
if (fp) {
fclose(fp);
fp = nullptr;
}
}
/** @brief Open the file in specific mode
*
*/
void open(const std::string &mode);
void open(const std::string &mode) {
if (fp) {
close();
}
if (mode == "r") {
fp = fopen(m_filename.c_str(), "rb");
if (!fp) {
throw std::runtime_error("Could not open file for reading: " +
m_filename);
}
m_mode = "r";
} else if (mode == "w") {
fp = fopen(m_filename.c_str(), "wb");
if (!fp) {
throw std::runtime_error("Could not open file for writing: " +
m_filename);
}
m_mode = "w";
} else if (mode == "a") {
fp = fopen(m_filename.c_str(), "ab");
if (!fp) {
throw std::runtime_error("Could not open file for appending: " +
m_filename);
}
m_mode = "a";
} else {
throw std::runtime_error("Unsupported mode: " + mode);
}
}
private:
ClusterVector<ClusterType> read_clusters_with_cut(size_t n_clusters);
@ -130,133 +230,6 @@ class ClusterFile {
ClusterType read_one_cluster();
};
template <typename ClusterType, typename Enable>
ClusterFile<ClusterType, Enable>::ClusterFile(
const std::filesystem::path &fname, size_t chunk_size,
const std::string &mode)
: m_filename(fname.string()), m_chunk_size(chunk_size), m_mode(mode) {
if (mode == "r") {
fp = fopen(m_filename.c_str(), "rb");
if (!fp) {
throw std::runtime_error("Could not open file for reading: " +
m_filename);
}
} else if (mode == "w") {
fp = fopen(m_filename.c_str(), "wb");
if (!fp) {
throw std::runtime_error("Could not open file for writing: " +
m_filename);
}
} else if (mode == "a") {
fp = fopen(m_filename.c_str(), "ab");
if (!fp) {
throw std::runtime_error("Could not open file for appending: " +
m_filename);
}
} else {
throw std::runtime_error("Unsupported mode: " + mode);
}
}
template <typename ClusterType, typename Enable>
ClusterFile<ClusterType, Enable>::~ClusterFile() {
close();
}
template <typename ClusterType, typename Enable>
void ClusterFile<ClusterType, Enable>::close() {
if (fp) {
fclose(fp);
fp = nullptr;
}
}
template <typename ClusterType, typename Enable>
void ClusterFile<ClusterType, Enable>::open(const std::string &mode) {
if (fp) {
close();
}
if (mode == "r") {
fp = fopen(m_filename.c_str(), "rb");
if (!fp) {
throw std::runtime_error("Could not open file for reading: " +
m_filename);
}
m_mode = "r";
} else if (mode == "w") {
fp = fopen(m_filename.c_str(), "wb");
if (!fp) {
throw std::runtime_error("Could not open file for writing: " +
m_filename);
}
m_mode = "w";
} else if (mode == "a") {
fp = fopen(m_filename.c_str(), "ab");
if (!fp) {
throw std::runtime_error("Could not open file for appending: " +
m_filename);
}
m_mode = "a";
} else {
throw std::runtime_error("Unsupported mode: " + mode);
}
}
template <typename ClusterType, typename Enable>
void ClusterFile<ClusterType, Enable>::set_roi(ROI roi) {
m_roi = roi;
}
template <typename ClusterType, typename Enable>
void ClusterFile<ClusterType, Enable>::set_noise_map(
const NDView<int32_t, 2> noise_map) {
m_noise_map = NDArray<int32_t, 2>(noise_map);
}
template <typename ClusterType, typename Enable>
void ClusterFile<ClusterType, Enable>::set_gain_map(
const NDView<double, 2> gain_map) {
m_gain_map = GainMap(gain_map);
}
template <typename ClusterType, typename Enable>
void ClusterFile<ClusterType, Enable>::set_gain_map(const GainMap &gain_map) {
m_gain_map = gain_map;
}
template <typename ClusterType, typename Enable>
void ClusterFile<ClusterType, Enable>::set_gain_map(const GainMap &&gain_map) {
m_gain_map = gain_map;
}
// TODO generally supported for all cluster types
template <typename ClusterType, typename Enable>
void ClusterFile<ClusterType, Enable>::write_frame(
const ClusterVector<ClusterType> &clusters) {
if (m_mode != "w" && m_mode != "a") {
throw std::runtime_error("File not opened for writing");
}
int32_t frame_number = clusters.frame_number();
fwrite(&frame_number, sizeof(frame_number), 1, fp);
uint32_t n_clusters = clusters.size();
fwrite(&n_clusters, sizeof(n_clusters), 1, fp);
fwrite(clusters.data(), clusters.item_size(), clusters.size(), fp);
}
template <typename ClusterType, typename Enable>
ClusterVector<ClusterType>
ClusterFile<ClusterType, Enable>::read_clusters(size_t n_clusters) {
if (m_mode != "r") {
throw std::runtime_error("File not opened for reading");
}
if (m_noise_map || m_roi) {
return read_clusters_with_cut(n_clusters);
} else {
return read_clusters_without_cut(n_clusters);
}
}
template <typename ClusterType, typename Enable>
ClusterVector<ClusterType>
ClusterFile<ClusterType, Enable>::read_clusters_without_cut(size_t n_clusters) {
@ -276,8 +249,8 @@ ClusterFile<ClusterType, Enable>::read_clusters_without_cut(size_t n_clusters) {
// if there are photons left from previous frame read them first
if (nph) {
if (nph > n_clusters) {
// if we have more photons left in the frame then photons to read we
// read directly the requested number
// if we have more photons left in the frame then photons to
// read we read directly the requested number
nn = n_clusters;
} else {
nn = nph;
@ -343,8 +316,8 @@ ClusterFile<ClusterType, Enable>::read_clusters_with_cut(size_t n_clusters) {
while (fread(&frame_number, sizeof(frame_number), 1, fp)) {
if (fread(&m_num_left, sizeof(m_num_left), 1, fp)) {
clusters.set_frame_number(
frame_number); // cluster vector will hold the last frame
// number
frame_number); // cluster vector will hold the last
// frame number
while (m_num_left && clusters.size() < n_clusters) {
ClusterType c = read_one_cluster();
if (is_selected(c)) {
@ -375,18 +348,6 @@ ClusterType ClusterFile<ClusterType, Enable>::read_one_cluster() {
return c;
}
template <typename ClusterType, typename Enable>
ClusterVector<ClusterType> ClusterFile<ClusterType, Enable>::read_frame() {
if (m_mode != "r") {
throw std::runtime_error(LOCATION + "File not opened for reading");
}
if (m_noise_map || m_roi) {
return read_frame_with_cut();
} else {
return read_frame_without_cut();
}
}
template <typename ClusterType, typename Enable>
ClusterVector<ClusterType>
ClusterFile<ClusterType, Enable>::read_frame_without_cut() {
@ -465,13 +426,9 @@ bool ClusterFile<ClusterType, Enable>::is_selected(ClusterType &cl) {
}
}
auto cluster_size_x = extract_template_arguments<
std::remove_reference_t<decltype(cl)>>::cluster_size_x;
auto cluster_size_y = extract_template_arguments<
std::remove_reference_t<decltype(cl)>>::cluster_size_y;
size_t cluster_center_index =
(cluster_size_x / 2) + (cluster_size_y / 2) * cluster_size_x;
(ClusterType::cluster_size_x / 2) +
(ClusterType::cluster_size_y / 2) * ClusterType::cluster_size_x;
if (m_noise_map) {
auto sum_1x1 = cl.data[cluster_center_index]; // central pixel

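For reference (not part of this changeset): with the member functions now defined inline in the class, typical read usage looks roughly like this; the file name and include path are placeholders.

#include <cstdint>
#include <iostream>

#include "aare/ClusterFile.hpp" // assumed include path

int main() {
    using Cluster3x3 = aare::Cluster<int32_t, 3, 3>;

    // Open for reading; throws std::runtime_error if the file cannot be opened.
    aare::ClusterFile<Cluster3x3> f("clusters.clust", /*chunk_size=*/1000, "r");

    // Read up to 500 clusters; fewer are returned if EOF is reached. If a noise
    // map or ROI had been set, read_clusters_with_cut() would be used instead.
    auto clusters = f.read_clusters(500);
    std::cout << "read " << clusters.size() << " clusters\n";

    f.close(); // otherwise the destructor closes the file
}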

@ -20,11 +20,9 @@ class ClusterFinder {
Pedestal<PEDESTAL_TYPE> m_pedestal;
ClusterVector<ClusterType> m_clusters;
static const uint8_t ClusterSizeX =
extract_template_arguments<ClusterType>::cluster_size_x;
static const uint8_t ClusterSizeY =
extract_template_arguments<ClusterType>::cluster_size_x;
using CT = typename extract_template_arguments<ClusterType>::value_type;
static const uint8_t ClusterSizeX = ClusterType::cluster_size_x;
static const uint8_t ClusterSizeY = ClusterType::cluster_size_y;
using CT = typename ClusterType::value_type;
public:
/**
@ -79,7 +77,6 @@ class ClusterFinder {
int has_center_pixel_y = ClusterSizeY % 2;
m_clusters.set_frame_number(frame_number);
std::vector<CT> cluster_data(ClusterSizeX * ClusterSizeY);
for (int iy = 0; iy < frame.shape(0); iy++) {
for (int ix = 0; ix < frame.shape(1); ix++) {
@ -126,8 +123,9 @@ class ClusterFinder {
// Store cluster
if (value == max) {
// Zero out the cluster data
std::fill(cluster_data.begin(), cluster_data.end(), 0);
ClusterType cluster{};
cluster.x = ix;
cluster.y = iy;
// Fill the cluster data since we have a photon to store
// It's worth redoing the lookup since most of the time we
@ -141,20 +139,15 @@ class ClusterFinder {
static_cast<CT>(frame(iy + ir, ix + ic)) -
static_cast<CT>(
m_pedestal.mean(iy + ir, ix + ic));
cluster_data[i] =
cluster.data[i] =
tmp; // Watch for out of bounds access
i++;
}
}
}
ClusterType new_cluster{};
new_cluster.x = ix;
new_cluster.y = iy;
std::copy(cluster_data.begin(), cluster_data.end(),
new_cluster.data);
// Add the cluster to the output ClusterVector
m_clusters.push_back(new_cluster);
m_clusters.push_back(cluster);
}
}
}


@ -34,7 +34,8 @@ template <typename ClusterType = Cluster<int32_t, 3, 3>,
typename FRAME_TYPE = uint16_t, typename PEDESTAL_TYPE = double>
class ClusterFinderMT {
using CT = typename extract_template_arguments<ClusterType>::value_type;
protected:
using CT = typename ClusterType::value_type;
size_t m_current_thread{0};
size_t m_n_threads{0};
using Finder = ClusterFinder<ClusterType, FRAME_TYPE, PEDESTAL_TYPE>;
@ -50,6 +51,7 @@ class ClusterFinderMT {
std::thread m_collect_thread;
std::chrono::milliseconds m_default_wait{1};
private:
std::atomic<bool> m_stop_requested{false};
std::atomic<bool> m_processing_threads_stopped{true};
@ -120,6 +122,7 @@ class ClusterFinderMT {
ClusterFinderMT(Shape<2> image_size, PEDESTAL_TYPE nSigma = 5.0,
size_t capacity = 2000, size_t n_threads = 3)
: m_n_threads(n_threads) {
for (size_t i = 0; i < n_threads; i++) {
m_cluster_finders.push_back(
std::make_unique<


@ -47,7 +47,7 @@ class ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> {
* @param frame_number frame number of the clusters. Default is 0, which is
* also used to indicate that the clusters come from many frames
*/
ClusterVector(size_t capacity = 300, uint64_t frame_number = 0)
ClusterVector(size_t capacity = 1024, uint64_t frame_number = 0)
: m_frame_number(frame_number) {
m_data.reserve(capacity);
}
@ -76,9 +76,10 @@ class ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> {
std::vector<T> sum() {
std::vector<T> sums(m_data.size());
for (size_t i = 0; i < m_data.size(); i++) {
sums[i] = at(i).sum();
}
std::transform(
m_data.begin(), m_data.end(), sums.begin(),
[](const ClusterType &cluster) { return cluster.sum(); });
return sums;
}
@ -86,13 +87,15 @@ class ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> {
* @brief Sum the pixels in the 2x2 subcluster with the biggest pixel sum in
* each cluster
* @return std::vector<T> vector of sums for each cluster
*/ //TODO if underlying container is a vector use std::for_each
*/
std::vector<T> sum_2x2() {
std::vector<T> sums_2x2(m_data.size());
for (size_t i = 0; i < m_data.size(); i++) {
sums_2x2[i] = at(i).max_sum_2x2().first;
}
std::transform(m_data.begin(), m_data.end(), sums_2x2.begin(),
[](const ClusterType &cluster) {
return cluster.max_sum_2x2().first;
});
return sums_2x2;
}
@ -130,9 +133,9 @@ class ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> {
*/
size_t capacity() const { return m_data.capacity(); }
const auto begin() const { return m_data.begin(); }
auto begin() const { return m_data.begin(); }
const auto end() const { return m_data.end(); }
auto end() const { return m_data.end(); }
/**
* @brief Return the size in bytes of a single cluster
@ -149,9 +152,9 @@ class ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> {
* @brief Return a reference to the i-th cluster casted to type V
* @tparam V type of the cluster
*/
ClusterType &at(size_t i) { return m_data[i]; }
ClusterType &operator[](size_t i) { return m_data[i]; }
const ClusterType &at(size_t i) const { return m_data[i]; }
const ClusterType &operator[](size_t i) const { return m_data[i]; }
/**
* @brief Return the frame number of the clusters. 0 is used to indicate

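For reference (not part of this changeset): a minimal sketch of the ClusterVector API touched above (push_back, sum, sum_2x2, iteration); the include path is an assumption.

#include <cstdint>
#include <iostream>

#include "aare/ClusterVector.hpp" // assumed include path

int main() {
    using Cluster3x3 = aare::Cluster<int32_t, 3, 3>;

    aare::ClusterVector<Cluster3x3> cv; // default capacity is now 1024
    Cluster3x3 cl{};
    cl.x = 1;
    cl.y = 2;
    cl.data = {0, 1, 0, 1, 5, 1, 0, 1, 0};
    cv.push_back(cl);

    auto sums = cv.sum();         // per-cluster pixel sums (std::transform above)
    auto sums_2x2 = cv.sum_2x2(); // per-cluster max 2x2 sums
    std::cout << sums[0] << " " << sums_2x2[0] << '\n';

    for (const auto &c : cv) // iterate via begin()/end()
        std::cout << c.x << "," << c.y << '\n';
}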

@ -18,8 +18,8 @@ class FilePtr {
FilePtr(FilePtr &&other);
FilePtr &operator=(FilePtr &&other);
FILE *get();
int64_t tell();
void seek(int64_t offset, int whence = SEEK_SET) {
ssize_t tell();
void seek(ssize_t offset, int whence = SEEK_SET) {
if (fseek(fp_, offset, whence) != 0)
throw std::runtime_error("Error seeking in file");
}


@ -15,6 +15,12 @@ NDArray<double, 1> gaus(NDView<double, 1> x, NDView<double, 1> par);
double pol1(const double x, const double *par);
NDArray<double, 1> pol1(NDView<double, 1> x, NDView<double, 1> par);
double scurve(const double x, const double *par);
NDArray<double, 1> scurve(NDView<double, 1> x, NDView<double, 1> par);
double scurve2(const double x, const double *par);
NDArray<double, 1> scurve2(NDView<double, 1> x, NDView<double, 1> par);
} // namespace func
@ -25,6 +31,9 @@ std::array<double, 3> gaus_init_par(const NDView<double, 1> x, const NDView<doub
std::array<double, 2> pol1_init_par(const NDView<double, 1> x, const NDView<double, 1> y);
std::array<double, 6> scurve_init_par(const NDView<double, 1> x, const NDView<double, 1> y);
std::array<double, 6> scurve2_init_par(const NDView<double, 1> x, const NDView<double, 1> y);
static constexpr int DEFAULT_NUM_THREADS = 4;
/**
@ -38,7 +47,7 @@ NDArray<double, 1> fit_gaus(NDView<double, 1> x, NDView<double, 1> y);
/**
* @brief Fit a 1D Gaussian to each pixel. Data layout [row, col, values]
* @param x x values
* @param y y vales, layout [row, col, values]
* @param y y values, layout [row, col, values]
* @param n_threads number of threads to use
*/
@ -51,7 +60,7 @@ NDArray<double, 3> fit_gaus(NDView<double, 1> x, NDView<double, 3> y,
/**
* @brief Fit a 1D Gaussian with error estimates
* @param x x values
* @param y y vales, layout [row, col, values]
* @param y y values, layout [row, col, values]
* @param y_err error in y, layout [row, col, values]
* @param par_out output parameters
* @param par_err_out output error parameters
@ -64,7 +73,7 @@ void fit_gaus(NDView<double, 1> x, NDView<double, 1> y, NDView<double, 1> y_err,
* @brief Fit a 1D Gaussian to each pixel with error estimates. Data layout
* [row, col, values]
* @param x x values
* @param y y vales, layout [row, col, values]
* @param y y values, layout [row, col, values]
* @param y_err error in y, layout [row, col, values]
* @param par_out output parameters, layout [row, col, values]
* @param par_err_out output parameter errors, layout [row, col, values]
@ -88,5 +97,19 @@ void fit_pol1(NDView<double, 1> x, NDView<double, 3> y, NDView<double, 3> y_err,
NDView<double, 3> par_out, NDView<double, 3> par_err_out,NDView<double, 2> chi2_out,
int n_threads = DEFAULT_NUM_THREADS);
NDArray<double, 1> fit_scurve(NDView<double, 1> x, NDView<double, 1> y);
NDArray<double, 3> fit_scurve(NDView<double, 1> x, NDView<double, 3> y, int n_threads);
void fit_scurve(NDView<double, 1> x, NDView<double, 1> y, NDView<double, 1> y_err,
NDView<double, 1> par_out, NDView<double, 1> par_err_out, double& chi2);
void fit_scurve(NDView<double, 1> x, NDView<double, 3> y, NDView<double, 3> y_err,
NDView<double, 3> par_out, NDView<double, 3> par_err_out, NDView<double, 2> chi2_out,
int n_threads);
NDArray<double, 1> fit_scurve2(NDView<double, 1> x, NDView<double, 1> y);
NDArray<double, 3> fit_scurve2(NDView<double, 1> x, NDView<double, 3> y, int n_threads);
void fit_scurve2(NDView<double, 1> x, NDView<double, 1> y, NDView<double, 1> y_err,
NDView<double, 1> par_out, NDView<double, 1> par_err_out, double& chi2);
void fit_scurve2(NDView<double, 1> x, NDView<double, 3> y, NDView<double, 3> y_err,
NDView<double, 3> par_out, NDView<double, 3> par_err_out, NDView<double, 2> chi2_out,
int n_threads);
} // namespace aare


@ -107,8 +107,8 @@ class Frame {
* @return NDView<T, 2>
*/
template <typename T> NDView<T, 2> view() {
std::array<int64_t, 2> shape = {static_cast<int64_t>(m_rows),
static_cast<int64_t>(m_cols)};
std::array<ssize_t, 2> shape = {static_cast<ssize_t>(m_rows),
static_cast<ssize_t>(m_cols)};
T *data = reinterpret_cast<T *>(m_data);
return NDView<T, 2>(data, shape);
}


@ -1,6 +1,7 @@
/************************************************
* @file ApplyGainMap.hpp
* @short function to apply gain map of image size to a vector of clusters
* @file GainMap.hpp
* @short function to apply gain map of image size to a vector of clusters -
* note: the stored gain map is inverted for efficient application to images
***********************************************/
#pragma once
@ -12,14 +13,21 @@
namespace aare {
class GainMap {
class InvertedGainMap {
public:
explicit GainMap(const NDArray<double, 2> &gain_map)
: m_gain_map(gain_map) {};
explicit InvertedGainMap(const NDArray<double, 2> &gain_map)
: m_gain_map(gain_map) {
for (auto &item : m_gain_map) {
item = 1.0 / item;
}
};
explicit GainMap(const NDView<double, 2> gain_map) {
explicit InvertedGainMap(const NDView<double, 2> gain_map) {
m_gain_map = NDArray<double, 2>(gain_map);
for (auto &item : m_gain_map) {
item = 1.0 / item;
}
}
template <typename ClusterType,
@ -34,19 +42,21 @@ class GainMap {
int64_t index_cluster_center_x = ClusterSizeX / 2;
int64_t index_cluster_center_y = ClusterSizeY / 2;
for (size_t i = 0; i < clustervec.size(); i++) {
auto &cl = clustervec.at(i);
auto &cl = clustervec[i];
if (cl.x > 0 && cl.y > 0 && cl.x < m_gain_map.shape(1) - 1 &&
cl.y < m_gain_map.shape(0) - 1) {
for (size_t j = 0; j < ClusterSizeX * ClusterSizeY; j++) {
size_t x = cl.x + j % ClusterSizeX - index_cluster_center_x;
size_t y = cl.y + j / ClusterSizeX - index_cluster_center_y;
cl.data[j] = cl.data[j] * static_cast<T>(m_gain_map(y, x));
cl.data[j] = static_cast<T>(
static_cast<double>(cl.data[j]) *
m_gain_map(
y, x)); // cast after conversion to keep precision
}
} else {
memset(cl.data, 0,
ClusterSizeX * ClusterSizeY *
sizeof(T)); // clear edge clusters
// clear edge clusters
cl.data.fill(0);
}
}
}
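
For reference (not part of this changeset): the rename to InvertedGainMap reflects that 1/gain is stored once at construction, so applying the map costs one multiplication per pixel instead of a division. A standalone sketch of that idea, with no aare dependency:

#include <iostream>
#include <vector>

int main() {
    std::vector<double> gain = {0.8, 1.0, 1.25};

    // Invert once, as the InvertedGainMap constructor above does.
    std::vector<double> inv(gain.size());
    for (std::size_t i = 0; i < gain.size(); ++i)
        inv[i] = 1.0 / gain[i];

    // Applying the map is then a multiply per pixel rather than a divide.
    double adu = 100.0;
    for (std::size_t i = 0; i < gain.size(); ++i)
        std::cout << adu * inv[i] << " == " << adu / gain[i] << '\n';
}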


@ -44,15 +44,14 @@ Interpolator::interpolate(const ClusterVector<ClusterType> &clusters) {
photons.reserve(clusters.size());
if (clusters.cluster_size_x() == 3 || clusters.cluster_size_y() == 3) {
for (size_t i = 0; i < clusters.size(); i++) {
for (const ClusterType &cluster : clusters) {
auto cluster = clusters.at(i);
auto eta = calculate_eta2(cluster);
Photon photon;
photon.x = cluster.x;
photon.y = cluster.y;
photon.energy = eta.sum;
photon.energy = static_cast<decltype(photon.energy)>(eta.sum);
// auto ie = nearest_index(m_energy_bins, photon.energy)-1;
// auto ix = nearest_index(m_etabinsx, eta.x)-1;
@ -70,20 +69,20 @@ Interpolator::interpolate(const ClusterVector<ClusterType> &clusters) {
// cBottomRight = 1,
// cTopLeft = 2,
// cTopRight = 3
switch (eta.c) {
case cTopLeft:
switch (static_cast<corner>(eta.c)) {
case corner::cTopLeft:
dX = -1.;
dY = 0;
break;
case cTopRight:;
case corner::cTopRight:;
dX = 0;
dY = 0;
break;
case cBottomLeft:
case corner::cBottomLeft:
dX = -1.;
dY = -1.;
break;
case cBottomRight:
case corner::cBottomRight:
dX = 0.;
dY = -1.;
break;
@ -94,14 +93,13 @@ Interpolator::interpolate(const ClusterVector<ClusterType> &clusters) {
}
} else if (clusters.cluster_size_x() == 2 ||
clusters.cluster_size_y() == 2) {
for (size_t i = 0; i < clusters.size(); i++) {
auto cluster = clusters.at(i);
for (const ClusterType &cluster : clusters) {
auto eta = calculate_eta2(cluster);
Photon photon;
photon.x = cluster.x;
photon.y = cluster.y;
photon.energy = eta.sum;
photon.energy = static_cast<decltype(photon.energy)>(eta.sum);
// Now do some actual interpolation.
// Find which energy bin the cluster is in


@ -22,10 +22,10 @@ TODO! Add expression templates for operators
namespace aare {
template <typename T, int64_t Ndim = 2>
template <typename T, ssize_t Ndim = 2>
class NDArray : public ArrayExpr<NDArray<T, Ndim>, Ndim> {
std::array<int64_t, Ndim> shape_;
std::array<int64_t, Ndim> strides_;
std::array<ssize_t, Ndim> shape_;
std::array<ssize_t, Ndim> strides_;
size_t size_{};
T *data_;
@ -42,7 +42,7 @@ class NDArray : public ArrayExpr<NDArray<T, Ndim>, Ndim> {
*
* @param shape shape of the new NDArray
*/
explicit NDArray(std::array<int64_t, Ndim> shape)
explicit NDArray(std::array<ssize_t, Ndim> shape)
: shape_(shape), strides_(c_strides<Ndim>(shape_)),
size_(std::accumulate(shape_.begin(), shape_.end(), 1,
std::multiplies<>())),
@ -55,7 +55,7 @@ class NDArray : public ArrayExpr<NDArray<T, Ndim>, Ndim> {
* @param shape shape of the new array
* @param value value to initialize the array with
*/
NDArray(std::array<int64_t, Ndim> shape, T value) : NDArray(shape) {
NDArray(std::array<ssize_t, Ndim> shape, T value) : NDArray(shape) {
this->operator=(value);
}
@ -186,22 +186,22 @@ class NDArray : public ArrayExpr<NDArray<T, Ndim>, Ndim> {
}
// TODO! is int the right type for index?
T &operator()(int64_t i) { return data_[i]; }
const T &operator()(int64_t i) const { return data_[i]; }
T &operator()(ssize_t i) { return data_[i]; }
const T &operator()(ssize_t i) const { return data_[i]; }
T &operator[](int64_t i) { return data_[i]; }
const T &operator[](int64_t i) const { return data_[i]; }
T &operator[](ssize_t i) { return data_[i]; }
const T &operator[](ssize_t i) const { return data_[i]; }
T *data() { return data_; }
std::byte *buffer() { return reinterpret_cast<std::byte *>(data_); }
ssize_t size() const { return static_cast<ssize_t>(size_); }
size_t total_bytes() const { return size_ * sizeof(T); }
std::array<int64_t, Ndim> shape() const noexcept { return shape_; }
int64_t shape(int64_t i) const noexcept { return shape_[i]; }
std::array<int64_t, Ndim> strides() const noexcept { return strides_; }
std::array<ssize_t, Ndim> shape() const noexcept { return shape_; }
ssize_t shape(ssize_t i) const noexcept { return shape_[i]; }
std::array<ssize_t, Ndim> strides() const noexcept { return strides_; }
size_t bitdepth() const noexcept { return sizeof(T) * 8; }
std::array<int64_t, Ndim> byte_strides() const noexcept {
std::array<ssize_t, Ndim> byte_strides() const noexcept {
auto byte_strides = strides_;
for (auto &val : byte_strides)
val *= sizeof(T);
@ -228,7 +228,7 @@ class NDArray : public ArrayExpr<NDArray<T, Ndim>, Ndim> {
};
// Move assign
template <typename T, int64_t Ndim>
template <typename T, ssize_t Ndim>
NDArray<T, Ndim> &
NDArray<T, Ndim>::operator=(NDArray<T, Ndim> &&other) noexcept {
if (this != &other) {
@ -242,7 +242,7 @@ NDArray<T, Ndim>::operator=(NDArray<T, Ndim> &&other) noexcept {
return *this;
}
template <typename T, int64_t Ndim>
template <typename T, ssize_t Ndim>
NDArray<T, Ndim> &NDArray<T, Ndim>::operator+=(const NDArray<T, Ndim> &other) {
// check shape
if (shape_ == other.shape_) {
@ -254,7 +254,7 @@ NDArray<T, Ndim> &NDArray<T, Ndim>::operator+=(const NDArray<T, Ndim> &other) {
throw(std::runtime_error("Shape of ImageDatas must match"));
}
template <typename T, int64_t Ndim>
template <typename T, ssize_t Ndim>
NDArray<T, Ndim> &NDArray<T, Ndim>::operator-=(const NDArray<T, Ndim> &other) {
// check shape
if (shape_ == other.shape_) {
@ -266,7 +266,7 @@ NDArray<T, Ndim> &NDArray<T, Ndim>::operator-=(const NDArray<T, Ndim> &other) {
throw(std::runtime_error("Shape of ImageDatas must match"));
}
template <typename T, int64_t Ndim>
template <typename T, ssize_t Ndim>
NDArray<T, Ndim> &NDArray<T, Ndim>::operator*=(const NDArray<T, Ndim> &other) {
// check shape
if (shape_ == other.shape_) {
@ -278,14 +278,14 @@ NDArray<T, Ndim> &NDArray<T, Ndim>::operator*=(const NDArray<T, Ndim> &other) {
throw(std::runtime_error("Shape of ImageDatas must match"));
}
template <typename T, int64_t Ndim>
template <typename T, ssize_t Ndim>
NDArray<T, Ndim> &NDArray<T, Ndim>::operator&=(const T &mask) {
for (auto it = begin(); it != end(); ++it)
*it &= mask;
return *this;
}
template <typename T, int64_t Ndim>
template <typename T, ssize_t Ndim>
NDArray<bool, Ndim> NDArray<T, Ndim>::operator>(const NDArray &other) {
if (shape_ == other.shape_) {
NDArray<bool, Ndim> result{shape_};
@ -297,7 +297,7 @@ NDArray<bool, Ndim> NDArray<T, Ndim>::operator>(const NDArray &other) {
throw(std::runtime_error("Shape of ImageDatas must match"));
}
template <typename T, int64_t Ndim>
template <typename T, ssize_t Ndim>
NDArray<T, Ndim> &NDArray<T, Ndim>::operator=(const NDArray<T, Ndim> &other) {
if (this != &other) {
delete[] data_;
@ -310,7 +310,7 @@ NDArray<T, Ndim> &NDArray<T, Ndim>::operator=(const NDArray<T, Ndim> &other) {
return *this;
}
template <typename T, int64_t Ndim>
template <typename T, ssize_t Ndim>
bool NDArray<T, Ndim>::operator==(const NDArray<T, Ndim> &other) const {
if (shape_ != other.shape_)
return false;
@ -322,23 +322,23 @@ bool NDArray<T, Ndim>::operator==(const NDArray<T, Ndim> &other) const {
return true;
}
template <typename T, int64_t Ndim>
template <typename T, ssize_t Ndim>
bool NDArray<T, Ndim>::operator!=(const NDArray<T, Ndim> &other) const {
return !((*this) == other);
}
template <typename T, int64_t Ndim>
template <typename T, ssize_t Ndim>
NDArray<T, Ndim> &NDArray<T, Ndim>::operator++() {
for (uint32_t i = 0; i < size_; ++i)
data_[i] += 1;
return *this;
}
template <typename T, int64_t Ndim>
template <typename T, ssize_t Ndim>
NDArray<T, Ndim> &NDArray<T, Ndim>::operator=(const T &value) {
std::fill_n(data_, size_, value);
return *this;
}
template <typename T, int64_t Ndim>
template <typename T, ssize_t Ndim>
NDArray<T, Ndim> &NDArray<T, Ndim>::operator+=(const T &value) {
for (uint32_t i = 0; i < size_; ++i)
data_[i] += value;
@ -348,57 +348,57 @@ NDArray<T, Ndim> &NDArray<T, Ndim>::operator+=(const T &value) {
template <typename T, int64_t Ndim>
template <typename T, ssize_t Ndim>
NDArray<T, Ndim> NDArray<T, Ndim>::operator+(const T &value) {
NDArray result = *this;
result += value;
return result;
}
template <typename T, int64_t Ndim>
template <typename T, ssize_t Ndim>
NDArray<T, Ndim> &NDArray<T, Ndim>::operator-=(const T &value) {
for (uint32_t i = 0; i < size_; ++i)
data_[i] -= value;
return *this;
}
template <typename T, int64_t Ndim>
template <typename T, ssize_t Ndim>
NDArray<T, Ndim> NDArray<T, Ndim>::operator-(const T &value) {
NDArray result = *this;
result -= value;
return result;
}
template <typename T, int64_t Ndim>
template <typename T, ssize_t Ndim>
NDArray<T, Ndim> &NDArray<T, Ndim>::operator/=(const T &value) {
for (uint32_t i = 0; i < size_; ++i)
data_[i] /= value;
return *this;
}
template <typename T, int64_t Ndim>
template <typename T, ssize_t Ndim>
NDArray<T, Ndim> NDArray<T, Ndim>::operator/(const T &value) {
NDArray result = *this;
result /= value;
return result;
}
template <typename T, int64_t Ndim>
template <typename T, ssize_t Ndim>
NDArray<T, Ndim> &NDArray<T, Ndim>::operator*=(const T &value) {
for (uint32_t i = 0; i < size_; ++i)
data_[i] *= value;
return *this;
}
template <typename T, int64_t Ndim>
template <typename T, ssize_t Ndim>
NDArray<T, Ndim> NDArray<T, Ndim>::operator*(const T &value) {
NDArray result = *this;
result *= value;
return result;
}
// template <typename T, int64_t Ndim> void NDArray<T, Ndim>::Print() {
// template <typename T, ssize_t Ndim> void NDArray<T, Ndim>::Print() {
// if (shape_[0] < 20 && shape_[1] < 20)
// Print_all();
// else
// Print_some();
// }
template <typename T, int64_t Ndim>
template <typename T, ssize_t Ndim>
std::ostream &operator<<(std::ostream &os, const NDArray<T, Ndim> &arr) {
for (auto row = 0; row < arr.shape(0); ++row) {
for (auto col = 0; col < arr.shape(1); ++col) {
@ -410,7 +410,7 @@ std::ostream &operator<<(std::ostream &os, const NDArray<T, Ndim> &arr) {
return os;
}
template <typename T, int64_t Ndim> void NDArray<T, Ndim>::Print_all() {
template <typename T, ssize_t Ndim> void NDArray<T, Ndim>::Print_all() {
for (auto row = 0; row < shape_[0]; ++row) {
for (auto col = 0; col < shape_[1]; ++col) {
std::cout << std::setw(3);
@ -419,7 +419,7 @@ template <typename T, int64_t Ndim> void NDArray<T, Ndim>::Print_all() {
std::cout << "\n";
}
}
template <typename T, int64_t Ndim> void NDArray<T, Ndim>::Print_some() {
template <typename T, ssize_t Ndim> void NDArray<T, Ndim>::Print_some() {
for (auto row = 0; row < 5; ++row) {
for (auto col = 0; col < 5; ++col) {
std::cout << std::setw(7);
@ -429,7 +429,7 @@ template <typename T, int64_t Ndim> void NDArray<T, Ndim>::Print_some() {
}
}
template <typename T, int64_t Ndim>
template <typename T, ssize_t Ndim>
void save(NDArray<T, Ndim> &img, std::string &pathname) {
std::ofstream f;
f.open(pathname, std::ios::binary);
@ -437,9 +437,9 @@ void save(NDArray<T, Ndim> &img, std::string &pathname) {
f.close();
}
template <typename T, int64_t Ndim>
template <typename T, ssize_t Ndim>
NDArray<T, Ndim> load(const std::string &pathname,
std::array<int64_t, Ndim> shape) {
std::array<ssize_t, Ndim> shape) {
NDArray<T, Ndim> img{shape};
std::ifstream f;
f.open(pathname, std::ios::binary);

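For reference (not part of this changeset): after the int64_t to ssize_t switch, shapes and indices are ssize_t throughout. A minimal usage sketch restricted to the members shown above; the include path is an assumption.

#include <iostream>

#include "aare/NDArray.hpp" // assumed include path

int main() {
    // The shape argument is std::array<ssize_t, 2> after this change.
    aare::NDArray<double, 2> img({4, 4}, 1.0);

    img += 2.0;   // element-wise scalar add
    img(0) = 7.0; // flat element access now takes ssize_t

    std::cout << img.shape(0) << "x" << img.shape(1) << ", size " << img.size()
              << ", first element " << img(0) << '\n';
}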

@ -14,10 +14,10 @@
#include <vector>
namespace aare {
template <int64_t Ndim> using Shape = std::array<int64_t, Ndim>;
template <ssize_t Ndim> using Shape = std::array<ssize_t, Ndim>;
// TODO! fix mismatch between signed and unsigned
template <int64_t Ndim> Shape<Ndim> make_shape(const std::vector<size_t> &shape) {
template <ssize_t Ndim> Shape<Ndim> make_shape(const std::vector<size_t> &shape) {
if (shape.size() != Ndim)
throw std::runtime_error("Shape size mismatch");
Shape<Ndim> arr;
@ -25,41 +25,41 @@ template <int64_t Ndim> Shape<Ndim> make_shape(const std::vector<size_t> &shape)
return arr;
}
template <int64_t Dim = 0, typename Strides> int64_t element_offset(const Strides & /*unused*/) { return 0; }
template <ssize_t Dim = 0, typename Strides> ssize_t element_offset(const Strides & /*unused*/) { return 0; }
template <int64_t Dim = 0, typename Strides, typename... Ix>
int64_t element_offset(const Strides &strides, int64_t i, Ix... index) {
template <ssize_t Dim = 0, typename Strides, typename... Ix>
ssize_t element_offset(const Strides &strides, ssize_t i, Ix... index) {
return i * strides[Dim] + element_offset<Dim + 1>(strides, index...);
}
template <int64_t Ndim> std::array<int64_t, Ndim> c_strides(const std::array<int64_t, Ndim> &shape) {
std::array<int64_t, Ndim> strides{};
template <ssize_t Ndim> std::array<ssize_t, Ndim> c_strides(const std::array<ssize_t, Ndim> &shape) {
std::array<ssize_t, Ndim> strides{};
std::fill(strides.begin(), strides.end(), 1);
for (int64_t i = Ndim - 1; i > 0; --i) {
for (ssize_t i = Ndim - 1; i > 0; --i) {
strides[i - 1] = strides[i] * shape[i];
}
return strides;
}
template <int64_t Ndim> std::array<int64_t, Ndim> make_array(const std::vector<int64_t> &vec) {
template <ssize_t Ndim> std::array<ssize_t, Ndim> make_array(const std::vector<ssize_t> &vec) {
assert(vec.size() == Ndim);
std::array<int64_t, Ndim> arr{};
std::array<ssize_t, Ndim> arr{};
std::copy_n(vec.begin(), Ndim, arr.begin());
return arr;
}
template <typename T, int64_t Ndim = 2> class NDView : public ArrayExpr<NDView<T, Ndim>, Ndim> {
template <typename T, ssize_t Ndim = 2> class NDView : public ArrayExpr<NDView<T, Ndim>, Ndim> {
public:
NDView() = default;
~NDView() = default;
NDView(const NDView &) = default;
NDView(NDView &&) = default;
NDView(T *buffer, std::array<int64_t, Ndim> shape)
NDView(T *buffer, std::array<ssize_t, Ndim> shape)
: buffer_(buffer), strides_(c_strides<Ndim>(shape)), shape_(shape),
size_(std::accumulate(std::begin(shape), std::end(shape), 1, std::multiplies<>())) {}
// NDView(T *buffer, const std::vector<int64_t> &shape)
// NDView(T *buffer, const std::vector<ssize_t> &shape)
// : buffer_(buffer), strides_(c_strides<Ndim>(make_array<Ndim>(shape))), shape_(make_array<Ndim>(shape)),
// size_(std::accumulate(std::begin(shape), std::end(shape), 1, std::multiplies<>())) {}
@ -73,14 +73,14 @@ template <typename T, int64_t Ndim = 2> class NDView : public ArrayExpr<NDView<T
ssize_t size() const { return static_cast<ssize_t>(size_); }
size_t total_bytes() const { return size_ * sizeof(T); }
std::array<int64_t, Ndim> strides() const noexcept { return strides_; }
std::array<ssize_t, Ndim> strides() const noexcept { return strides_; }
T *begin() { return buffer_; }
T *end() { return buffer_ + size_; }
T const *begin() const { return buffer_; }
T const *end() const { return buffer_ + size_; }
T &operator()(int64_t i) const { return buffer_[i]; }
T &operator[](int64_t i) const { return buffer_[i]; }
T &operator()(ssize_t i) const { return buffer_[i]; }
T &operator[](ssize_t i) const { return buffer_[i]; }
bool operator==(const NDView &other) const {
if (size_ != other.size_)
@ -136,15 +136,15 @@ template <typename T, int64_t Ndim = 2> class NDView : public ArrayExpr<NDView<T
}
auto &shape() const { return shape_; }
auto shape(int64_t i) const { return shape_[i]; }
auto shape(ssize_t i) const { return shape_[i]; }
T *data() { return buffer_; }
void print_all() const;
private:
T *buffer_{nullptr};
std::array<int64_t, Ndim> strides_{};
std::array<int64_t, Ndim> shape_{};
std::array<ssize_t, Ndim> strides_{};
std::array<ssize_t, Ndim> shape_{};
uint64_t size_{};
template <class BinaryOperation> NDView &elemenwise(T val, BinaryOperation op) {
@ -160,7 +160,7 @@ template <typename T, int64_t Ndim = 2> class NDView : public ArrayExpr<NDView<T
return *this;
}
};
template <typename T, int64_t Ndim> void NDView<T, Ndim>::print_all() const {
template <typename T, ssize_t Ndim> void NDView<T, Ndim>::print_all() const {
for (auto row = 0; row < shape_[0]; ++row) {
for (auto col = 0; col < shape_[1]; ++col) {
std::cout << std::setw(3);
@ -171,7 +171,7 @@ template <typename T, int64_t Ndim> void NDView<T, Ndim>::print_all() const {
}
template <typename T, int64_t Ndim>
template <typename T, ssize_t Ndim>
std::ostream& operator <<(std::ostream& os, const NDView<T, Ndim>& arr){
for (auto row = 0; row < arr.shape(0); ++row) {
for (auto col = 0; col < arr.shape(1); ++col) {
@ -184,4 +184,9 @@ std::ostream& operator <<(std::ostream& os, const NDView<T, Ndim>& arr){
}
template <typename T>
NDView<T,1> make_view(std::vector<T>& vec){
return NDView<T,1>(vec.data(), {static_cast<ssize_t>(vec.size())});
}
} // namespace aare
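A minimal sketch of the new make_view helper (assumes only the NDView header shown above):

#include <vector>
#include <aare/NDView.hpp>

void make_view_example() {
    std::vector<int> v{1, 2, 3, 4};
    auto view = aare::make_view(v); // NDView<int, 1> over v.data(), shape {4}
    view(0) = 10;                   // writes through to v[0]
}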

View File

@ -69,7 +69,7 @@ class NumpyFile : public FileInterface {
*/
template <typename T, size_t NDim> NDArray<T, NDim> load() {
NDArray<T, NDim> arr(make_shape<NDim>(m_header.shape));
if (fseek(fp, static_cast<int64_t>(header_size), SEEK_SET)) {
if (fseek(fp, static_cast<long>(header_size), SEEK_SET)) {
throw std::runtime_error(LOCATION + "Error seeking to the start of the data");
}
size_t rc = fread(arr.data(), sizeof(T), arr.size(), fp);

View File

@ -107,7 +107,7 @@ template <typename SUM_TYPE = double> class Pedestal {
assert(frame.size() == m_rows * m_cols);
// TODO! move away from m_rows, m_cols
if (frame.shape() != std::array<int64_t, 2>{m_rows, m_cols}) {
if (frame.shape() != std::array<ssize_t, 2>{m_rows, m_cols}) {
throw std::runtime_error(
"Frame shape does not match pedestal shape");
}
@ -128,7 +128,7 @@ template <typename SUM_TYPE = double> class Pedestal {
assert(frame.size() == m_rows * m_cols);
// TODO! move away from m_rows, m_cols
if (frame.shape() != std::array<int64_t, 2>{m_rows, m_cols}) {
if (frame.shape() != std::array<ssize_t, 2>{m_rows, m_cols}) {
throw std::runtime_error(
"Frame shape does not match pedestal shape");
}

View File

@ -30,22 +30,11 @@ struct ModuleConfig {
* Consider using that unless you need raw file specific functionality.
*/
class RawFile : public FileInterface {
size_t n_subfiles{}; //f0,f1...fn
size_t n_subfile_parts{}; // d0,d1...dn
//TODO! move to vector of SubFile instead of pointers
std::vector<std::vector<RawSubFile *>> subfiles; //subfiles[f0,f1...fn][d0,d1...dn]
// std::vector<xy> positions;
std::vector<std::unique_ptr<RawSubFile>> m_subfiles;
ModuleConfig cfg{0, 0};
RawMasterFile m_master;
size_t m_current_frame{};
// std::vector<ModuleGeometry> m_module_pixel_0;
// size_t m_rows{};
// size_t m_cols{};
size_t m_current_subfile{};
DetectorGeometry m_geometry;
public:
@ -56,7 +45,7 @@ class RawFile : public FileInterface {
*/
RawFile(const std::filesystem::path &fname, const std::string &mode = "r");
virtual ~RawFile() override;
virtual ~RawFile() override = default;
Frame read_frame() override;
Frame read_frame(size_t frame_number) override;
@ -80,7 +69,7 @@ class RawFile : public FileInterface {
size_t cols() const override;
size_t bitdepth() const override;
xy geometry();
size_t n_mod() const;
size_t n_modules() const;
RawMasterFile master() const;
@ -115,9 +104,6 @@ class RawFile : public FileInterface {
*/
static DetectorHeader read_header(const std::filesystem::path &fname);
// void update_geometry_with_roi();
int find_number_of_subfiles();
void open_subfiles();
void find_geometry();
};

View File

@ -121,6 +121,7 @@ class RawMasterFile {
size_t total_frames_expected() const;
xy geometry() const;
size_t n_modules() const;
std::optional<size_t> analog_samples() const;
std::optional<size_t> digital_samples() const;

View File

@ -18,11 +18,20 @@ class RawSubFile {
std::ifstream m_file;
DetectorType m_detector_type;
size_t m_bitdepth;
std::filesystem::path m_fname;
std::filesystem::path m_path; //!< path to the subfile
std::string m_base_name; //!< base name used for formatting file names
size_t m_offset{}; //!< file index of the first file, allow starting at non zero file
size_t m_total_frames{}; //!< total number of frames in the series of files
size_t m_rows{};
size_t m_cols{};
size_t m_bytes_per_frame{};
size_t n_frames{};
int m_module_index{};
size_t m_current_file_index{}; //!< The index of the open file
size_t m_current_frame_index{}; //!< The index of the current frame (with reference to all files)
std::vector<size_t> m_last_frame_in_file{}; //!< Used for seeking to the correct file
uint32_t m_pos_row{};
uint32_t m_pos_col{};
@ -53,6 +62,7 @@ class RawSubFile {
size_t tell();
void read_into(std::byte *image_buf, DetectorHeader *header = nullptr);
void read_into(std::byte *image_buf, size_t n_frames, DetectorHeader *header= nullptr);
void get_part(std::byte *buffer, size_t frame_index);
void read_header(DetectorHeader *header);
@ -66,10 +76,17 @@ class RawSubFile {
size_t pixels_per_frame() const { return m_rows * m_cols; }
size_t bytes_per_pixel() const { return m_bitdepth / bits_per_byte; }
size_t frames_in_file() const { return m_total_frames; }
private:
template <typename T>
void read_with_map(std::byte *image_buf);
void parse_fname(const std::filesystem::path &fname);
void scan_files();
void open_file(size_t file_index);
std::filesystem::path fpath(size_t file_index) const;
};
} // namespace aare
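A hedged C++ sketch of reading one frame with RawSubFile; the constructor arguments (path, detector type, rows, cols, bitdepth) follow the Python binding and tests further down, and the file name is only an example:

#include <aare/RawSubFile.hpp>
#include <cstddef>
#include <vector>

void raw_sub_file_example() {
    aare::RawSubFile f("jungfrau_single_d0_f0_0.raw",
                       aare::DetectorType::Jungfrau, 512, 1024, 16);
    std::vector<std::byte> buf(f.bytes_per_frame());
    aare::DetectorHeader header{};
    f.read_into(buf.data(), &header); // one frame; the read position advances to the next
}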

View File

@ -28,7 +28,7 @@ template <typename T> class VarClusterFinder {
};
private:
const std::array<int64_t, 2> shape_;
const std::array<ssize_t, 2> shape_;
NDView<T, 2> original_;
NDArray<int, 2> labeled_;
NDArray<int, 2> peripheral_labeled_;

View File

@ -107,5 +107,16 @@ std::vector<T> cumsum(const std::vector<T>& vec) {
}
template <typename Container> bool all_equal(const Container &c) {
if (!c.empty() &&
std::all_of(begin(c), end(c),
[c](const typename Container::value_type &element) {
return element == c.front();
}))
return true;
return false;
}
} // namespace aare
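A small usage sketch for all_equal (the header providing it is the utility header shown above):

#include <vector>

void all_equal_example() {
    std::vector<int> v{3, 3, 3};
    bool same = aare::all_equal(v); // true; an empty container yields false
    (void)same;
}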

View File

@ -1,6 +1,7 @@
#pragma once
#include <cstdint>
#include <vector>
#include <aare/NDView.hpp>
namespace aare {
@ -10,4 +11,16 @@ uint16_t adc_sar_04_decode64to16(uint64_t input);
void adc_sar_05_decode64to16(NDView<uint64_t, 2> input, NDView<uint16_t,2> output);
void adc_sar_04_decode64to16(NDView<uint64_t, 2> input, NDView<uint16_t,2> output);
} // namespace aare
/**
* @brief Apply custom weights to a 16-bit input value. Will sum up weights[i]**i
* for each bit i that is set in the input value.
* @throws std::out_of_range if weights.size() < 16
* @param input 16-bit input value
* @param weights vector of weights, size must be at least 16
*/
double apply_custom_weights(uint16_t input, const NDView<double, 1> weights);
void apply_custom_weights(NDView<uint16_t, 1> input, NDView<double, 1> output, const NDView<double, 1> weights);
} // namespace aare
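A hedged sketch of the two apply_custom_weights overloads declared above (the exact header path is assumed; make_view comes from NDView.hpp):

#include <cstdint>
#include <vector>
#include <aare/NDView.hpp>

void custom_weights_example() {
    std::vector<double> w(16, 2.0);           // one weight per bit of the input
    std::vector<uint16_t> in{0x0003, 0x0010};
    std::vector<double> out(in.size());
    auto weights = aare::make_view(w);
    auto input = aare::make_view(in);
    auto output = aare::make_view(out);
    aare::apply_custom_weights(input, output, weights);         // element-wise decode
    double single = aare::apply_custom_weights(in[0], weights); // scalar overload
    (void)single;
}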

View File

@ -204,23 +204,25 @@ struct DetectorGeometry{
int module_gap_row{};
int module_gap_col{};
std::vector<ModuleGeometry> module_pixel_0;
auto size() const { return module_pixel_0.size(); }
};
struct ROI{
int64_t xmin{};
int64_t xmax{};
int64_t ymin{};
int64_t ymax{};
ssize_t xmin{};
ssize_t xmax{};
ssize_t ymin{};
ssize_t ymax{};
int64_t height() const { return ymax - ymin; }
int64_t width() const { return xmax - xmin; }
bool contains(int64_t x, int64_t y) const {
ssize_t height() const { return ymax - ymin; }
ssize_t width() const { return xmax - xmin; }
bool contains(ssize_t x, ssize_t y) const {
return x >= xmin && x < xmax && y >= ymin && y < ymax;
}
};
using dynamic_shape = std::vector<int64_t>;
using dynamic_shape = std::vector<ssize_t>;
//TODO! Can we uniform enums between the libraries?
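A short sketch of the updated ROI (ssize_t members; the max bounds are exclusive, as in contains()):

void roi_example() {
    aare::ROI roi{0, 512, 0, 512};            // xmin, xmax, ymin, ymax
    bool inside = roi.contains(10, 10);       // true
    auto pixels = roi.width() * roi.height(); // 512 * 512
    (void)inside; (void)pixels;
}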

139
include/aare/logger.hpp Normal file
View File

@ -0,0 +1,139 @@
#pragma once
/*Utility to log to console*/
#include <iostream>
#include <sstream>
#include <sys/time.h>
namespace aare {
#define RED "\x1b[31m"
#define GREEN "\x1b[32m"
#define YELLOW "\x1b[33m"
#define BLUE "\x1b[34m"
#define MAGENTA "\x1b[35m"
#define CYAN "\x1b[36m"
#define GRAY "\x1b[37m"
#define DARKGRAY "\x1b[30m"
#define BG_BLACK "\x1b[48;5;232m"
#define BG_RED "\x1b[41m"
#define BG_GREEN "\x1b[42m"
#define BG_YELLOW "\x1b[43m"
#define BG_BLUE "\x1b[44m"
#define BG_MAGENTA "\x1b[45m"
#define BG_CYAN "\x1b[46m"
#define RESET "\x1b[0m"
#define BOLD "\x1b[1m"
enum TLogLevel {
logERROR,
logWARNING,
logINFOBLUE,
logINFOGREEN,
logINFORED,
logINFOCYAN,
logINFOMAGENTA,
logINFO,
logDEBUG,
logDEBUG1,
logDEBUG2,
logDEBUG3,
logDEBUG4,
logDEBUG5
};
// Compiler should optimize away anything below this value
#ifndef AARE_LOG_LEVEL
#define AARE_LOG_LEVEL "LOG LEVEL NOT SET IN CMAKE" //This is configured in the main CMakeLists.txt
#endif
#define __AT__ \
std::string(__FILE__) + std::string("::") + std::string(__func__) + \
std::string("(): ")
#define __SHORT_FORM_OF_FILE__ \
(strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define __SHORT_AT__ \
std::string(__SHORT_FORM_OF_FILE__) + std::string("::") + \
std::string(__func__) + std::string("(): ")
class Logger {
std::ostringstream os;
TLogLevel m_level = AARE_LOG_LEVEL;
public:
Logger() = default;
explicit Logger(TLogLevel level) : m_level(level){};
~Logger() {
// output in the destructor to allow for << syntax
os << RESET << '\n';
std::clog << os.str() << std::flush; // Single write
}
static TLogLevel &ReportingLevel() { // singleton, eh? TODO! Do we need a runtime option?
static TLogLevel reportingLevel = logDEBUG5;
return reportingLevel;
}
// Danger! This buffer needs as many elements as TLogLevel
static const char *Color(TLogLevel level) noexcept {
static const char *const colors[] = {
RED BOLD, YELLOW BOLD, BLUE, GREEN, RED, CYAN, MAGENTA,
RESET, RESET, RESET, RESET, RESET, RESET, RESET};
// out of bounds
if (level < 0 || level >= sizeof(colors) / sizeof(colors[0])) {
return RESET;
}
return colors[level];
}
// Danger! This buffer needs as many elements as TLogLevel
static std::string ToString(TLogLevel level) {
static const char *const buffer[] = {
"ERROR", "WARNING", "INFO", "INFO", "INFO",
"INFO", "INFO", "INFO", "DEBUG", "DEBUG1",
"DEBUG2", "DEBUG3", "DEBUG4", "DEBUG5"};
// out of bounds
if (level < 0 || level >= sizeof(buffer) / sizeof(buffer[0])) {
return "UNKNOWN";
}
return buffer[level];
}
std::ostringstream &Get() {
os << Color(m_level) << "- " << Timestamp() << " " << ToString(m_level)
<< ": ";
return os;
}
static std::string Timestamp() {
constexpr size_t buffer_len = 12;
char buffer[buffer_len];
time_t t;
::time(&t);
tm r;
strftime(buffer, buffer_len, "%X", localtime_r(&t, &r));
buffer[buffer_len - 1] = '\0';
struct timeval tv;
gettimeofday(&tv, nullptr);
constexpr size_t result_len = 100;
char result[result_len];
snprintf(result, result_len, "%s.%03ld", buffer,
static_cast<long>(tv.tv_usec) / 1000);
result[result_len - 1] = '\0';
return result;
}
};
// TODO! Do we need to keep the runtime option?
#define LOG(level) \
if (level > AARE_LOG_LEVEL) \
; \
else if (level > aare::Logger::ReportingLevel()) \
; \
else \
aare::Logger(level).Get()
} // namespace aare
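A minimal usage sketch for the new logger (assumes AARE_LOG_LEVEL is set to one of the TLogLevel values by the build, as the comment above notes):

#include <aare/logger.hpp>

void logging_example() {
    LOG(aare::logINFO) << "Processed frame " << 42;
    LOG(aare::logDEBUG3) << "noisy details"; // skipped if above AARE_LOG_LEVEL or the runtime ReportingLevel()
}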

View File

@ -0,0 +1,12 @@
#pragma once
#include <fstream>
#include <string>
namespace aare {
/**
* @brief Get the error message from an ifstream object
*/
std::string ifstream_error_msg(std::ifstream &ifs);
} // namespace aare
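A small sketch of the intended use of ifstream_error_msg (declared in the header shown above):

#include <fstream>
#include <iostream>

void ifstream_error_example() {
    std::ifstream f("data.bin", std::ios::binary);
    if (!f) {
        std::cerr << aare::ifstream_error_msg(f) << '\n'; // human-readable reason for the failure
    }
}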

View File

@ -1,22 +1,40 @@
[tool.scikit-build.metadata.version]
provider = "scikit_build_core.metadata.regex"
input = "VERSION"
regex = '^(?P<version>\d+(?:\.\d+)*(?:[\.\+\w]+)?)$'
result = "{version}"
[build-system]
requires = ["scikit-build-core>=0.10", "pybind11", "numpy"]
build-backend = "scikit_build_core.build"
[project]
name = "aare"
version = "2025.4.1"
dynamic = ["version"]
requires-python = ">=3.11"
dependencies = [
"numpy",
"matplotlib",
]
[tool.cibuildwheel]
build = "cp{311,312,313}-manylinux_x86_64"
[tool.scikit-build]
cmake.verbose = true
build.verbose = true
cmake.build-type = "Release"
install.components = ["python"]
[tool.scikit-build.cmake.define]
AARE_PYTHON_BINDINGS = "ON"
AARE_SYSTEM_LIBRARIES = "ON"
AARE_INSTALL_PYTHONEXT = "ON"
[tool.pytest.ini_options]
markers = [
"files: marks tests that need additional data (deselect with '-m \"not files\"')",

View File

@ -1,12 +1,13 @@
find_package (Python 3.10 COMPONENTS Interpreter Development REQUIRED)
find_package (Python 3.10 COMPONENTS Interpreter Development.Module REQUIRED)
set(PYBIND11_FINDPYTHON ON) # Needed for RH8
# Download or find pybind11 depending on configuration
if(AARE_FETCH_PYBIND11)
FetchContent_Declare(
pybind11
GIT_REPOSITORY https://github.com/pybind/pybind11
GIT_TAG v2.13.0
GIT_TAG v2.13.6
)
FetchContent_MakeAvailable(pybind11)
else()
@ -62,10 +63,16 @@ endforeach(FILE ${PYTHON_EXAMPLES})
if(AARE_INSTALL_PYTHONEXT)
install(TARGETS _aare
install(
TARGETS _aare
EXPORT "${TARGETS_EXPORT_NAME}"
LIBRARY DESTINATION aare
COMPONENT python
)
install(FILES ${PYTHON_FILES} DESTINATION aare)
install(
FILES ${PYTHON_FILES}
DESTINATION aare
COMPONENT python
)
endif()

View File

@ -1,5 +1,8 @@
from ._aare import ClusterFinder_Cluster3x3i
from ._aare import ClusterFinder_Cluster3x3i, ClusterFinder_Cluster2x2i, ClusterFinderMT_Cluster3x3i, ClusterFinderMT_Cluster2x2i, ClusterCollector_Cluster3x3i, ClusterCollector_Cluster2x2i
from ._aare import ClusterFileSink_Cluster3x3i, ClusterFileSink_Cluster2x2i
import numpy as np
def ClusterFinder(image_size, cluster_size, n_sigma=5, dtype = np.int32, capacity = 1024):
@ -9,6 +12,56 @@ def ClusterFinder(image_size, cluster_size, n_sigma=5, dtype = np.int32, capacit
"""
if dtype == np.int32 and cluster_size == (3,3):
return ClusterFinder_Cluster3x3i(image_size, n_sigma = n_sigma, capacity=capacity)
elif dtype == np.int32 and cluster_size == (2,2):
return ClusterFinder_Cluster2x2i(image_size, n_sigma = n_sigma, capacity=capacity)
else:
#TODO! add the other formats
raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32 is supported.")
def ClusterFinderMT(image_size, cluster_size = (3,3), dtype=np.int32, n_sigma=5, capacity = 1024, n_threads = 3):
"""
Factory function to create a ClusterFinderMT object. Provides a cleaner syntax for
the templated ClusterFinderMT in C++.
"""
if dtype == np.int32 and cluster_size == (3,3):
return ClusterFinderMT_Cluster3x3i(image_size, n_sigma = n_sigma,
capacity = capacity, n_threads = n_threads)
elif dtype == np.int32 and cluster_size == (2,2):
return ClusterFinderMT_Cluster2x2i(image_size, n_sigma = n_sigma,
capacity = capacity, n_threads = n_threads)
else:
#TODO! add the other formats
raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32 is supported.")
def ClusterCollector(clusterfindermt, cluster_size = (3,3), dtype=np.int32):
"""
Factory function to create a ClusterCollector object. Provides a cleaner syntax for
the templated ClusterCollector in C++.
"""
if dtype == np.int32 and cluster_size == (3,3):
return ClusterCollector_Cluster3x3i(clusterfindermt)
elif dtype == np.int32 and cluster_size == (2,2):
return ClusterCollector_Cluster2x2i(clusterfindermt)
else:
#TODO! add the other formats
raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32 is supported.")
def ClusterFileSink(clusterfindermt, cluster_file, dtype=np.int32):
"""
Factory function to create a ClusterFileSink object. Provides a cleaner syntax for
the templated ClusterFileSink in C++.
"""
if dtype == np.int32 and clusterfindermt.cluster_size == (3,3):
return ClusterFileSink_Cluster3x3i(clusterfindermt, cluster_file)
elif dtype == np.int32 and clusterfindermt.cluster_size == (2,2):
return ClusterFileSink_Cluster2x2i(clusterfindermt, cluster_file)
else:
#TODO! add the other formats
raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32 is supported.")

View File

@ -11,12 +11,17 @@ from ._aare import ROI
# from ._aare import ClusterFinderMT, ClusterCollector, ClusterFileSink, ClusterVector_i
from .ClusterFinder import ClusterFinder
from .ClusterFinder import ClusterFinder, ClusterCollector, ClusterFinderMT, ClusterFileSink
from .ClusterVector import ClusterVector
from ._aare import fit_gaus, fit_pol1
from ._aare import fit_gaus, fit_pol1, fit_scurve, fit_scurve2
from ._aare import Interpolator
from ._aare import calculate_eta2
from ._aare import apply_custom_weights
from .CtbRawFile import CtbRawFile
from .RawFile import RawFile
from .ScanParameters import ScanParameters

View File

@ -1 +1 @@
from ._aare import gaus, pol1
from ._aare import gaus, pol1, scurve, scurve2

View File

@ -1,79 +1,89 @@
import sys
sys.path.append('/home/l_msdetect/erik/aare/build')
from aare._aare import ClusterVector_i, Interpolator
import pickle
import numpy as np
import matplotlib.pyplot as plt
import boost_histogram as bh
import torch
import math
import time
from aare import RawSubFile, DetectorType, RawFile
from pathlib import Path
path = Path("/home/l_msdetect/erik/data/aare-test-data/raw/jungfrau/")
f = RawSubFile(path/"jungfrau_single_d0_f0_0.raw", DetectorType.Jungfrau, 512, 1024, 16)
# f = RawFile(path/"jungfrau_single_master_0.json")
# from aare._aare import ClusterVector_i, Interpolator
# import pickle
# import numpy as np
# import matplotlib.pyplot as plt
# import boost_histogram as bh
# import torch
# import math
# import time
def gaussian_2d(mx, my, sigma = 1, res=100, grid_size = 2):
"""
Generate a 2D gaussian at position mx, my, with sigma=sigma.
The gaussian is placed on a 2x2 pixel matrix with resolution
res in one dimension.
"""
x = torch.linspace(0, pixel_size*grid_size, res)
x,y = torch.meshgrid(x,x, indexing="ij")
return 1 / (2*math.pi*sigma**2) * \
torch.exp(-((x - my)**2 / (2*sigma**2) + (y - mx)**2 / (2*sigma**2)))
# def gaussian_2d(mx, my, sigma = 1, res=100, grid_size = 2):
# """
# Generate a 2D gaussian at position mx, my, with sigma=sigma.
# The gaussian is placed on a 2x2 pixel matrix with resolution
# res in one dimension.
# """
# x = torch.linspace(0, pixel_size*grid_size, res)
# x,y = torch.meshgrid(x,x, indexing="ij")
# return 1 / (2*math.pi*sigma**2) * \
# torch.exp(-((x - my)**2 / (2*sigma**2) + (y - mx)**2 / (2*sigma**2)))
scale = 1000 #Scale factor when converting to integer
pixel_size = 25 #um
grid = 2
resolution = 100
sigma_um = 10
xa = np.linspace(0,grid*pixel_size,resolution)
ticks = [0, 25, 50]
# scale = 1000 #Scale factor when converting to integer
# pixel_size = 25 #um
# grid = 2
# resolution = 100
# sigma_um = 10
# xa = np.linspace(0,grid*pixel_size,resolution)
# ticks = [0, 25, 50]
hit = np.array((20,20))
etahist_fname = "/home/l_msdetect/erik/tmp/test_hist.pkl"
# hit = np.array((20,20))
# etahist_fname = "/home/l_msdetect/erik/tmp/test_hist.pkl"
local_resolution = 99
grid_size = 3
xaxis = np.linspace(0,grid_size*pixel_size, local_resolution)
t = gaussian_2d(hit[0],hit[1], grid_size = grid_size, sigma = 10, res = local_resolution)
pixels = t.reshape(grid_size, t.shape[0] // grid_size, grid_size, t.shape[1] // grid_size).sum(axis = 3).sum(axis = 1)
pixels = pixels.numpy()
pixels = (pixels*scale).astype(np.int32)
v = ClusterVector_i(3,3)
v.push_back(1,1, pixels)
# local_resolution = 99
# grid_size = 3
# xaxis = np.linspace(0,grid_size*pixel_size, local_resolution)
# t = gaussian_2d(hit[0],hit[1], grid_size = grid_size, sigma = 10, res = local_resolution)
# pixels = t.reshape(grid_size, t.shape[0] // grid_size, grid_size, t.shape[1] // grid_size).sum(axis = 3).sum(axis = 1)
# pixels = pixels.numpy()
# pixels = (pixels*scale).astype(np.int32)
# v = ClusterVector_i(3,3)
# v.push_back(1,1, pixels)
with open(etahist_fname, "rb") as f:
hist = pickle.load(f)
eta = hist.view().copy()
etabinsx = np.array(hist.axes.edges.T[0].flat)
etabinsy = np.array(hist.axes.edges.T[1].flat)
ebins = np.array(hist.axes.edges.T[2].flat)
p = Interpolator(eta, etabinsx[0:-1], etabinsy[0:-1], ebins[0:-1])
# with open(etahist_fname, "rb") as f:
# hist = pickle.load(f)
# eta = hist.view().copy()
# etabinsx = np.array(hist.axes.edges.T[0].flat)
# etabinsy = np.array(hist.axes.edges.T[1].flat)
# ebins = np.array(hist.axes.edges.T[2].flat)
# p = Interpolator(eta, etabinsx[0:-1], etabinsy[0:-1], ebins[0:-1])
#Generate the hit
# #Generate the hit
tmp = p.interpolate(v)
print(f'tmp:{tmp}')
pos = np.array((tmp['x'], tmp['y']))*25
# tmp = p.interpolate(v)
# print(f'tmp:{tmp}')
# pos = np.array((tmp['x'], tmp['y']))*25
print(pixels)
fig, ax = plt.subplots(figsize = (7,7))
ax.pcolormesh(xaxis, xaxis, t)
ax.plot(*pos, 'o')
ax.set_xticks([0,25,50,75])
ax.set_yticks([0,25,50,75])
ax.set_xlim(0,75)
ax.set_ylim(0,75)
ax.grid()
print(f'{hit=}')
print(f'{pos=}')
# print(pixels)
# fig, ax = plt.subplots(figsize = (7,7))
# ax.pcolormesh(xaxis, xaxis, t)
# ax.plot(*pos, 'o')
# ax.set_xticks([0,25,50,75])
# ax.set_yticks([0,25,50,75])
# ax.set_xlim(0,75)
# ax.set_ylim(0,75)
# ax.grid()
# print(f'{hit=}')
# print(f'{pos=}')

View File

@ -21,16 +21,14 @@ using namespace aare;
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
template <typename Type, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
typename CoordType = uint16_t>
void define_ClusterVector(py::module &m, const std::string &typestr) {
using ClusterType =
Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType, void>;
using ClusterType = Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>;
auto class_name = fmt::format("ClusterVector_{}", typestr);
py::class_<ClusterVector<
Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType, void>, void>>(
Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>, void>>(
m, class_name.c_str(),
py::buffer_protocol())
@ -41,8 +39,13 @@ void define_ClusterVector(py::module &m, const std::string &typestr) {
self.push_back(cluster);
})
.def("sum", [](ClusterVector<ClusterType> &self) {
auto *vec = new std::vector<Type>(self.sum());
.def("sum",
[](ClusterVector<ClusterType> &self) {
auto *vec = new std::vector<Type>(self.sum());
return return_vector(vec);
})
.def("sum_2x2", [](ClusterVector<ClusterType> &self){
auto *vec = new std::vector<Type>(self.sum_2x2());
return return_vector(vec);
})
.def_property_readonly("size", &ClusterVector<ClusterType>::size)
@ -72,32 +75,30 @@ void define_ClusterVector(py::module &m, const std::string &typestr) {
);
});
// Free functions using ClusterVector
m.def("hitmap",
[](std::array<size_t, 2> image_size, ClusterVector<ClusterType> &cv) {
// Create a numpy array to hold the hitmap
// The shape of the array is (image_size[0], image_size[1])
// note that the python array is passed as [row, col] which
// is the opposite of the clusters [x,y]
py::array_t<int32_t> hitmap(image_size);
auto r = hitmap.mutable_unchecked<2>();
// Initialize hitmap to 0
for (py::ssize_t i = 0; i < r.shape(0); i++)
for (py::ssize_t j = 0; j < r.shape(1); j++)
r(i, j) = 0;
// Free functions using ClusterVector
m.def("hitmap",
[](std::array<size_t, 2> image_size, ClusterVector<ClusterType> &cv) {
// Create a numpy array to hold the hitmap
// The shape of the array is (image_size[0], image_size[1])
// note that the python array is passed as [row, col] which
// is the opposite of the clusters [x,y]
py::array_t<int32_t> hitmap(image_size);
auto r = hitmap.mutable_unchecked<2>();
// Loop over the clusters and increment the hitmap
// Skip out of bound clusters
for (const auto& cluster : cv) {
auto x = cluster.x;
auto y = cluster.y;
if(x<image_size[1] && y<image_size[0])
r(cluster.y, cluster.x) += 1;
}
// Initialize hitmap to 0
for (py::ssize_t i = 0; i < r.shape(0); i++)
for (py::ssize_t j = 0; j < r.shape(1); j++)
r(i, j) = 0;
return hitmap;
});
// Loop over the clusters and increment the hitmap
// Skip out of bound clusters
for (const auto &cluster : cv) {
auto x = cluster.x;
auto y = cluster.y;
if (x < image_size[1] && y < image_size[0])
r(cluster.y, cluster.x) += 1;
}
return hitmap;
});
}

View File

@ -26,17 +26,18 @@ template <typename Type, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
void define_cluster(py::module &m, const std::string &typestr) {
auto class_name = fmt::format("Cluster{}", typestr);
py::class_<Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType, void>>(
py::class_<Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>>(
m, class_name.c_str(), py::buffer_protocol())
.def(py::init([](uint8_t x, uint8_t y, py::array_t<Type> data) {
py::buffer_info buf_info = data.request();
Type *ptr = static_cast<Type *>(buf_info.ptr);
Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType, void> cluster;
Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType> cluster;
cluster.x = x;
cluster.y = y;
std::copy(ptr, ptr + ClusterSizeX * ClusterSizeY,
cluster.data); // Copy array contents
auto r = data.template unchecked<1>(); // no bounds checks
for (py::ssize_t i = 0; i < data.size(); ++i) {
cluster.data[i] = r(i);
}
return cluster;
}));
@ -64,9 +65,6 @@ void define_cluster(py::module &m, const std::string &typestr) {
*/
}
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
typename CoordType = uint16_t>
void define_cluster_finder_mt_bindings(py::module &m,
@ -95,6 +93,9 @@ void define_cluster_finder_mt_bindings(py::module &m,
return;
},
py::arg(), py::arg("frame_number") = 0)
.def_property_readonly("cluster_size", [](ClusterFinderMT<ClusterType, uint16_t, pd_type> &self){
return py::make_tuple(ClusterSizeX, ClusterSizeY);
})
.def("clear_pedestal",
&ClusterFinderMT<ClusterType, uint16_t, pd_type>::clear_pedestal)
.def("sync", &ClusterFinderMT<ClusterType, uint16_t, pd_type>::sync)
@ -206,6 +207,5 @@ void define_cluster_finder_bindings(py::module &m, const std::string &typestr) {
return;
},
py::arg(), py::arg("frame_number") = 0);
}
#pragma GCC diagnostic pop

View File

@ -59,9 +59,6 @@ void define_cluster_file_io_bindings(py::module &m,
self.set_gain_map(view);
})
// void set_gain_map(const GainMap &gain_map); //TODO do i need a
// gainmap constructor?
.def("close", &ClusterFile<ClusterType>::close)
.def("write_frame", &ClusterFile<ClusterType>::write_frame)
.def("__enter__", [](ClusterFile<ClusterType> &self) { return &self; })

View File

@ -10,6 +10,8 @@
#include "aare/decode.hpp"
// #include "aare/fClusterFileV2.hpp"
#include "np_helper.hpp"
#include <cstdint>
#include <filesystem>
#include <pybind11/iostream.h>
@ -32,7 +34,7 @@ m.def("adc_sar_05_decode64to16", [](py::array_t<uint8_t> input) {
}
//Create a 2D output array with the same shape as the input
std::vector<ssize_t> shape{input.shape(0), input.shape(1)/static_cast<int64_t>(bits_per_byte)};
std::vector<ssize_t> shape{input.shape(0), input.shape(1)/static_cast<ssize_t>(bits_per_byte)};
py::array_t<uint16_t> output(shape);
//Create a view of the input and output arrays
@ -53,7 +55,7 @@ m.def("adc_sar_04_decode64to16", [](py::array_t<uint8_t> input) {
}
//Create a 2D output array with the same shape as the input
std::vector<ssize_t> shape{input.shape(0), input.shape(1)/static_cast<int64_t>(bits_per_byte)};
std::vector<ssize_t> shape{input.shape(0), input.shape(1)/static_cast<ssize_t>(bits_per_byte)};
py::array_t<uint16_t> output(shape);
//Create a view of the input and output arrays
@ -65,35 +67,54 @@ m.def("adc_sar_04_decode64to16", [](py::array_t<uint8_t> input) {
return output;
});
py::class_<CtbRawFile>(m, "CtbRawFile")
.def(py::init<const std::filesystem::path &>())
.def("read_frame",
[](CtbRawFile &self) {
size_t image_size = self.image_size_in_bytes();
py::array image;
std::vector<ssize_t> shape;
shape.reserve(2);
shape.push_back(1);
shape.push_back(image_size);
m.def(
"apply_custom_weights",
[](py::array_t<uint16_t, py::array::c_style | py::array::forcecast> &input,
py::array_t<double, py::array::c_style | py::array::forcecast>
&weights) {
py::array_t<DetectorHeader> header(1);
// Create new array with same shape as the input array (uninitialized values)
py::buffer_info buf = input.request();
py::array_t<double> output(buf.shape);
// always read bytes
image = py::array_t<uint8_t>(shape);
// Use NDViews to call into the C++ library
auto weights_view = make_view_1d(weights);
NDView<uint16_t, 1> input_view(input.mutable_data(), {input.size()});
NDView<double, 1> output_view(output.mutable_data(), {output.size()});
self.read_into(
reinterpret_cast<std::byte *>(image.mutable_data()),
header.mutable_data());
apply_custom_weights(input_view, output_view, weights_view);
return output;
});
return py::make_tuple(header, image);
})
.def("seek", &CtbRawFile::seek)
.def("tell", &CtbRawFile::tell)
.def("master", &CtbRawFile::master)
py::class_<CtbRawFile>(m, "CtbRawFile")
.def(py::init<const std::filesystem::path &>())
.def("read_frame",
[](CtbRawFile &self) {
size_t image_size = self.image_size_in_bytes();
py::array image;
std::vector<ssize_t> shape;
shape.reserve(2);
shape.push_back(1);
shape.push_back(image_size);
.def_property_readonly("image_size_in_bytes",
&CtbRawFile::image_size_in_bytes)
py::array_t<DetectorHeader> header(1);
.def_property_readonly("frames_in_file", &CtbRawFile::frames_in_file);
// always read bytes
image = py::array_t<uint8_t>(shape);
}
self.read_into(reinterpret_cast<std::byte *>(image.mutable_data()),
header.mutable_data());
return py::make_tuple(header, image);
})
.def("seek", &CtbRawFile::seek)
.def("tell", &CtbRawFile::tell)
.def("master", &CtbRawFile::master)
.def_property_readonly("image_size_in_bytes",
&CtbRawFile::image_size_in_bytes)
.def_property_readonly("frames_in_file", &CtbRawFile::frames_in_file);
}

View File

@ -20,6 +20,9 @@
namespace py = pybind11;
using namespace ::aare;
//Disable warnings for unused parameters, as we ignore some
//in the __exit__ method
#pragma GCC diagnostic push
@ -195,7 +198,7 @@ void define_file_io_bindings(py::module &m) {
py::class_<ROI>(m, "ROI")
.def(py::init<>())
.def(py::init<int64_t, int64_t, int64_t, int64_t>(), py::arg("xmin"),
.def(py::init<ssize_t, ssize_t, ssize_t, ssize_t>(), py::arg("xmin"),
py::arg("xmax"), py::arg("ymin"), py::arg("ymax"))
.def_readwrite("xmin", &ROI::xmin)
.def_readwrite("xmax", &ROI::xmax)
@ -214,36 +217,9 @@ void define_file_io_bindings(py::module &m) {
py::class_<RawSubFile>(m, "RawSubFile")
.def(py::init<const std::filesystem::path &, DetectorType, size_t,
size_t, size_t>())
.def_property_readonly("bytes_per_frame", &RawSubFile::bytes_per_frame)
.def_property_readonly("pixels_per_frame",
&RawSubFile::pixels_per_frame)
.def("seek", &RawSubFile::seek)
.def("tell", &RawSubFile::tell)
.def_property_readonly("rows", &RawSubFile::rows)
.def_property_readonly("cols", &RawSubFile::cols)
.def("read_frame",
[](RawSubFile &self) {
const uint8_t item_size = self.bytes_per_pixel();
py::array image;
std::vector<ssize_t> shape;
shape.reserve(2);
shape.push_back(self.rows());
shape.push_back(self.cols());
if (item_size == 1) {
image = py::array_t<uint8_t>(shape);
} else if (item_size == 2) {
image = py::array_t<uint16_t>(shape);
} else if (item_size == 4) {
image = py::array_t<uint32_t>(shape);
}
fmt::print("item_size: {} rows: {} cols: {}\n", item_size, self.rows(), self.cols());
self.read_into(
reinterpret_cast<std::byte *>(image.mutable_data()));
return image;
});
#pragma GCC diagnostic pop
// py::class_<ClusterHeader>(m, "ClusterHeader")

View File

@ -55,6 +55,47 @@ void define_fit_bindings(py::module &m) {
)",
py::arg("x"), py::arg("par"));
m.def(
"scurve",
[](py::array_t<double, py::array::c_style | py::array::forcecast> x,
py::array_t<double, py::array::c_style | py::array::forcecast> par) {
auto x_view = make_view_1d(x);
auto par_view = make_view_1d(par);
auto y = new NDArray<double, 1>{aare::func::scurve(x_view, par_view)};
return return_image_data(y);
},
R"(
Evaluate a 1D scurve function for all points in x using parameters par.
Parameters
----------
x : array_like
The points at which to evaluate the scurve function.
par : array_like
The parameters of the scurve function. The first element is the background slope, the second element is the background intercept, the third element is the mean, the fourth element is the standard deviation, the fifth element is inflexion point count number, and the sixth element is C.
)",
py::arg("x"), py::arg("par"));
m.def(
"scurve2",
[](py::array_t<double, py::array::c_style | py::array::forcecast> x,
py::array_t<double, py::array::c_style | py::array::forcecast> par) {
auto x_view = make_view_1d(x);
auto par_view = make_view_1d(par);
auto y = new NDArray<double, 1>{aare::func::scurve2(x_view, par_view)};
return return_image_data(y);
},
R"(
Evaluate a 1D scurve2 function for all points in x using parameters par.
Parameters
----------
x : array_like
The points at which to evaluate the scurve function.
par : array_like
The parameters of the scurve2 function. The first element is the background slope, the second element is the background intercept, the third element is the mean, the fourth element is the standard deviation, the fifth element is inflexion point count number, and the sixth element is C.
)",
py::arg("x"), py::arg("par"));
m.def(
"fit_gaus",
@ -235,6 +276,180 @@ n_threads : int, optional
R"(
Fit a 1D polynomial to data with error estimates.
Parameters
----------
x : array_like
The x values.
y : array_like
The y values.
y_err : array_like
The error in the y values.
n_threads : int, optional
The number of threads to use. Default is 4.
)",
py::arg("x"), py::arg("y"), py::arg("y_err"), py::arg("n_threads") = 4);
//=========
m.def(
"fit_scurve",
[](py::array_t<double, py::array::c_style | py::array::forcecast> x,
py::array_t<double, py::array::c_style | py::array::forcecast> y,
int n_threads) {
if (y.ndim() == 3) {
auto par = new NDArray<double, 3>{};
auto x_view = make_view_1d(x);
auto y_view = make_view_3d(y);
*par = aare::fit_scurve(x_view, y_view, n_threads);
return return_image_data(par);
} else if (y.ndim() == 1) {
auto par = new NDArray<double, 1>{};
auto x_view = make_view_1d(x);
auto y_view = make_view_1d(y);
*par = aare::fit_scurve(x_view, y_view);
return return_image_data(par);
} else {
throw std::runtime_error("Data must be 1D or 3D");
}
},
py::arg("x"), py::arg("y"), py::arg("n_threads") = 4);
m.def(
"fit_scurve",
[](py::array_t<double, py::array::c_style | py::array::forcecast> x,
py::array_t<double, py::array::c_style | py::array::forcecast> y,
py::array_t<double, py::array::c_style | py::array::forcecast> y_err,
int n_threads) {
if (y.ndim() == 3) {
auto par = new NDArray<double, 3>({y.shape(0), y.shape(1), 6});
auto par_err =
new NDArray<double, 3>({y.shape(0), y.shape(1), 6});
auto y_view = make_view_3d(y);
auto y_view_err = make_view_3d(y_err);
auto x_view = make_view_1d(x);
auto chi2 = new NDArray<double, 2>({y.shape(0), y.shape(1)});
aare::fit_scurve(x_view, y_view, y_view_err, par->view(),
par_err->view(), chi2->view(), n_threads);
return py::dict("par"_a = return_image_data(par),
"par_err"_a = return_image_data(par_err),
"chi2"_a = return_image_data(chi2),
"Ndf"_a = y.shape(2) - 2);
} else if (y.ndim() == 1) {
auto par = new NDArray<double, 1>({2});
auto par_err = new NDArray<double, 1>({2});
auto y_view = make_view_1d(y);
auto y_view_err = make_view_1d(y_err);
auto x_view = make_view_1d(x);
double chi2 = 0;
aare::fit_scurve(x_view, y_view, y_view_err, par->view(),
par_err->view(), chi2);
return py::dict("par"_a = return_image_data(par),
"par_err"_a = return_image_data(par_err),
"chi2"_a = chi2, "Ndf"_a = y.size() - 2);
} else {
throw std::runtime_error("Data must be 1D or 3D");
}
},
R"(
Fit a 1D scurve to data with error estimates.
Parameters
----------
x : array_like
The x values.
y : array_like
The y values.
y_err : array_like
The error in the y values.
n_threads : int, optional
The number of threads to use. Default is 4.
)",
py::arg("x"), py::arg("y"), py::arg("y_err"), py::arg("n_threads") = 4);
m.def(
"fit_scurve2",
[](py::array_t<double, py::array::c_style | py::array::forcecast> x,
py::array_t<double, py::array::c_style | py::array::forcecast> y,
int n_threads) {
if (y.ndim() == 3) {
auto par = new NDArray<double, 3>{};
auto x_view = make_view_1d(x);
auto y_view = make_view_3d(y);
*par = aare::fit_scurve2(x_view, y_view, n_threads);
return return_image_data(par);
} else if (y.ndim() == 1) {
auto par = new NDArray<double, 1>{};
auto x_view = make_view_1d(x);
auto y_view = make_view_1d(y);
*par = aare::fit_scurve2(x_view, y_view);
return return_image_data(par);
} else {
throw std::runtime_error("Data must be 1D or 3D");
}
},
py::arg("x"), py::arg("y"), py::arg("n_threads") = 4);
m.def(
"fit_scurve2",
[](py::array_t<double, py::array::c_style | py::array::forcecast> x,
py::array_t<double, py::array::c_style | py::array::forcecast> y,
py::array_t<double, py::array::c_style | py::array::forcecast> y_err,
int n_threads) {
if (y.ndim() == 3) {
auto par = new NDArray<double, 3>({y.shape(0), y.shape(1), 6});
auto par_err =
new NDArray<double, 3>({y.shape(0), y.shape(1), 6});
auto y_view = make_view_3d(y);
auto y_view_err = make_view_3d(y_err);
auto x_view = make_view_1d(x);
auto chi2 = new NDArray<double, 2>({y.shape(0), y.shape(1)});
aare::fit_scurve2(x_view, y_view, y_view_err, par->view(),
par_err->view(), chi2->view(), n_threads);
return py::dict("par"_a = return_image_data(par),
"par_err"_a = return_image_data(par_err),
"chi2"_a = return_image_data(chi2),
"Ndf"_a = y.shape(2) - 2);
} else if (y.ndim() == 1) {
auto par = new NDArray<double, 1>({6});
auto par_err = new NDArray<double, 1>({6});
auto y_view = make_view_1d(y);
auto y_view_err = make_view_1d(y_err);
auto x_view = make_view_1d(x);
double chi2 = 0;
aare::fit_scurve2(x_view, y_view, y_view_err, par->view(),
par_err->view(), chi2);
return py::dict("par"_a = return_image_data(par),
"par_err"_a = return_image_data(par_err),
"chi2"_a = chi2, "Ndf"_a = y.size() - 2);
} else {
throw std::runtime_error("Data must be 1D or 3D");
}
},
R"(
Fit a 1D scurve2 to data with error estimates.
Parameters
----------
x : array_like

View File

@ -10,12 +10,12 @@
#include "file.hpp"
#include "fit.hpp"
#include "interpolation.hpp"
#include "pedestal.hpp"
#include "pixel_map.hpp"
#include "raw_file.hpp"
#include "raw_sub_file.hpp"
#include "raw_master_file.hpp"
#include "raw_file.hpp"
#include "pixel_map.hpp"
#include "var_cluster.hpp"
#include "pedestal.hpp"
#include "jungfrau_data_file.hpp"
// Pybind stuff
@ -27,6 +27,7 @@ namespace py = pybind11;
PYBIND11_MODULE(_aare, m) {
define_file_io_bindings(m);
define_raw_file_io_bindings(m);
define_raw_sub_file_io_bindings(m);
define_ctb_raw_file_io_bindings(m);
define_raw_master_file_bindings(m);
define_var_cluster_finder_bindings(m);

View File

@ -13,7 +13,7 @@ namespace py = pybind11;
using namespace aare;
// Pass image data back to python as a numpy array
template <typename T, int64_t Ndim>
template <typename T, ssize_t Ndim>
py::array return_image_data(aare::NDArray<T, Ndim> *image) {
py::capsule free_when_done(image, [](void *f) {

View File

@ -32,7 +32,7 @@ void define_raw_file_io_bindings(py::module &m) {
shape.push_back(self.cols());
// return headers from all subfiles
py::array_t<DetectorHeader> header(self.n_mod());
py::array_t<DetectorHeader> header(self.n_modules());
const uint8_t item_size = self.bytes_per_pixel();
if (item_size == 1) {
@ -61,10 +61,10 @@ void define_raw_file_io_bindings(py::module &m) {
// return headers from all subfiles
py::array_t<DetectorHeader> header;
if (self.n_mod() == 1) {
if (self.n_modules() == 1) {
header = py::array_t<DetectorHeader>(n_frames);
} else {
header = py::array_t<DetectorHeader>({self.n_mod(), n_frames});
header = py::array_t<DetectorHeader>({self.n_modules(), n_frames});
}
// py::array_t<DetectorHeader> header({self.n_mod(), n_frames});
@ -100,7 +100,7 @@ void define_raw_file_io_bindings(py::module &m) {
.def_property_readonly("cols", &RawFile::cols)
.def_property_readonly("bitdepth", &RawFile::bitdepth)
.def_property_readonly("geometry", &RawFile::geometry)
.def_property_readonly("n_mod", &RawFile::n_mod)
.def_property_readonly("n_modules", &RawFile::n_modules)
.def_property_readonly("detector_type", &RawFile::detector_type)
.def_property_readonly("master", &RawFile::master);
}

110
python/src/raw_sub_file.hpp Normal file
View File

@ -0,0 +1,110 @@
#include "aare/CtbRawFile.hpp"
#include "aare/File.hpp"
#include "aare/Frame.hpp"
#include "aare/RawFile.hpp"
#include "aare/RawMasterFile.hpp"
#include "aare/RawSubFile.hpp"
#include "aare/defs.hpp"
// #include "aare/fClusterFileV2.hpp"
#include <cstdint>
#include <filesystem>
#include <pybind11/iostream.h>
#include <pybind11/numpy.h>
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <pybind11/stl/filesystem.h>
#include <string>
namespace py = pybind11;
using namespace ::aare;
auto read_frame_from_RawSubFile(RawSubFile &self) {
py::array_t<DetectorHeader> header(1);
const uint8_t item_size = self.bytes_per_pixel();
std::vector<ssize_t> shape{static_cast<ssize_t>(self.rows()),
static_cast<ssize_t>(self.cols())};
py::array image;
if (item_size == 1) {
image = py::array_t<uint8_t>(shape);
} else if (item_size == 2) {
image = py::array_t<uint16_t>(shape);
} else if (item_size == 4) {
image = py::array_t<uint32_t>(shape);
}
self.read_into(reinterpret_cast<std::byte *>(image.mutable_data()),
header.mutable_data());
return py::make_tuple(header, image);
}
auto read_n_frames_from_RawSubFile(RawSubFile &self, size_t n_frames) {
py::array_t<DetectorHeader> header(n_frames);
const uint8_t item_size = self.bytes_per_pixel();
std::vector<ssize_t> shape{
static_cast<ssize_t>(n_frames),
static_cast<ssize_t>(self.rows()),
static_cast<ssize_t>(self.cols())
};
py::array image;
if (item_size == 1) {
image = py::array_t<uint8_t>(shape);
} else if (item_size == 2) {
image = py::array_t<uint16_t>(shape);
} else if (item_size == 4) {
image = py::array_t<uint32_t>(shape);
}
self.read_into(reinterpret_cast<std::byte *>(image.mutable_data()), n_frames,
header.mutable_data());
return py::make_tuple(header, image);
}
//Disable warnings for unused parameters, as we ignore some
//in the __exit__ method
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
void define_raw_sub_file_io_bindings(py::module &m) {
py::class_<RawSubFile>(m, "RawSubFile")
.def(py::init<const std::filesystem::path &, DetectorType, size_t,
size_t, size_t>())
.def_property_readonly("bytes_per_frame", &RawSubFile::bytes_per_frame)
.def_property_readonly("pixels_per_frame",
&RawSubFile::pixels_per_frame)
.def_property_readonly("bytes_per_pixel", &RawSubFile::bytes_per_pixel)
.def("seek", &RawSubFile::seek)
.def("tell", &RawSubFile::tell)
.def_property_readonly("rows", &RawSubFile::rows)
.def_property_readonly("cols", &RawSubFile::cols)
.def_property_readonly("frames_in_file", &RawSubFile::frames_in_file)
.def("read_frame", &read_frame_from_RawSubFile)
.def("read_n", &read_n_frames_from_RawSubFile)
.def("read", [](RawSubFile &self){
self.seek(0);
auto n_frames = self.frames_in_file();
return read_n_frames_from_RawSubFile(self, n_frames);
})
.def("__enter__", [](RawSubFile &self) { return &self; })
.def("__exit__",
[](RawSubFile &self,
const std::optional<pybind11::type> &exc_type,
const std::optional<pybind11::object> &exc_value,
const std::optional<pybind11::object> &traceback) {
})
.def("__iter__", [](RawSubFile &self) { return &self; })
.def("__next__", [](RawSubFile &self) {
try {
return read_frame_from_RawSubFile(self);
} catch (std::runtime_error &e) {
throw py::stop_iteration();
}
});
}
#pragma GCC diagnostic pop

View File

@ -0,0 +1,39 @@
import pytest
import numpy as np
from aare import RawSubFile, DetectorType
@pytest.mark.files
def test_read_a_jungfrau_RawSubFile(test_data_path):
# Starting with f1 there are now 7 frames left in the series of files
with RawSubFile(test_data_path / "raw/jungfrau/jungfrau_single_d0_f1_0.raw", DetectorType.Jungfrau, 512, 1024, 16) as f:
assert f.frames_in_file == 7
headers, frames = f.read()
assert headers.size == 7
assert frames.shape == (7, 512, 1024)
for i,h in zip(range(4,11,1), headers):
assert h["frameNumber"] == i
# Compare to canned data using numpy
data = np.load(test_data_path / "raw/jungfrau/jungfrau_single_0.npy")
assert np.all(data[3:] == frames)
@pytest.mark.files
def test_iterate_over_a_jungfrau_RawSubFile(test_data_path):
data = np.load(test_data_path / "raw/jungfrau/jungfrau_single_0.npy")
# Given the first subfile in a series we can read all frames from f0, f1, f2...fN
with RawSubFile(test_data_path / "raw/jungfrau/jungfrau_single_d0_f0_0.raw", DetectorType.Jungfrau, 512, 1024, 16) as f:
i = 0
for header, frame in f:
assert header["frameNumber"] == i+1
assert np.all(frame == data[i])
i += 1
assert i == 10
assert header["frameNumber"] == 10

View File

@ -21,20 +21,22 @@ using ClusterTypes =
auto get_test_parameters() {
return GENERATE(
std::make_tuple(ClusterTypes{Cluster<int, 2, 2>{0, 0, {1, 2, 3, 1}}},
Eta2<int>{2. / 3, 3. / 4, corner::cBottomLeft, 7}),
Eta2<int>{2. / 3, 3. / 4,
static_cast<int>(corner::cBottomLeft), 7}),
std::make_tuple(
ClusterTypes{Cluster<int, 3, 3>{0, 0, {1, 2, 3, 4, 5, 6, 1, 2, 7}}},
Eta2<int>{6. / 11, 2. / 7, corner::cTopRight, 20}),
Eta2<int>{6. / 11, 2. / 7, static_cast<int>(corner::cTopRight),
20}),
std::make_tuple(ClusterTypes{Cluster<int, 5, 5>{
0, 0, {1, 6, 7, 6, 5, 4, 3, 2, 1, 8, 8, 9, 2,
0, 0, {1, 6, 7, 6, 5, 4, 3, 2, 1, 2, 8, 9, 8,
1, 4, 5, 6, 7, 8, 4, 1, 1, 1, 1, 1}}},
Eta2<int>{9. / 17, 5. / 13, 8, 28}),
Eta2<int>{8. / 17, 7. / 15, 9, 30}),
std::make_tuple(
ClusterTypes{Cluster<int, 4, 2>{0, 0, {1, 4, 7, 2, 5, 6, 4, 3}}},
Eta2<int>{7. / 11, 6. / 10, 1, 21}),
Eta2<int>{4. / 10, 4. / 11, 1, 21}),
std::make_tuple(
ClusterTypes{Cluster<int, 2, 3>{0, 0, {1, 3, 2, 3, 4, 2}}},
Eta2<int>{3. / 5, 4. / 6, 1, 11}));
Eta2<int>{3. / 5, 2. / 5, 1, 11}));
}
TEST_CASE("compute_largest_2x2_subcluster", "[eta_calculation]") {
@ -61,14 +63,13 @@ TEST_CASE("calculate_eta2", "[eta_calculation]") {
CHECK(eta.sum == expected_eta.sum);
}
// 3x3 cluster layout (rotated to match the cBottomLeft enum):
// 6, 7, 8
// 3, 4, 5
// 0, 1, 2
//3x3 cluster layout (rotated to match the cBottomLeft enum):
// 6, 7, 8
// 3, 4, 5
// 0, 1, 2
TEST_CASE("Calculate eta2 for a 3x3 int32 cluster with the largest 2x2 sum in the bottom left",
TEST_CASE("Calculate eta2 for a 3x3 int32 cluster with the largest 2x2 sum in "
"the bottom left",
"[eta_calculation]") {
// Create a 3x3 cluster
@ -84,45 +85,43 @@ TEST_CASE("Calculate eta2 for a 3x3 int32 cluster with the largest 2x2 sum in th
cl.data[6] = 8;
cl.data[7] = 2;
cl.data[8] = 3;
// 8, 2, 3
// 20, 50, 3
// 30, 23, 5
auto eta = calculate_eta2(cl);
CHECK(eta.c == corner::cBottomLeft);
CHECK(eta.c == static_cast<int>(corner::cBottomLeft));
CHECK(eta.x == 50.0 / (20 + 50)); // 4/(3+4)
CHECK(eta.y == 50.0 / (23 + 50)); // 4/(1+4)
CHECK(eta.sum == 30+23+20+50);
CHECK(eta.sum == 30 + 23 + 20 + 50);
}
TEST_CASE("Calculate eta2 for a 3x3 int32 cluster with the largest 2x2 sum in the top left",
"[eta_calculation]") {
TEST_CASE("Calculate eta2 for a 3x3 int32 cluster with the largest 2x2 sum in "
"the top left",
"[eta_calculation]") {
// Create a 3x3 cluster
Cluster<int32_t, 3, 3> cl;
cl.x = 0;
cl.y = 0;
cl.data[0] = 8;
cl.data[1] = 12;
cl.data[2] = 5;
cl.data[3] = 77;
cl.data[4] = 80;
cl.data[5] = 3;
cl.data[6] = 82;
cl.data[7] = 91;
cl.data[8] = 3;
// Create a 3x3 cluster
Cluster<int32_t, 3, 3> cl;
cl.x = 0;
cl.y = 0;
cl.data[0] = 8;
cl.data[1] = 12;
cl.data[2] = 5;
cl.data[3] = 77;
cl.data[4] = 80;
cl.data[5] = 3;
cl.data[6] = 82;
cl.data[7] = 91;
cl.data[8] = 3;
// 82, 91, 3
// 77, 80, 3
// 8, 12, 5
auto eta = calculate_eta2(cl);
CHECK(eta.c == corner::cTopLeft);
CHECK(eta.x == 80. / (77 + 80)); // 4/(3+4)
CHECK(eta.y == 91.0 / (91 + 80)); // 7/(7+4)
CHECK(eta.sum == 77+80+82+91);
// 82, 91, 3
// 77, 80, 3
// 8, 12, 5
auto eta = calculate_eta2(cl);
CHECK(eta.c == static_cast<int>(corner::cTopLeft));
CHECK(eta.x == 80. / (77 + 80)); // 4/(3+4)
CHECK(eta.y == 91.0 / (91 + 80)); // 7/(7+4)
CHECK(eta.sum == 77 + 80 + 82 + 91);
}

View File

@ -14,19 +14,6 @@
using namespace aare;
TEST_CASE("Correct Instantiation of Cluster and ClusterVector",
"[.cluster][.instantiation]") {
CHECK(is_valid_cluster<double, 3, 3>);
CHECK(is_valid_cluster<double, 3, 2>);
CHECK(not is_valid_cluster<int, 0, 0>);
CHECK(not is_valid_cluster<std::string, 2, 2>);
CHECK(not is_valid_cluster<int, 2, 2, double>);
CHECK(not is_cluster_v<int>);
CHECK(is_cluster_v<Cluster<int, 3, 3>>);
}
TEST_CASE("Test sum of Cluster", "[.cluster]") {
Cluster<int, 2, 2> cluster{0, 0, {1, 2, 3, 4}};

402
src/ClusterFile.cpp Normal file
View File

@ -0,0 +1,402 @@
#include "aare/ClusterFile.hpp"
#include <algorithm>
namespace aare {
ClusterFile::ClusterFile(const std::filesystem::path &fname, size_t chunk_size,
const std::string &mode)
: m_chunk_size(chunk_size), m_mode(mode) {
if (mode == "r") {
fp = fopen(fname.c_str(), "rb");
if (!fp) {
throw std::runtime_error("Could not open file for reading: " +
fname.string());
}
} else if (mode == "w") {
fp = fopen(fname.c_str(), "wb");
if (!fp) {
throw std::runtime_error("Could not open file for writing: " +
fname.string());
}
} else if (mode == "a") {
fp = fopen(fname.c_str(), "ab");
if (!fp) {
throw std::runtime_error("Could not open file for appending: " +
fname.string());
}
} else {
throw std::runtime_error("Unsupported mode: " + mode);
}
}
void ClusterFile::set_roi(ROI roi){
m_roi = roi;
}
void ClusterFile::set_noise_map(const NDView<int32_t, 2> noise_map){
m_noise_map = NDArray<int32_t, 2>(noise_map);
}
void ClusterFile::set_gain_map(const NDView<double, 2> gain_map){
m_gain_map = NDArray<double, 2>(gain_map);
// The gain map is passed as ADU/keV. To avoid dividing when applying the
// gain map we invert it here
for (auto &item : m_gain_map->view()) {
item = 1.0 / item;
}
}
ClusterFile::~ClusterFile() { close(); }
void ClusterFile::close() {
if (fp) {
fclose(fp);
fp = nullptr;
}
}
void ClusterFile::write_frame(const ClusterVector<int32_t> &clusters) {
if (m_mode != "w" && m_mode != "a") {
throw std::runtime_error("File not opened for writing");
}
if (!(clusters.cluster_size_x() == 3) &&
!(clusters.cluster_size_y() == 3)) {
throw std::runtime_error("Only 3x3 clusters are supported");
}
//First write the frame number - 4 bytes
int32_t frame_number = clusters.frame_number();
if(fwrite(&frame_number, sizeof(frame_number), 1, fp)!=1){
throw std::runtime_error(LOCATION + "Could not write frame number");
}
//Then write the number of clusters - 4 bytes
uint32_t n_clusters = clusters.size();
if(fwrite(&n_clusters, sizeof(n_clusters), 1, fp)!=1){
throw std::runtime_error(LOCATION + "Could not write number of clusters");
}
//Now write the clusters in the frame
if(fwrite(clusters.data(), clusters.item_size(), clusters.size(), fp)!=clusters.size()){
throw std::runtime_error(LOCATION + "Could not write clusters");
}
}
ClusterVector<int32_t> ClusterFile::read_clusters(size_t n_clusters){
if (m_mode != "r") {
throw std::runtime_error("File not opened for reading");
}
if (m_noise_map || m_roi){
return read_clusters_with_cut(n_clusters);
}else{
return read_clusters_without_cut(n_clusters);
}
}
ClusterVector<int32_t> ClusterFile::read_clusters_without_cut(size_t n_clusters) {
if (m_mode != "r") {
throw std::runtime_error("File not opened for reading");
}
ClusterVector<int32_t> clusters(3,3, n_clusters);
int32_t iframe = 0; // frame number needs to be 4 bytes!
size_t nph_read = 0;
uint32_t nn = m_num_left;
uint32_t nph = m_num_left; // number of clusters in frame needs to be 4
// auto buf = reinterpret_cast<Cluster3x3 *>(clusters.data());
auto buf = clusters.data();
// if there are photons left from previous frame read them first
if (nph) {
if (nph > n_clusters) {
// if we have more photons left in the frame then photons to read we
// read directly the requested number
nn = n_clusters;
} else {
nn = nph;
}
nph_read += fread((buf + nph_read*clusters.item_size()),
clusters.item_size(), nn, fp);
m_num_left = nph - nn; // write back the number of photons left
}
if (nph_read < n_clusters) {
// keep on reading frames and photons until reaching n_clusters
while (fread(&iframe, sizeof(iframe), 1, fp)) {
clusters.set_frame_number(iframe);
// read number of clusters in frame
if (fread(&nph, sizeof(nph), 1, fp)) {
if (nph > (n_clusters - nph_read))
nn = n_clusters - nph_read;
else
nn = nph;
nph_read += fread((buf + nph_read*clusters.item_size()),
clusters.item_size(), nn, fp);
m_num_left = nph - nn;
}
if (nph_read >= n_clusters)
break;
}
}
// Resize the vector to the number of clusters.
// No new allocation, only change bounds.
clusters.resize(nph_read);
if(m_gain_map)
clusters.apply_gain_map(m_gain_map->view());
return clusters;
}
ClusterVector<int32_t> ClusterFile::read_clusters_with_cut(size_t n_clusters) {
ClusterVector<int32_t> clusters(3,3);
clusters.reserve(n_clusters);
// if there are photons left from previous frame read them first
if (m_num_left) {
while(m_num_left && clusters.size() < n_clusters){
Cluster3x3 c = read_one_cluster();
if(is_selected(c)){
clusters.push_back(c.x, c.y, reinterpret_cast<std::byte*>(c.data));
}
}
}
// we did not have enough clusters left in the previous frame
// keep on reading frames until reaching n_clusters
if (clusters.size() < n_clusters) {
// sanity check
if (m_num_left) {
throw std::runtime_error(LOCATION + "Entered second loop with clusters left\n");
}
int32_t frame_number = 0; // frame number needs to be 4 bytes!
while (fread(&frame_number, sizeof(frame_number), 1, fp)) {
if (fread(&m_num_left, sizeof(m_num_left), 1, fp)) {
clusters.set_frame_number(frame_number); //cluster vector will hold the last frame number
while(m_num_left && clusters.size() < n_clusters){
Cluster3x3 c = read_one_cluster();
if(is_selected(c)){
clusters.push_back(c.x, c.y, reinterpret_cast<std::byte*>(c.data));
}
}
}
// we have enough clusters, break out of the outer while loop
if (clusters.size() >= n_clusters)
break;
}
}
if(m_gain_map)
clusters.apply_gain_map(m_gain_map->view());
return clusters;
}
Cluster3x3 ClusterFile::read_one_cluster(){
Cluster3x3 c;
auto rc = fread(&c, sizeof(c), 1, fp);
if (rc != 1) {
throw std::runtime_error(LOCATION + "Could not read cluster");
}
--m_num_left;
return c;
}
ClusterVector<int32_t> ClusterFile::read_frame(){
if (m_mode != "r") {
throw std::runtime_error(LOCATION + "File not opened for reading");
}
if (m_noise_map || m_roi){
return read_frame_with_cut();
}else{
return read_frame_without_cut();
}
}
ClusterVector<int32_t> ClusterFile::read_frame_without_cut() {
if (m_mode != "r") {
throw std::runtime_error("File not opened for reading");
}
if (m_num_left) {
throw std::runtime_error(
"There are still photons left in the last frame");
}
int32_t frame_number;
if (fread(&frame_number, sizeof(frame_number), 1, fp) != 1) {
throw std::runtime_error(LOCATION + "Could not read frame number");
}
int32_t n_clusters; // Saved as 32bit integer in the cluster file
if (fread(&n_clusters, sizeof(n_clusters), 1, fp) != 1) {
throw std::runtime_error(LOCATION + "Could not read number of clusters");
}
ClusterVector<int32_t> clusters(3, 3, n_clusters);
clusters.set_frame_number(frame_number);
if (fread(clusters.data(), clusters.item_size(), n_clusters, fp) !=
static_cast<size_t>(n_clusters)) {
throw std::runtime_error(LOCATION + "Could not read clusters");
}
clusters.resize(n_clusters);
if (m_gain_map)
clusters.apply_gain_map(m_gain_map->view());
return clusters;
}
ClusterVector<int32_t> ClusterFile::read_frame_with_cut() {
if (m_mode != "r") {
throw std::runtime_error("File not opened for reading");
}
if (m_num_left) {
throw std::runtime_error(
"There are still photons left in the last frame");
}
int32_t frame_number;
if (fread(&frame_number, sizeof(frame_number), 1, fp) != 1) {
throw std::runtime_error("Could not read frame number");
}
if (fread(&m_num_left, sizeof(m_num_left), 1, fp) != 1) {
throw std::runtime_error("Could not read number of clusters");
}
ClusterVector<int32_t> clusters(3, 3);
clusters.reserve(m_num_left);
clusters.set_frame_number(frame_number);
while(m_num_left){
Cluster3x3 c = read_one_cluster();
if(is_selected(c)){
clusters.push_back(c.x, c.y, reinterpret_cast<std::byte*>(c.data));
}
}
if (m_gain_map)
clusters.apply_gain_map(m_gain_map->view());
return clusters;
}
bool ClusterFile::is_selected(Cluster3x3 &cl) {
//Should fail fast
if (m_roi) {
if (!(m_roi->contains(cl.x, cl.y))) {
return false;
}
}
if (m_noise_map){
int32_t sum_1x1 = cl.data[4]; // central pixel
int32_t sum_2x2 = cl.sum_2x2(); // highest sum of 2x2 subclusters
int32_t sum_3x3 = cl.sum(); // sum of all pixels
auto noise = (*m_noise_map)(cl.y, cl.x); //TODO! check if this is correct
if (sum_1x1 <= noise || sum_2x2 <= 2 * noise || sum_3x3 <= 3 * noise) {
return false;
}
}
//we passed all checks
return true;
}
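Spelled out, the noise cut in is_selected keeps a cluster only if
Q_{1\times1} > \sigma, \qquad Q_{2\times2} > 2\sigma, \qquad Q_{3\times3} > 3\sigma
where Q_{1x1} is the central pixel, Q_{2x2} the largest 2x2 sub-sum, Q_{3x3} the full sum and \sigma the per-pixel value from the noise map; the factors 2 and 3 presumably reflect the \sqrt{N} growth of uncorrelated noise over 4 and 9 pixels (an interpretation, not stated in the code).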
NDArray<double, 2> calculate_eta2(ClusterVector<int> &clusters) {
//TODO! make work with 2x2 clusters
NDArray<double, 2> eta2({static_cast<int64_t>(clusters.size()), 2});
if (clusters.cluster_size_x() == 3 && clusters.cluster_size_y() == 3) {
for (size_t i = 0; i < clusters.size(); i++) {
auto e = calculate_eta2(clusters.at<Cluster3x3>(i));
eta2(i, 0) = e.x;
eta2(i, 1) = e.y;
}
}else if(clusters.cluster_size_x() == 2 && clusters.cluster_size_y() == 2){
for (size_t i = 0; i < clusters.size(); i++) {
auto e = calculate_eta2(clusters.at<Cluster2x2>(i));
eta2(i, 0) = e.x;
eta2(i, 1) = e.y;
}
}else{
throw std::runtime_error("Only 3x3 and 2x2 clusters are supported");
}
return eta2;
}
/**
* @brief Calculate the eta2 values for a 3x3 cluster and return them in an Eta2 struct
* containing etay, etax and the corner of the cluster.
*/
Eta2 calculate_eta2(Cluster3x3 &cl) {
Eta2 eta{};
std::array<int32_t, 4> tot2;
tot2[0] = cl.data[0] + cl.data[1] + cl.data[3] + cl.data[4];
tot2[1] = cl.data[1] + cl.data[2] + cl.data[4] + cl.data[5];
tot2[2] = cl.data[3] + cl.data[4] + cl.data[6] + cl.data[7];
tot2[3] = cl.data[4] + cl.data[5] + cl.data[7] + cl.data[8];
auto c = std::max_element(tot2.begin(), tot2.end()) - tot2.begin();
eta.sum = tot2[c];
switch (c) {
case cBottomLeft:
if ((cl.data[3] + cl.data[4]) != 0)
eta.x =
static_cast<double>(cl.data[4]) / (cl.data[3] + cl.data[4]);
if ((cl.data[1] + cl.data[4]) != 0)
eta.y =
static_cast<double>(cl.data[4]) / (cl.data[1] + cl.data[4]);
eta.c = cBottomLeft;
break;
case cBottomRight:
if ((cl.data[4] + cl.data[5]) != 0)
eta.x =
static_cast<double>(cl.data[5]) / (cl.data[4] + cl.data[5]);
if ((cl.data[1] + cl.data[4]) != 0)
eta.y =
static_cast<double>(cl.data[4]) / (cl.data[1] + cl.data[4]);
eta.c = cBottomRight;
break;
case cTopLeft:
if ((cl.data[3] + cl.data[4]) != 0)
eta.x =
static_cast<double>(cl.data[4]) / (cl.data[3] + cl.data[4]);
if ((cl.data[7] + cl.data[4]) != 0)
eta.y =
static_cast<double>(cl.data[7]) / (cl.data[7] + cl.data[4]);
eta.c = cTopLeft;
break;
case cTopRight:
if ((cl.data[5] + cl.data[4]) != 0)
eta.x =
static_cast<double>(cl.data[5]) / (cl.data[5] + cl.data[4]);
if ((cl.data[7] + cl.data[4]) != 0)
eta.y =
static_cast<double>(cl.data[7]) / (cl.data[7] + cl.data[4]);
eta.c = cTopRight;
break;
// no default to allow compiler to warn about missing cases
}
return eta;
}
Eta2 calculate_eta2(Cluster2x2 &cl) {
Eta2 eta{};
if ((cl.data[0] + cl.data[1]) != 0)
eta.x = static_cast<double>(cl.data[1]) / (cl.data[0] + cl.data[1]);
if ((cl.data[0] + cl.data[2]) != 0)
eta.y = static_cast<double>(cl.data[2]) / (cl.data[0] + cl.data[2]);
eta.sum = cl.data[0] + cl.data[1] + cl.data[2]+ cl.data[3];
eta.c = cBottomLeft; //TODO! This is not correct, but need to put something
return eta;
}
} // namespace aare
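As a worked example of the eta2 computation above, consider the test cluster with data = {0,1,...,8}: the four 2x2 sub-sums are 8, 12, 20 and 24, so the top-right corner is selected and eta_x = 5/9, eta_y = 7/11. A minimal stand-alone sketch reproducing those numbers:
// Sketch: reproduce calculate_eta2(Cluster3x3&) for data = {0,...,8}.
#include <algorithm>
#include <array>
#include <cstdio>

int main() {
    std::array<int, 9> d{0, 1, 2, 3, 4, 5, 6, 7, 8};
    std::array<int, 4> tot2{
        d[0] + d[1] + d[3] + d[4],  // bottom-left  = 8
        d[1] + d[2] + d[4] + d[5],  // bottom-right = 12
        d[3] + d[4] + d[6] + d[7],  // top-left     = 20
        d[4] + d[5] + d[7] + d[8]}; // top-right    = 24
    auto c = std::max_element(tot2.begin(), tot2.end()) - tot2.begin();
    // For this data the top-right 2x2 block wins (c == 3), so:
    double etax = static_cast<double>(d[5]) / (d[5] + d[4]); // 5/9  ~ 0.556
    double etay = static_cast<double>(d[7]) / (d[7] + d[4]); // 7/11 ~ 0.636
    std::printf("corner=%ld sum=%d etax=%.3f etay=%.3f\n",
                static_cast<long>(c), tot2[c], etax, etay);
}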

View File

@ -10,8 +10,9 @@ using aare::Cluster;
using aare::ClusterFile;
using aare::ClusterVector;
TEST_CASE("Read one frame from a cluster file", "[.files]") {
// We know that the frame has 97 clusters
//We know that the frame has 97 clusters
auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust";
REQUIRE(std::filesystem::exists(fpath));
@ -19,14 +20,14 @@ TEST_CASE("Read one frame from a cluster file", "[.files]") {
auto clusters = f.read_frame();
CHECK(clusters.size() == 97);
CHECK(clusters.frame_number() == 135);
CHECK(clusters.at(0).x == 1);
CHECK(clusters.at(0).y == 200);
CHECK(clusters[0].x == 1);
CHECK(clusters[0].y == 200);
int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
CHECK(std::equal(std::begin(clusters.at(0).data),
std::end(clusters.at(0).data),
CHECK(std::equal(std::begin(clusters[0].data), std::end(clusters[0].data),
std::begin(expected_cluster_data)));
}
TEST_CASE("Read one frame using ROI", "[.files]") {
// We know that the frame has 97 clusters
auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust";
@ -45,21 +46,22 @@ TEST_CASE("Read one frame using ROI", "[.files]") {
// Check that all clusters are within the ROI
for (size_t i = 0; i < clusters.size(); i++) {
auto c = clusters.at(i);
auto c = clusters[i];
REQUIRE(c.x >= roi.xmin);
REQUIRE(c.x <= roi.xmax);
REQUIRE(c.y >= roi.ymin);
REQUIRE(c.y <= roi.ymax);
}
CHECK(clusters.at(0).x == 1);
CHECK(clusters.at(0).y == 200);
CHECK(clusters[0].x == 1);
CHECK(clusters[0].y == 200);
int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
CHECK(std::equal(std::begin(clusters.at(0).data),
std::end(clusters.at(0).data),
CHECK(std::equal(std::begin(clusters[0].data), std::end(clusters[0].data),
std::begin(expected_cluster_data)));
}
TEST_CASE("Read clusters from single frame file", "[.files]") {
// frame_number, num_clusters [135] 97
@ -162,6 +164,7 @@ TEST_CASE("Read clusters from single frame file", "[.files]") {
// [ 97 296] [864 865 866 867 868 869 870 871 872]
auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust";
REQUIRE(std::filesystem::exists(fpath));
SECTION("Read fewer clusters than available") {
@ -170,10 +173,10 @@ TEST_CASE("Read clusters from single frame file", "[.files]") {
REQUIRE(clusters.size() == 50);
REQUIRE(clusters.frame_number() == 135);
int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
REQUIRE(clusters.at(0).x == 1);
REQUIRE(clusters.at(0).y == 200);
CHECK(std::equal(std::begin(clusters.at(0).data),
std::end(clusters.at(0).data),
REQUIRE(clusters[0].x == 1);
REQUIRE(clusters[0].y == 200);
CHECK(std::equal(std::begin(clusters[0].data),
std::end(clusters[0].data),
std::begin(expected_cluster_data)));
}
SECTION("Read more clusters than available") {
@ -183,10 +186,10 @@ TEST_CASE("Read clusters from single frame file", "[.files]") {
REQUIRE(clusters.size() == 97);
REQUIRE(clusters.frame_number() == 135);
int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
REQUIRE(clusters.at(0).x == 1);
REQUIRE(clusters.at(0).y == 200);
CHECK(std::equal(std::begin(clusters.at(0).data),
std::end(clusters.at(0).data),
REQUIRE(clusters[0].x == 1);
REQUIRE(clusters[0].y == 200);
CHECK(std::equal(std::begin(clusters[0].data),
std::end(clusters[0].data),
std::begin(expected_cluster_data)));
}
SECTION("Read all clusters") {
@ -194,11 +197,11 @@ TEST_CASE("Read clusters from single frame file", "[.files]") {
auto clusters = f.read_clusters(97);
REQUIRE(clusters.size() == 97);
REQUIRE(clusters.frame_number() == 135);
REQUIRE(clusters.at(0).x == 1);
REQUIRE(clusters.at(0).y == 200);
REQUIRE(clusters[0].x == 1);
REQUIRE(clusters[0].y == 200);
int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
CHECK(std::equal(std::begin(clusters.at(0).data),
std::end(clusters.at(0).data),
CHECK(std::equal(std::begin(clusters[0].data),
std::end(clusters[0].data),
std::begin(expected_cluster_data)));
}
}
@ -220,11 +223,10 @@ TEST_CASE("Read clusters from single frame file with ROI", "[.files]") {
CHECK(clusters.size() == 10);
CHECK(clusters.frame_number() == 135);
CHECK(clusters.at(0).x == 1);
CHECK(clusters.at(0).y == 200);
CHECK(clusters[0].x == 1);
CHECK(clusters[0].y == 200);
int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
CHECK(std::equal(std::begin(clusters.at(0).data),
std::end(clusters.at(0).data),
CHECK(std::equal(std::begin(clusters[0].data), std::end(clusters[0].data),
std::begin(expected_cluster_data)));
}
@ -309,21 +311,21 @@ TEST_CASE("Write cluster with potential padding", "[.files][.ClusterFile]") {
CHECK(read_cluster_vector.size() == 2);
CHECK(read_cluster_vector.frame_number() == 0);
CHECK(read_cluster_vector.at(0).x == clustervec.at(0).x);
CHECK(read_cluster_vector.at(0).y == clustervec.at(0).y);
CHECK(std::equal(clustervec.at(0).data, clustervec.at(0).data + 9,
read_cluster_vector.at(0).data, [](double a, double b) {
return std::abs(a - b) <
std::numeric_limits<double>::epsilon();
}));
CHECK(read_cluster_vector[0].x == clustervec[0].x);
CHECK(read_cluster_vector[0].y == clustervec[0].y);
CHECK(std::equal(
clustervec[0].data.begin(), clustervec[0].data.end(),
read_cluster_vector[0].data.begin(), [](double a, double b) {
return std::abs(a - b) < std::numeric_limits<double>::epsilon();
}));
CHECK(read_cluster_vector.at(1).x == clustervec.at(1).x);
CHECK(read_cluster_vector.at(1).y == clustervec.at(1).y);
CHECK(std::equal(clustervec.at(1).data, std::end(clustervec.at(1).data),
read_cluster_vector.at(1).data, [](double a, double b) {
return std::abs(a - b) <
std::numeric_limits<double>::epsilon();
}));
CHECK(read_cluster_vector[1].x == clustervec[1].x);
CHECK(read_cluster_vector[1].y == clustervec[1].y);
CHECK(std::equal(
clustervec[1].data.begin(), clustervec[1].data.end(),
read_cluster_vector[1].data.begin(), [](double a, double b) {
return std::abs(a - b) < std::numeric_limits<double>::epsilon();
}));
}
TEST_CASE("Read frame and modify cluster data", "[.files][.ClusterFile]") {
@ -341,10 +343,9 @@ TEST_CASE("Read frame and modify cluster data", "[.files][.ClusterFile]") {
Cluster<int32_t, 3, 3>{0, 0, {0, 1, 2, 3, 4, 5, 6, 7, 8}});
CHECK(clusters.size() == 98);
CHECK(clusters.at(0).x == 1);
CHECK(clusters.at(0).y == 200);
CHECK(clusters[0].x == 1);
CHECK(clusters[0].y == 200);
CHECK(std::equal(std::begin(clusters.at(0).data),
std::end(clusters.at(0).data),
CHECK(std::equal(std::begin(clusters[0].data), std::end(clusters[0].data),
std::begin(expected_cluster_data)));
}

View File

@ -0,0 +1,99 @@
#include "aare/ClusterFinderMT.hpp"
#include "aare/Cluster.hpp"
#include "aare/ClusterCollector.hpp"
#include "aare/File.hpp"
#include "test_config.hpp"
#include <catch2/catch_test_macros.hpp>
#include <filesystem>
#include <memory>
using namespace aare;
// wrapper class to access private member variables for testing
template <typename ClusterType, typename FRAME_TYPE = uint16_t,
typename PEDESTAL_TYPE = double>
class ClusterFinderMTWrapper
: public ClusterFinderMT<ClusterType, FRAME_TYPE, PEDESTAL_TYPE> {
public:
ClusterFinderMTWrapper(Shape<2> image_size, PEDESTAL_TYPE nSigma = 5.0,
size_t capacity = 2000, size_t n_threads = 3)
: ClusterFinderMT<ClusterType, FRAME_TYPE, PEDESTAL_TYPE>(
image_size, nSigma, capacity, n_threads) {}
size_t get_m_input_queues_size() const {
return this->m_input_queues.size();
}
size_t get_m_output_queues_size() const {
return this->m_output_queues.size();
}
size_t get_m_cluster_finders_size() const {
return this->m_cluster_finders.size();
}
bool m_output_queues_are_empty() const {
for (auto &queue : this->m_output_queues) {
if (!queue->isEmpty())
return false;
}
return true;
}
bool m_input_queues_are_empty() const {
for (auto &queue : this->m_input_queues) {
if (!queue->isEmpty())
return false;
}
return true;
}
bool m_sink_is_empty() const { return this->m_sink.isEmpty(); }
size_t m_sink_size() const { return this->m_sink.sizeGuess(); }
};
TEST_CASE("multithreaded cluster finder", "[.files][.ClusterFinder]") {
auto fpath = "/mnt/sls_det_storage/matterhorn_data/aare_test_data/"
"Moench03new/cu_half_speed_master_4.json";
File file(fpath);
size_t n_threads = 2;
size_t n_frames_pd = 10;
using ClusterType = Cluster<int32_t, 3, 3>;
ClusterFinderMTWrapper<ClusterType> cf(
{static_cast<int64_t>(file.rows()), static_cast<int64_t>(file.cols())},
5, 2000, n_threads); // frame type not specified here; default uint16_t assumed
CHECK(cf.get_m_input_queues_size() == n_threads);
CHECK(cf.get_m_output_queues_size() == n_threads);
CHECK(cf.get_m_cluster_finders_size() == n_threads);
CHECK(cf.m_output_queues_are_empty() == true);
CHECK(cf.m_input_queues_are_empty() == true);
for (size_t i = 0; i < n_frames_pd; ++i) {
cf.find_clusters(file.read_frame().view<uint16_t>());
}
cf.stop();
CHECK(cf.m_output_queues_are_empty() == true);
CHECK(cf.m_input_queues_are_empty() == true);
CHECK(cf.m_sink_size() == n_frames_pd);
ClusterCollector<ClusterType> clustercollector(&cf);
clustercollector.stop();
CHECK(cf.m_sink_size() == 0);
auto clustervec = clustercollector.steal_clusters();
// CHECK(clustervec.size() == ) // don't know how many clusters to expect
}

View File

@ -60,7 +60,7 @@ TEST_CASE("ClusterVector 2x2 int32_t capacity 4, push back then read",
REQUIRE(cv.size() == 1);
REQUIRE(cv.capacity() == 4);
auto c2 = cv.at(0);
auto c2 = cv[0];
// Check that the data is the same
REQUIRE(c1.x == c2.x);

View File

@ -21,7 +21,7 @@ FilePtr &FilePtr::operator=(FilePtr &&other) {
FILE *FilePtr::get() { return fp_; }
int64_t FilePtr::tell() {
ssize_t FilePtr::tell() {
auto pos = ftell(fp_);
if (pos == -1)
throw std::runtime_error(fmt::format("Error getting file position: {}", error_msg()));

View File

@ -34,6 +34,30 @@ NDArray<double, 1> pol1(NDView<double, 1> x, NDView<double, 1> par) {
return y;
}
double scurve(const double x, const double * par) {
return (par[0] + par[1] * x) + 0.5 * (1 + erf((x - par[2]) / (sqrt(2) * par[3]))) * (par[4] + par[5] * (x - par[2]));
}
NDArray<double, 1> scurve(NDView<double, 1> x, NDView<double, 1> par) {
NDArray<double, 1> y({x.shape()}, 0);
for (ssize_t i = 0; i < x.size(); i++) {
y(i) = scurve(x(i), par.data());
}
return y;
}
double scurve2(const double x, const double * par) {
return (par[0] + par[1] * x) + 0.5 * (1 - erf((x - par[2]) / (sqrt(2) * par[3]))) * (par[4] + par[5] * (x - par[2]));
}
NDArray<double, 1> scurve2(NDView<double, 1> x, NDView<double, 1> par) {
NDArray<double, 1> y({x.shape()}, 0);
for (ssize_t i = 0; i < x.size(); i++) {
y(i) = scurve2(x(i), par.data());
}
return y;
}
} // namespace func
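In conventional notation the model implemented by scurve (and by scurve2 with the opposite sign in front of the error function) is
f(x) = p_0 + p_1 x + \tfrac{1}{2}\Bigl(1 \pm \operatorname{erf}\bigl(\tfrac{x - p_2}{\sqrt{2}\,p_3}\bigr)\Bigr)\bigl(p_4 + p_5\,(x - p_2)\bigr)
that is, a linear baseline p_0 + p_1 x plus a smooth step centred at p_2 with width p_3, whose plateau has height p_4 and its own slope p_5 (a restatement of the code above, for reference).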
NDArray<double, 1> fit_gaus(NDView<double, 1> x, NDView<double, 1> y) {
@ -81,7 +105,7 @@ std::array<double, 3> gaus_init_par(const NDView<double, 1> x, const NDView<doub
auto delta = x[1] - x[0];
start_par[2] =
std::count_if(y.begin(), y.end(),
[e, delta](double val) { return val > *e / 2; }) *
[e](double val) { return val > *e / 2; }) *
delta / 2.35;
return start_par;
@ -273,4 +297,229 @@ NDArray<double, 3> fit_pol1(NDView<double, 1> x, NDView<double, 3> y,
return result;
}
// ~~ S-CURVES ~~
// SCURVE --
std::array<double, 6> scurve_init_par(const NDView<double, 1> x, const NDView<double, 1> y){
// Estimate the initial parameters for the fit
std::array<double, 6> start_par{0, 0, 0, 0, 0, 0};
auto ymax = std::max_element(y.begin(), y.end());
auto ymin = std::min_element(y.begin(), y.end());
start_par[4] = *ymin + (*ymax - *ymin) / 2;
// Find the first x where the corresponding y value is above the threshold (start_par[4])
for (ssize_t i = 0; i < y.size(); ++i) {
if (y[i] >= start_par[4]) {
start_par[2] = x[i];
break; // Exit the loop after finding the first valid x
}
}
start_par[3] = 2 * sqrt(start_par[2]);
start_par[0] = 100;
start_par[1] = 0.25;
start_par[5] = 1;
return start_par;
}
// - No error
NDArray<double, 1> fit_scurve(NDView<double, 1> x, NDView<double, 1> y) {
NDArray<double, 1> result = scurve_init_par(x, y);
lm_status_struct status;
lmcurve(result.size(), result.data(), x.size(), x.data(), y.data(),
aare::func::scurve, &lm_control_double, &status);
return result;
}
NDArray<double, 3> fit_scurve(NDView<double, 1> x, NDView<double, 3> y, int n_threads) {
NDArray<double, 3> result({y.shape(0), y.shape(1), 6}, 0);
auto process = [&x, &y, &result](ssize_t first_row, ssize_t last_row) {
for (ssize_t row = first_row; row < last_row; row++) {
for (ssize_t col = 0; col < y.shape(1); col++) {
NDView<double, 1> values(&y(row, col, 0), {y.shape(2)});
auto res = fit_scurve(x, values);
result(row, col, 0) = res(0);
result(row, col, 1) = res(1);
result(row, col, 2) = res(2);
result(row, col, 3) = res(3);
result(row, col, 4) = res(4);
result(row, col, 5) = res(5);
}
}
};
auto tasks = split_task(0, y.shape(0), n_threads);
RunInParallel(process, tasks);
return result;
}
// - Error
void fit_scurve(NDView<double, 1> x, NDView<double, 1> y, NDView<double, 1> y_err,
NDView<double, 1> par_out, NDView<double, 1> par_err_out, double& chi2) {
// Check that we have the correct sizes
if (y.size() != x.size() || y.size() != y_err.size() ||
par_out.size() != 6 || par_err_out.size() != 6) {
throw std::runtime_error("Data, x, data_err must have the same size "
"and par_out, par_err_out must have size 6");
}
lm_status_struct status;
par_out = scurve_init_par(x, y);
std::array<double, 36> cov = {0}; // size 6x6
// std::array<double, 4> cov{0, 0, 0, 0};
lmcurve2(par_out.size(), par_out.data(), par_err_out.data(), cov.data(),
x.size(), x.data(), y.data(), y_err.data(), aare::func::scurve,
&lm_control_double, &status);
// Calculate chi2
chi2 = 0;
for (ssize_t i = 0; i < y.size(); i++) {
chi2 += std::pow((y(i) - func::scurve(x(i), par_out.data())) / y_err(i), 2);
}
}
void fit_scurve(NDView<double, 1> x, NDView<double, 3> y, NDView<double, 3> y_err,
NDView<double, 3> par_out, NDView<double, 3> par_err_out, NDView<double, 2> chi2_out,
int n_threads) {
auto process = [&](ssize_t first_row, ssize_t last_row) {
for (ssize_t row = first_row; row < last_row; row++) {
for (ssize_t col = 0; col < y.shape(1); col++) {
NDView<double, 1> y_view(&y(row, col, 0), {y.shape(2)});
NDView<double, 1> y_err_view(&y_err(row, col, 0),
{y_err.shape(2)});
NDView<double, 1> par_out_view(&par_out(row, col, 0),
{par_out.shape(2)});
NDView<double, 1> par_err_out_view(&par_err_out(row, col, 0),
{par_err_out.shape(2)});
fit_scurve(x, y_view, y_err_view, par_out_view, par_err_out_view, chi2_out(row, col));
}
}
};
auto tasks = split_task(0, y.shape(0), n_threads);
RunInParallel(process, tasks);
}
// SCURVE2 ---
std::array<double, 6> scurve2_init_par(const NDView<double, 1> x, const NDView<double, 1> y){
// Estimate the initial parameters for the fit
std::array<double, 6> start_par{0, 0, 0, 0, 0, 0};
auto ymax = std::max_element(y.begin(), y.end());
auto ymin = std::min_element(y.begin(), y.end());
start_par[4] = *ymin + (*ymax - *ymin) / 2;
// Find the first x where the corresponding y value is below the threshold (start_par[4])
for (ssize_t i = 0; i < y.size(); ++i) {
if (y[i] <= start_par[4]) {
start_par[2] = x[i];
break; // Exit the loop after finding the first valid x
}
}
start_par[3] = 2 * sqrt(start_par[2]);
start_par[0] = 100;
start_par[1] = 0.25;
start_par[5] = -1;
return start_par;
}
// - No error
NDArray<double, 1> fit_scurve2(NDView<double, 1> x, NDView<double, 1> y) {
NDArray<double, 1> result = scurve2_init_par(x, y);
lm_status_struct status;
lmcurve(result.size(), result.data(), x.size(), x.data(), y.data(),
aare::func::scurve2, &lm_control_double, &status);
return result;
}
NDArray<double, 3> fit_scurve2(NDView<double, 1> x, NDView<double, 3> y, int n_threads) {
NDArray<double, 3> result({y.shape(0), y.shape(1), 6}, 0);
auto process = [&x, &y, &result](ssize_t first_row, ssize_t last_row) {
for (ssize_t row = first_row; row < last_row; row++) {
for (ssize_t col = 0; col < y.shape(1); col++) {
NDView<double, 1> values(&y(row, col, 0), {y.shape(2)});
auto res = fit_scurve2(x, values);
result(row, col, 0) = res(0);
result(row, col, 1) = res(1);
result(row, col, 2) = res(2);
result(row, col, 3) = res(3);
result(row, col, 4) = res(4);
result(row, col, 5) = res(5);
}
}
};
auto tasks = split_task(0, y.shape(0), n_threads);
RunInParallel(process, tasks);
return result;
}
// - Error
void fit_scurve2(NDView<double, 1> x, NDView<double, 1> y, NDView<double, 1> y_err,
NDView<double, 1> par_out, NDView<double, 1> par_err_out, double& chi2) {
// Check that we have the correct sizes
if (y.size() != x.size() || y.size() != y_err.size() ||
par_out.size() != 6 || par_err_out.size() != 6) {
throw std::runtime_error("Data, x, data_err must have the same size "
"and par_out, par_err_out must have size 6");
}
lm_status_struct status;
par_out = scurve2_init_par(x, y);
std::array<double, 36> cov = {0}; // size 6x6
// std::array<double, 4> cov{0, 0, 0, 0};
lmcurve2(par_out.size(), par_out.data(), par_err_out.data(), cov.data(),
x.size(), x.data(), y.data(), y_err.data(), aare::func::scurve2,
&lm_control_double, &status);
// Calculate chi2
chi2 = 0;
for (ssize_t i = 0; i < y.size(); i++) {
chi2 += std::pow((y(i) - func::scurve2(x(i), par_out.data())) / y_err(i), 2);
}
}
void fit_scurve2(NDView<double, 1> x, NDView<double, 3> y, NDView<double, 3> y_err,
NDView<double, 3> par_out, NDView<double, 3> par_err_out, NDView<double, 2> chi2_out,
int n_threads) {
auto process = [&](ssize_t first_row, ssize_t last_row) {
for (ssize_t row = first_row; row < last_row; row++) {
for (ssize_t col = 0; col < y.shape(1); col++) {
NDView<double, 1> y_view(&y(row, col, 0), {y.shape(2)});
NDView<double, 1> y_err_view(&y_err(row, col, 0),
{y_err.shape(2)});
NDView<double, 1> par_out_view(&par_out(row, col, 0),
{par_out.shape(2)});
NDView<double, 1> par_err_out_view(&par_err_out(row, col, 0),
{par_err_out.shape(2)});
fit_scurve2(x, y_view, y_err_view, par_out_view, par_err_out_view, chi2_out(row, col));
}
}
};
auto tasks = split_task(0, y.shape(0), n_threads);
RunInParallel(process, tasks);
}
} // namespace aare
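A minimal usage sketch for the unweighted fit_scurve overload added above; the include paths are assumptions (adjust to the actual public headers), everything else uses only symbols that appear in this changeset.
// Sketch: fit a 1D threshold scan with the six-parameter s-curve model.
#include "aare/Fit.hpp"    // assumed header for aare::fit_scurve
#include "aare/NDView.hpp" // assumed header for aare::NDView / aare::Shape
#include <vector>

int main() {
    std::vector<double> x(100), y(100);
    // ... fill x with threshold values and y with measured counts ...
    aare::NDView<double, 1> xv(x.data(), aare::Shape<1>{static_cast<ssize_t>(x.size())});
    aare::NDView<double, 1> yv(y.data(), aare::Shape<1>{static_cast<ssize_t>(y.size())});
    auto par = aare::fit_scurve(xv, yv); // returns the fitted p0..p5
    return 0;
}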

View File

@ -89,7 +89,7 @@ void JungfrauDataFile::seek(size_t frame_index) {
: frame_index;
auto byte_offset = frame_offset * (m_bytes_per_frame + header_size);
m_fp.seek(byte_offset);
};
}
size_t JungfrauDataFile::tell() { return m_current_frame_index; }
size_t JungfrauDataFile::total_frames() const { return m_total_frames; }
@ -235,4 +235,4 @@ std::filesystem::path JungfrauDataFile::fpath(size_t file_index) const {
return m_path / fname;
}
} // namespace aare
} // namespace aare

View File

@ -44,9 +44,9 @@ TEST_CASE("3D NDArray from NDView"){
REQUIRE(image.size() == view.size());
REQUIRE(image.data() != view.data());
for(int64_t i=0; i<image.shape(0); i++){
for(int64_t j=0; j<image.shape(1); j++){
for(int64_t k=0; k<image.shape(2); k++){
for(ssize_t i=0; i<image.shape(0); i++){
for(ssize_t j=0; j<image.shape(1); j++){
for(ssize_t k=0; k<image.shape(2); k++){
REQUIRE(image(i, j, k) == view(i, j, k));
}
}
@ -54,7 +54,7 @@ TEST_CASE("3D NDArray from NDView"){
}
TEST_CASE("1D image") {
std::array<int64_t, 1> shape{{20}};
std::array<ssize_t, 1> shape{{20}};
NDArray<short, 1> img(shape, 3);
REQUIRE(img.size() == 20);
REQUIRE(img(5) == 3);
@ -71,7 +71,7 @@ TEST_CASE("Accessing a const object") {
}
TEST_CASE("Indexing of a 2D image") {
std::array<int64_t, 2> shape{{3, 7}};
std::array<ssize_t, 2> shape{{3, 7}};
NDArray<long> img(shape, 5);
for (uint32_t i = 0; i != img.size(); ++i) {
REQUIRE(img(i) == 5);
@ -114,7 +114,7 @@ TEST_CASE("Divide double by int") {
}
TEST_CASE("Elementwise multiplication of 3D image") {
std::array<int64_t, 3> shape{3, 4, 2};
std::array<ssize_t, 3> shape{3, 4, 2};
NDArray<double, 3> a{shape};
NDArray<double, 3> b{shape};
for (uint32_t i = 0; i != a.size(); ++i) {
@ -179,9 +179,9 @@ TEST_CASE("Compare two images") {
}
TEST_CASE("Size and shape matches") {
int64_t w = 15;
int64_t h = 75;
std::array<int64_t, 2> shape{w, h};
ssize_t w = 15;
ssize_t h = 75;
std::array<ssize_t, 2> shape{w, h};
NDArray<double> a{shape};
REQUIRE(a.size() == w * h);
REQUIRE(a.shape() == shape);
@ -224,7 +224,7 @@ TEST_CASE("Bitwise and on data") {
TEST_CASE("Elementwise operations on images") {
std::array<int64_t, 2> shape{5, 5};
std::array<ssize_t, 2> shape{5, 5};
double a_val = 3.0;
double b_val = 8.0;

View File

@ -3,6 +3,7 @@
#include <iostream>
#include <vector>
#include <numeric>
using aare::NDView;
using aare::Shape;
@ -21,10 +22,8 @@ TEST_CASE("Element reference 1D") {
}
TEST_CASE("Element reference 2D") {
std::vector<int> vec;
for (int i = 0; i != 12; ++i) {
vec.push_back(i);
}
std::vector<int> vec(12);
std::iota(vec.begin(), vec.end(), 0);
NDView<int, 2> data(vec.data(), Shape<2>{3, 4});
REQUIRE(vec.size() == static_cast<size_t>(data.size()));
@ -58,10 +57,8 @@ TEST_CASE("Element reference 3D") {
}
TEST_CASE("Plus and miuns with single value") {
std::vector<int> vec;
for (int i = 0; i != 12; ++i) {
vec.push_back(i);
}
std::vector<int> vec(12);
std::iota(vec.begin(), vec.end(), 0);
NDView<int, 2> data(vec.data(), Shape<2>{3, 4});
data += 5;
int i = 0;
@ -116,10 +113,8 @@ TEST_CASE("elementwise assign") {
}
TEST_CASE("iterators") {
std::vector<int> vec;
for (int i = 0; i != 12; ++i) {
vec.push_back(i);
}
std::vector<int> vec(12);
std::iota(vec.begin(), vec.end(), 0);
NDView<int, 1> data(vec.data(), Shape<1>{12});
int i = 0;
for (const auto item : data) {
@ -147,7 +142,7 @@ TEST_CASE("iterators") {
// for (int i = 0; i != 12; ++i) {
// vec.push_back(i);
// }
// std::vector<int64_t> shape{3, 4};
// std::vector<ssize_t> shape{3, 4};
// NDView<int, 2> data(vec.data(), shape);
// }
@ -156,8 +151,8 @@ TEST_CASE("divide with another span") {
std::vector<int> vec1{3, 2, 1};
std::vector<int> result{3, 6, 3};
NDView<int, 1> data0(vec0.data(), Shape<1>{static_cast<int64_t>(vec0.size())});
NDView<int, 1> data1(vec1.data(), Shape<1>{static_cast<int64_t>(vec1.size())});
NDView<int, 1> data0(vec0.data(), Shape<1>{static_cast<ssize_t>(vec0.size())});
NDView<int, 1> data1(vec1.data(), Shape<1>{static_cast<ssize_t>(vec1.size())});
data0 /= data1;
@ -167,27 +162,31 @@ TEST_CASE("divide with another span") {
}
TEST_CASE("Retrieve shape") {
std::vector<int> vec;
for (int i = 0; i != 12; ++i) {
vec.push_back(i);
}
std::vector<int> vec(12);
std::iota(vec.begin(), vec.end(), 0);
NDView<int, 2> data(vec.data(), Shape<2>{3, 4});
REQUIRE(data.shape()[0] == 3);
REQUIRE(data.shape()[1] == 4);
}
TEST_CASE("compare two views") {
std::vector<int> vec1;
for (int i = 0; i != 12; ++i) {
vec1.push_back(i);
}
std::vector<int> vec1(12);
std::iota(vec1.begin(), vec1.end(), 0);
NDView<int, 2> view1(vec1.data(), Shape<2>{3, 4});
std::vector<int> vec2;
for (int i = 0; i != 12; ++i) {
vec2.push_back(i);
}
std::vector<int> vec2(12);
std::iota(vec2.begin(), vec2.end(), 0);
NDView<int, 2> view2(vec2.data(), Shape<2>{3, 4});
REQUIRE((view1 == view2));
}
TEST_CASE("Create a view over a vector"){
std::vector<int> vec(12);
std::iota(vec.begin(), vec.end(), 0);
auto v = aare::make_view(vec);
REQUIRE(v.shape()[0] == 12);
REQUIRE(v[0] == 0);
REQUIRE(v[11] == 11);
}

View File

@ -72,8 +72,8 @@ void NumpyFile::get_frame_into(size_t frame_number, std::byte *image_buf) {
}
}
size_t NumpyFile::pixels_per_frame() { return m_pixels_per_frame; };
size_t NumpyFile::bytes_per_frame() { return m_bytes_per_frame; };
size_t NumpyFile::pixels_per_frame() { return m_pixels_per_frame; }
size_t NumpyFile::bytes_per_frame() { return m_bytes_per_frame; }
std::vector<Frame> NumpyFile::read_n(size_t n_frames) {
// TODO: implement this in a more efficient way
@ -197,4 +197,4 @@ void NumpyFile::load_metadata() {
m_header = {dtype, fortran_order, shape};
}
} // namespace aare
} // namespace aare

View File

@ -1,6 +1,8 @@
#include "aare/RawFile.hpp"
#include "aare/algorithm.hpp"
#include "aare/PixelMap.hpp"
#include "aare/defs.hpp"
#include "aare/logger.hpp"
#include "aare/geo_helpers.hpp"
#include <fmt/format.h>
@ -14,27 +16,18 @@ RawFile::RawFile(const std::filesystem::path &fname, const std::string &mode)
: m_master(fname) {
m_mode = mode;
if (mode == "r") {
n_subfiles = find_number_of_subfiles(); // f0,f1...fn
n_subfile_parts =
m_master.geometry().col * m_master.geometry().row; // d0,d1...dn
find_geometry();
if (m_master.roi()){
m_geometry = update_geometry_with_roi(m_geometry, m_master.roi().value());
}
open_subfiles();
} else {
throw std::runtime_error(LOCATION +
"Unsupported mode. Can only read RawFiles.");
" Unsupported mode. Can only read RawFiles.");
}
}
Frame RawFile::read_frame() { return get_frame(m_current_frame++); };
Frame RawFile::read_frame() { return get_frame(m_current_frame++); }
Frame RawFile::read_frame(size_t frame_number) {
seek(frame_number);
@ -52,13 +45,13 @@ void RawFile::read_into(std::byte *image_buf, size_t n_frames) {
void RawFile::read_into(std::byte *image_buf) {
return get_frame_into(m_current_frame++, image_buf);
};
}
void RawFile::read_into(std::byte *image_buf, DetectorHeader *header) {
return get_frame_into(m_current_frame++, image_buf, header);
};
}
void RawFile::read_into(std::byte *image_buf, size_t n_frames, DetectorHeader *header) {
// return get_frame_into(m_current_frame++, image_buf, header);
@ -67,12 +60,12 @@ void RawFile::read_into(std::byte *image_buf, size_t n_frames, DetectorHeader *h
this->get_frame_into(m_current_frame++, image_buf, header);
image_buf += bytes_per_frame();
if(header)
header+=n_mod();
header+=n_modules();
}
};
}
size_t RawFile::n_mod() const { return n_subfile_parts; }
size_t RawFile::n_modules() const { return m_master.n_modules(); }
size_t RawFile::bytes_per_frame() {
@ -94,9 +87,9 @@ void RawFile::seek(size_t frame_index) {
frame_index, total_frames()));
}
m_current_frame = frame_index;
};
}
size_t RawFile::tell() { return m_current_frame; };
size_t RawFile::tell() { return m_current_frame; }
size_t RawFile::total_frames() const { return m_master.frames_in_file(); }
size_t RawFile::rows() const { return m_geometry.pixels_y; }
@ -106,17 +99,11 @@ xy RawFile::geometry() { return m_master.geometry(); }
void RawFile::open_subfiles() {
if (m_mode == "r")
for (size_t i = 0; i != n_subfiles; ++i) {
auto v = std::vector<RawSubFile *>(n_subfile_parts);
for (size_t j = 0; j != n_subfile_parts; ++j) {
auto pos = m_geometry.module_pixel_0[j];
v[j] = new RawSubFile(m_master.data_fname(j, i),
m_master.detector_type(), pos.height,
pos.width, m_master.bitdepth(),
pos.row_index, pos.col_index);
}
subfiles.push_back(v);
for (size_t i = 0; i != n_modules(); ++i) {
auto pos = m_geometry.module_pixel_0[i];
m_subfiles.emplace_back(std::make_unique<RawSubFile>(
m_master.data_fname(i, 0), m_master.detector_type(), pos.height,
pos.width, m_master.bitdepth(), pos.row_index, pos.col_index));
}
else {
throw std::runtime_error(LOCATION +
@ -141,18 +128,6 @@ DetectorHeader RawFile::read_header(const std::filesystem::path &fname) {
return h;
}
int RawFile::find_number_of_subfiles() {
int n_files = 0;
// f0,f1...fn How many files is the data split into?
while (std::filesystem::exists(m_master.data_fname(0, n_files)))
n_files++; // increment after test
#ifdef AARE_VERBOSE
fmt::print("Found: {} subfiles\n", n_files);
#endif
return n_files;
}
RawMasterFile RawFile::master() const { return m_master; }
@ -168,7 +143,7 @@ void RawFile::find_geometry() {
uint16_t c{};
for (size_t i = 0; i < n_subfile_parts; i++) {
for (size_t i = 0; i < n_modules(); i++) {
auto h = read_header(m_master.data_fname(i, 0));
r = std::max(r, h.row);
c = std::max(c, h.column);
@ -210,70 +185,58 @@ size_t RawFile::bytes_per_pixel() const {
}
void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, DetectorHeader *header) {
LOG(logDEBUG) << "RawFile::get_frame_into(" << frame_index << ")";
if (frame_index >= total_frames()) {
throw std::runtime_error(LOCATION + "Frame number out of range");
}
std::vector<size_t> frame_numbers(n_subfile_parts);
std::vector<size_t> frame_indices(n_subfile_parts, frame_index);
std::vector<size_t> frame_numbers(n_modules());
std::vector<size_t> frame_indices(n_modules(), frame_index);
// sync the frame numbers
if (n_subfile_parts != 1) {
for (size_t part_idx = 0; part_idx != n_subfile_parts; ++part_idx) {
auto subfile_id = frame_index / m_master.max_frames_per_file();
if (subfile_id >= subfiles.size()) {
throw std::runtime_error(LOCATION +
" Subfile out of range. Possible missing data.");
}
frame_numbers[part_idx] =
subfiles[subfile_id][part_idx]->frame_number(
frame_index % m_master.max_frames_per_file());
if (n_modules() != 1) { //if we have more than one module
for (size_t part_idx = 0; part_idx != n_modules(); ++part_idx) {
frame_numbers[part_idx] = m_subfiles[part_idx]->frame_number(frame_index);
}
// 1. if all frame numbers are the same we are in sync and can stop
while (std::adjacent_find(frame_numbers.begin(), frame_numbers.end(),
std::not_equal_to<>()) !=
frame_numbers.end()) {
while (!all_equal(frame_numbers)) {
// 2. find the index of the minimum frame number,
auto min_frame_idx = std::distance(
frame_numbers.begin(),
std::min_element(frame_numbers.begin(), frame_numbers.end()));
// 3. increase its index and update its respective frame number
frame_indices[min_frame_idx]++;
// 4. if we can't increase its index => throw error
if (frame_indices[min_frame_idx] >= total_frames()) {
throw std::runtime_error(LOCATION +
"Frame number out of range");
}
auto subfile_id =
frame_indices[min_frame_idx] / m_master.max_frames_per_file();
frame_numbers[min_frame_idx] =
subfiles[subfile_id][min_frame_idx]->frame_number(
frame_indices[min_frame_idx] %
m_master.max_frames_per_file());
m_subfiles[min_frame_idx]->frame_number(frame_indices[min_frame_idx]);
}
}
if (m_master.geometry().col == 1) {
// get the part from each subfile and copy it to the frame
for (size_t part_idx = 0; part_idx != n_subfile_parts; ++part_idx) {
for (size_t part_idx = 0; part_idx != n_modules(); ++part_idx) {
auto corrected_idx = frame_indices[part_idx];
auto subfile_id = corrected_idx / m_master.max_frames_per_file();
if (subfile_id >= subfiles.size()) {
throw std::runtime_error(LOCATION +
" Subfile out of range. Possible missing data.");
}
// This is where we start writing
auto offset = (m_geometry.module_pixel_0[part_idx].origin_y * m_geometry.pixels_x +
m_geometry.module_pixel_0[part_idx].origin_x)*m_master.bitdepth()/8;
if (m_geometry.module_pixel_0[part_idx].origin_x!=0)
throw std::runtime_error(LOCATION + "Implementation error. x pos not 0.");
throw std::runtime_error(LOCATION + " Implementation error. x pos not 0.");
//TODO! Risk for out of range access
subfiles[subfile_id][part_idx]->seek(corrected_idx % m_master.max_frames_per_file());
subfiles[subfile_id][part_idx]->read_into(frame_buffer + offset, header);
//TODO! What if the files don't match?
m_subfiles[part_idx]->seek(corrected_idx);
m_subfiles[part_idx]->read_into(frame_buffer + offset, header);
if (header)
++header;
}
@ -282,26 +245,21 @@ void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, Detect
//TODO! should we read row by row?
// create a buffer large enough to hold a full module
auto bytes_per_part = m_master.pixels_y() * m_master.pixels_x() *
m_master.bitdepth() /
8; // TODO! replace with image_size_in_bytes
auto *part_buffer = new std::byte[bytes_per_part];
// TODO! if we have many submodules we should reorder them on the module
// level
for (size_t part_idx = 0; part_idx != n_subfile_parts; ++part_idx) {
for (size_t part_idx = 0; part_idx != n_modules(); ++part_idx) {
auto pos = m_geometry.module_pixel_0[part_idx];
auto corrected_idx = frame_indices[part_idx];
auto subfile_id = corrected_idx / m_master.max_frames_per_file();
if (subfile_id >= subfiles.size()) {
throw std::runtime_error(LOCATION +
" Subfile out of range. Possible missing data.");
}
subfiles[subfile_id][part_idx]->seek(corrected_idx % m_master.max_frames_per_file());
subfiles[subfile_id][part_idx]->read_into(part_buffer, header);
m_subfiles[part_idx]->seek(corrected_idx);
m_subfiles[part_idx]->read_into(part_buffer, header);
if(header)
++header;
@ -321,6 +279,7 @@ void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, Detect
}
delete[] part_buffer;
}
}
std::vector<Frame> RawFile::read_n(size_t n_frames) {
@ -337,27 +296,8 @@ size_t RawFile::frame_number(size_t frame_index) {
if (frame_index >= m_master.frames_in_file()) {
throw std::runtime_error(LOCATION + " Frame number out of range");
}
size_t subfile_id = frame_index / m_master.max_frames_per_file();
if (subfile_id >= subfiles.size()) {
throw std::runtime_error(
LOCATION + " Subfile out of range. Possible missing data.");
}
return subfiles[subfile_id][0]->frame_number(
frame_index % m_master.max_frames_per_file());
}
RawFile::~RawFile() {
// TODO! Fix this, for file closing
for (auto &vec : subfiles) {
for (auto *subfile : vec) {
delete subfile;
}
}
return m_subfiles[0]->frame_number(frame_index);
}
} // namespace aare
} // namespace aare

View File

@ -99,11 +99,11 @@ TEST_CASE("Read frame numbers from a raw file", "[.integration]") {
}
}
TEST_CASE("Compare reading from a numpy file with a raw file", "[.integration]") {
auto fpath_raw = test_data_path() / "jungfrau" / "jungfrau_single_master_0.json";
TEST_CASE("Compare reading from a numpy file with a raw file", "[.files]") {
auto fpath_raw = test_data_path() / "raw/jungfrau" / "jungfrau_single_master_0.json";
REQUIRE(std::filesystem::exists(fpath_raw));
auto fpath_npy = test_data_path() / "jungfrau" / "jungfrau_single_0.npy";
auto fpath_npy = test_data_path() / "raw/jungfrau" / "jungfrau_single_0.npy";
REQUIRE(std::filesystem::exists(fpath_npy));
File raw(fpath_raw, "r");
@ -113,6 +113,7 @@ TEST_CASE("Compare reading from a numpy file with a raw file", "[.integration]")
CHECK(npy.total_frames() == 10);
for (size_t i = 0; i < 10; ++i) {
CHECK(raw.tell() == i);
auto raw_frame = raw.read_frame();
auto npy_frame = npy.read_frame();
CHECK((raw_frame.view<uint16_t>() == npy_frame.view<uint16_t>()));

View File

@ -87,7 +87,7 @@ int ScanParameters::start() const { return m_start; }
int ScanParameters::stop() const { return m_stop; }
void ScanParameters::increment_stop(){
m_stop += 1;
};
}
int ScanParameters::step() const { return m_step; }
const std::string &ScanParameters::dac() const { return m_dac; }
bool ScanParameters::enabled() const { return m_enabled; }
@ -140,6 +140,10 @@ std::optional<size_t> RawMasterFile::number_of_rows() const {
xy RawMasterFile::geometry() const { return m_geometry; }
size_t RawMasterFile::n_modules() const {
return m_geometry.row * m_geometry.col;
}
std::optional<uint8_t> RawMasterFile::quad() const { return m_quad; }
// optional values, these may or may not be present in the master file
@ -417,4 +421,4 @@ void RawMasterFile::parse_raw(const std::filesystem::path &fpath) {
if(m_frames_in_file==0)
m_frames_in_file = m_total_frames_expected;
}
} // namespace aare
} // namespace aare

View File

@ -1,65 +1,78 @@
#include "aare/RawSubFile.hpp"
#include "aare/PixelMap.hpp"
#include "aare/algorithm.hpp"
#include "aare/utils/ifstream_helpers.hpp"
#include "aare/logger.hpp"
#include <cstring> // memcpy
#include <fmt/core.h>
#include <iostream>
#include <regex>
namespace aare {
RawSubFile::RawSubFile(const std::filesystem::path &fname,
DetectorType detector, size_t rows, size_t cols,
size_t bitdepth, uint32_t pos_row, uint32_t pos_col)
: m_detector_type(detector), m_bitdepth(bitdepth), m_fname(fname),
: m_detector_type(detector), m_bitdepth(bitdepth),
m_rows(rows), m_cols(cols),
m_bytes_per_frame((m_bitdepth / 8) * m_rows * m_cols), m_pos_row(pos_row),
m_pos_col(pos_col) {
LOG(logDEBUG) << "RawSubFile::RawSubFile()";
if (m_detector_type == DetectorType::Moench03_old) {
m_pixel_map = GenerateMoench03PixelMap();
} else if (m_detector_type == DetectorType::Eiger && m_pos_row % 2 == 0) {
m_pixel_map = GenerateEigerFlipRowsPixelMap();
}
if (std::filesystem::exists(fname)) {
n_frames = std::filesystem::file_size(fname) /
(sizeof(DetectorHeader) + rows * cols * bitdepth / 8);
} else {
throw std::runtime_error(
LOCATION + fmt::format("File {} does not exist", m_fname.string()));
}
// fp = fopen(m_fname.string().c_str(), "rb");
m_file.open(m_fname, std::ios::binary);
if (!m_file.is_open()) {
throw std::runtime_error(
LOCATION + fmt::format("Could not open file {}", m_fname.string()));
}
#ifdef AARE_VERBOSE
fmt::print("Opened file: {} with {} frames\n", m_fname.string(), n_frames);
fmt::print("m_rows: {}, m_cols: {}, m_bitdepth: {}\n", m_rows, m_cols,
m_bitdepth);
fmt::print("file size: {}\n", std::filesystem::file_size(fname));
#endif
parse_fname(fname);
scan_files();
open_file(m_current_file_index); // open the first file
}
void RawSubFile::seek(size_t frame_index) {
if (frame_index >= n_frames) {
throw std::runtime_error(LOCATION + fmt::format("Frame index {} out of range in a file with {} frames", frame_index, n_frames));
LOG(logDEBUG) << "RawSubFile::seek(" << frame_index << ")";
if (frame_index >= m_total_frames) {
throw std::runtime_error(LOCATION + " Frame index out of range: " +
std::to_string(frame_index));
}
m_file.seekg((sizeof(DetectorHeader) + bytes_per_frame()) * frame_index);
m_current_frame_index = frame_index;
auto file_index = first_larger(m_last_frame_in_file, frame_index);
if (file_index != m_current_file_index)
open_file(file_index);
auto frame_offset = (file_index)
? frame_index - m_last_frame_in_file[file_index - 1]
: frame_index;
auto byte_offset = frame_offset * (m_bytes_per_frame + sizeof(DetectorHeader));
m_file.seekg(byte_offset);
}
size_t RawSubFile::tell() {
return m_file.tellg() / (sizeof(DetectorHeader) + bytes_per_frame());
LOG(logDEBUG) << "RawSubFile::tell():" << m_current_frame_index;
return m_current_frame_index;
}
void RawSubFile::read_into(std::byte *image_buf, DetectorHeader *header) {
LOG(logDEBUG) << "RawSubFile::read_into()";
if (header) {
m_file.read(reinterpret_cast<char *>(header), sizeof(DetectorHeader));
} else {
m_file.seekg(sizeof(DetectorHeader), std::ios::cur);
}
if (m_file.fail()){
throw std::runtime_error(LOCATION + ifstream_error_msg(m_file));
}
// TODO! expand support for different bitdepths
if (m_pixel_map) {
// read into a temporary buffer and then copy the data to the buffer
@ -79,8 +92,31 @@ void RawSubFile::read_into(std::byte *image_buf, DetectorHeader *header) {
// read directly into the buffer
m_file.read(reinterpret_cast<char *>(image_buf), bytes_per_frame());
}
if (m_file.fail()){
throw std::runtime_error(LOCATION + ifstream_error_msg(m_file));
}
++ m_current_frame_index;
if (m_current_frame_index >= m_last_frame_in_file[m_current_file_index] &&
(m_current_frame_index < m_total_frames)) {
++m_current_file_index;
open_file(m_current_file_index);
}
}
void RawSubFile::read_into(std::byte *image_buf, size_t n_frames, DetectorHeader *header) {
for (size_t i = 0; i < n_frames; i++) {
read_into(image_buf, header);
image_buf += bytes_per_frame();
if (header) {
++header;
}
}
}
template <typename T>
void RawSubFile::read_with_map(std::byte *image_buf) {
auto part_buffer = new std::byte[bytes_per_frame()];
@ -107,4 +143,69 @@ size_t RawSubFile::frame_number(size_t frame_index) {
return h.frameNumber;
}
void RawSubFile::parse_fname(const std::filesystem::path &fname) {
LOG(logDEBUG) << "RawSubFile::parse_fname()";
// data has the format: /path/to/data/jungfrau_single_d0_f1_0.raw
// d0 is the module index, will not change for this file
// f1 is the file index - this is the one we need
// 0 is the measurement index, will not change
m_path = fname.parent_path();
m_base_name = fname.filename();
// Regex to extract numbers after 'd' and 'f'
std::regex pattern(R"(^(.*_d)(\d+)(_f)(\d+)(_\d+\.raw)$)");
std::smatch match;
if (std::regex_match(m_base_name, match, pattern)) {
m_offset = std::stoi(match[4].str()); // find the first file index in case of a truncated series
m_base_name = match[1].str() + match[2].str() + match[3].str() + "{}" + match[5].str();
LOG(logDEBUG) << "Base name: " << m_base_name;
LOG(logDEBUG) << "Offset: " << m_offset;
LOG(logDEBUG) << "Path: " << m_path.string();
} else {
throw std::runtime_error(
LOCATION + fmt::format("Could not parse file name {}", fname.string()));
}
}
std::filesystem::path RawSubFile::fpath(size_t file_index) const {
auto fname = fmt::format(m_base_name, file_index);
return m_path / fname;
}
void RawSubFile::open_file(size_t file_index) {
m_file.close();
auto fname = fpath(file_index+m_offset);
LOG(logDEBUG) << "RawSubFile::open_file(): " << fname.string();
m_file.open(fname, std::ios::binary);
if (!m_file.is_open()) {
throw std::runtime_error(
LOCATION + fmt::format("Could not open file {}", fpath(file_index).string()));
}
m_current_file_index = file_index;
}
void RawSubFile::scan_files() {
LOG(logDEBUG) << "RawSubFile::scan_files()";
// find how many files we have and the number of frames in each file
m_last_frame_in_file.clear();
size_t file_index = m_offset;
while (std::filesystem::exists(fpath(file_index))) {
auto n_frames = std::filesystem::file_size(fpath(file_index)) /
(m_bytes_per_frame + sizeof(DetectorHeader));
m_last_frame_in_file.push_back(n_frames);
LOG(logDEBUG) << "Found: " << n_frames << " frames in file: " << fpath(file_index).string();
++file_index;
}
// find where we need to open the next file and total number of frames
m_last_frame_in_file = cumsum(m_last_frame_in_file);
if(m_last_frame_in_file.empty()){
m_total_frames = 0;
}else{
m_total_frames = m_last_frame_in_file.back();
}
}
} // namespace aare
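The seek logic above maps a global frame index to (file, frame-within-file) through the cumulative frame counts collected in scan_files. A minimal stand-alone illustration of that arithmetic, using std::partial_sum and std::upper_bound in place of the aare cumsum/first_larger helpers:
// Sketch of the file/offset lookup used by RawSubFile::seek.
// Example from the test below: files with 3, 3, 3 and 1 frames.
#include <algorithm>
#include <cstdio>
#include <numeric>
#include <vector>

int main() {
    std::vector<size_t> frames_per_file{3, 3, 3, 1};
    std::vector<size_t> last_frame_in_file(frames_per_file.size());
    std::partial_sum(frames_per_file.begin(), frames_per_file.end(),
                     last_frame_in_file.begin()); // {3, 6, 9, 10}

    size_t frame_index = 7; // global frame index
    // first cumulative count strictly larger than frame_index -> containing file
    auto it = std::upper_bound(last_frame_in_file.begin(),
                               last_frame_in_file.end(), frame_index);
    size_t file_index = it - last_frame_in_file.begin();
    size_t frame_offset =
        file_index ? frame_index - last_frame_in_file[file_index - 1] : frame_index;
    std::printf("frame %zu -> file %zu, offset %zu\n",
                frame_index, file_index, frame_offset); // frame 7 -> file 2, offset 1
}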

76
src/RawSubFile.test.cpp Normal file
View File

@ -0,0 +1,76 @@
#include "aare/RawSubFile.hpp"
#include "aare/File.hpp"
#include "aare/NDArray.hpp"
#include <catch2/catch_test_macros.hpp>
#include "test_config.hpp"
using namespace aare;
TEST_CASE("Read frames directly from a RawSubFile", "[.files]"){
auto fpath_raw = test_data_path() / "raw/jungfrau" / "jungfrau_single_d0_f0_0.raw";
REQUIRE(std::filesystem::exists(fpath_raw));
RawSubFile f(fpath_raw, DetectorType::Jungfrau, 512, 1024, 16);
REQUIRE(f.rows() == 512);
REQUIRE(f.cols() == 1024);
REQUIRE(f.pixels_per_frame() == 512 * 1024);
REQUIRE(f.bytes_per_frame() == 512 * 1024 * 2);
REQUIRE(f.bytes_per_pixel() == 2);
auto fpath_npy = test_data_path() / "raw/jungfrau" / "jungfrau_single_0.npy";
REQUIRE(std::filesystem::exists(fpath_npy));
//Numpy file with the same data to use as reference
File npy(fpath_npy, "r");
CHECK(f.frames_in_file() == 10);
CHECK(npy.total_frames() == 10);
DetectorHeader header{};
NDArray<uint16_t, 2> image({static_cast<ssize_t>(f.rows()), static_cast<ssize_t>(f.cols())});
for (size_t i = 0; i < 10; ++i) {
CHECK(f.tell() == i);
f.read_into(image.buffer(), &header);
auto npy_frame = npy.read_frame();
CHECK((image.view() == npy_frame.view<uint16_t>()));
}
}
TEST_CASE("Read frames directly from a RawSubFile starting at the second file", "[.files]"){
// we know this file has 10 frames with frame numbers 1 to 10
// f0 1,2,3
// f1 4,5,6 <-- starting here
// f2 7,8,9
// f3 10
auto fpath_raw = test_data_path() / "raw/jungfrau" / "jungfrau_single_d0_f1_0.raw";
REQUIRE(std::filesystem::exists(fpath_raw));
RawSubFile f(fpath_raw, DetectorType::Jungfrau, 512, 1024, 16);
auto fpath_npy = test_data_path() / "raw/jungfrau" / "jungfrau_single_0.npy";
REQUIRE(std::filesystem::exists(fpath_npy));
//Numpy file with the same data to use as reference
File npy(fpath_npy, "r");
npy.seek(3);
CHECK(f.frames_in_file() == 7);
CHECK(npy.total_frames() == 10);
DetectorHeader header{};
NDArray<uint16_t, 2> image({static_cast<ssize_t>(f.rows()), static_cast<ssize_t>(f.cols())});
for (size_t i = 0; i < 7; ++i) {
CHECK(f.tell() == i);
f.read_into(image.buffer(), &header);
// frame numbers start at 1, frame index at 0
// adding 3 + 1 to verify the frame number
CHECK(header.frameNumber == i + 4);
auto npy_frame = npy.read_frame();
CHECK((image.view() == npy_frame.view<uint16_t>()));
}
}

View File

@ -160,3 +160,36 @@ TEST_CASE("cumsum works with negative numbers", "[algorithm]") {
REQUIRE(result[3] == -6);
REQUIRE(result[4] == -10);
}
TEST_CASE("cumsum on an empty vector", "[algorithm]") {
std::vector<double> vec = {};
auto result = aare::cumsum(vec);
REQUIRE(result.size() == 0);
}
TEST_CASE("All equal on an empty vector is false", "[algorithm]") {
std::vector<int> vec = {};
REQUIRE(aare::all_equal(vec) == false);
}
TEST_CASE("All equal on a vector with 1 element is true", "[algorithm]") {
std::vector<int> vec = {1};
REQUIRE(aare::all_equal(vec) == true);
}
TEST_CASE("All equal on a vector with 2 elements is true", "[algorithm]") {
std::vector<int> vec = {1, 1};
REQUIRE(aare::all_equal(vec) == true);
}
TEST_CASE("All equal on a vector with two different elements is false", "[algorithm]") {
std::vector<int> vec = {1, 2};
REQUIRE(aare::all_equal(vec) == false);
}
TEST_CASE("Last element is different", "[algorithm]") {
std::vector<int> vec = {1, 1, 1, 1, 2};
REQUIRE(aare::all_equal(vec) == false);
}

View File

@ -1,5 +1,5 @@
#include "aare/decode.hpp"
#include <cmath>
namespace aare {
uint16_t adc_sar_05_decode64to16(uint64_t input){
@ -22,8 +22,12 @@ uint16_t adc_sar_05_decode64to16(uint64_t input){
}
void adc_sar_05_decode64to16(NDView<uint64_t, 2> input, NDView<uint16_t,2> output){
for(int64_t i = 0; i < input.shape(0); i++){
for(int64_t j = 0; j < input.shape(1); j++){
if(input.shape() != output.shape()){
throw std::invalid_argument(LOCATION + " input and output shapes must match");
}
for(ssize_t i = 0; i < input.shape(0); i++){
for(ssize_t j = 0; j < input.shape(1); j++){
output(i,j) = adc_sar_05_decode64to16(input(i,j));
}
}
@ -49,13 +53,50 @@ uint16_t adc_sar_04_decode64to16(uint64_t input){
}
void adc_sar_04_decode64to16(NDView<uint64_t, 2> input, NDView<uint16_t,2> output){
for(int64_t i = 0; i < input.shape(0); i++){
for(int64_t j = 0; j < input.shape(1); j++){
if(input.shape() != output.shape()){
throw std::invalid_argument(LOCATION + " input and output shapes must match");
}
for(ssize_t i = 0; i < input.shape(0); i++){
for(ssize_t j = 0; j < input.shape(1); j++){
output(i,j) = adc_sar_04_decode64to16(input(i,j));
}
}
}
double apply_custom_weights(uint16_t input, const NDView<double, 1> weights) {
if(weights.size() > 16){
throw std::invalid_argument("weights size must be less than or equal to 16");
}
double result = 0.0;
for (ssize_t i = 0; i < weights.size(); ++i) {
result += ((input >> i) & 1) * std::pow(weights[i], i);
}
return result;
}
void apply_custom_weights(NDView<uint16_t, 1> input, NDView<double, 1> output, const NDView<double,1> weights) {
if(input.shape() != output.shape()){
throw std::invalid_argument(LOCATION + " input and output shapes must match");
}
//Precompute the weight powers to avoid repeatedly calling std::pow
std::vector<double> weights_powers(weights.size());
for (ssize_t i = 0; i < weights.size(); ++i) {
weights_powers[i] = std::pow(weights[i], i);
}
// Apply custom weights to each element in the input array
for (ssize_t i = 0; i < input.shape(0); i++) {
double result = 0.0;
for (size_t bit_index = 0; bit_index < weights_powers.size(); ++bit_index) {
result += ((input(i) >> bit_index) & 1) * weights_powers[bit_index];
}
output(i) = result;
}
}
} // namespace aare
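One quick sanity check on apply_custom_weights: with every weight set to 2 the per-bit factors become the ordinary powers of two, so the result reduces to the plain integer value of the input. A small sketch using only the API shown above (values are illustrative):
// Sketch: uniform weights of 2 reproduce the binary value, since
// sum(bit_i * 2^i) == input.
#include "aare/NDArray.hpp"
#include "aare/decode.hpp"
#include <cassert>
#include <cmath>
#include <cstdint>

int main() {
    aare::NDArray<double, 1> weights({16}, 2.0); // one weight per bit
    double v = aare::apply_custom_weights(uint16_t{0b1011}, weights.view());
    assert(std::abs(v - 11.0) < 1e-9); // 1 + 2 + 8
    return 0;
}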

80
src/decode.test.cpp Normal file
View File

@ -0,0 +1,80 @@
#include "aare/decode.hpp"
#include <catch2/matchers/catch_matchers_floating_point.hpp>
#include <catch2/catch_test_macros.hpp>
#include "aare/NDArray.hpp"
using Catch::Matchers::WithinAbs;
#include <vector>
TEST_CASE("test_adc_sar_05_decode64to16"){
uint64_t input = 0;
uint16_t output = aare::adc_sar_05_decode64to16(input);
CHECK(output == 0);
// bit 29 on the input is bit 0 on the output
input = 1UL << 29;
output = aare::adc_sar_05_decode64to16(input);
CHECK(output == 1);
// test all bits by iterating through the bitlist
std::vector<int> bitlist = {29, 19, 28, 18, 31, 21, 27, 20, 24, 23, 25, 22};
for (size_t i = 0; i < bitlist.size(); i++) {
input = 1UL << bitlist[i];
output = aare::adc_sar_05_decode64to16(input);
CHECK(output == (1 << i));
}
// test a few "random" values
input = 0;
input |= (1UL << 29);
input |= (1UL << 19);
input |= (1UL << 28);
output = aare::adc_sar_05_decode64to16(input);
CHECK(output == 7UL);
input = 0;
input |= (1UL << 18);
input |= (1UL << 27);
input |= (1UL << 25);
output = aare::adc_sar_05_decode64to16(input);
CHECK(output == 1096UL);
input = 0;
input |= (1UL << 25);
input |= (1UL << 22);
output = aare::adc_sar_05_decode64to16(input);
CHECK(output == 3072UL);
}
TEST_CASE("test_apply_custom_weights") {
uint16_t input = 1;
aare::NDArray<double, 1> weights_data({3}, 0.0);
weights_data(0) = 1.7;
weights_data(1) = 2.1;
weights_data(2) = 1.8;
auto weights = weights_data.view();
double output = aare::apply_custom_weights(input, weights);
CHECK_THAT(output, WithinAbs(1.0, 0.001));
input = 1 << 1;
output = aare::apply_custom_weights(input, weights);
CHECK_THAT(output, WithinAbs(2.1, 0.001));
input = 1 << 2;
output = aare::apply_custom_weights(input, weights);
CHECK_THAT(output, WithinAbs(3.24, 0.001));
input = 0b111;
output = aare::apply_custom_weights(input, weights);
CHECK_THAT(output, WithinAbs(6.34, 0.001));
}

View File

@ -0,0 +1,18 @@
#include "aare/utils/ifstream_helpers.hpp"
namespace aare {
std::string ifstream_error_msg(std::ifstream &ifs) {
std::ios_base::iostate state = ifs.rdstate();
if (state & std::ios_base::eofbit) {
return " End of file reached";
} else if (state & std::ios_base::badbit) {
return " Bad file stream";
} else if (state & std::ios_base::failbit) {
return " File read failed";
}else{
return " Unknown/no error";
}
}
} // namespace aare

57
update_version.py Normal file
View File

@ -0,0 +1,57 @@
# SPDX-License-Identifier: LGPL-3.0-or-other
# Copyright (C) 2021 Contributors to the Aare Package
"""
Script to update VERSION file with semantic versioning if provided as an argument, or with 0.0.0 if no argument is provided.
"""
import sys
import os
import re
from packaging.version import Version, InvalidVersion
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
def is_integer(value):
try:
int(value)
except ValueError:
return False
else:
return True
def get_version():
# Check at least one argument is passed
if len(sys.argv) < 2:
return "0.0.0"
version = sys.argv[1]
try:
Version(version) # raises InvalidVersion if the version does not follow PEP 440
version_normalized = version.replace("-", ".")
version_normalized = re.sub(r'0*(\d+)', lambda m : str(int(m.group(0))), version_normalized) #remove leading zeros
return version_normalized
except InvalidVersion as e:
print(f"Invalid version {version}. Version format must follow semantic versioning format of python PEP 440 version identification specification.")
sys.exit(1)
def write_version_to_file(version):
version_file_path = os.path.join(SCRIPT_DIR, "VERSION")
with open(version_file_path, "w") as version_file:
version_file.write(version)
print(f"Version {version} written to VERSION file.")
# Main script
if __name__ == "__main__":
version = get_version()
write_version_to_file(version)