mirror of https://github.com/slsdetectorgroup/aare.git
synced 2025-06-15 00:37:13 +02:00

Compare commits: fix/api_cl ... api_cluste (30 commits)
Commits (SHA1):
eb6862ff99, f06e722dce, 7b5e32a824, 86d343f5f5, 129e7e9f9d, 58c934d9cf,
4088b0889d, d5f8daf194, c6e8e5f6a1, b501c31e38, 326941e2b4, 84aafa75f6,
177459c98a, c49a2fdf8e, 14211047ff, acd9d5d487, d4050ec557, fca9d5d2fa,
1174f7f434, 2bb7d360bf, a90e532b21, 8d8182c632, 5f34ab6df1, 5c8a5099fd,
7c93632605, 54def26334, a59e9656be, 3f753ec900, 6e4db45b57, e1533282f1
@@ -1,18 +1,24 @@
name: Build on RHEL8

on:
  push:
  workflow_dispatch:

permissions:
  contents: read

jobs:
  buildh:
  build:
    runs-on: "ubuntu-latest"
    container:
      image: gitea.psi.ch/images/rhel8-developer-gitea-actions
    steps:
      - uses: actions/checkout@v4
      # workaround until actions/checkout@v4 is available for RH8
      # - uses: actions/checkout@v4
      - name: Clone repository
        run: |
          echo Cloning ${{ github.ref_name }}
          git clone https://${{secrets.GITHUB_TOKEN}}@gitea.psi.ch/${{ github.repository }}.git --branch=${{ github.ref_name }} .

      - name: Install dependencies
@@ -22,7 +28,7 @@ jobs:
      - name: Build library
        run: |
          mkdir build && cd build
          cmake .. -DAARE_PYTHON_BINDINGS=ON -DAARE_TESTS=ON
          cmake .. -DAARE_PYTHON_BINDINGS=ON -DAARE_TESTS=ON -DPython_FIND_VIRTUALENV=FIRST
          make -j 2

      - name: C++ unit tests
@@ -8,7 +8,7 @@ permissions:
  contents: read

jobs:
  buildh:
  build:
    runs-on: "ubuntu-latest"
    container:
      image: gitea.psi.ch/images/rhel9-developer-gitea-actions
.github/workflows/build_wheel.yml (vendored, new file, 64 lines)
@@ -0,0 +1,64 @@
name: Build wheel

on:
  workflow_dispatch:
  pull_request:
  push:
    branches:
      - main
  release:
    types:
      - published

jobs:
  build_wheels:
    name: Build wheels on ${{ matrix.os }}
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest,]

    steps:
      - uses: actions/checkout@v4

      - name: Build wheels
        run: pipx run cibuildwheel==2.23.0

      - uses: actions/upload-artifact@v4
        with:
          name: cibw-wheels-${{ matrix.os }}-${{ strategy.job-index }}
          path: ./wheelhouse/*.whl

  build_sdist:
    name: Build source distribution
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Build sdist
        run: pipx run build --sdist

      - uses: actions/upload-artifact@v4
        with:
          name: cibw-sdist
          path: dist/*.tar.gz

  upload_pypi:
    needs: [build_wheels, build_sdist]
    runs-on: ubuntu-latest
    environment: pypi
    permissions:
      id-token: write
    if: github.event_name == 'release' && github.event.action == 'published'
    # or, alternatively, upload to PyPI on every tag starting with 'v' (remove on: release above to use this)
    # if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v')
    steps:
      - uses: actions/download-artifact@v4
        with:
          # unpacks all CIBW artifacts into dist/
          pattern: cibw-*
          path: dist
          merge-multiple: true

      - uses: pypa/gh-action-pypi-publish@release/v1
.gitignore (vendored)
@@ -17,7 +17,8 @@ Testing/
ctbDict.cpp
ctbDict.h

wheelhouse/
dist/

*.pyc
*/__pycache__/*
@@ -1,4 +1,4 @@
cmake_minimum_required(VERSION 3.14)
cmake_minimum_required(VERSION 3.15)

project(aare
    VERSION 1.0.0
@@ -11,6 +11,14 @@ set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF)

execute_process(
    COMMAND git log -1 --format=%h
    WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
    OUTPUT_VARIABLE GIT_HASH
    OUTPUT_STRIP_TRAILING_WHITESPACE
)
message(STATUS "Building from git hash: ${GIT_HASH}")

if (${CMAKE_VERSION} VERSION_GREATER "3.24")
    cmake_policy(SET CMP0135 NEW) #Fetch content download timestamp
endif()

@@ -381,9 +389,10 @@ set(SourceFiles
    ${CMAKE_CURRENT_SOURCE_DIR}/src/RawFile.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/RawSubFile.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/RawMasterFile.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/utils/task.cpp
)

    ${CMAKE_CURRENT_SOURCE_DIR}/src/utils/task.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/utils/ifstream_helpers.cpp
)

add_library(aare_core STATIC ${SourceFiles})
target_include_directories(aare_core PUBLIC
@@ -416,6 +425,7 @@ if(AARE_TESTS)
    set(TestSources
        ${CMAKE_CURRENT_SOURCE_DIR}/src/algorithm.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/defs.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/decode.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/Dtype.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/Frame.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/geo_helpers.test.cpp
@@ -427,6 +437,7 @@ if(AARE_TESTS)
        ${CMAKE_CURRENT_SOURCE_DIR}/src/Cluster.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/CalculateEta.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFile.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFinderMT.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/Pedestal.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/JungfrauDataFile.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.test.cpp
@@ -1,6 +1,7 @@
package:
  name: aare
  version: 2025.4.1 #TODO! how to not duplicate this?
  version: 2025.4.22 #TODO! how to not duplicate this?

@@ -38,6 +39,7 @@ requirements:
  run:
    - python {{python}}
    - numpy {{ numpy }}
    - matplotlib

test:
@@ -6,14 +6,14 @@

namespace aare {

typedef enum {
enum class corner : int {
    cBottomLeft = 0,
    cBottomRight = 1,
    cTopLeft = 2,
    cTopRight = 3
} corner;
};

typedef enum {
enum class pixel : int {
    pBottomLeft = 0,
    pBottom = 1,
    pBottomRight = 2,
@@ -23,7 +23,7 @@ typedef enum {
    pTopLeft = 6,
    pTop = 7,
    pTopRight = 8
} pixel;
};

template <typename T> struct Eta2 {
    double x;
@@ -33,7 +33,7 @@ template <typename T> struct Eta2 {
};

/**
 * @brief Calculate the eta2 values for all clusters in a Clsutervector
 * @brief Calculate the eta2 values for all clusters in a Clustervector
 */
template <typename ClusterType,
          typename = std::enable_if_t<is_cluster_v<ClusterType>>>
@@ -41,7 +41,7 @@ NDArray<double, 2> calculate_eta2(const ClusterVector<ClusterType> &clusters) {
    NDArray<double, 2> eta2({static_cast<int64_t>(clusters.size()), 2});

    for (size_t i = 0; i < clusters.size(); i++) {
        auto e = calculate_eta2(clusters.at(i));
        auto e = calculate_eta2(clusters[i]);
        eta2(i, 0) = e.x;
        eta2(i, 1) = e.y;
    }
@@ -64,31 +64,79 @@ calculate_eta2(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cl) {
    eta.sum = max_sum.first;
    auto c = max_sum.second;

    size_t cluster_center_index =
        (ClusterSizeX / 2) + (ClusterSizeY / 2) * ClusterSizeX;

    size_t index_bottom_left_max_2x2_subcluster =
        (int(c / (ClusterSizeX - 1))) * ClusterSizeX + c % (ClusterSizeX - 1);

    if ((cl.data[index_bottom_left_max_2x2_subcluster] +
         cl.data[index_bottom_left_max_2x2_subcluster + 1]) != 0)
        eta.x = static_cast<double>(
                    cl.data[index_bottom_left_max_2x2_subcluster + 1]) /
                static_cast<double>(
                    (cl.data[index_bottom_left_max_2x2_subcluster] +
                     cl.data[index_bottom_left_max_2x2_subcluster + 1]));
    // check that cluster center is in max subcluster
    if (cluster_center_index != index_bottom_left_max_2x2_subcluster &&
        cluster_center_index != index_bottom_left_max_2x2_subcluster + 1 &&
        cluster_center_index !=
            index_bottom_left_max_2x2_subcluster + ClusterSizeX &&
        cluster_center_index !=
            index_bottom_left_max_2x2_subcluster + ClusterSizeX + 1)
        throw std::runtime_error("Photon center is not in max 2x2_subcluster");

    if ((cl.data[index_bottom_left_max_2x2_subcluster] +
         cl.data[index_bottom_left_max_2x2_subcluster + ClusterSizeX]) != 0)
        eta.y =
            static_cast<double>(
                cl.data[index_bottom_left_max_2x2_subcluster + ClusterSizeX]) /
            static_cast<double>(
                (cl.data[index_bottom_left_max_2x2_subcluster] +
                 cl.data[index_bottom_left_max_2x2_subcluster + ClusterSizeX]));
    if ((cluster_center_index - index_bottom_left_max_2x2_subcluster) %
            ClusterSizeX ==
        0) {
        if ((cl.data[cluster_center_index + 1] +
             cl.data[cluster_center_index]) != 0)

            eta.x = static_cast<double>(cl.data[cluster_center_index + 1]) /
                    static_cast<double>((cl.data[cluster_center_index + 1] +
                                         cl.data[cluster_center_index]));
    } else {
        if ((cl.data[cluster_center_index] +
             cl.data[cluster_center_index - 1]) != 0)

            eta.x = static_cast<double>(cl.data[cluster_center_index]) /
                    static_cast<double>((cl.data[cluster_center_index - 1] +
                                         cl.data[cluster_center_index]));
    }
    if ((cluster_center_index - index_bottom_left_max_2x2_subcluster) /
            ClusterSizeX <
        1) {
        assert(cluster_center_index + ClusterSizeX <
               ClusterSizeX * ClusterSizeY); // suppress warning
        if ((cl.data[cluster_center_index] +
             cl.data[cluster_center_index + ClusterSizeX]) != 0)
            eta.y = static_cast<double>(
                        cl.data[cluster_center_index + ClusterSizeX]) /
                    static_cast<double>(
                        (cl.data[cluster_center_index] +
                         cl.data[cluster_center_index + ClusterSizeX]));
    } else {
        if ((cl.data[cluster_center_index] +
             cl.data[cluster_center_index - ClusterSizeX]) != 0)
            eta.y = static_cast<double>(cl.data[cluster_center_index]) /
                    static_cast<double>(
                        (cl.data[cluster_center_index] +
                         cl.data[cluster_center_index - ClusterSizeX]));
    }

    eta.c = c; // TODO only supported for 2x2 and 3x3 clusters -> at least no
               // underyling enum class
    return eta;
}

// TODO! Look up eta2 calculation - photon center should be top right corner
template <typename T>
Eta2<T> calculate_eta2(const Cluster<T, 2, 2, int16_t> &cl) {
    Eta2<T> eta{};

    if ((cl.data[0] + cl.data[1]) != 0)
        eta.x = static_cast<double>(cl.data[1]) / (cl.data[0] + cl.data[1]);
    if ((cl.data[0] + cl.data[2]) != 0)
        eta.y = static_cast<double>(cl.data[2]) / (cl.data[0] + cl.data[2]);
    eta.sum = cl.sum();
    eta.c = static_cast<int>(corner::cBottomLeft); // TODO! This is not correct,
                                                   // but need to put something
    return eta;
}

// calculates Eta3 for 3x3 cluster based on code from analyze_cluster
// TODO only supported for 3x3 Clusters
template <typename T> Eta2<T> calculate_eta3(const Cluster<T, 3, 3> &cl) {
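
For reference, a minimal standalone sketch of the 2x2 eta2 formula used in the specialization above. It does not include the aare headers; the struct and the names here are illustrative only.

    #include <array>
    #include <cstdio>

    struct Eta2Sketch {
        double x{};
        double y{};
    };

    // Assumed 2x2 layout, row major: [0]=bottom-left, [1]=bottom-right,
    // [2]=top-left, [3]=top-right (as in Cluster<T, 2, 2>).
    Eta2Sketch eta2_2x2(const std::array<int, 4> &data) {
        Eta2Sketch eta{};
        if ((data[0] + data[1]) != 0)
            eta.x = static_cast<double>(data[1]) / (data[0] + data[1]);
        if ((data[0] + data[2]) != 0)
            eta.y = static_cast<double>(data[2]) / (data[0] + data[2]);
        return eta;
    }

    int main() {
        auto eta = eta2_2x2({10, 30, 10, 0});
        std::printf("eta.x=%.2f eta.y=%.2f\n", eta.x, eta.y); // 0.75 0.50
        return 0;
    }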
@@ -16,80 +16,61 @@

namespace aare {

template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
          typename CoordType = int16_t>
constexpr bool is_valid_cluster =
    std::is_arithmetic_v<T> && std::is_integral_v<CoordType> &&
    (ClusterSizeX > 0) && (ClusterSizeY > 0);

// requires clause c++20 maybe update
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
          typename CoordType = int16_t,
          typename Enable = std::enable_if_t<
              is_valid_cluster<T, ClusterSizeX, ClusterSizeY, CoordType>>>
          typename CoordType = int16_t>
struct Cluster {

    static_assert(std::is_arithmetic_v<T>, "T needs to be an arithmetic type");
    static_assert(std::is_integral_v<CoordType>,
                  "CoordType needs to be an integral type");
    static_assert(ClusterSizeX > 0 && ClusterSizeY > 0,
                  "Cluster sizes must be bigger than zero");

    CoordType x;
    CoordType y;
    T data[ClusterSizeX * ClusterSizeY];
    std::array<T, ClusterSizeX * ClusterSizeY> data;

    T sum() const {
        return std::accumulate(data, data + ClusterSizeX * ClusterSizeY, 0);
    }
    static constexpr uint8_t cluster_size_x = ClusterSizeX;
    static constexpr uint8_t cluster_size_y = ClusterSizeY;
    using value_type = T;
    using coord_type = CoordType;

    T sum() const { return std::accumulate(data.begin(), data.end(), T{}); }

    std::pair<T, int> max_sum_2x2() const {

        constexpr size_t num_2x2_subclusters =
            (ClusterSizeX - 1) * (ClusterSizeY - 1);
        if constexpr (cluster_size_x == 3 && cluster_size_y == 3) {
            std::array<T, 4> sum_2x2_subclusters;
            sum_2x2_subclusters[0] = data[0] + data[1] + data[3] + data[4];
            sum_2x2_subclusters[1] = data[1] + data[2] + data[4] + data[5];
            sum_2x2_subclusters[2] = data[3] + data[4] + data[6] + data[7];
            sum_2x2_subclusters[3] = data[4] + data[5] + data[7] + data[8];
            int index = std::max_element(sum_2x2_subclusters.begin(),
                                         sum_2x2_subclusters.end()) -
                        sum_2x2_subclusters.begin();
            return std::make_pair(sum_2x2_subclusters[index], index);
        } else if constexpr (cluster_size_x == 2 && cluster_size_y == 2) {
            return std::make_pair(data[0] + data[1] + data[2] + data[3], 0);
        } else {
            constexpr size_t num_2x2_subclusters =
                (ClusterSizeX - 1) * (ClusterSizeY - 1);

            std::array<T, num_2x2_subclusters> sum_2x2_subcluster;
            for (size_t i = 0; i < ClusterSizeY - 1; ++i) {
                for (size_t j = 0; j < ClusterSizeX - 1; ++j)
                    sum_2x2_subcluster[i * (ClusterSizeX - 1) + j] =
                        data[i * ClusterSizeX + j] +
                        data[i * ClusterSizeX + j + 1] +
                        data[(i + 1) * ClusterSizeX + j] +
                        data[(i + 1) * ClusterSizeX + j + 1];
        std::array<T, num_2x2_subclusters> sum_2x2_subcluster;
        for (size_t i = 0; i < ClusterSizeY - 1; ++i) {
            for (size_t j = 0; j < ClusterSizeX - 1; ++j)
                sum_2x2_subcluster[i * (ClusterSizeX - 1) + j] =
                    data[i * ClusterSizeX + j] +
                    data[i * ClusterSizeX + j + 1] +
                    data[(i + 1) * ClusterSizeX + j] +
                    data[(i + 1) * ClusterSizeX + j + 1];
            }

            int index = std::max_element(sum_2x2_subcluster.begin(),
                                         sum_2x2_subcluster.end()) -
                        sum_2x2_subcluster.begin();
            return std::make_pair(sum_2x2_subcluster[index], index);
        }

        int index = std::max_element(sum_2x2_subcluster.begin(),
                                     sum_2x2_subcluster.end()) -
                    sum_2x2_subcluster.begin();
        return std::make_pair(sum_2x2_subcluster[index], index);
    }
};

// Specialization for 2x2 clusters (only one sum exists)
template <typename T> struct Cluster<T, 2, 2, int16_t> {
    int16_t x;
    int16_t y;
    T data[4];

    T sum() const { return std::accumulate(data, data + 4, 0); }

    std::pair<T, int> max_sum_2x2() const {
        return std::make_pair(data[0] + data[1] + data[2] + data[3],
                              0); // Only one possible 2x2 sum
    }
};

// Specialization for 3x3 clusters
template <typename T> struct Cluster<T, 3, 3, int16_t> {
    int16_t x;
    int16_t y;
    T data[9];

    T sum() const { return std::accumulate(data, data + 9, 0); }

    std::pair<T, int> max_sum_2x2() const {
        std::array<T, 4> sum_2x2_subclusters;
        sum_2x2_subclusters[0] = data[0] + data[1] + data[3] + data[4];
        sum_2x2_subclusters[1] = data[1] + data[2] + data[4] + data[5];
        sum_2x2_subclusters[2] = data[3] + data[4] + data[6] + data[7];
        sum_2x2_subclusters[3] = data[4] + data[5] + data[7] + data[8];
        int index = std::max_element(sum_2x2_subclusters.begin(),
                                     sum_2x2_subclusters.end()) -
                    sum_2x2_subclusters.begin();
        return std::make_pair(sum_2x2_subclusters[index], index);
    }
};

@@ -102,20 +83,4 @@ struct is_cluster<Cluster<T, X, Y, CoordType>> : std::true_type {}; // Cluster

template <typename T> constexpr bool is_cluster_v = is_cluster<T>::value;

template <typename ClusterType,
          typename = std::enable_if_t<is_cluster_v<ClusterType>>>
struct extract_template_arguments; // Forward declaration

// helper struct to extract template argument
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
          typename CoordType>
struct extract_template_arguments<
    Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> {

    using value_type = T;
    static constexpr int cluster_size_x = ClusterSizeX;
    static constexpr int cluster_size_y = ClusterSizeY;
    using coordtype = CoordType;
};

} // namespace aare
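
A standalone sketch of the generic max_sum_2x2() loop shown above, written against a plain struct rather than aare::Cluster; all names here are illustrative.

    #include <algorithm>
    #include <array>
    #include <cstdio>
    #include <numeric>
    #include <utility>

    template <typename T, int NX, int NY> struct MiniCluster {
        std::array<T, NX * NY> data{};

        T sum() const { return std::accumulate(data.begin(), data.end(), T{}); }

        // Sum every overlapping 2x2 sub-block and return the largest sum
        // together with the flat index of that block in the
        // (NX-1) x (NY-1) grid of possible 2x2 blocks.
        std::pair<T, int> max_sum_2x2() const {
            std::array<T, (NX - 1) * (NY - 1)> sums{};
            for (int i = 0; i < NY - 1; ++i)
                for (int j = 0; j < NX - 1; ++j)
                    sums[i * (NX - 1) + j] =
                        data[i * NX + j] + data[i * NX + j + 1] +
                        data[(i + 1) * NX + j] + data[(i + 1) * NX + j + 1];
            int index = std::max_element(sums.begin(), sums.end()) - sums.begin();
            return std::make_pair(sums[index], index);
        }
    };

    int main() {
        MiniCluster<int, 3, 3> c{{1, 2, 3, 4, 5, 6, 7, 8, 9}};
        auto [best, idx] = c.max_sum_2x2();
        std::printf("sum=%d best_2x2=%d at %d\n", c.sum(), best, idx); // 45 28 3
        return 0;
    }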
@@ -37,7 +37,11 @@ class ClusterCollector {
  public:
    ClusterCollector(ClusterFinderMT<ClusterType, uint16_t, double> *source) {
        m_source = source->sink();
        m_thread = std::thread(&ClusterCollector::process, this);
        m_thread =
            std::thread(&ClusterCollector::process,
                        this); // only one process does that so why isnt it
                               // automatically written to m_cluster in collect
                               // - instead of writing first to m_sink?
    }
    void stop() {
        m_stop_requested = true;
@@ -39,14 +39,15 @@ template <typename ClusterType,
          typename Enable = std::enable_if_t<is_cluster_v<ClusterType>>>
class ClusterFile {
    FILE *fp{};
    const std::string m_filename{};
    uint32_t m_num_left{};    /*Number of photons left in frame*/
    size_t m_chunk_size{};    /*Number of clusters to read at a time*/
    const std::string m_mode; /*Mode to open the file in*/
    std::string m_mode;       /*Mode to open the file in*/
    std::optional<ROI> m_roi; /*Region of interest, will be applied if set*/
    std::optional<NDArray<int32_t, 2>>
        m_noise_map; /*Noise map to cut photons, will be applied if set*/
    std::optional<GainMap> m_gain_map; /*Gain map to apply to the clusters, will
                                          be applied if set*/
    std::optional<InvertedGainMap> m_gain_map; /*Gain map to apply to the
                                          clusters, will be applied if set*/

  public:
    /**
@@ -59,26 +60,81 @@ class ClusterFile {
     * @throws std::runtime_error if the file could not be opened
     */
    ClusterFile(const std::filesystem::path &fname, size_t chunk_size = 1000,
                const std::string &mode = "r");
                const std::string &mode = "r")

    ~ClusterFile();
        : m_filename(fname.string()), m_chunk_size(chunk_size), m_mode(mode) {

        if (mode == "r") {
            fp = fopen(m_filename.c_str(), "rb");
            if (!fp) {
                throw std::runtime_error("Could not open file for reading: " +
                                         m_filename);
            }
        } else if (mode == "w") {
            fp = fopen(m_filename.c_str(), "wb");
            if (!fp) {
                throw std::runtime_error("Could not open file for writing: " +
                                         m_filename);
            }
        } else if (mode == "a") {
            fp = fopen(m_filename.c_str(), "ab");
            if (!fp) {
                throw std::runtime_error("Could not open file for appending: " +
                                         m_filename);
            }
        } else {
            throw std::runtime_error("Unsupported mode: " + mode);
        }
    }

    ~ClusterFile() { close(); }

    /**
     * @brief Read n_clusters clusters from the file discarding frame numbers.
     * If EOF is reached the returned vector will have less than n_clusters
     * clusters
     * @brief Read n_clusters clusters from the file discarding
     * frame numbers. If EOF is reached the returned vector will
     * have less than n_clusters clusters
     */
    ClusterVector<ClusterType> read_clusters(size_t n_clusters);
    ClusterVector<ClusterType> read_clusters(size_t n_clusters) {
        if (m_mode != "r") {
            throw std::runtime_error("File not opened for reading");
        }
        if (m_noise_map || m_roi) {
            return read_clusters_with_cut(n_clusters);
        } else {
            return read_clusters_without_cut(n_clusters);
        }
    }

    /**
     * @brief Read a single frame from the file and return the clusters. The
     * cluster vector will have the frame number set.
     * @throws std::runtime_error if the file is not opened for reading or the
     * file pointer not at the beginning of a frame
     * @brief Read a single frame from the file and return the
     * clusters. The cluster vector will have the frame number
     * set.
     * @throws std::runtime_error if the file is not opened for
     * reading or the file pointer not at the beginning of a
     * frame
     */
    ClusterVector<ClusterType> read_frame();
    ClusterVector<ClusterType> read_frame() {
        if (m_mode != "r") {
            throw std::runtime_error(LOCATION + "File not opened for reading");
        }
        if (m_noise_map || m_roi) {
            return read_frame_with_cut();
        } else {
            return read_frame_without_cut();
        }
    }

    void write_frame(const ClusterVector<ClusterType> &clusters);
    void write_frame(const ClusterVector<ClusterType> &clusters) {
        if (m_mode != "w" && m_mode != "a") {
            throw std::runtime_error("File not opened for writing");
        }

        int32_t frame_number = clusters.frame_number();
        fwrite(&frame_number, sizeof(frame_number), 1, fp);
        uint32_t n_clusters = clusters.size();
        fwrite(&n_clusters, sizeof(n_clusters), 1, fp);
        fwrite(clusters.data(), clusters.item_size(), clusters.size(), fp);
    }

    /**
     * @brief Return the chunk size
@@ -86,34 +142,84 @@ class ClusterFile {
    size_t chunk_size() const { return m_chunk_size; }

    /**
     * @brief Set the region of interest to use when reading clusters. If set
     * only clusters within the ROI will be read.
     * @brief Set the region of interest to use when reading
     * clusters. If set only clusters within the ROI will be
     * read.
     */
    void set_roi(ROI roi);
    void set_roi(ROI roi) { m_roi = roi; }

    /**
     * @brief Set the noise map to use when reading clusters. If set clusters
     * below the noise level will be discarded. Selection criteria one of:
     * Central pixel above noise, highest 2x2 sum above 2 * noise, total sum
     * above 3 * noise.
     * @brief Set the noise map to use when reading clusters. If
     * set clusters below the noise level will be discarded.
     * Selection criteria one of: Central pixel above noise,
     * highest 2x2 sum above 2 * noise, total sum above 3 *
     * noise.
     */
    void set_noise_map(const NDView<int32_t, 2> noise_map);
    void set_noise_map(const NDView<int32_t, 2> noise_map) {
        m_noise_map = NDArray<int32_t, 2>(noise_map);
    }

    /**
     * @brief Set the gain map to use when reading clusters. If set the gain map
     * will be applied to the clusters that pass ROI and noise_map selection.
     * The gain map is expected to be in ADU/energy.
     */
    void set_gain_map(const NDView<double, 2> gain_map);
    void set_gain_map(const NDView<double, 2> gain_map) {
        m_gain_map = InvertedGainMap(gain_map);
    }

    void set_gain_map(const GainMap &gain_map);
    void set_gain_map(const InvertedGainMap &gain_map) {
        m_gain_map = gain_map;
    }

    void set_gain_map(const GainMap &&gain_map);
    void set_gain_map(const InvertedGainMap &&gain_map) {
        m_gain_map = gain_map;
    }

    /**
     * @brief Close the file. If not closed the file will be closed in the
     * destructor
     * @brief Close the file. If not closed the file will be
     * closed in the destructor
     */
    void close();
    void close() {
        if (fp) {
            fclose(fp);
            fp = nullptr;
        }
    }

    /** @brief Open the file in specific mode
     *
     */
    void open(const std::string &mode) {
        if (fp) {
            close();
        }

        if (mode == "r") {
            fp = fopen(m_filename.c_str(), "rb");
            if (!fp) {
                throw std::runtime_error("Could not open file for reading: " +
                                         m_filename);
            }
            m_mode = "r";
        } else if (mode == "w") {
            fp = fopen(m_filename.c_str(), "wb");
            if (!fp) {
                throw std::runtime_error("Could not open file for writing: " +
                                         m_filename);
            }
            m_mode = "w";
        } else if (mode == "a") {
            fp = fopen(m_filename.c_str(), "ab");
            if (!fp) {
                throw std::runtime_error("Could not open file for appending: " +
                                         m_filename);
            }
            m_mode = "a";
        } else {
            throw std::runtime_error("Unsupported mode: " + mode);
        }
    }

  private:
    ClusterVector<ClusterType> read_clusters_with_cut(size_t n_clusters);
@@ -124,103 +230,6 @@ class ClusterFile {
    ClusterType read_one_cluster();
};

template <typename ClusterType, typename Enable>
ClusterFile<ClusterType, Enable>::ClusterFile(
    const std::filesystem::path &fname, size_t chunk_size,
    const std::string &mode)
    : m_chunk_size(chunk_size), m_mode(mode) {

    if (mode == "r") {
        fp = fopen(fname.c_str(), "rb");
        if (!fp) {
            throw std::runtime_error("Could not open file for reading: " +
                                     fname.string());
        }
    } else if (mode == "w") {
        fp = fopen(fname.c_str(), "wb");
        if (!fp) {
            throw std::runtime_error("Could not open file for writing: " +
                                     fname.string());
        }
    } else if (mode == "a") {
        fp = fopen(fname.c_str(), "ab");
        if (!fp) {
            throw std::runtime_error("Could not open file for appending: " +
                                     fname.string());
        }
    } else {
        throw std::runtime_error("Unsupported mode: " + mode);
    }
}

template <typename ClusterType, typename Enable>
ClusterFile<ClusterType, Enable>::~ClusterFile() {
    close();
}

template <typename ClusterType, typename Enable>
void ClusterFile<ClusterType, Enable>::close() {
    if (fp) {
        fclose(fp);
        fp = nullptr;
    }
}
template <typename ClusterType, typename Enable>
void ClusterFile<ClusterType, Enable>::set_roi(ROI roi) {
    m_roi = roi;
}
template <typename ClusterType, typename Enable>
void ClusterFile<ClusterType, Enable>::set_noise_map(
    const NDView<int32_t, 2> noise_map) {
    m_noise_map = NDArray<int32_t, 2>(noise_map);
}
template <typename ClusterType, typename Enable>
void ClusterFile<ClusterType, Enable>::set_gain_map(
    const NDView<double, 2> gain_map) {
    m_gain_map = GainMap(gain_map);
}

template <typename ClusterType, typename Enable>
void ClusterFile<ClusterType, Enable>::set_gain_map(const GainMap &gain_map) {
    m_gain_map = gain_map;
}

template <typename ClusterType, typename Enable>
void ClusterFile<ClusterType, Enable>::set_gain_map(const GainMap &&gain_map) {
    m_gain_map = gain_map;
}

// TODO generally supported for all clsuter types
template <typename ClusterType, typename Enable>
void ClusterFile<ClusterType, Enable>::write_frame(
    const ClusterVector<ClusterType> &clusters) {
    if (m_mode != "w" && m_mode != "a") {
        throw std::runtime_error("File not opened for writing");
    }
    if (!(clusters.cluster_size_x() == 3) &&
        !(clusters.cluster_size_y() == 3)) {
        throw std::runtime_error("Only 3x3 clusters are supported");
    }
    int32_t frame_number = clusters.frame_number();
    fwrite(&frame_number, sizeof(frame_number), 1, fp);
    uint32_t n_clusters = clusters.size();
    fwrite(&n_clusters, sizeof(n_clusters), 1, fp);
    fwrite(clusters.data(), clusters.item_size(), clusters.size(), fp);
}

template <typename ClusterType, typename Enable>
ClusterVector<ClusterType>
ClusterFile<ClusterType, Enable>::read_clusters(size_t n_clusters) {
    if (m_mode != "r") {
        throw std::runtime_error("File not opened for reading");
    }
    if (m_noise_map || m_roi) {
        return read_clusters_with_cut(n_clusters);
    } else {
        return read_clusters_without_cut(n_clusters);
    }
}

template <typename ClusterType, typename Enable>
ClusterVector<ClusterType>
ClusterFile<ClusterType, Enable>::read_clusters_without_cut(size_t n_clusters) {
@@ -240,8 +249,8 @@ ClusterFile<ClusterType, Enable>::read_clusters_without_cut(size_t n_clusters) {
    // if there are photons left from previous frame read them first
    if (nph) {
        if (nph > n_clusters) {
            // if we have more photons left in the frame then photons to read we
            // read directly the requested number
            // if we have more photons left in the frame then photons to
            // read we read directly the requested number
            nn = n_clusters;
        } else {
            nn = nph;
@@ -270,7 +279,7 @@ ClusterFile<ClusterType, Enable>::read_clusters_without_cut(size_t n_clusters) {
        }
    }

    // Resize the vector to the number of clusters.
    // Resize the vector to the number o f clusters.
    // No new allocation, only change bounds.
    clusters.resize(nph_read);
    if (m_gain_map)
@@ -282,7 +291,7 @@ template <typename ClusterType, typename Enable>
ClusterVector<ClusterType>
ClusterFile<ClusterType, Enable>::read_clusters_with_cut(size_t n_clusters) {
    ClusterVector<ClusterType> clusters;
    clusters.resize(n_clusters);
    clusters.reserve(n_clusters);

    // if there are photons left from previous frame read them first
    if (m_num_left) {
@@ -307,8 +316,8 @@ ClusterFile<ClusterType, Enable>::read_clusters_with_cut(size_t n_clusters) {
    while (fread(&frame_number, sizeof(frame_number), 1, fp)) {
        if (fread(&m_num_left, sizeof(m_num_left), 1, fp)) {
            clusters.set_frame_number(
                frame_number); // cluster vector will hold the last frame
                               // number
                frame_number); // cluster vector will hold the last
                               // frame number
            while (m_num_left && clusters.size() < n_clusters) {
                ClusterType c = read_one_cluster();
                if (is_selected(c)) {
@@ -339,18 +348,6 @@ ClusterType ClusterFile<ClusterType, Enable>::read_one_cluster() {
    return c;
}

template <typename ClusterType, typename Enable>
ClusterVector<ClusterType> ClusterFile<ClusterType, Enable>::read_frame() {
    if (m_mode != "r") {
        throw std::runtime_error(LOCATION + "File not opened for reading");
    }
    if (m_noise_map || m_roi) {
        return read_frame_with_cut();
    } else {
        return read_frame_without_cut();
    }
}

template <typename ClusterType, typename Enable>
ClusterVector<ClusterType>
ClusterFile<ClusterType, Enable>::read_frame_without_cut() {
@@ -375,11 +372,13 @@ ClusterFile<ClusterType, Enable>::read_frame_without_cut() {
    ClusterVector<ClusterType> clusters(n_clusters);
    clusters.set_frame_number(frame_number);

    clusters.resize(n_clusters);

    if (fread(clusters.data(), clusters.item_size(), n_clusters, fp) !=
        static_cast<size_t>(n_clusters)) {
        throw std::runtime_error(LOCATION + "Could not read clusters");
    }
    clusters.resize(n_clusters);

    if (m_gain_map)
        m_gain_map->apply_gain_map(clusters);
    return clusters;
@@ -427,13 +426,9 @@ bool ClusterFile<ClusterType, Enable>::is_selected(ClusterType &cl) {
        }
    }

    auto cluster_size_x = extract_template_arguments<
        std::remove_reference_t<decltype(cl)>>::cluster_size_x;
    auto cluster_size_y = extract_template_arguments<
        std::remove_reference_t<decltype(cl)>>::cluster_size_y;

    size_t cluster_center_index =
        (cluster_size_x / 2) + (cluster_size_y / 2) * cluster_size_x;
        (ClusterType::cluster_size_x / 2) +
        (ClusterType::cluster_size_y / 2) * ClusterType::cluster_size_x;

    if (m_noise_map) {
        auto sum_1x1 = cl.data[cluster_center_index]; // central pixel
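
A hedged usage sketch for the now header-only ClusterFile, based only on the interface visible in the hunks above. The header names, the file path and the ROI initializer are assumptions, not taken from the repository.

    #include <cstdint>
    #include "aare/Cluster.hpp"      // assumed header names
    #include "aare/ClusterFile.hpp"

    int main() {
        using C3 = aare::Cluster<int32_t, 3, 3>;

        // Open for reading; chunk_size and mode follow the constructor shown above.
        aare::ClusterFile<C3> f("clusters.clust", /*chunk_size=*/1000, "r");

        // Hypothetical ROI; the member layout of aare::ROI is an assumption here.
        f.set_roi({0, 255, 0, 255});

        auto clusters = f.read_clusters(10000); // may return fewer clusters at EOF
        f.close();                              // otherwise closed by the destructor
        return 0;
    }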
@@ -1,154 +0,0 @@
#pragma once
#include "aare/core/defs.hpp"
#include <filesystem>
#include <fmt/format.h>
#include <string>

namespace aare {
struct ClusterHeader {
    int32_t frame_number;
    int32_t n_clusters;
    std::string to_string() const {
        return "frame_number: " + std::to_string(frame_number) +
               ", n_clusters: " + std::to_string(n_clusters);
    }
};

struct ClusterV2_ {
    int16_t x;
    int16_t y;
    std::array<int32_t, 9> data;
    std::string to_string(bool detailed = false) const {
        if (detailed) {
            std::string data_str = "[";
            for (auto &d : data) {
                data_str += std::to_string(d) + ", ";
            }
            data_str += "]";
            return "x: " + std::to_string(x) + ", y: " + std::to_string(y) +
                   ", data: " + data_str;
        }
        return "x: " + std::to_string(x) + ", y: " + std::to_string(y);
    }
};

struct ClusterV2 {
    ClusterV2_ cluster;
    int32_t frame_number;
    std::string to_string() const {
        return "frame_number: " + std::to_string(frame_number) + ", " +
               cluster.to_string();
    }
};

/**
 * @brief
 * important not: fp always points to the clusters header and does not point to
 * individual clusters
 *
 */
class ClusterFileV2 {
    std::filesystem::path m_fpath;
    std::string m_mode;
    FILE *fp{nullptr};

    void check_open() {
        if (!fp)
            throw std::runtime_error(
                fmt::format("File: {} not open", m_fpath.string()));
    }

  public:
    ClusterFileV2(std::filesystem::path const &fpath, std::string const &mode)
        : m_fpath(fpath), m_mode(mode) {
        if (m_mode != "r" && m_mode != "w")
            throw std::invalid_argument("mode must be 'r' or 'w'");
        if (m_mode == "r" && !std::filesystem::exists(m_fpath))
            throw std::invalid_argument("File does not exist");
        if (mode == "r") {
            fp = fopen(fpath.string().c_str(), "rb");
        } else if (mode == "w") {
            if (std::filesystem::exists(fpath)) {
                fp = fopen(fpath.string().c_str(), "r+b");
            } else {
                fp = fopen(fpath.string().c_str(), "wb");
            }
        }
        if (fp == nullptr) {
            throw std::runtime_error("Failed to open file");
        }
    }
    ~ClusterFileV2() { close(); }
    std::vector<ClusterV2> read() {
        check_open();

        ClusterHeader header;
        fread(&header, sizeof(ClusterHeader), 1, fp);
        std::vector<ClusterV2_> clusters_(header.n_clusters);
        fread(clusters_.data(), sizeof(ClusterV2_), header.n_clusters, fp);
        std::vector<ClusterV2> clusters;
        for (auto &c : clusters_) {
            ClusterV2 cluster;
            cluster.cluster = std::move(c);
            cluster.frame_number = header.frame_number;
            clusters.push_back(cluster);
        }

        return clusters;
    }
    std::vector<std::vector<ClusterV2>> read(int n_frames) {
        std::vector<std::vector<ClusterV2>> clusters;
        for (int i = 0; i < n_frames; i++) {
            clusters.push_back(read());
        }
        return clusters;
    }

    size_t write(std::vector<ClusterV2> const &clusters) {
        check_open();
        if (m_mode != "w")
            throw std::runtime_error("File not opened in write mode");
        if (clusters.empty())
            return 0;

        ClusterHeader header;
        header.frame_number = clusters[0].frame_number;
        header.n_clusters = clusters.size();
        fwrite(&header, sizeof(ClusterHeader), 1, fp);
        for (auto &c : clusters) {
            fwrite(&c.cluster, sizeof(ClusterV2_), 1, fp);
        }
        return clusters.size();
    }

    size_t write(std::vector<std::vector<ClusterV2>> const &clusters) {
        check_open();
        if (m_mode != "w")
            throw std::runtime_error("File not opened in write mode");

        size_t n_clusters = 0;
        for (auto &c : clusters) {
            n_clusters += write(c);
        }
        return n_clusters;
    }

    int seek_to_begin() { return fseek(fp, 0, SEEK_SET); }
    int seek_to_end() { return fseek(fp, 0, SEEK_END); }

    int32_t frame_number() {
        auto pos = ftell(fp);
        ClusterHeader header;
        fread(&header, sizeof(ClusterHeader), 1, fp);
        fseek(fp, pos, SEEK_SET);
        return header.frame_number;
    }

    void close() {
        if (fp) {
            fclose(fp);
            fp = nullptr;
        }
    }
};
} // namespace aare
@@ -20,11 +20,9 @@ class ClusterFinder {
    Pedestal<PEDESTAL_TYPE> m_pedestal;
    ClusterVector<ClusterType> m_clusters;

    static const uint8_t ClusterSizeX =
        extract_template_arguments<ClusterType>::cluster_size_x;
    static const uint8_t ClusterSizeY =
        extract_template_arguments<ClusterType>::cluster_size_x;
    using CT = typename extract_template_arguments<ClusterType>::value_type;
    static const uint8_t ClusterSizeX = ClusterType::cluster_size_x;
    static const uint8_t ClusterSizeY = ClusterType::cluster_size_y;
    using CT = typename ClusterType::value_type;

  public:
    /**
@@ -79,7 +77,6 @@ class ClusterFinder {
        int has_center_pixel_y = ClusterSizeY % 2;

        m_clusters.set_frame_number(frame_number);
        std::vector<CT> cluster_data(ClusterSizeX * ClusterSizeY);
        for (int iy = 0; iy < frame.shape(0); iy++) {
            for (int ix = 0; ix < frame.shape(1); ix++) {

@@ -126,8 +123,9 @@ class ClusterFinder {

                // Store cluster
                if (value == max) {
                    // Zero out the cluster data
                    std::fill(cluster_data.begin(), cluster_data.end(), 0);
                    ClusterType cluster{};
                    cluster.x = ix;
                    cluster.y = iy;

                    // Fill the cluster data since we have a photon to store
                    // It's worth redoing the look since most of the time we
@@ -141,20 +139,15 @@ class ClusterFinder {
                                static_cast<CT>(frame(iy + ir, ix + ic)) -
                                static_cast<CT>(
                                    m_pedestal.mean(iy + ir, ix + ic));
                            cluster_data[i] =
                            cluster.data[i] =
                                tmp; // Watch for out of bounds access
                            i++;
                        }
                    }
                }

                ClusterType new_cluster{};
                new_cluster.x = ix;
                new_cluster.y = iy;
                std::copy(cluster_data.begin(), cluster_data.end(),
                          new_cluster.data);
                // Add the cluster to the output ClusterVector
                m_clusters.push_back(new_cluster);
                m_clusters.push_back(cluster);
            }
        }
    }
@@ -34,7 +34,8 @@ template <typename ClusterType = Cluster<int32_t, 3, 3>,
          typename FRAME_TYPE = uint16_t, typename PEDESTAL_TYPE = double>
class ClusterFinderMT {

    using CT = typename extract_template_arguments<ClusterType>::value_type;
  protected:
    using CT = typename ClusterType::value_type;
    size_t m_current_thread{0};
    size_t m_n_threads{0};
    using Finder = ClusterFinder<ClusterType, FRAME_TYPE, PEDESTAL_TYPE>;
@@ -50,6 +51,7 @@ class ClusterFinderMT {
    std::thread m_collect_thread;
    std::chrono::milliseconds m_default_wait{1};

  private:
    std::atomic<bool> m_stop_requested{false};
    std::atomic<bool> m_processing_threads_stopped{true};

@@ -120,6 +122,7 @@ class ClusterFinderMT {
    ClusterFinderMT(Shape<2> image_size, PEDESTAL_TYPE nSigma = 5.0,
                    size_t capacity = 2000, size_t n_threads = 3)
        : m_n_threads(n_threads) {

        for (size_t i = 0; i < n_threads; i++) {
            m_cluster_finders.push_back(
                std::make_unique<
@@ -18,256 +18,6 @@ template <typename ClusterType,
          typename = std::enable_if_t<is_cluster_v<ClusterType>>>
class ClusterVector; // Forward declaration

/**
 * @brief ClusterVector is a container for clusters of various sizes. It uses a
 * contiguous memory buffer to store the clusters. It is templated on the data
 * type and the coordinate type of the clusters.
 * @note push_back can invalidate pointers to elements in the container
 * @warning ClusterVector is currently move only to catch unintended copies, but
 * this might change since there are probably use cases where copying is needed.
 * @tparam T data type of the pixels in the cluster
 * @tparam CoordType data type of the x and y coordinates of the cluster
 * (normally int16_t)
 */
#if 0
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
          typename CoordType>
class ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> {

    std::byte *m_data{};
    size_t m_size{0};
    size_t m_capacity;
    uint64_t m_frame_number{0}; // TODO! Check frame number size and type
    /**
    Format string used in the python bindings to create a numpy
    array from the buffer
    = - native byte order
    h - short
    d - double
    i - int
    */
    constexpr static char m_fmt_base[] = "=h:x:\nh:y:\n({},{}){}:data:";

  public:
    using value_type = T;
    using ClusterType = Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>;

    /**
     * @brief Construct a new ClusterVector object
     * @param capacity initial capacity of the buffer in number of clusters
     * @param frame_number frame number of the clusters. Default is 0, which is
     * also used to indicate that the clusters come from many frames
     */
    ClusterVector(size_t capacity = 1024, uint64_t frame_number = 0)
        : m_capacity(capacity), m_frame_number(frame_number) {
        allocate_buffer(m_capacity);
    }

    ~ClusterVector() { delete[] m_data; }

    // Move constructor
    ClusterVector(ClusterVector &&other) noexcept
        : m_data(other.m_data), m_size(other.m_size),
          m_capacity(other.m_capacity), m_frame_number(other.m_frame_number) {
        other.m_data = nullptr;
        other.m_size = 0;
        other.m_capacity = 0;
    }

    // Move assignment operator
    ClusterVector &operator=(ClusterVector &&other) noexcept {
        if (this != &other) {
            delete[] m_data;
            m_data = other.m_data;
            m_size = other.m_size;
            m_capacity = other.m_capacity;
            m_frame_number = other.m_frame_number;
            other.m_data = nullptr;
            other.m_size = 0;
            other.m_capacity = 0;
            other.m_frame_number = 0;
        }
        return *this;
    }

    /**
     * @brief Reserve space for at least capacity clusters
     * @param capacity number of clusters to reserve space for
     * @note If capacity is less than the current capacity, the function does
     * nothing.
     */
    void reserve(size_t capacity) {
        if (capacity > m_capacity) {
            allocate_buffer(capacity);
        }
    }

    /**
     * @brief Add a cluster to the vector
     */
    void push_back(const ClusterType &cluster) {
        if (m_size == m_capacity) {
            allocate_buffer(m_capacity * 2);
        }
        std::byte *ptr = element_ptr(m_size);
        *reinterpret_cast<CoordType *>(ptr) = cluster.x;
        ptr += sizeof(CoordType);
        *reinterpret_cast<CoordType *>(ptr) = cluster.y;
        ptr += sizeof(CoordType);

        std::memcpy(ptr, cluster.data, ClusterSizeX * ClusterSizeY * sizeof(T));

        m_size++;
    }

    ClusterVector &operator+=(const ClusterVector &other) {
        if (m_size + other.m_size > m_capacity) {
            allocate_buffer(m_capacity + other.m_size);
        }
        std::copy(other.m_data, other.m_data + other.m_size * item_size(),
                  m_data + m_size * item_size());
        m_size += other.m_size;
        return *this;
    }

    /**
     * @brief Sum the pixels in each cluster
     * @return std::vector<T> vector of sums for each cluster
     */
    /*
    std::vector<T> sum() {
        std::vector<T> sums(m_size);
        const size_t stride = item_size();
        const size_t n_pixels = ClusterSizeX * ClusterSizeY;
        std::byte *ptr = m_data + 2 * sizeof(CoordType); // skip x and y

        for (size_t i = 0; i < m_size; i++) {
            sums[i] =
                std::accumulate(reinterpret_cast<T *>(ptr),
                                reinterpret_cast<T *>(ptr) + n_pixels, T{});
            ptr += stride;
        }
        return sums;
    }
    */

    /**
     * @brief Sum the pixels in the 2x2 subcluster with the biggest pixel sum in
     * each cluster
     * @return std::vector<T> vector of sums for each cluster
     */ //TODO if underlying container is a vector use std::for_each
    /*
    std::vector<T> sum_2x2() {
        std::vector<T> sums_2x2(m_size);

        for (size_t i = 0; i < m_size; i++) {
            sums_2x2[i] = at(i).max_sum_2x2;
        }
        return sums_2x2;
    }
    */

    /**
     * @brief Return the number of clusters in the vector
     */
    size_t size() const { return m_size; }

    uint8_t cluster_size_x() const { return ClusterSizeX; }

    uint8_t cluster_size_y() const { return ClusterSizeY; }

    /**
     * @brief Return the capacity of the buffer in number of clusters. This is
     * the number of clusters that can be stored in the current buffer without
     * reallocation.
     */
    size_t capacity() const { return m_capacity; }

    /**
     * @brief Return the size in bytes of a single cluster
     */
    size_t item_size() const {
        return 2 * sizeof(CoordType) + ClusterSizeX * ClusterSizeY * sizeof(T);
    }

    /**
     * @brief Return the offset in bytes for the i-th cluster
     */
    size_t element_offset(size_t i) const { return item_size() * i; }

    /**
     * @brief Return a pointer to the i-th cluster
     */
    std::byte *element_ptr(size_t i) { return m_data + element_offset(i); }

    /**
     * @brief Return a pointer to the i-th cluster
     */
    const std::byte *element_ptr(size_t i) const {
        return m_data + element_offset(i);
    }

    std::byte *data() { return m_data; }
    std::byte const *data() const { return m_data; }

    /**
     * @brief Return a reference to the i-th cluster casted to type V
     * @tparam V type of the cluster
     */
    ClusterType &at(size_t i) {
        return *reinterpret_cast<ClusterType *>(element_ptr(i));
    }

    const ClusterType &at(size_t i) const {
        return *reinterpret_cast<const ClusterType *>(element_ptr(i));
    }

    template <typename V> const V &at(size_t i) const {
        return *reinterpret_cast<const V *>(element_ptr(i));
    }

    const std::string_view fmt_base() const {
        // TODO! how do we match on coord_t?
        return m_fmt_base;
    }

    /**
     * @brief Return the frame number of the clusters. 0 is used to indicate
     * that the clusters come from many frames
     */
    uint64_t frame_number() const { return m_frame_number; }

    void set_frame_number(uint64_t frame_number) {
        m_frame_number = frame_number;
    }

    /**
     * @brief Resize the vector to contain new_size clusters. If new_size is
     * greater than the current capacity, a new buffer is allocated. If the size
     * is smaller no memory is freed, size is just updated.
     * @param new_size new size of the vector
     * @warning The additional clusters are not initialized
     */
    void resize(size_t new_size) {
        // TODO! Should we initialize the new clusters?
        if (new_size > m_capacity) {
            allocate_buffer(new_size);
        }
        m_size = new_size;
    }

  private:
    void allocate_buffer(size_t new_capacity) {
        size_t num_bytes = item_size() * new_capacity;
        std::byte *new_data = new std::byte[num_bytes]{};
        std::copy(m_data, m_data + item_size() * m_size, new_data);
        delete[] m_data;
        m_data = new_data;
        m_capacity = new_capacity;
    }
};
#endif

/**
 * @brief ClusterVector is a container for clusters of various sizes. It
 * uses a contiguous memory buffer to store the clusters. It is templated on
@@ -285,7 +35,7 @@ template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
class ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> {

    std::vector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> m_data{};
    uint64_t m_frame_number{0}; // TODO! Check frame number size and type
    int32_t m_frame_number{0};  // TODO! Check frame number size and type

  public:
    using value_type = T;
@@ -297,7 +47,7 @@ class ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> {
     * @param frame_number frame number of the clusters. Default is 0, which is
     * also used to indicate that the clusters come from many frames
     */
    ClusterVector(size_t capacity = 300, int32_t frame_number = 0)
    ClusterVector(size_t capacity = 1024, uint64_t frame_number = 0)
        : m_frame_number(frame_number) {
        m_data.reserve(capacity);
    }
@@ -319,6 +69,36 @@ class ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> {
        return *this;
    }

    /**
     * @brief Sum the pixels in each cluster
     * @return std::vector<T> vector of sums for each cluster
     */
    std::vector<T> sum() {
        std::vector<T> sums(m_data.size());

        std::transform(
            m_data.begin(), m_data.end(), sums.begin(),
            [](const ClusterType &cluster) { return cluster.sum(); });

        return sums;
    }

    /**
     * @brief Sum the pixels in the 2x2 subcluster with the biggest pixel sum in
     * each cluster
     * @return std::vector<T> vector of sums for each cluster
     */
    std::vector<T> sum_2x2() {
        std::vector<T> sums_2x2(m_data.size());

        std::transform(m_data.begin(), m_data.end(), sums_2x2.begin(),
                       [](const ClusterType &cluster) {
                           return cluster.max_sum_2x2().first;
                       });

        return sums_2x2;
    }

    /**
     * @brief Reserve space for at least capacity clusters
     * @param capacity number of clusters to reserve space for
@@ -361,7 +141,8 @@ class ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> {
     * @brief Return the size in bytes of a single cluster
     */
    size_t item_size() const {
        return 2 * sizeof(CoordType) + ClusterSizeX * ClusterSizeY * sizeof(T);
        return sizeof(ClusterType); // 2 * sizeof(CoordType) + ClusterSizeX *
                                    // ClusterSizeY * sizeof(T);
    }

    ClusterType *data() { return m_data.data(); }
@@ -371,17 +152,17 @@ class ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> {
     * @brief Return a reference to the i-th cluster casted to type V
     * @tparam V type of the cluster
     */
    ClusterType &at(size_t i) { return m_data[i]; }
    ClusterType &operator[](size_t i) { return m_data[i]; }

    const ClusterType &at(size_t i) const { return m_data[i]; }
    const ClusterType &operator[](size_t i) const { return m_data[i]; }

    /**
     * @brief Return the frame number of the clusters. 0 is used to indicate
     * that the clusters come from many frames
     */
    uint64_t frame_number() const { return m_frame_number; }
    int32_t frame_number() const { return m_frame_number; }

    void set_frame_number(uint64_t frame_number) {
    void set_frame_number(int32_t frame_number) {
        m_frame_number = frame_number;
    }
};
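
A hedged usage sketch for the std::vector-backed ClusterVector shown above. The header names are assumptions; the member functions are the ones visible in the hunks.

    #include <cstdint>
    #include "aare/Cluster.hpp"        // assumed header names
    #include "aare/ClusterVector.hpp"

    int main() {
        using C3 = aare::Cluster<int32_t, 3, 3>;

        aare::ClusterVector<C3> cv;    // backed by std::vector after the change above
        cv.set_frame_number(7);

        C3 c{};
        c.x = 10;
        c.y = 20;
        c.data = {1, 2, 3, 4, 5, 6, 7, 8, 9}; // std::array<T, 9> after the change
        cv.push_back(c);

        auto sums = cv.sum();     // per-cluster pixel sums
        auto best = cv.sum_2x2(); // per-cluster best 2x2 sums
        (void)sums;
        (void)best;
        return 0;
    }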
@@ -1,6 +1,7 @@
/************************************************
 * @file ApplyGainMap.hpp
 * @short function to apply gain map of image size to a vector of clusters
 * @file GainMap.hpp
 * @short function to apply gain map of image size to a vector of clusters -
 *note stored gainmap is inverted for efficient aaplication to images
 ***********************************************/

#pragma once
@@ -12,14 +13,21 @@

namespace aare {

class GainMap {
class InvertedGainMap {

  public:
    explicit GainMap(const NDArray<double, 2> &gain_map)
        : m_gain_map(gain_map) {};
    explicit InvertedGainMap(const NDArray<double, 2> &gain_map)
        : m_gain_map(gain_map) {
        for (auto &item : m_gain_map) {
            item = 1.0 / item;
        }
    };

    explicit GainMap(const NDView<double, 2> gain_map) {
    explicit InvertedGainMap(const NDView<double, 2> gain_map) {
        m_gain_map = NDArray<double, 2>(gain_map);
        for (auto &item : m_gain_map) {
            item = 1.0 / item;
        }
    }

    template <typename ClusterType,
@@ -34,19 +42,21 @@ class GainMap {
        int64_t index_cluster_center_x = ClusterSizeX / 2;
        int64_t index_cluster_center_y = ClusterSizeY / 2;
        for (size_t i = 0; i < clustervec.size(); i++) {
            auto &cl = clustervec.at(i);
            auto &cl = clustervec[i];

            if (cl.x > 0 && cl.y > 0 && cl.x < m_gain_map.shape(1) - 1 &&
                cl.y < m_gain_map.shape(0) - 1) {
                for (size_t j = 0; j < ClusterSizeX * ClusterSizeY; j++) {
                    size_t x = cl.x + j % ClusterSizeX - index_cluster_center_x;
                    size_t y = cl.y + j / ClusterSizeX - index_cluster_center_y;
                    cl.data[j] = cl.data[j] * static_cast<T>(m_gain_map(y, x));
                    cl.data[j] = static_cast<T>(
                        static_cast<double>(cl.data[j]) *
                        m_gain_map(
                            y, x)); // cast after conversion to keep precision
                }
            } else {
                memset(cl.data, 0,
                       ClusterSizeX * ClusterSizeY *
                           sizeof(T)); // clear edge clusters
                // clear edge clusters
                cl.data.fill(0);
            }
        }
    }
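
A standalone sketch of the inverted-gain-map idea from the hunks above: the reciprocal is computed once at construction so that applying the map to every cluster pixel is a multiplication rather than a division. The names and numbers below are illustrative only.

    #include <cstdio>
    #include <vector>

    int main() {
        std::vector<double> gain = {0.5, 2.0, 4.0}; // gains in ADU per energy unit
        std::vector<double> inv(gain.size());
        for (size_t i = 0; i < gain.size(); ++i)
            inv[i] = 1.0 / gain[i];                 // done once at construction

        int adu = 100;                              // a pixel value in ADU
        for (size_t i = 0; i < inv.size(); ++i) {
            // convert to double before multiplying, then cast back, as the
            // hunk above does to keep precision
            int corrected = static_cast<int>(static_cast<double>(adu) * inv[i]);
            std::printf("gain %.1f -> %d\n", gain[i], corrected);
        }
        return 0;
    }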
@@ -44,9 +44,8 @@ Interpolator::interpolate(const ClusterVector<ClusterType> &clusters) {
    photons.reserve(clusters.size());

    if (clusters.cluster_size_x() == 3 || clusters.cluster_size_y() == 3) {
        for (size_t i = 0; i < clusters.size(); i++) {
        for (const ClusterType &cluster : clusters) {

            auto cluster = clusters.at(i);
            auto eta = calculate_eta2(cluster);

            Photon photon;
@@ -70,20 +69,20 @@ Interpolator::interpolate(const ClusterVector<ClusterType> &clusters) {
            // cBottomRight = 1,
            // cTopLeft = 2,
            // cTopRight = 3
            switch (eta.c) {
            case cTopLeft:
            switch (static_cast<corner>(eta.c)) {
            case corner::cTopLeft:
                dX = -1.;
                dY = 0;
                break;
            case cTopRight:;
            case corner::cTopRight:;
                dX = 0;
                dY = 0;
                break;
            case cBottomLeft:
            case corner::cBottomLeft:
                dX = -1.;
                dY = -1.;
                break;
            case cBottomRight:
            case corner::cBottomRight:
                dX = 0.;
                dY = -1.;
                break;
@@ -94,8 +93,7 @@ Interpolator::interpolate(const ClusterVector<ClusterType> &clusters) {
        }
    } else if (clusters.cluster_size_x() == 2 ||
               clusters.cluster_size_y() == 2) {
        for (size_t i = 0; i < clusters.size(); i++) {
            auto cluster = clusters.at(i);
        for (const ClusterType &cluster : clusters) {
            auto eta = calculate_eta2(cluster);

            Photon photon;
@ -184,4 +184,9 @@ std::ostream& operator <<(std::ostream& os, const NDView<T, Ndim>& arr){
|
||||
}
|
||||
|
||||
|
||||
template <typename T>
|
||||
NDView<T,1> make_view(std::vector<T>& vec){
|
||||
return NDView<T,1>(vec.data(), {static_cast<int64_t>(vec.size())});
|
||||
}
|
||||
|
||||
} // namespace aare
|
@ -22,7 +22,7 @@ class RawSubFile {
|
||||
size_t m_rows{};
|
||||
size_t m_cols{};
|
||||
size_t m_bytes_per_frame{};
|
||||
size_t n_frames{};
|
||||
size_t m_num_frames{};
|
||||
uint32_t m_pos_row{};
|
||||
uint32_t m_pos_col{};
|
||||
|
||||
@ -53,6 +53,7 @@ class RawSubFile {
|
||||
size_t tell();
|
||||
|
||||
void read_into(std::byte *image_buf, DetectorHeader *header = nullptr);
|
||||
void read_into(std::byte *image_buf, size_t n_frames, DetectorHeader *header= nullptr);
|
||||
void get_part(std::byte *buffer, size_t frame_index);
|
||||
|
||||
void read_header(DetectorHeader *header);
|
||||
@ -66,6 +67,8 @@ class RawSubFile {
|
||||
size_t pixels_per_frame() const { return m_rows * m_cols; }
|
||||
size_t bytes_per_pixel() const { return m_bitdepth / bits_per_byte; }
|
||||
|
||||
size_t frames_in_file() const { return m_num_frames; }
|
||||
|
||||
private:
|
||||
template <typename T>
|
||||
void read_with_map(std::byte *image_buf);
|
||||
|
@ -1,6 +1,7 @@
|
||||
#pragma once
|
||||
|
||||
#include <cstdint>
|
||||
#include <vector>
|
||||
#include <aare/NDView.hpp>
|
||||
namespace aare {
|
||||
|
||||
@ -10,4 +11,16 @@ uint16_t adc_sar_04_decode64to16(uint64_t input);
|
||||
void adc_sar_05_decode64to16(NDView<uint64_t, 2> input, NDView<uint16_t,2> output);
|
||||
void adc_sar_04_decode64to16(NDView<uint64_t, 2> input, NDView<uint16_t,2> output);
|
||||
|
||||
} // namespace aare
|
||||
|
||||
/**
|
||||
* @brief Apply custom weights to a 16-bit input value. Will sum up weights[i]**i
|
||||
* for each bit i that is set in the input value.
|
||||
* @throws std::out_of_range if weights.size() < 16
|
||||
* @param input 16-bit input value
|
||||
* @param weights vector of weights, size must be at least 16 (one weight per bit)
|
||||
*/
|
||||
double apply_custom_weights(uint16_t input, const NDView<double, 1> weights);
|
||||
|
||||
void apply_custom_weights(NDView<uint16_t, 1> input, NDView<double, 1> output, const NDView<double, 1> weights);
|
||||
|
||||
} // namespace aare
|
||||
|
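A short sketch of calling this from Python through the binding added later in this diff (apply_custom_weights is re-exported from aare); the input words here are made up:

    import numpy as np
    from aare import apply_custom_weights

    # Per-bit weights: with weights[i] == 2.0 for every bit, summing weights[i]**i
    # over the set bits reproduces the plain integer value of each word.
    weights = np.full(16, 2.0, dtype=np.float64)
    raw = np.array([0b0101, 0b0011, 0xFFFF], dtype=np.uint16)
    decoded = apply_custom_weights(raw, weights)   # float64 array with the same shape as raw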
12
include/aare/utils/ifstream_helpers.hpp
Normal file
@ -0,0 +1,12 @@
|
||||
#pragma once
|
||||
|
||||
#include <fstream>
|
||||
#include <string>
|
||||
namespace aare {
|
||||
|
||||
/**
|
||||
* @brief Get the error message from an ifstream object
|
||||
*/
|
||||
std::string ifstream_error_msg(std::ifstream &ifs);
|
||||
|
||||
} // namespace aare
|
@ -4,19 +4,31 @@ build-backend = "scikit_build_core.build"
|
||||
|
||||
[project]
|
||||
name = "aare"
|
||||
version = "2025.4.1"
|
||||
version = "2025.4.22"
|
||||
requires-python = ">=3.11"
|
||||
dependencies = [
|
||||
"numpy",
|
||||
"matplotlib",
|
||||
]
|
||||
|
||||
|
||||
[tool.cibuildwheel]
|
||||
|
||||
build = "cp{311,312,313}-manylinux_x86_64"
|
||||
|
||||
|
||||
|
||||
|
||||
[tool.scikit-build]
|
||||
cmake.verbose = true
|
||||
build.verbose = true
|
||||
cmake.build-type = "Release"
|
||||
install.components = ["python"]
|
||||
|
||||
[tool.scikit-build.cmake.define]
|
||||
AARE_PYTHON_BINDINGS = "ON"
|
||||
AARE_SYSTEM_LIBRARIES = "ON"
|
||||
AARE_INSTALL_PYTHONEXT = "ON"
|
||||
|
||||
|
||||
[tool.pytest.ini_options]
|
||||
markers = [
|
||||
"files: marks tests that need additional data (deselect with '-m \"not files\"')",
|
||||
|
@ -1,12 +1,13 @@
|
||||
|
||||
find_package (Python 3.10 COMPONENTS Interpreter Development REQUIRED)
|
||||
find_package (Python 3.10 COMPONENTS Interpreter Development.Module REQUIRED)
|
||||
set(PYBIND11_FINDPYTHON ON) # Needed for RH8
|
||||
|
||||
# Download or find pybind11 depending on configuration
|
||||
if(AARE_FETCH_PYBIND11)
|
||||
FetchContent_Declare(
|
||||
pybind11
|
||||
GIT_REPOSITORY https://github.com/pybind/pybind11
|
||||
GIT_TAG v2.13.0
|
||||
GIT_TAG v2.13.6
|
||||
)
|
||||
FetchContent_MakeAvailable(pybind11)
|
||||
else()
|
||||
@ -28,6 +29,9 @@ target_link_libraries(_aare PRIVATE aare_core aare_compiler_flags)
|
||||
set( PYTHON_FILES
|
||||
aare/__init__.py
|
||||
aare/CtbRawFile.py
|
||||
aare/ClusterFinder.py
|
||||
aare/ClusterVector.py
|
||||
|
||||
aare/func.py
|
||||
aare/RawFile.py
|
||||
aare/transform.py
|
||||
@ -35,6 +39,7 @@ set( PYTHON_FILES
|
||||
aare/utils.py
|
||||
)
|
||||
|
||||
|
||||
# Copy the python files to the build directory
|
||||
foreach(FILE ${PYTHON_FILES})
|
||||
configure_file(${FILE} ${CMAKE_BINARY_DIR}/${FILE} )
|
||||
@ -58,10 +63,16 @@ endforeach(FILE ${PYTHON_EXAMPLES})
|
||||
|
||||
|
||||
if(AARE_INSTALL_PYTHONEXT)
|
||||
install(TARGETS _aare
|
||||
install(
|
||||
TARGETS _aare
|
||||
EXPORT "${TARGETS_EXPORT_NAME}"
|
||||
LIBRARY DESTINATION aare
|
||||
COMPONENT python
|
||||
)
|
||||
|
||||
install(FILES ${PYTHON_FILES} DESTINATION aare)
|
||||
install(
|
||||
FILES ${PYTHON_FILES}
|
||||
DESTINATION aare
|
||||
COMPONENT python
|
||||
)
|
||||
endif()
|
67
python/aare/ClusterFinder.py
Normal file
@ -0,0 +1,67 @@
|
||||
|
||||
from ._aare import ClusterFinder_Cluster3x3i, ClusterFinder_Cluster2x2i, ClusterFinderMT_Cluster3x3i, ClusterFinderMT_Cluster2x2i, ClusterCollector_Cluster3x3i, ClusterCollector_Cluster2x2i
|
||||
|
||||
|
||||
from ._aare import ClusterFileSink_Cluster3x3i, ClusterFileSink_Cluster2x2i
|
||||
import numpy as np
|
||||
|
||||
def ClusterFinder(image_size, cluster_size, n_sigma=5, dtype = np.int32, capacity = 1024):
|
||||
"""
|
||||
Factory function to create a ClusterFinder object. Provides a cleaner syntax for
|
||||
the templated ClusterFinder in C++.
|
||||
"""
|
||||
if dtype == np.int32 and cluster_size == (3,3):
|
||||
return ClusterFinder_Cluster3x3i(image_size, n_sigma = n_sigma, capacity=capacity)
|
||||
elif dtype == np.int32 and cluster_size == (2,2):
|
||||
return ClusterFinder_Cluster2x2i(image_size, n_sigma = n_sigma, capacity=capacity)
|
||||
else:
|
||||
#TODO! add the other formats
|
||||
raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32 is supported.")
|
||||
|
||||
|
||||
def ClusterFinderMT(image_size, cluster_size = (3,3), dtype=np.int32, n_sigma=5, capacity = 1024, n_threads = 3):
|
||||
"""
|
||||
Factory function to create a ClusterFinderMT object. Provides a cleaner syntax for
|
||||
the templated ClusterFinderMT in C++.
|
||||
"""
|
||||
|
||||
if dtype == np.int32 and cluster_size == (3,3):
|
||||
return ClusterFinderMT_Cluster3x3i(image_size, n_sigma = n_sigma,
|
||||
capacity = capacity, n_threads = n_threads)
|
||||
elif dtype == np.int32 and cluster_size == (2,2):
|
||||
return ClusterFinderMT_Cluster2x2i(image_size, n_sigma = n_sigma,
|
||||
capacity = capacity, n_threads = n_threads)
|
||||
else:
|
||||
#TODO! add the other formats
|
||||
raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32 is supported.")
|
||||
|
||||
|
||||
def ClusterCollector(clusterfindermt, cluster_size = (3,3), dtype=np.int32):
|
||||
"""
|
||||
Factory function to create a ClusterCollector object. Provides a cleaner syntax for
|
||||
the templated ClusterCollector in C++.
|
||||
"""
|
||||
|
||||
if dtype == np.int32 and cluster_size == (3,3):
|
||||
return ClusterCollector_Cluster3x3i(clusterfindermt)
|
||||
elif dtype == np.int32 and cluster_size == (2,2):
|
||||
return ClusterCollector_Cluster2x2i(clusterfindermt)
|
||||
|
||||
else:
|
||||
#TODO! add the other formats
|
||||
raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32 is supported.")
|
||||
|
||||
def ClusterFileSink(clusterfindermt, cluster_file, dtype=np.int32):
|
||||
"""
|
||||
Factory function to create a ClusterFileSink object. Provides a cleaner syntax for
|
||||
the templated ClusterFileSink in C++.
|
||||
"""
|
||||
|
||||
if dtype == np.int32 and clusterfindermt.cluster_size == (3,3):
|
||||
return ClusterFileSink_Cluster3x3i(clusterfindermt, cluster_file)
|
||||
elif dtype == np.int32 and clusterfindermt.cluster_size == (2,2):
|
||||
return ClusterFileSink_Cluster2x2i(clusterfindermt, cluster_file)
|
||||
|
||||
else:
|
||||
#TODO! add the other formats
|
||||
raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32 is supported.")
|
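The factories above hide the templated class names; a short usage sketch based on the tests later in this diff (image size and parameters are arbitrary):

    import numpy as np
    from aare import ClusterFinder, ClusterFinderMT, ClusterCollector

    # Single-threaded finder for 3x3 int32 clusters on a 100x100 image
    cf = ClusterFinder((100, 100), (3, 3), n_sigma=5, dtype=np.int32)

    # Multithreaded finder feeding a collector, as in test_cluster_collector below
    finder = ClusterFinderMT((100, 100), cluster_size=(3, 3), dtype=np.int32, n_threads=3)
    collector = ClusterCollector(finder)
    cluster_vectors = collector.steal_clusters()   # list of ClusterVector objects with the collected clusters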
11
python/aare/ClusterVector.py
Normal file
@ -0,0 +1,11 @@
|
||||
|
||||
|
||||
from ._aare import ClusterVector_Cluster3x3i
|
||||
import numpy as np
|
||||
|
||||
def ClusterVector(cluster_size, dtype = np.int32):
|
||||
|
||||
if dtype == np.int32 and cluster_size == (3,3):
|
||||
return ClusterVector_Cluster3x3i()
|
||||
else:
|
||||
raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32 is supported.")
|
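A brief usage sketch for this factory, reusing the Cluster3x3i type from the internal _aare module in the same way the tests below do:

    import numpy as np
    from aare import ClusterVector, _aare

    cv = ClusterVector((3, 3))                        # returns ClusterVector_Cluster3x3i
    cv.push_back(_aare.Cluster3x3i(5, 7, np.ones(9, dtype=np.int32)))
    arr = np.array(cv, copy=False)                    # zero-copy view via the buffer protocol
    print(arr[0]['x'], arr[0]['y'])                   # 5 7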
@ -11,8 +11,17 @@ from ._aare import ROI
|
||||
|
||||
# from ._aare import ClusterFinderMT, ClusterCollector, ClusterFileSink, ClusterVector_i
|
||||
|
||||
from .ClusterFinder import ClusterFinder, ClusterCollector, ClusterFinderMT, ClusterFileSink
|
||||
from .ClusterVector import ClusterVector
|
||||
|
||||
|
||||
from ._aare import fit_gaus, fit_pol1
|
||||
from ._aare import Interpolator
|
||||
from ._aare import calculate_eta2
|
||||
|
||||
|
||||
from ._aare import apply_custom_weights
|
||||
|
||||
from .CtbRawFile import CtbRawFile
|
||||
from .RawFile import RawFile
|
||||
from .ScanParameters import ScanParameters
|
||||
|
104
python/src/bind_ClusterVector.hpp
Normal file
@ -0,0 +1,104 @@
|
||||
#include "aare/ClusterCollector.hpp"
|
||||
#include "aare/ClusterFileSink.hpp"
|
||||
#include "aare/ClusterFinder.hpp"
|
||||
#include "aare/ClusterFinderMT.hpp"
|
||||
#include "aare/ClusterVector.hpp"
|
||||
#include "aare/NDView.hpp"
|
||||
#include "aare/Pedestal.hpp"
|
||||
#include "np_helper.hpp"
|
||||
|
||||
#include <cstdint>
|
||||
#include <filesystem>
|
||||
#include <pybind11/pybind11.h>
|
||||
#include <pybind11/stl.h>
|
||||
#include <pybind11/stl_bind.h>
|
||||
|
||||
namespace py = pybind11;
|
||||
using pd_type = double;
|
||||
|
||||
using namespace aare;
|
||||
|
||||
#pragma GCC diagnostic push
|
||||
#pragma GCC diagnostic ignored "-Wunused-parameter"
|
||||
|
||||
template <typename Type, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType = uint16_t>
|
||||
void define_ClusterVector(py::module &m, const std::string &typestr) {
|
||||
using ClusterType = Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>;
|
||||
auto class_name = fmt::format("ClusterVector_{}", typestr);
|
||||
|
||||
py::class_<ClusterVector<
|
||||
Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>, void>>(
|
||||
m, class_name.c_str(),
|
||||
py::buffer_protocol())
|
||||
|
||||
.def(py::init()) // TODO change!!!
|
||||
|
||||
.def("push_back",
|
||||
[](ClusterVector<ClusterType> &self, const ClusterType &cluster) {
|
||||
self.push_back(cluster);
|
||||
})
|
||||
|
||||
.def("sum",
|
||||
[](ClusterVector<ClusterType> &self) {
|
||||
auto *vec = new std::vector<Type>(self.sum());
|
||||
return return_vector(vec);
|
||||
})
|
||||
.def("sum_2x2", [](ClusterVector<ClusterType> &self){
|
||||
auto *vec = new std::vector<Type>(self.sum_2x2());
|
||||
return return_vector(vec);
|
||||
})
|
||||
.def_property_readonly("size", &ClusterVector<ClusterType>::size)
|
||||
.def("item_size", &ClusterVector<ClusterType>::item_size)
|
||||
.def_property_readonly("fmt",
|
||||
[typestr](ClusterVector<ClusterType> &self) {
|
||||
return fmt_format<ClusterType>;
|
||||
})
|
||||
|
||||
.def_property_readonly("cluster_size_x",
|
||||
&ClusterVector<ClusterType>::cluster_size_x)
|
||||
.def_property_readonly("cluster_size_y",
|
||||
&ClusterVector<ClusterType>::cluster_size_y)
|
||||
.def_property_readonly("capacity",
|
||||
&ClusterVector<ClusterType>::capacity)
|
||||
.def_property("frame_number", &ClusterVector<ClusterType>::frame_number,
|
||||
&ClusterVector<ClusterType>::set_frame_number)
|
||||
.def_buffer(
|
||||
[typestr](ClusterVector<ClusterType> &self) -> py::buffer_info {
|
||||
return py::buffer_info(
|
||||
self.data(), /* Pointer to buffer */
|
||||
self.item_size(), /* Size of one scalar */
|
||||
fmt_format<ClusterType>, /* Format descriptor */
|
||||
1, /* Number of dimensions */
|
||||
{self.size()}, /* Buffer dimensions */
|
||||
{self.item_size()} /* Strides (in bytes) for each index */
|
||||
);
|
||||
});
|
||||
|
||||
// Free functions using ClusterVector
|
||||
m.def("hitmap",
|
||||
[](std::array<size_t, 2> image_size, ClusterVector<ClusterType> &cv) {
|
||||
// Create a numpy array to hold the hitmap
|
||||
// The shape of the array is (image_size[0], image_size[1])
|
||||
// note that the python array is passed as [row, col] which
|
||||
// is the opposite of the clusters [x,y]
|
||||
py::array_t<int32_t> hitmap(image_size);
|
||||
auto r = hitmap.mutable_unchecked<2>();
|
||||
|
||||
// Initialize hitmap to 0
|
||||
for (py::ssize_t i = 0; i < r.shape(0); i++)
|
||||
for (py::ssize_t j = 0; j < r.shape(1); j++)
|
||||
r(i, j) = 0;
|
||||
|
||||
// Loop over the clusters and increment the hitmap
|
||||
// Skip out of bound clusters
|
||||
for (const auto &cluster : cv) {
|
||||
auto x = cluster.x;
|
||||
auto y = cluster.y;
|
||||
if (x < image_size[1] && y < image_size[0])
|
||||
r(cluster.y, cluster.x) += 1;
|
||||
}
|
||||
|
||||
return hitmap;
|
||||
});
|
||||
}
|
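The hitmap free function registered here is exercised from Python in test_make_a_hitmap_from_cluster_vector further down; a condensed version of that usage:

    import numpy as np
    from aare import _aare

    cv = _aare.ClusterVector_Cluster3x3i()
    cv.push_back(_aare.Cluster3x3i(1, 1, np.ones(9, dtype=np.int32)))
    cv.push_back(_aare.Cluster3x3i(2, 2, np.ones(9, dtype=np.int32)))

    # The image shape is given as (rows, cols) while clusters carry (x, y),
    # matching the comment in the binding above.
    img = _aare.hitmap((5, 5), cv)
    print(img[1, 1], img[2, 2])   # 1 1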
@ -26,17 +26,18 @@ template <typename Type, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
void define_cluster(py::module &m, const std::string &typestr) {
|
||||
auto class_name = fmt::format("Cluster{}", typestr);
|
||||
|
||||
py::class_<Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType, void>>(
|
||||
py::class_<Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>>(
|
||||
m, class_name.c_str(), py::buffer_protocol())
|
||||
|
||||
.def(py::init([](uint8_t x, uint8_t y, py::array_t<Type> data) {
|
||||
py::buffer_info buf_info = data.request();
|
||||
Type *ptr = static_cast<Type *>(buf_info.ptr);
|
||||
Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType, void> cluster;
|
||||
Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType> cluster;
|
||||
cluster.x = x;
|
||||
cluster.y = y;
|
||||
std::copy(ptr, ptr + ClusterSizeX * ClusterSizeY,
|
||||
cluster.data); // Copy array contents
|
||||
auto r = data.template unchecked<1>(); // no bounds checks
|
||||
for (py::ssize_t i = 0; i < data.size(); ++i) {
|
||||
cluster.data[i] = r(i);
|
||||
}
|
||||
return cluster;
|
||||
}));
|
||||
|
||||
@ -64,54 +65,6 @@ void define_cluster(py::module &m, const std::string &typestr) {
|
||||
*/
|
||||
}
|
||||
|
||||
template <typename Type, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType = uint16_t>
|
||||
void define_cluster_vector(py::module &m, const std::string &typestr) {
|
||||
using ClusterType =
|
||||
Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType, void>;
|
||||
auto class_name = fmt::format("ClusterVector_{}", typestr);
|
||||
|
||||
py::class_<ClusterVector<
|
||||
Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType, void>, void>>(
|
||||
m, class_name.c_str(),
|
||||
py::buffer_protocol())
|
||||
|
||||
.def(py::init()) // TODO change!!!
|
||||
|
||||
.def("push_back",
|
||||
[](ClusterVector<ClusterType> &self, const ClusterType &cluster) {
|
||||
self.push_back(cluster);
|
||||
})
|
||||
|
||||
// implement push_back
|
||||
.def_property_readonly("size", &ClusterVector<ClusterType>::size)
|
||||
.def("item_size", &ClusterVector<ClusterType>::item_size)
|
||||
.def_property_readonly("fmt",
|
||||
[typestr](ClusterVector<ClusterType> &self) {
|
||||
return fmt_format<ClusterType>;
|
||||
})
|
||||
|
||||
.def_property_readonly("cluster_size_x",
|
||||
&ClusterVector<ClusterType>::cluster_size_x)
|
||||
.def_property_readonly("cluster_size_y",
|
||||
&ClusterVector<ClusterType>::cluster_size_y)
|
||||
.def_property_readonly("capacity",
|
||||
&ClusterVector<ClusterType>::capacity)
|
||||
.def_property("frame_number", &ClusterVector<ClusterType>::frame_number,
|
||||
&ClusterVector<ClusterType>::set_frame_number)
|
||||
.def_buffer(
|
||||
[typestr](ClusterVector<ClusterType> &self) -> py::buffer_info {
|
||||
return py::buffer_info(
|
||||
self.data(), /* Pointer to buffer */
|
||||
self.item_size(), /* Size of one scalar */
|
||||
fmt_format<ClusterType>, /* Format descriptor */
|
||||
1, /* Number of dimensions */
|
||||
{self.size()}, /* Buffer dimensions */
|
||||
{self.item_size()} /* Strides (in bytes) for each index */
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType = uint16_t>
|
||||
void define_cluster_finder_mt_bindings(py::module &m,
|
||||
@ -140,6 +93,9 @@ void define_cluster_finder_mt_bindings(py::module &m,
|
||||
return;
|
||||
},
|
||||
py::arg(), py::arg("frame_number") = 0)
|
||||
.def_property_readonly("cluster_size", [](ClusterFinderMT<ClusterType, uint16_t, pd_type> &self){
|
||||
return py::make_tuple(ClusterSizeX, ClusterSizeY);
|
||||
})
|
||||
.def("clear_pedestal",
|
||||
&ClusterFinderMT<ClusterType, uint16_t, pd_type>::clear_pedestal)
|
||||
.def("sync", &ClusterFinderMT<ClusterType, uint16_t, pd_type>::sync)
|
||||
@ -251,26 +207,5 @@ void define_cluster_finder_bindings(py::module &m, const std::string &typestr) {
|
||||
return;
|
||||
},
|
||||
py::arg(), py::arg("frame_number") = 0);
|
||||
|
||||
m.def("hitmap",
|
||||
[](std::array<size_t, 2> image_size, ClusterVector<ClusterType> &cv) {
|
||||
py::array_t<int32_t> hitmap(image_size);
|
||||
auto r = hitmap.mutable_unchecked<2>();
|
||||
|
||||
// Initialize hitmap to 0
|
||||
for (py::ssize_t i = 0; i < r.shape(0); i++)
|
||||
for (py::ssize_t j = 0; j < r.shape(1); j++)
|
||||
r(i, j) = 0;
|
||||
|
||||
size_t stride = cv.item_size();
|
||||
auto ptr = cv.data();
|
||||
for (size_t i = 0; i < cv.size(); i++) {
|
||||
auto x = *reinterpret_cast<int16_t *>(ptr);
|
||||
auto y = *reinterpret_cast<int16_t *>(ptr + sizeof(int16_t));
|
||||
r(y, x) += 1;
|
||||
ptr += stride;
|
||||
}
|
||||
return hitmap;
|
||||
});
|
||||
}
|
||||
#pragma GCC diagnostic pop
|
||||
|
@ -59,9 +59,6 @@ void define_cluster_file_io_bindings(py::module &m,
|
||||
self.set_gain_map(view);
|
||||
})
|
||||
|
||||
// void set_gain_map(const GainMap &gain_map); //TODO do i need a
|
||||
// gainmap constructor?
|
||||
|
||||
.def("close", &ClusterFile<ClusterType>::close)
|
||||
.def("write_frame", &ClusterFile<ClusterType>::write_frame)
|
||||
.def("__enter__", [](ClusterFile<ClusterType> &self) { return &self; })
|
||||
|
@ -10,6 +10,8 @@
|
||||
#include "aare/decode.hpp"
|
||||
// #include "aare/fClusterFileV2.hpp"
|
||||
|
||||
#include "np_helper.hpp"
|
||||
|
||||
#include <cstdint>
|
||||
#include <filesystem>
|
||||
#include <pybind11/iostream.h>
|
||||
@ -65,35 +67,54 @@ m.def("adc_sar_04_decode64to16", [](py::array_t<uint8_t> input) {
|
||||
return output;
|
||||
});
|
||||
|
||||
py::class_<CtbRawFile>(m, "CtbRawFile")
|
||||
.def(py::init<const std::filesystem::path &>())
|
||||
.def("read_frame",
|
||||
[](CtbRawFile &self) {
|
||||
size_t image_size = self.image_size_in_bytes();
|
||||
py::array image;
|
||||
std::vector<ssize_t> shape;
|
||||
shape.reserve(2);
|
||||
shape.push_back(1);
|
||||
shape.push_back(image_size);
|
||||
m.def(
|
||||
"apply_custom_weights",
|
||||
[](py::array_t<uint16_t, py::array::c_style | py::array::forcecast> &input,
|
||||
py::array_t<double, py::array::c_style | py::array::forcecast>
|
||||
&weights) {
|
||||
|
||||
|
||||
py::array_t<DetectorHeader> header(1);
|
||||
// Create new array with same shape as the input array (uninitialized values)
|
||||
py::buffer_info buf = input.request();
|
||||
py::array_t<double> output(buf.shape);
|
||||
|
||||
// always read bytes
|
||||
image = py::array_t<uint8_t>(shape);
|
||||
// Use NDViews to call into the C++ library
|
||||
auto weights_view = make_view_1d(weights);
|
||||
NDView<uint16_t, 1> input_view(input.mutable_data(), {input.size()});
|
||||
NDView<double, 1> output_view(output.mutable_data(), {output.size()});
|
||||
|
||||
self.read_into(
|
||||
reinterpret_cast<std::byte *>(image.mutable_data()),
|
||||
header.mutable_data());
|
||||
apply_custom_weights(input_view, output_view, weights_view);
|
||||
return output;
|
||||
});
|
||||
|
||||
return py::make_tuple(header, image);
|
||||
})
|
||||
.def("seek", &CtbRawFile::seek)
|
||||
.def("tell", &CtbRawFile::tell)
|
||||
.def("master", &CtbRawFile::master)
|
||||
py::class_<CtbRawFile>(m, "CtbRawFile")
|
||||
.def(py::init<const std::filesystem::path &>())
|
||||
.def("read_frame",
|
||||
[](CtbRawFile &self) {
|
||||
size_t image_size = self.image_size_in_bytes();
|
||||
py::array image;
|
||||
std::vector<ssize_t> shape;
|
||||
shape.reserve(2);
|
||||
shape.push_back(1);
|
||||
shape.push_back(image_size);
|
||||
|
||||
.def_property_readonly("image_size_in_bytes",
|
||||
&CtbRawFile::image_size_in_bytes)
|
||||
py::array_t<DetectorHeader> header(1);
|
||||
|
||||
.def_property_readonly("frames_in_file", &CtbRawFile::frames_in_file);
|
||||
// always read bytes
|
||||
image = py::array_t<uint8_t>(shape);
|
||||
|
||||
}
|
||||
self.read_into(reinterpret_cast<std::byte *>(image.mutable_data()),
|
||||
header.mutable_data());
|
||||
|
||||
return py::make_tuple(header, image);
|
||||
})
|
||||
.def("seek", &CtbRawFile::seek)
|
||||
.def("tell", &CtbRawFile::tell)
|
||||
.def("master", &CtbRawFile::master)
|
||||
|
||||
.def_property_readonly("image_size_in_bytes",
|
||||
&CtbRawFile::image_size_in_bytes)
|
||||
|
||||
.def_property_readonly("frames_in_file", &CtbRawFile::frames_in_file);
|
||||
|
||||
}
|
||||
|
@ -20,6 +20,9 @@
|
||||
namespace py = pybind11;
|
||||
using namespace ::aare;
|
||||
|
||||
|
||||
|
||||
|
||||
//Disable warnings for unused parameters, as we ignore some
|
||||
//in the __exit__ method
|
||||
#pragma GCC diagnostic push
|
||||
@ -214,36 +217,9 @@ void define_file_io_bindings(py::module &m) {
|
||||
|
||||
|
||||
|
||||
py::class_<RawSubFile>(m, "RawSubFile")
|
||||
.def(py::init<const std::filesystem::path &, DetectorType, size_t,
|
||||
size_t, size_t>())
|
||||
.def_property_readonly("bytes_per_frame", &RawSubFile::bytes_per_frame)
|
||||
.def_property_readonly("pixels_per_frame",
|
||||
&RawSubFile::pixels_per_frame)
|
||||
.def("seek", &RawSubFile::seek)
|
||||
.def("tell", &RawSubFile::tell)
|
||||
.def_property_readonly("rows", &RawSubFile::rows)
|
||||
.def_property_readonly("cols", &RawSubFile::cols)
|
||||
.def("read_frame",
|
||||
[](RawSubFile &self) {
|
||||
const uint8_t item_size = self.bytes_per_pixel();
|
||||
py::array image;
|
||||
std::vector<ssize_t> shape;
|
||||
shape.reserve(2);
|
||||
shape.push_back(self.rows());
|
||||
shape.push_back(self.cols());
|
||||
if (item_size == 1) {
|
||||
image = py::array_t<uint8_t>(shape);
|
||||
} else if (item_size == 2) {
|
||||
image = py::array_t<uint16_t>(shape);
|
||||
} else if (item_size == 4) {
|
||||
image = py::array_t<uint32_t>(shape);
|
||||
}
|
||||
fmt::print("item_size: {} rows: {} cols: {}\n", item_size, self.rows(), self.cols());
|
||||
self.read_into(
|
||||
reinterpret_cast<std::byte *>(image.mutable_data()));
|
||||
return image;
|
||||
});
|
||||
|
||||
|
||||
|
||||
|
||||
#pragma GCC diagnostic pop
|
||||
// py::class_<ClusterHeader>(m, "ClusterHeader")
|
||||
|
@ -1,16 +1,21 @@
|
||||
// Files with bindings to the different classes
|
||||
|
||||
//New style file naming
|
||||
#include "bind_ClusterVector.hpp"
|
||||
|
||||
//TODO! migrate the other names
|
||||
#include "cluster.hpp"
|
||||
#include "cluster_file.hpp"
|
||||
#include "ctb_raw_file.hpp"
|
||||
#include "file.hpp"
|
||||
#include "fit.hpp"
|
||||
#include "interpolation.hpp"
|
||||
#include "pedestal.hpp"
|
||||
#include "pixel_map.hpp"
|
||||
#include "raw_file.hpp"
|
||||
#include "raw_sub_file.hpp"
|
||||
#include "raw_master_file.hpp"
|
||||
#include "raw_file.hpp"
|
||||
#include "pixel_map.hpp"
|
||||
#include "var_cluster.hpp"
|
||||
|
||||
#include "pedestal.hpp"
|
||||
#include "jungfrau_data_file.hpp"
|
||||
|
||||
// Pybind stuff
|
||||
@ -22,6 +27,7 @@ namespace py = pybind11;
|
||||
PYBIND11_MODULE(_aare, m) {
|
||||
define_file_io_bindings(m);
|
||||
define_raw_file_io_bindings(m);
|
||||
define_raw_sub_file_io_bindings(m);
|
||||
define_ctb_raw_file_io_bindings(m);
|
||||
define_raw_master_file_bindings(m);
|
||||
define_var_cluster_finder_bindings(m);
|
||||
@ -39,12 +45,12 @@ PYBIND11_MODULE(_aare, m) {
|
||||
define_cluster_file_io_bindings<float, 2, 2, uint16_t>(m, "Cluster2x2f");
|
||||
define_cluster_file_io_bindings<double, 2, 2, uint16_t>(m, "Cluster2x2d");
|
||||
|
||||
define_cluster_vector<int, 3, 3, uint16_t>(m, "Cluster3x3i");
|
||||
define_cluster_vector<double, 3, 3, uint16_t>(m, "Cluster3x3d");
|
||||
define_cluster_vector<float, 3, 3, uint16_t>(m, "Cluster3x3f");
|
||||
define_cluster_vector<int, 2, 2, uint16_t>(m, "Cluster2x2i");
|
||||
define_cluster_vector<double, 2, 2, uint16_t>(m, "Cluster2x2d");
|
||||
define_cluster_vector<float, 2, 2, uint16_t>(m, "Cluster2x2f");
|
||||
define_ClusterVector<int, 3, 3, uint16_t>(m, "Cluster3x3i");
|
||||
define_ClusterVector<double, 3, 3, uint16_t>(m, "Cluster3x3d");
|
||||
define_ClusterVector<float, 3, 3, uint16_t>(m, "Cluster3x3f");
|
||||
define_ClusterVector<int, 2, 2, uint16_t>(m, "Cluster2x2i");
|
||||
define_ClusterVector<double, 2, 2, uint16_t>(m, "Cluster2x2d");
|
||||
define_ClusterVector<float, 2, 2, uint16_t>(m, "Cluster2x2f");
|
||||
|
||||
define_cluster_finder_bindings<int, 3, 3, uint16_t>(m, "Cluster3x3i");
|
||||
define_cluster_finder_bindings<double, 3, 3, uint16_t>(m, "Cluster3x3d");
|
||||
|
110
python/src/raw_sub_file.hpp
Normal file
@ -0,0 +1,110 @@
|
||||
#include "aare/CtbRawFile.hpp"
|
||||
#include "aare/File.hpp"
|
||||
#include "aare/Frame.hpp"
|
||||
#include "aare/RawFile.hpp"
|
||||
#include "aare/RawMasterFile.hpp"
|
||||
#include "aare/RawSubFile.hpp"
|
||||
|
||||
#include "aare/defs.hpp"
|
||||
// #include "aare/fClusterFileV2.hpp"
|
||||
|
||||
#include <cstdint>
|
||||
#include <filesystem>
|
||||
#include <pybind11/iostream.h>
|
||||
#include <pybind11/numpy.h>
|
||||
#include <pybind11/pybind11.h>
|
||||
#include <pybind11/stl.h>
|
||||
#include <pybind11/stl/filesystem.h>
|
||||
#include <string>
|
||||
|
||||
namespace py = pybind11;
|
||||
using namespace ::aare;
|
||||
|
||||
auto read_frame_from_RawSubFile(RawSubFile &self) {
|
||||
py::array_t<DetectorHeader> header(1);
|
||||
const uint8_t item_size = self.bytes_per_pixel();
|
||||
std::vector<ssize_t> shape{static_cast<ssize_t>(self.rows()),
|
||||
static_cast<ssize_t>(self.cols())};
|
||||
|
||||
py::array image;
|
||||
if (item_size == 1) {
|
||||
image = py::array_t<uint8_t>(shape);
|
||||
} else if (item_size == 2) {
|
||||
image = py::array_t<uint16_t>(shape);
|
||||
} else if (item_size == 4) {
|
||||
image = py::array_t<uint32_t>(shape);
|
||||
}
|
||||
self.read_into(reinterpret_cast<std::byte *>(image.mutable_data()),
|
||||
header.mutable_data());
|
||||
|
||||
return py::make_tuple(header, image);
|
||||
}
|
||||
|
||||
auto read_n_frames_from_RawSubFile(RawSubFile &self, size_t n_frames) {
|
||||
py::array_t<DetectorHeader> header(n_frames);
|
||||
const uint8_t item_size = self.bytes_per_pixel();
|
||||
std::vector<ssize_t> shape{
|
||||
static_cast<ssize_t>(n_frames),
|
||||
static_cast<ssize_t>(self.rows()),
|
||||
static_cast<ssize_t>(self.cols())
|
||||
};
|
||||
|
||||
py::array image;
|
||||
if (item_size == 1) {
|
||||
image = py::array_t<uint8_t>(shape);
|
||||
} else if (item_size == 2) {
|
||||
image = py::array_t<uint16_t>(shape);
|
||||
} else if (item_size == 4) {
|
||||
image = py::array_t<uint32_t>(shape);
|
||||
}
|
||||
self.read_into(reinterpret_cast<std::byte *>(image.mutable_data()), n_frames,
|
||||
header.mutable_data());
|
||||
|
||||
return py::make_tuple(header, image);
|
||||
}
|
||||
|
||||
|
||||
//Disable warnings for unused parameters, as we ignore some
|
||||
//in the __exit__ method
|
||||
#pragma GCC diagnostic push
|
||||
#pragma GCC diagnostic ignored "-Wunused-parameter"
|
||||
|
||||
void define_raw_sub_file_io_bindings(py::module &m) {
|
||||
py::class_<RawSubFile>(m, "RawSubFile")
|
||||
.def(py::init<const std::filesystem::path &, DetectorType, size_t,
|
||||
size_t, size_t>())
|
||||
.def_property_readonly("bytes_per_frame", &RawSubFile::bytes_per_frame)
|
||||
.def_property_readonly("pixels_per_frame",
|
||||
&RawSubFile::pixels_per_frame)
|
||||
.def_property_readonly("bytes_per_pixel", &RawSubFile::bytes_per_pixel)
|
||||
.def("seek", &RawSubFile::seek)
|
||||
.def("tell", &RawSubFile::tell)
|
||||
.def_property_readonly("rows", &RawSubFile::rows)
|
||||
.def_property_readonly("cols", &RawSubFile::cols)
|
||||
.def_property_readonly("frames_in_file", &RawSubFile::frames_in_file)
|
||||
.def("read_frame", &read_frame_from_RawSubFile)
|
||||
.def("read_n", &read_n_frames_from_RawSubFile)
|
||||
.def("read", [](RawSubFile &self){
|
||||
self.seek(0);
|
||||
auto n_frames = self.frames_in_file();
|
||||
return read_n_frames_from_RawSubFile(self, n_frames);
|
||||
})
|
||||
.def("__enter__", [](RawSubFile &self) { return &self; })
|
||||
.def("__exit__",
|
||||
[](RawSubFile &self,
|
||||
const std::optional<pybind11::type> &exc_type,
|
||||
const std::optional<pybind11::object> &exc_value,
|
||||
const std::optional<pybind11::object> &traceback) {
|
||||
})
|
||||
.def("__iter__", [](RawSubFile &self) { return &self; })
|
||||
.def("__next__", [](RawSubFile &self) {
|
||||
try {
|
||||
return read_frame_from_RawSubFile(self);
|
||||
} catch (std::runtime_error &e) {
|
||||
throw py::stop_iteration();
|
||||
}
|
||||
});
|
||||
|
||||
}
|
||||
|
||||
#pragma GCC diagnostic pop
|
@ -1,12 +1,12 @@
|
||||
import pytest
|
||||
import numpy as np
|
||||
|
||||
import aare._aare as aare
|
||||
from aare import _aare #import the C++ module
|
||||
from conftest import test_data_path
|
||||
|
||||
|
||||
def test_cluster_vector_can_be_converted_to_numpy():
|
||||
cv = aare.ClusterVector_Cluster3x3i()
|
||||
cv = _aare.ClusterVector_Cluster3x3i()
|
||||
arr = np.array(cv, copy=False)
|
||||
assert arr.shape == (0,) # empty vector; each record holds x, y and the 9 cluster data values
|
||||
|
||||
@ -14,24 +14,23 @@ def test_cluster_vector_can_be_converted_to_numpy():
|
||||
def test_ClusterVector():
|
||||
"""Test ClusterVector"""
|
||||
|
||||
clustervector = aare.ClusterVector_Cluster3x3i()
|
||||
clustervector = _aare.ClusterVector_Cluster3x3i()
|
||||
assert clustervector.cluster_size_x == 3
|
||||
assert clustervector.cluster_size_y == 3
|
||||
assert clustervector.item_size() == 4+9*4
|
||||
assert clustervector.frame_number == 0
|
||||
assert clustervector.capacity == 1024
|
||||
assert clustervector.size == 0
|
||||
|
||||
cluster = aare.Cluster3x3i(0,0,np.ones(9, dtype=np.int32))
|
||||
cluster = _aare.Cluster3x3i(0,0,np.ones(9, dtype=np.int32))
|
||||
|
||||
clustervector.push_back(cluster)
|
||||
assert clustervector.size == 1
|
||||
|
||||
with pytest.raises(TypeError): # Or use the appropriate exception type
|
||||
clustervector.push_back(aare.Cluster2x2i(0,0,np.ones(4, dtype=np.int32)))
|
||||
clustervector.push_back(_aare.Cluster2x2i(0,0,np.ones(4, dtype=np.int32)))
|
||||
|
||||
with pytest.raises(TypeError):
|
||||
clustervector.push_back(aare.Cluster3x3f(0,0,np.ones(9, dtype=np.float32)))
|
||||
clustervector.push_back(_aare.Cluster3x3f(0,0,np.ones(9, dtype=np.float32)))
|
||||
|
||||
def test_Interpolator():
|
||||
"""Test Interpolator"""
|
||||
@ -41,13 +40,13 @@ def test_Interpolator():
|
||||
ybins = np.linspace(0, 5, 30, dtype=np.float64)
|
||||
|
||||
etacube = np.zeros(shape=[30, 30, 20], dtype=np.float64)
|
||||
interpolator = aare.Interpolator(etacube, xbins, ybins, ebins)
|
||||
interpolator = _aare.Interpolator(etacube, xbins, ybins, ebins)
|
||||
|
||||
assert interpolator.get_ietax().shape == (30,30,20)
|
||||
assert interpolator.get_ietay().shape == (30,30,20)
|
||||
clustervector = aare.ClusterVector_Cluster3x3i()
|
||||
clustervector = _aare.ClusterVector_Cluster3x3i()
|
||||
|
||||
cluster = aare.Cluster3x3i(0,0, np.ones(9, dtype=np.int32))
|
||||
cluster = _aare.Cluster3x3i(0,0, np.ones(9, dtype=np.int32))
|
||||
clustervector.push_back(cluster)
|
||||
|
||||
interpolated_photons = interpolator.interpolate(clustervector)
|
||||
@ -58,9 +57,9 @@ def test_Interpolator():
|
||||
assert interpolated_photons[0]["y"] == -1
|
||||
assert interpolated_photons[0]["energy"] == 4 #eta_sum = 4, dx, dy = -1,-1 m_ietax = 0, m_ietay = 0
|
||||
|
||||
clustervector = aare.ClusterVector_Cluster2x2i()
|
||||
clustervector = _aare.ClusterVector_Cluster2x2i()
|
||||
|
||||
cluster = aare.Cluster2x2i(0,0, np.ones(4, dtype=np.int32))
|
||||
cluster = _aare.Cluster2x2i(0,0, np.ones(4, dtype=np.int32))
|
||||
clustervector.push_back(cluster)
|
||||
|
||||
interpolated_photons = interpolator.interpolate(clustervector)
|
||||
@ -71,28 +70,15 @@ def test_Interpolator():
|
||||
assert interpolated_photons[0]["y"] == 0
|
||||
assert interpolated_photons[0]["energy"] == 4
|
||||
|
||||
@pytest.mark.files
|
||||
def test_cluster_file(test_data_path):
|
||||
"""Test ClusterFile"""
|
||||
cluster_file = aare.ClusterFile_Cluster3x3i(test_data_path / "clust/single_frame_97_clustrers.clust")
|
||||
clustervector = cluster_file.read_clusters(10) #conversion does not work
|
||||
|
||||
cluster_file.close()
|
||||
|
||||
assert clustervector.size == 10
|
||||
|
||||
###reading with wrong file
|
||||
with pytest.raises(TypeError):
|
||||
cluster_file = aare.ClusterFile_Cluster2x2i(test_data_path / "clust/single_frame_97_clustrers.clust")
|
||||
cluster_file.close()
|
||||
|
||||
def test_calculate_eta():
|
||||
"""Calculate Eta"""
|
||||
clusters = aare.ClusterVector_Cluster3x3i()
|
||||
clusters.push_back(aare.Cluster3x3i(0,0, np.ones(9, dtype=np.int32)))
|
||||
clusters.push_back(aare.Cluster3x3i(0,0, np.array([1,1,1,2,2,2,3,3,3])))
|
||||
clusters = _aare.ClusterVector_Cluster3x3i()
|
||||
clusters.push_back(_aare.Cluster3x3i(0,0, np.ones(9, dtype=np.int32)))
|
||||
clusters.push_back(_aare.Cluster3x3i(0,0, np.array([1,1,1,2,2,2,3,3,3])))
|
||||
|
||||
eta2 = aare.calculate_eta2(clusters)
|
||||
eta2 = _aare.calculate_eta2(clusters)
|
||||
|
||||
assert eta2.shape == (2,2)
|
||||
assert eta2[0,0] == 0.5
|
||||
@ -103,7 +89,7 @@ def test_calculate_eta():
|
||||
def test_cluster_finder():
|
||||
"""Test ClusterFinder"""
|
||||
|
||||
clusterfinder = aare.ClusterFinder_Cluster3x3i([100,100])
|
||||
clusterfinder = _aare.ClusterFinder_Cluster3x3i([100,100])
|
||||
|
||||
#frame = np.random.rand(100,100)
|
||||
frame = np.zeros(shape=[100,100])
|
||||
@ -115,18 +101,7 @@ def test_cluster_finder():
|
||||
assert clusters.size == 0
|
||||
|
||||
|
||||
#TODO dont understand behavior
|
||||
def test_cluster_collector():
|
||||
"""Test ClusterCollector"""
|
||||
|
||||
clusterfinder = aare.ClusterFinderMT_Cluster3x3i([100,100]) #TODO: no idea what the data is in InputQueue not zero
|
||||
|
||||
clustercollector = aare.ClusterCollector_Cluster3x3i(clusterfinder)
|
||||
|
||||
cluster_vectors = clustercollector.steal_clusters()
|
||||
|
||||
assert len(cluster_vectors) == 1 #single thread execution
|
||||
assert cluster_vectors[0].size == 0 #
|
||||
|
||||
|
||||
|
||||
|
||||
|
64
python/tests/test_ClusterFile.py
Normal file
@ -0,0 +1,64 @@
|
||||
|
||||
import pytest
|
||||
import numpy as np
|
||||
import boost_histogram as bh
|
||||
import time
|
||||
from pathlib import Path
|
||||
import pickle
|
||||
|
||||
from aare import ClusterFile
|
||||
from conftest import test_data_path
|
||||
|
||||
@pytest.mark.files
|
||||
def test_cluster_file(test_data_path):
|
||||
"""Test ClusterFile"""
|
||||
f = ClusterFile(test_data_path / "clust/single_frame_97_clustrers.clust")
|
||||
cv = f.read_clusters(10) #conversion does not work
|
||||
|
||||
|
||||
assert cv.frame_number == 135
|
||||
assert cv.size == 10
|
||||
|
||||
#Known data
|
||||
#frame_number, num_clusters [135] 97
|
||||
#[ 1 200] [0 1 2 3 4 5 6 7 8]
|
||||
#[ 2 201] [ 9 10 11 12 13 14 15 16 17]
|
||||
#[ 3 202] [18 19 20 21 22 23 24 25 26]
|
||||
#[ 4 203] [27 28 29 30 31 32 33 34 35]
|
||||
#[ 5 204] [36 37 38 39 40 41 42 43 44]
|
||||
#[ 6 205] [45 46 47 48 49 50 51 52 53]
|
||||
#[ 7 206] [54 55 56 57 58 59 60 61 62]
|
||||
#[ 8 207] [63 64 65 66 67 68 69 70 71]
|
||||
#[ 9 208] [72 73 74 75 76 77 78 79 80]
|
||||
#[ 10 209] [81 82 83 84 85 86 87 88 89]
|
||||
|
||||
#conversion to numpy array
|
||||
arr = np.array(cv, copy = False)
|
||||
|
||||
assert arr.size == 10
|
||||
for i in range(10):
|
||||
assert arr[i]['x'] == i+1
|
||||
|
||||
@pytest.mark.files
|
||||
def test_read_clusters_and_fill_histogram(test_data_path):
|
||||
# Create the histogram
|
||||
n_bins = 100
|
||||
xmin = -100
|
||||
xmax = 1e4
|
||||
hist_aare = bh.Histogram(bh.axis.Regular(n_bins, xmin, xmax))
|
||||
|
||||
fname = test_data_path / "clust/beam_En700eV_-40deg_300V_10us_d0_f0_100.clust"
|
||||
|
||||
#Read clusters and fill the histogram with pixel values
|
||||
with ClusterFile(fname, chunk_size = 10000) as f:
|
||||
for clusters in f:
|
||||
arr = np.array(clusters, copy = False)
|
||||
hist_aare.fill(arr['data'].flat)
|
||||
|
||||
|
||||
#Load the histogram from the pickle file
|
||||
with open(fname.with_suffix('.pkl'), 'rb') as f:
|
||||
hist_py = pickle.load(f)
|
||||
|
||||
#Compare the two histograms
|
||||
assert hist_aare == hist_py
|
54
python/tests/test_ClusterVector.py
Normal file
@ -0,0 +1,54 @@
|
||||
import pytest
|
||||
import numpy as np
|
||||
import boost_histogram as bh
|
||||
import time
|
||||
from pathlib import Path
|
||||
import pickle
|
||||
|
||||
from aare import ClusterFile
|
||||
from aare import _aare
|
||||
from conftest import test_data_path
|
||||
|
||||
|
||||
def test_create_cluster_vector():
|
||||
cv = _aare.ClusterVector_Cluster3x3i()
|
||||
assert cv.cluster_size_x == 3
|
||||
assert cv.cluster_size_y == 3
|
||||
assert cv.size == 0
|
||||
|
||||
|
||||
def test_push_back_on_cluster_vector():
|
||||
cv = _aare.ClusterVector_Cluster2x2i()
|
||||
assert cv.cluster_size_x == 2
|
||||
assert cv.cluster_size_y == 2
|
||||
assert cv.size == 0
|
||||
|
||||
cluster = _aare.Cluster2x2i(19, 22, np.ones(4, dtype=np.int32))
|
||||
cv.push_back(cluster)
|
||||
assert cv.size == 1
|
||||
|
||||
arr = np.array(cv, copy=False)
|
||||
assert arr[0]['x'] == 19
|
||||
assert arr[0]['y'] == 22
|
||||
|
||||
|
||||
def test_make_a_hitmap_from_cluster_vector():
|
||||
cv = _aare.ClusterVector_Cluster3x3i()
|
||||
|
||||
# Push back 4 clusters with different positions
|
||||
cv.push_back(_aare.Cluster3x3i(0, 0, np.ones(9, dtype=np.int32)))
|
||||
cv.push_back(_aare.Cluster3x3i(1, 1, np.ones(9, dtype=np.int32)))
|
||||
cv.push_back(_aare.Cluster3x3i(1, 1, np.ones(9, dtype=np.int32)))
|
||||
cv.push_back(_aare.Cluster3x3i(2, 2, np.ones(9, dtype=np.int32)))
|
||||
|
||||
ref = np.zeros((5, 5), dtype=np.int32)
|
||||
ref[0,0] = 1
|
||||
ref[1,1] = 2
|
||||
ref[2,2] = 1
|
||||
|
||||
|
||||
img = _aare.hitmap((5,5), cv)
|
||||
# print(img)
|
||||
# print(ref)
|
||||
assert (img == ref).all()
|
||||
|
36
python/tests/test_RawSubFile.py
Normal file
@ -0,0 +1,36 @@
|
||||
import pytest
|
||||
import numpy as np
|
||||
from aare import RawSubFile, DetectorType
|
||||
|
||||
|
||||
@pytest.mark.files
|
||||
def test_read_a_jungfrau_RawSubFile(test_data_path):
|
||||
with RawSubFile(test_data_path / "raw/jungfrau/jungfrau_single_d0_f1_0.raw", DetectorType.Jungfrau, 512, 1024, 16) as f:
|
||||
assert f.frames_in_file == 3
|
||||
|
||||
headers, frames = f.read()
|
||||
|
||||
assert headers.size == 3
|
||||
assert frames.shape == (3, 512, 1024)
|
||||
|
||||
# Frame numbers in this file should be 4, 5, 6
|
||||
for i,h in zip(range(4,7,1), headers):
|
||||
assert h["frameNumber"] == i
|
||||
|
||||
# Compare to canned data using numpy
|
||||
data = np.load(test_data_path / "raw/jungfrau/jungfrau_single_0.npy")
|
||||
assert np.all(data[3:6] == frames)
|
||||
|
||||
@pytest.mark.files
|
||||
def test_iterate_over_a_jungfrau_RawSubFile(test_data_path):
|
||||
|
||||
data = np.load(test_data_path / "raw/jungfrau/jungfrau_single_0.npy")
|
||||
|
||||
with RawSubFile(test_data_path / "raw/jungfrau/jungfrau_single_d0_f0_0.raw", DetectorType.Jungfrau, 512, 1024, 16) as f:
|
||||
i = 0
|
||||
for header, frame in f:
|
||||
assert header["frameNumber"] == i+1
|
||||
assert np.all(frame == data[i])
|
||||
i += 1
|
||||
assert i == 3
|
||||
assert header["frameNumber"] == 3
|
@ -21,23 +21,35 @@ using ClusterTypes =
|
||||
auto get_test_parameters() {
|
||||
return GENERATE(
|
||||
std::make_tuple(ClusterTypes{Cluster<int, 2, 2>{0, 0, {1, 2, 3, 1}}},
|
||||
Eta2<int>{2. / 3, 3. / 4, corner::cBottomLeft, 7}),
|
||||
Eta2<int>{2. / 3, 3. / 4,
|
||||
static_cast<int>(corner::cBottomLeft), 7}),
|
||||
std::make_tuple(
|
||||
ClusterTypes{Cluster<int, 3, 3>{0, 0, {1, 2, 3, 4, 5, 6, 1, 2, 7}}},
|
||||
Eta2<int>{6. / 11, 2. / 7, corner::cTopRight, 20}),
|
||||
Eta2<int>{6. / 11, 2. / 7, static_cast<int>(corner::cTopRight),
|
||||
20}),
|
||||
std::make_tuple(ClusterTypes{Cluster<int, 5, 5>{
|
||||
0, 0, {1, 6, 7, 6, 5, 4, 3, 2, 1, 8, 8, 9, 2,
|
||||
0, 0, {1, 6, 7, 6, 5, 4, 3, 2, 1, 2, 8, 9, 8,
|
||||
1, 4, 5, 6, 7, 8, 4, 1, 1, 1, 1, 1}}},
|
||||
Eta2<int>{9. / 17, 5. / 13, 8, 28}),
|
||||
Eta2<int>{8. / 17, 7. / 15, 9, 30}),
|
||||
std::make_tuple(
|
||||
ClusterTypes{Cluster<int, 4, 2>{0, 0, {1, 4, 7, 2, 5, 6, 4, 3}}},
|
||||
Eta2<int>{7. / 11, 6. / 10, 1, 21}),
|
||||
Eta2<int>{4. / 10, 4. / 11, 1, 21}),
|
||||
std::make_tuple(
|
||||
ClusterTypes{Cluster<int, 2, 3>{0, 0, {1, 3, 2, 3, 4, 2}}},
|
||||
Eta2<int>{3. / 5, 4. / 6, 1, 11}));
|
||||
Eta2<int>{3. / 5, 2. / 5, 1, 11}));
|
||||
}
|
||||
|
||||
TEST_CASE("calculate_eta2", "[.eta_calculation]") {
|
||||
TEST_CASE("compute_largest_2x2_subcluster", "[eta_calculation]") {
|
||||
auto [cluster, expected_eta] = get_test_parameters();
|
||||
|
||||
auto [sum, index] = std::visit(
|
||||
[](const auto &clustertype) { return clustertype.max_sum_2x2(); },
|
||||
cluster);
|
||||
CHECK(expected_eta.c == index);
|
||||
CHECK(expected_eta.sum == sum);
|
||||
}
|
||||
|
||||
TEST_CASE("calculate_eta2", "[eta_calculation]") {
|
||||
|
||||
auto [cluster, expected_eta] = get_test_parameters();
|
||||
|
||||
@ -50,3 +62,66 @@ TEST_CASE("calculate_eta2", "[.eta_calculation]") {
|
||||
CHECK(eta.c == expected_eta.c);
|
||||
CHECK(eta.sum == expected_eta.sum);
|
||||
}
|
||||
|
||||
// 3x3 cluster layout (rotated to match the cBottomLeft enum):
|
||||
// 6, 7, 8
|
||||
// 3, 4, 5
|
||||
// 0, 1, 2
|
||||
|
||||
TEST_CASE("Calculate eta2 for a 3x3 int32 cluster with the largest 2x2 sum in "
|
||||
"the bottom left",
|
||||
"[eta_calculation]") {
|
||||
|
||||
// Create a 3x3 cluster
|
||||
Cluster<int32_t, 3, 3> cl;
|
||||
cl.x = 0;
|
||||
cl.y = 0;
|
||||
cl.data[0] = 30;
|
||||
cl.data[1] = 23;
|
||||
cl.data[2] = 5;
|
||||
cl.data[3] = 20;
|
||||
cl.data[4] = 50;
|
||||
cl.data[5] = 3;
|
||||
cl.data[6] = 8;
|
||||
cl.data[7] = 2;
|
||||
cl.data[8] = 3;
|
||||
|
||||
// 8, 2, 3
|
||||
// 20, 50, 3
|
||||
// 30, 23, 5
|
||||
|
||||
auto eta = calculate_eta2(cl);
|
||||
CHECK(eta.c == static_cast<int>(corner::cBottomLeft));
|
||||
CHECK(eta.x == 50.0 / (20 + 50)); // 4/(3+4)
|
||||
CHECK(eta.y == 50.0 / (23 + 50)); // 4/(1+4)
|
||||
CHECK(eta.sum == 30 + 23 + 20 + 50);
|
||||
}
|
||||
|
||||
TEST_CASE("Calculate eta2 for a 3x3 int32 cluster with the largest 2x2 sum in "
|
||||
"the top left",
|
||||
"[eta_calculation]") {
|
||||
|
||||
// Create a 3x3 cluster
|
||||
Cluster<int32_t, 3, 3> cl;
|
||||
cl.x = 0;
|
||||
cl.y = 0;
|
||||
cl.data[0] = 8;
|
||||
cl.data[1] = 12;
|
||||
cl.data[2] = 5;
|
||||
cl.data[3] = 77;
|
||||
cl.data[4] = 80;
|
||||
cl.data[5] = 3;
|
||||
cl.data[6] = 82;
|
||||
cl.data[7] = 91;
|
||||
cl.data[8] = 3;
|
||||
|
||||
// 82, 91, 3
|
||||
// 77, 80, 3
|
||||
// 8, 12, 5
|
||||
|
||||
auto eta = calculate_eta2(cl);
|
||||
CHECK(eta.c == static_cast<int>(corner::cTopLeft));
|
||||
CHECK(eta.x == 80. / (77 + 80)); // 4/(3+4)
|
||||
CHECK(eta.y == 91.0 / (91 + 80)); // 7/(7+4)
|
||||
CHECK(eta.sum == 77 + 80 + 82 + 91);
|
||||
}
|
||||
|
@ -14,61 +14,8 @@
|
||||
|
||||
using namespace aare;
|
||||
|
||||
TEST_CASE("Correct Instantiation of Cluster and ClusterVector",
|
||||
"[.cluster][.instantiation]") {
|
||||
|
||||
CHECK(is_valid_cluster<double, 3, 3>);
|
||||
CHECK(is_valid_cluster<double, 3, 2>);
|
||||
CHECK(not is_valid_cluster<int, 0, 0>);
|
||||
CHECK(not is_valid_cluster<std::string, 2, 2>);
|
||||
CHECK(not is_valid_cluster<int, 2, 2, double>);
|
||||
|
||||
CHECK(not is_cluster_v<int>);
|
||||
CHECK(is_cluster_v<Cluster<int, 3, 3>>);
|
||||
}
|
||||
<<<<<<< Updated upstream
|
||||
=======
|
||||
|
||||
using ClusterTypes =
|
||||
std::variant<Cluster<int, 2, 2>, Cluster<int, 3, 3>, Cluster<int, 5, 5>,
|
||||
Cluster<int, 4, 2>, Cluster<int, 2, 3>>;
|
||||
|
||||
auto get_test_sum_parameters() {
|
||||
return GENERATE(
|
||||
std::make_tuple(ClusterTypes{Cluster<int, 2, 2>{0, 0, {1, 2, 3, 1}}},
|
||||
std::make_pair(7, 0)),
|
||||
std::make_tuple(
|
||||
ClusterTypes{Cluster<int, 3, 3>{0, 0, {1, 2, 3, 4, 5, 6, 1, 2, 7}}},
|
||||
std::make_pair(20, 3)),
|
||||
std::make_tuple(ClusterTypes{Cluster<int, 5, 5>{
|
||||
0, 0, {1, 6, 7, 6, 5, 4, 3, 2, 1, 8, 8, 9, 2,
|
||||
1, 4, 5, 6, 7, 8, 4, 1, 1, 1, 1, 1}}},
|
||||
std::make_pair(28, 8)),
|
||||
std::make_tuple(
|
||||
ClusterTypes{Cluster<int, 4, 2>{0, 0, {1, 4, 7, 2, 5, 6, 4, 3}}},
|
||||
std::make_pair(21, 1)),
|
||||
std::make_tuple(
|
||||
ClusterTypes{Cluster<int, 2, 3>{0, 0, {1, 3, 2, 3, 4, 2}}},
|
||||
std::make_pair(11, 1)));
|
||||
}
|
||||
|
||||
TEST_CASE("compute_largest_2x2_subcluster", "[.cluster]") {
|
||||
auto [cluster, sum_pair] = get_test_sum_parameters();
|
||||
|
||||
auto sum = std::visit(
|
||||
[](const auto &clustertype) { return clustertype.max_sum_2x2(); },
|
||||
cluster);
|
||||
CHECK(sum_pair.first == sum.first);
|
||||
CHECK(sum_pair.second == sum.second);
|
||||
}
|
||||
|
||||
TEST_CASE("Test sum of Cluster", "[.cluster]") {
|
||||
Cluster<int, 2, 2> cluster{0, 0, {1, 2, 3, 4}};
|
||||
|
||||
CHECK(cluster.sum() == 10);
|
||||
|
||||
Cluster<int, 2, 3> cluster2x3{0, 0, {1, 3, 2, 3, 4, 2}};
|
||||
|
||||
CHECK(cluster2x3.sum() == 15);
|
||||
}
|
||||
>>>>>>> Stashed changes
|
||||
}
|
@ -2,23 +2,32 @@
|
||||
#include "test_config.hpp"
|
||||
|
||||
#include "aare/defs.hpp"
|
||||
#include <algorithm>
|
||||
#include <catch2/catch_test_macros.hpp>
|
||||
#include <filesystem>
|
||||
|
||||
using aare::Cluster;
|
||||
using aare::ClusterFile;
|
||||
using aare::ClusterVector;
|
||||
|
||||
TEST_CASE("Read one frame from a a cluster file", "[.files]") {
|
||||
// We know that the frame has 97 clusters
|
||||
|
||||
TEST_CASE("Read one frame from a cluster file", "[.files]") {
|
||||
//We know that the frame has 97 clusters
|
||||
auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust";
|
||||
REQUIRE(std::filesystem::exists(fpath));
|
||||
|
||||
ClusterFile<Cluster<int32_t, 3, 3>> f(fpath);
|
||||
auto clusters = f.read_frame();
|
||||
REQUIRE(clusters.size() == 97);
|
||||
REQUIRE(clusters.frame_number() == 135);
|
||||
CHECK(clusters.size() == 97);
|
||||
CHECK(clusters.frame_number() == 135);
|
||||
CHECK(clusters[0].x == 1);
|
||||
CHECK(clusters[0].y == 200);
|
||||
int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
|
||||
CHECK(std::equal(std::begin(clusters[0].data), std::end(clusters[0].data),
|
||||
std::begin(expected_cluster_data)));
|
||||
}
|
||||
|
||||
|
||||
TEST_CASE("Read one frame using ROI", "[.files]") {
|
||||
// We know that the frame has 97 clusters
|
||||
auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust";
|
||||
@ -37,14 +46,22 @@ TEST_CASE("Read one frame using ROI", "[.files]") {
|
||||
|
||||
// Check that all clusters are within the ROI
|
||||
for (size_t i = 0; i < clusters.size(); i++) {
|
||||
auto c = clusters.at(i);
|
||||
auto c = clusters[i];
|
||||
REQUIRE(c.x >= roi.xmin);
|
||||
REQUIRE(c.x <= roi.xmax);
|
||||
REQUIRE(c.y >= roi.ymin);
|
||||
REQUIRE(c.y <= roi.ymax);
|
||||
}
|
||||
|
||||
CHECK(clusters[0].x == 1);
|
||||
CHECK(clusters[0].y == 200);
|
||||
int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
|
||||
CHECK(std::equal(std::begin(clusters[0].data), std::end(clusters[0].data),
|
||||
std::begin(expected_cluster_data)));
|
||||
}
|
||||
|
||||
|
||||
|
||||
TEST_CASE("Read clusters from single frame file", "[.files]") {
|
||||
|
||||
// frame_number, num_clusters [135] 97
|
||||
@ -147,6 +164,7 @@ TEST_CASE("Read clusters from single frame file", "[.files]") {
|
||||
// [ 97 296] [864 865 866 867 868 869 870 871 872]
|
||||
|
||||
auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust";
|
||||
|
||||
REQUIRE(std::filesystem::exists(fpath));
|
||||
|
||||
SECTION("Read fewer clusters than available") {
|
||||
@ -154,6 +172,12 @@ TEST_CASE("Read clusters from single frame file", "[.files]") {
|
||||
auto clusters = f.read_clusters(50);
|
||||
REQUIRE(clusters.size() == 50);
|
||||
REQUIRE(clusters.frame_number() == 135);
|
||||
int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
|
||||
REQUIRE(clusters[0].x == 1);
|
||||
REQUIRE(clusters[0].y == 200);
|
||||
CHECK(std::equal(std::begin(clusters[0].data),
|
||||
std::end(clusters[0].data),
|
||||
std::begin(expected_cluster_data)));
|
||||
}
|
||||
SECTION("Read more clusters than available") {
|
||||
ClusterFile<Cluster<int32_t, 3, 3>> f(fpath);
|
||||
@ -161,24 +185,167 @@ TEST_CASE("Read clusters from single frame file", "[.files]") {
|
||||
auto clusters = f.read_clusters(100);
|
||||
REQUIRE(clusters.size() == 97);
|
||||
REQUIRE(clusters.frame_number() == 135);
|
||||
int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
|
||||
REQUIRE(clusters[0].x == 1);
|
||||
REQUIRE(clusters[0].y == 200);
|
||||
CHECK(std::equal(std::begin(clusters[0].data),
|
||||
std::end(clusters[0].data),
|
||||
std::begin(expected_cluster_data)));
|
||||
}
|
||||
SECTION("Read all clusters") {
|
||||
ClusterFile<Cluster<int32_t, 3, 3>> f(fpath);
|
||||
auto clusters = f.read_clusters(97);
|
||||
REQUIRE(clusters.size() == 97);
|
||||
REQUIRE(clusters.frame_number() == 135);
|
||||
|
||||
REQUIRE(clusters.at(0).x == 1);
|
||||
REQUIRE(clusters.at(0).y == 200);
|
||||
REQUIRE(clusters[0].x == 1);
|
||||
REQUIRE(clusters[0].y == 200);
|
||||
int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
|
||||
CHECK(std::equal(std::begin(clusters[0].data),
|
||||
std::end(clusters[0].data),
|
||||
std::begin(expected_cluster_data)));
|
||||
}
|
||||
}
|
||||
|
||||
TEST_CASE("Read clusters", "[.files]") {
|
||||
// beam_En700eV_-40deg_300V_10us_d0_f0_100.clust
|
||||
auto fpath = test_data_path() / "clust" /
|
||||
"beam_En700eV_-40deg_300V_10us_d0_f0_100.clust";
|
||||
TEST_CASE("Read clusters from single frame file with ROI", "[.files]") {
|
||||
auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust";
|
||||
REQUIRE(std::filesystem::exists(fpath));
|
||||
|
||||
ClusterFile<Cluster<int32_t, 3, 3>> f(fpath);
|
||||
auto clusters = f.read_clusters(500);
|
||||
|
||||
aare::ROI roi;
|
||||
roi.xmin = 0;
|
||||
roi.xmax = 50;
|
||||
roi.ymin = 200;
|
||||
roi.ymax = 249;
|
||||
f.set_roi(roi);
|
||||
|
||||
auto clusters = f.read_clusters(10);
|
||||
|
||||
CHECK(clusters.size() == 10);
|
||||
CHECK(clusters.frame_number() == 135);
|
||||
CHECK(clusters[0].x == 1);
|
||||
CHECK(clusters[0].y == 200);
|
||||
int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
|
||||
CHECK(std::equal(std::begin(clusters[0].data), std::end(clusters[0].data),
|
||||
std::begin(expected_cluster_data)));
|
||||
}
|
||||
|
||||
TEST_CASE("Read cluster from multiple frame file", "[.files]") {
|
||||
|
||||
using ClusterType = Cluster<double, 2, 2>;
|
||||
|
||||
auto fpath =
|
||||
test_data_path() / "clust" / "Two_frames_2x2double_test_clusters.clust";
|
||||
|
||||
REQUIRE(std::filesystem::exists(fpath));
|
||||
|
||||
// Two_frames_2x2double_test_clusters.clust
|
||||
// frame number, num_clusters 0, 4
|
||||
//[10, 20], {0. ,0., 0., 0.}
|
||||
//[11, 30], {1., 1., 1., 1.}
|
||||
    //[12, 40], {2., 2., 2., 2.}
    //[13, 50], {3., 3., 3., 3.}
    // 1,4
    //[10, 20], {4., 4., 4., 4.}
    //[11, 30], {5., 5., 5., 5.}
    //[12, 40], {6., 6., 6., 6.}
    //[13, 50], {7., 7., 7., 7.}

    SECTION("Read clusters from both frames") {
        ClusterFile<ClusterType> f(fpath);
        auto clusters = f.read_clusters(2);
        REQUIRE(clusters.size() == 2);
        REQUIRE(clusters.frame_number() == 0);

        auto clusters1 = f.read_clusters(3);

        REQUIRE(clusters1.size() == 3);
        REQUIRE(clusters1.frame_number() == 1);
    }

    SECTION("Read all clusters") {
        ClusterFile<ClusterType> f(fpath);
        auto clusters = f.read_clusters(8);
        REQUIRE(clusters.size() == 8);
        REQUIRE(clusters.frame_number() == 1);
    }

    SECTION("Read clusters from one frame") {
        ClusterFile<ClusterType> f(fpath);
        auto clusters = f.read_clusters(2);
        REQUIRE(clusters.size() == 2);
        REQUIRE(clusters.frame_number() == 0);

        auto clusters1 = f.read_clusters(1);

        REQUIRE(clusters1.size() == 1);
        REQUIRE(clusters1.frame_number() == 0);
    }
}

TEST_CASE("Write cluster with potential padding", "[.files][.ClusterFile]") {

    using ClusterType = Cluster<double, 3, 3>;

    REQUIRE(std::filesystem::exists(test_data_path() / "clust"));

    auto fpath = test_data_path() / "clust" / "single_frame_2_clusters.clust";

    ClusterFile<ClusterType> file(fpath, 1000, "w");

    ClusterVector<ClusterType> clustervec(2);
    int16_t coordinate = 5;
    clustervec.push_back(ClusterType{
        coordinate, coordinate, {0., 0., 0., 0., 0., 0., 0., 0., 0.}});
    clustervec.push_back(ClusterType{
        coordinate, coordinate, {0., 0., 0., 0., 0., 0., 0., 0., 0.}});

    file.write_frame(clustervec);

    file.close();

    file.open("r");

    auto read_cluster_vector = file.read_frame();

    CHECK(read_cluster_vector.size() == 2);
    CHECK(read_cluster_vector.frame_number() == 0);

    CHECK(read_cluster_vector[0].x == clustervec[0].x);
    CHECK(read_cluster_vector[0].y == clustervec[0].y);
    CHECK(std::equal(
        clustervec[0].data.begin(), clustervec[0].data.end(),
        read_cluster_vector[0].data.begin(), [](double a, double b) {
            return std::abs(a - b) < std::numeric_limits<double>::epsilon();
        }));

    CHECK(read_cluster_vector[1].x == clustervec[1].x);
    CHECK(read_cluster_vector[1].y == clustervec[1].y);
    CHECK(std::equal(
        clustervec[1].data.begin(), clustervec[1].data.end(),
        read_cluster_vector[1].data.begin(), [](double a, double b) {
            return std::abs(a - b) < std::numeric_limits<double>::epsilon();
        }));
}

TEST_CASE("Read frame and modify cluster data", "[.files][.ClusterFile]") {
    auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust";
    REQUIRE(std::filesystem::exists(fpath));

    ClusterFile<Cluster<int32_t, 3, 3>> f(fpath);

    auto clusters = f.read_frame();
    CHECK(clusters.size() == 97);
    CHECK(clusters.frame_number() == 135);

    int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
    clusters.push_back(
        Cluster<int32_t, 3, 3>{0, 0, {0, 1, 2, 3, 4, 5, 6, 7, 8}});

    CHECK(clusters.size() == 98);
    CHECK(clusters[0].x == 1);
    CHECK(clusters[0].y == 200);

    CHECK(std::equal(std::begin(clusters[0].data), std::end(clusters[0].data),
                     std::begin(expected_cluster_data)));
}
src/ClusterFinderMT.test.cpp (Normal file, 99 lines)
@ -0,0 +1,99 @@

#include "aare/ClusterFinderMT.hpp"
#include "aare/Cluster.hpp"
#include "aare/ClusterCollector.hpp"
#include "aare/File.hpp"

#include "test_config.hpp"

#include <catch2/catch_test_macros.hpp>
#include <filesystem>
#include <memory>

using namespace aare;

// wrapper function to access private member variables for testing
template <typename ClusterType, typename FRAME_TYPE = uint16_t,
          typename PEDESTAL_TYPE = double>
class ClusterFinderMTWrapper
    : public ClusterFinderMT<ClusterType, FRAME_TYPE, PEDESTAL_TYPE> {

  public:
    ClusterFinderMTWrapper(Shape<2> image_size, PEDESTAL_TYPE nSigma = 5.0,
                           size_t capacity = 2000, size_t n_threads = 3)
        : ClusterFinderMT<ClusterType, FRAME_TYPE, PEDESTAL_TYPE>(
              image_size, nSigma, capacity, n_threads) {}

    size_t get_m_input_queues_size() const {
        return this->m_input_queues.size();
    }

    size_t get_m_output_queues_size() const {
        return this->m_output_queues.size();
    }

    size_t get_m_cluster_finders_size() const {
        return this->m_cluster_finders.size();
    }

    bool m_output_queues_are_empty() const {
        for (auto &queue : this->m_output_queues) {
            if (!queue->isEmpty())
                return false;
        }
        return true;
    }

    bool m_input_queues_are_empty() const {
        for (auto &queue : this->m_input_queues) {
            if (!queue->isEmpty())
                return false;
        }
        return true;
    }

    bool m_sink_is_empty() const { return this->m_sink.isEmpty(); }

    size_t m_sink_size() const { return this->m_sink.sizeGuess(); }
};

TEST_CASE("multithreaded cluster finder", "[.files][.ClusterFinder]") {
    auto fpath = "/mnt/sls_det_storage/matterhorn_data/aare_test_data/"
                 "Moench03new/cu_half_speed_master_4.json";

    File file(fpath);

    size_t n_threads = 2;
    size_t n_frames_pd = 10;

    using ClusterType = Cluster<int32_t, 3, 3>;

    ClusterFinderMTWrapper<ClusterType> cf(
        {static_cast<int64_t>(file.rows()), static_cast<int64_t>(file.cols())},
        5, 2000, n_threads); // no idea what frame type is!!! default uint16_t

    CHECK(cf.get_m_input_queues_size() == n_threads);
    CHECK(cf.get_m_output_queues_size() == n_threads);
    CHECK(cf.get_m_cluster_finders_size() == n_threads);
    CHECK(cf.m_output_queues_are_empty() == true);
    CHECK(cf.m_input_queues_are_empty() == true);

    for (size_t i = 0; i < n_frames_pd; ++i) {
        cf.find_clusters(file.read_frame().view<uint16_t>());
    }

    cf.stop();

    CHECK(cf.m_output_queues_are_empty() == true);
    CHECK(cf.m_input_queues_are_empty() == true);

    CHECK(cf.m_sink_size() == n_frames_pd);
    ClusterCollector<ClusterType> clustercollector(&cf);

    clustercollector.stop();

    CHECK(cf.m_sink_size() == 0);

    auto clustervec = clustercollector.steal_clusters();
    // CHECK(clustervec.size() == ) //dont know how many clusters to expect
}
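Editor's note: the test above already walks through the full producer/consumer flow, but the intent is easier to see without the test wrapper. Below is a minimal sketch under the assumption that ClusterFinderMT exposes the same constructor arguments and methods the test forwards (image size, nSigma, capacity, number of threads; find_clusters and stop) and that ClusterCollector and steal_clusters behave exactly as used above; the data path is a placeholder, not a real file.

#include "aare/Cluster.hpp"
#include "aare/ClusterCollector.hpp"
#include "aare/ClusterFinderMT.hpp"
#include "aare/File.hpp"

using namespace aare;

int main() {
    using ClusterType = Cluster<int32_t, 3, 3>;

    File file("/path/to/master_file.json"); // placeholder input

    // Arguments as forwarded by the wrapper above:
    // image size, nSigma, queue capacity, number of worker threads.
    ClusterFinderMT<ClusterType> cf(
        {static_cast<int64_t>(file.rows()), static_cast<int64_t>(file.cols())},
        5, 2000, 2);

    // Push frames to the worker threads, then let them drain.
    for (int i = 0; i < 10; ++i)
        cf.find_clusters(file.read_frame().view<uint16_t>());
    cf.stop();

    // Attach a collector to empty the sink and take ownership of the result.
    ClusterCollector<ClusterType> collector(&cf);
    collector.stop();
    auto clusters = collector.steal_clusters(); // clusters gathered from the sink
    (void)clusters;
    return 0;
}

As in the test, the collector is attached only after cf.stop(), so everything the workers produced has already reached the sink by the time it is drained.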
@ -8,15 +8,14 @@
using aare::Cluster;
using aare::ClusterVector;


TEST_CASE("item_size return the size of the cluster stored"){
TEST_CASE("item_size return the size of the cluster stored") {
    using C1 = Cluster<int32_t, 2, 2>;
    ClusterVector<C1> cv(4);
    CHECK(cv.item_size() == sizeof(C1));

    //Sanity check
    //2*2*4 = 16 bytes of data for the cluster
    // 2*2 = 4 bytes for the x and y coordinates
    // Sanity check
    // 2*2*4 = 16 bytes of data for the cluster
    // 2*2 = 4 bytes for the x and y coordinates
    REQUIRE(cv.item_size() == 20);

    using C2 = Cluster<int32_t, 3, 3>;
@ -30,8 +29,6 @@ TEST_CASE("item_size return the size of the cluster stored"){
    using C4 = Cluster<char, 10, 5>;
    ClusterVector<C4> cv4(4);
    CHECK(cv4.item_size() == sizeof(C4));
<<<<<<< Updated upstream
=======

    using C5 = Cluster<int32_t, 2, 3>;
    ClusterVector<C5> cv5(4);
@ -39,12 +36,11 @@ TEST_CASE("item_size return the size of the cluster stored"){

    using C6 = Cluster<double, 5, 5>;
    ClusterVector<C6> cv6(4);
    CHECK(cv6.item_size() == sizeof(C6)); // double uses padding!!!
    CHECK(cv6.item_size() == sizeof(C6));

    using C7 = Cluster<double, 3, 3>;
    ClusterVector<C7> cv7(4);
    CHECK(cv7.item_size() == sizeof(C7));
>>>>>>> Stashed changes
}

TEST_CASE("ClusterVector 2x2 int32_t capacity 4, push back then read",
@ -64,7 +60,7 @@ TEST_CASE("ClusterVector 2x2 int32_t capacity 4, push back then read",
    REQUIRE(cv.size() == 1);
    REQUIRE(cv.capacity() == 4);

    auto c2 = cv.at(0);
    auto c2 = cv[0];

    // Check that the data is the same
    REQUIRE(c1.x == c2.x);
@ -226,32 +222,6 @@ TEST_CASE("Concatenate two cluster vectors where we need to allocate",
    REQUIRE(ptr[3].y == 17);
}

TEST_CASE("calculate cluster sum", "[.ClusterVector]") {
    ClusterVector<Cluster<int32_t, 2, 2>> cv1(2);
    Cluster<int32_t, 2, 2> c1 = {1, 2, {3, 4, 5, 6}};
    cv1.push_back(c1);
    Cluster<int32_t, 2, 2> c2 = {6, 7, {8, 9, 10, 11}};
    cv1.push_back(c2);

    auto sum1 = cv1.sum();

    std::vector<int32_t> expected_sum1{18, 38};

    CHECK(sum1 == expected_sum1);

    ClusterVector<Cluster<int32_t, 3, 3>> cv2(2);
    Cluster<int32_t, 3, 3> c3 = {1, 2, {3, 4, 5, 6, 1, 7, 8, 1, 1}};
    cv2.push_back(c3);
    Cluster<int32_t, 3, 3> c4 = {6, 7, {8, 9, 10, 11, 13, 5, 12, 2, 4}};
    cv2.push_back(c4);

    auto sum2 = cv2.sum();

    std::vector<int32_t> expected_sum2{36, 74};

    CHECK(sum2 == expected_sum2);
}

struct ClusterTestData {
    uint8_t ClusterSizeX;
    uint8_t ClusterSizeY;
@ -3,6 +3,7 @@

#include <iostream>
#include <vector>
#include <numeric>

using aare::NDView;
using aare::Shape;
@ -21,10 +22,8 @@ TEST_CASE("Element reference 1D") {
}

TEST_CASE("Element reference 2D") {
    std::vector<int> vec;
    for (int i = 0; i != 12; ++i) {
        vec.push_back(i);
    }
    std::vector<int> vec(12);
    std::iota(vec.begin(), vec.end(), 0);

    NDView<int, 2> data(vec.data(), Shape<2>{3, 4});
    REQUIRE(vec.size() == static_cast<size_t>(data.size()));
@ -58,10 +57,8 @@ TEST_CASE("Element reference 3D") {
}

TEST_CASE("Plus and miuns with single value") {
    std::vector<int> vec;
    for (int i = 0; i != 12; ++i) {
        vec.push_back(i);
    }
    std::vector<int> vec(12);
    std::iota(vec.begin(), vec.end(), 0);
    NDView<int, 2> data(vec.data(), Shape<2>{3, 4});
    data += 5;
    int i = 0;
@ -116,10 +113,8 @@ TEST_CASE("elementwise assign") {
}

TEST_CASE("iterators") {
    std::vector<int> vec;
    for (int i = 0; i != 12; ++i) {
        vec.push_back(i);
    }
    std::vector<int> vec(12);
    std::iota(vec.begin(), vec.end(), 0);
    NDView<int, 1> data(vec.data(), Shape<1>{12});
    int i = 0;
    for (const auto item : data) {
@ -167,27 +162,31 @@ TEST_CASE("divide with another span") {
}

TEST_CASE("Retrieve shape") {
    std::vector<int> vec;
    for (int i = 0; i != 12; ++i) {
        vec.push_back(i);
    }
    std::vector<int> vec(12);
    std::iota(vec.begin(), vec.end(), 0);
    NDView<int, 2> data(vec.data(), Shape<2>{3, 4});
    REQUIRE(data.shape()[0] == 3);
    REQUIRE(data.shape()[1] == 4);
}

TEST_CASE("compare two views") {
    std::vector<int> vec1;
    for (int i = 0; i != 12; ++i) {
        vec1.push_back(i);
    }
    std::vector<int> vec1(12);
    std::iota(vec1.begin(), vec1.end(), 0);
    NDView<int, 2> view1(vec1.data(), Shape<2>{3, 4});

    std::vector<int> vec2;
    for (int i = 0; i != 12; ++i) {
        vec2.push_back(i);
    }
    std::vector<int> vec2(12);
    std::iota(vec2.begin(), vec2.end(), 0);
    NDView<int, 2> view2(vec2.data(), Shape<2>{3, 4});

    REQUIRE((view1 == view2));
}


TEST_CASE("Create a view over a vector"){
    std::vector<int> vec(12);
    std::iota(vec.begin(), vec.end(), 0);
    auto v = aare::make_view(vec);
    REQUIRE(v.shape()[0] == 12);
    REQUIRE(v[0] == 0);
    REQUIRE(v[11] == 11);
}
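The only change repeated through these hunks is mechanical: the hand-written fill loop is replaced by constructing the vector with 12 elements and filling it with std::iota from <numeric>, which is why the include is added at the top of the file. A tiny standalone snippet (plain standard C++, independent of aare) showing the two forms are equivalent:

#include <cassert>
#include <numeric>
#include <vector>

int main() {
    // Old pattern: grow the vector one element at a time.
    std::vector<int> a;
    for (int i = 0; i != 12; ++i)
        a.push_back(i);

    // New pattern: allocate 12 elements up front and fill 0..11 in place.
    std::vector<int> b(12);
    std::iota(b.begin(), b.end(), 0);

    assert(a == b); // identical contents: 0, 1, ..., 11
    return 0;
}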
@ -1,9 +1,12 @@
#include "aare/RawSubFile.hpp"
#include "aare/PixelMap.hpp"
#include "aare/utils/ifstream_helpers.hpp"
#include <cstring> // memcpy
#include <fmt/core.h>
#include <iostream>



namespace aare {

RawSubFile::RawSubFile(const std::filesystem::path &fname,
@ -20,7 +23,7 @@ RawSubFile::RawSubFile(const std::filesystem::path &fname,
    }

    if (std::filesystem::exists(fname)) {
        n_frames = std::filesystem::file_size(fname) /
        m_num_frames = std::filesystem::file_size(fname) /
                   (sizeof(DetectorHeader) + rows * cols * bitdepth / 8);
    } else {
        throw std::runtime_error(
@ -35,7 +38,7 @@ RawSubFile::RawSubFile(const std::filesystem::path &fname,
    }

#ifdef AARE_VERBOSE
    fmt::print("Opened file: {} with {} frames\n", m_fname.string(), n_frames);
    fmt::print("Opened file: {} with {} frames\n", m_fname.string(), m_num_frames);
    fmt::print("m_rows: {}, m_cols: {}, m_bitdepth: {}\n", m_rows, m_cols,
               m_bitdepth);
    fmt::print("file size: {}\n", std::filesystem::file_size(fname));
@ -43,8 +46,8 @@ RawSubFile::RawSubFile(const std::filesystem::path &fname,
}

void RawSubFile::seek(size_t frame_index) {
    if (frame_index >= n_frames) {
        throw std::runtime_error(LOCATION + fmt::format("Frame index {} out of range in a file with {} frames", frame_index, n_frames));
    if (frame_index >= m_num_frames) {
        throw std::runtime_error(LOCATION + fmt::format("Frame index {} out of range in a file with {} frames", frame_index, m_num_frames));
    }
    m_file.seekg((sizeof(DetectorHeader) + bytes_per_frame()) * frame_index);
}
@ -60,6 +63,10 @@ void RawSubFile::read_into(std::byte *image_buf, DetectorHeader *header) {
        m_file.seekg(sizeof(DetectorHeader), std::ios::cur);
    }

    if (m_file.fail()){
        throw std::runtime_error(LOCATION + ifstream_error_msg(m_file));
    }

    // TODO! expand support for different bitdepths
    if (m_pixel_map) {
        // read into a temporary buffer and then copy the data to the buffer
@ -79,8 +86,24 @@ void RawSubFile::read_into(std::byte *image_buf, DetectorHeader *header) {
        // read directly into the buffer
        m_file.read(reinterpret_cast<char *>(image_buf), bytes_per_frame());
    }

    if (m_file.fail()){
        throw std::runtime_error(LOCATION + ifstream_error_msg(m_file));
    }
}

void RawSubFile::read_into(std::byte *image_buf, size_t n_frames, DetectorHeader *header) {
    for (size_t i = 0; i < n_frames; i++) {
        read_into(image_buf, header);
        image_buf += bytes_per_frame();
        if (header) {
            ++header;
        }
    }
}



template <typename T>
void RawSubFile::read_with_map(std::byte *image_buf) {
    auto part_buffer = new std::byte[bytes_per_frame()];
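For readers new to this file: the constructor above derives the frame count from the file size, assuming each frame is stored as a DetectorHeader immediately followed by rows * cols * bitdepth / 8 bytes of pixel data, and the new multi-frame read_into overload walks that same layout one frame at a time. A small standalone illustration of the arithmetic (the header size below is purely illustrative; the real value is sizeof(DetectorHeader)):

#include <cstddef>
#include <cstdio>

int main() {
    // Illustrative numbers only; the real layout uses sizeof(DetectorHeader)
    // and the geometry passed to the RawSubFile constructor.
    const std::size_t header_bytes = 112;
    const std::size_t rows = 512, cols = 1024, bitdepth = 16;

    const std::size_t payload_bytes = rows * cols * bitdepth / 8; // 1 MiB here
    const std::size_t frame_bytes = header_bytes + payload_bytes;

    const std::size_t file_size = 10 * frame_bytes; // pretend 10-frame file
    std::printf("frames in file: %zu\n", file_size / frame_bytes); // prints 10
    return 0;
}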
@ -1,5 +1,5 @@
#include "aare/decode.hpp"

#include <cmath>
namespace aare {

uint16_t adc_sar_05_decode64to16(uint64_t input){
@ -22,6 +22,10 @@ uint16_t adc_sar_05_decode64to16(uint64_t input){
}

void adc_sar_05_decode64to16(NDView<uint64_t, 2> input, NDView<uint16_t,2> output){
    if(input.shape() != output.shape()){
        throw std::invalid_argument(LOCATION + " input and output shapes must match");
    }

    for(int64_t i = 0; i < input.shape(0); i++){
        for(int64_t j = 0; j < input.shape(1); j++){
            output(i,j) = adc_sar_05_decode64to16(input(i,j));
@ -49,6 +53,9 @@ uint16_t adc_sar_04_decode64to16(uint64_t input){
}

void adc_sar_04_decode64to16(NDView<uint64_t, 2> input, NDView<uint16_t,2> output){
    if(input.shape() != output.shape()){
        throw std::invalid_argument(LOCATION + " input and output shapes must match");
    }
    for(int64_t i = 0; i < input.shape(0); i++){
        for(int64_t j = 0; j < input.shape(1); j++){
            output(i,j) = adc_sar_04_decode64to16(input(i,j));
@ -56,6 +63,40 @@ void adc_sar_04_decode64to16(NDView<uint64_t, 2> input, NDView<uint16_t,2> outpu
    }
}

double apply_custom_weights(uint16_t input, const NDView<double, 1> weights) {
    if(weights.size() > 16){
        throw std::invalid_argument("weights size must be less than or equal to 16");
    }

    double result = 0.0;
    for (ssize_t i = 0; i < weights.size(); ++i) {
        result += ((input >> i) & 1) * std::pow(weights[i], i);
    }
    return result;

}

void apply_custom_weights(NDView<uint16_t, 1> input, NDView<double, 1> output, const NDView<double,1> weights) {
    if(input.shape() != output.shape()){
        throw std::invalid_argument(LOCATION + " input and output shapes must match");
    }

    //Calculate weights to avoid repeatedly calling std::pow
    std::vector<double> weights_powers(weights.size());
    for (ssize_t i = 0; i < weights.size(); ++i) {
        weights_powers[i] = std::pow(weights[i], i);
    }

    // Apply custom weights to each element in the input array
    for (ssize_t i = 0; i < input.shape(0); i++) {
        double result = 0.0;
        for (size_t bit_index = 0; bit_index < weights_powers.size(); ++bit_index) {
            result += ((input(i) >> bit_index) & 1) * weights_powers[bit_index];
        }
        output(i) = result;
    }
}



} // namespace aare
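The weighting rule implemented by both apply_custom_weights overloads is: every set bit i of the input contributes weights[i] raised to the power i. Restated below without the NDView types and checked against the 6.34 value that decode.test.cpp expects further down; the helper name weighted_bits is ours, not part of the library.

#include <cassert>
#include <cmath>
#include <cstdint>
#include <vector>

// Same weighting rule as apply_custom_weights above: bit i of the input
// contributes weights[i]^i when it is set.
double weighted_bits(uint16_t input, const std::vector<double> &weights) {
    double result = 0.0;
    for (std::size_t i = 0; i < weights.size(); ++i)
        result += ((input >> i) & 1) * std::pow(weights[i], i);
    return result;
}

int main() {
    const std::vector<double> w{1.7, 2.1, 1.8};
    // 0b111 -> 1.7^0 + 2.1^1 + 1.8^2 = 1.0 + 2.1 + 3.24 = 6.34,
    // which is exactly what the test below checks.
    assert(std::abs(weighted_bits(0b111, w) - 6.34) < 1e-9);
    return 0;
}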
src/decode.test.cpp (Normal file, 80 lines)
@ -0,0 +1,80 @@
#include "aare/decode.hpp"

#include <catch2/matchers/catch_matchers_floating_point.hpp>
#include <catch2/catch_test_macros.hpp>
#include "aare/NDArray.hpp"
using Catch::Matchers::WithinAbs;
#include <vector>

TEST_CASE("test_adc_sar_05_decode64to16"){
    uint64_t input = 0;
    uint16_t output = aare::adc_sar_05_decode64to16(input);
    CHECK(output == 0);


    // bit 29 on th input is bit 0 on the output
    input = 1UL << 29;
    output = aare::adc_sar_05_decode64to16(input);
    CHECK(output == 1);

    // test all bits by iteratting through the bitlist
    std::vector<int> bitlist = {29, 19, 28, 18, 31, 21, 27, 20, 24, 23, 25, 22};
    for (size_t i = 0; i < bitlist.size(); i++) {
        input = 1UL << bitlist[i];
        output = aare::adc_sar_05_decode64to16(input);
        CHECK(output == (1 << i));
    }


    // test a few "random" values
    input = 0;
    input |= (1UL << 29);
    input |= (1UL << 19);
    input |= (1UL << 28);
    output = aare::adc_sar_05_decode64to16(input);
    CHECK(output == 7UL);


    input = 0;
    input |= (1UL << 18);
    input |= (1UL << 27);
    input |= (1UL << 25);
    output = aare::adc_sar_05_decode64to16(input);
    CHECK(output == 1096UL);

    input = 0;
    input |= (1UL << 25);
    input |= (1UL << 22);
    output = aare::adc_sar_05_decode64to16(input);
    CHECK(output == 3072UL);
}


TEST_CASE("test_apply_custom_weights") {

    uint16_t input = 1;
    aare::NDArray<double, 1> weights_data({3}, 0.0);
    weights_data(0) = 1.7;
    weights_data(1) = 2.1;
    weights_data(2) = 1.8;

    auto weights = weights_data.view();


    double output = aare::apply_custom_weights(input, weights);
    CHECK_THAT(output, WithinAbs(1.0, 0.001));

    input = 1 << 1;
    output = aare::apply_custom_weights(input, weights);
    CHECK_THAT(output, WithinAbs(2.1, 0.001));


    input = 1 << 2;
    output = aare::apply_custom_weights(input, weights);
    CHECK_THAT(output, WithinAbs(3.24, 0.001));

    input = 0b111;
    output = aare::apply_custom_weights(input, weights);
    CHECK_THAT(output, WithinAbs(6.34, 0.001));

}
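The "random" values in the first test follow directly from the bit mapping encoded in bitlist (input bit bitlist[i] becomes output bit i): bits 18, 27 and 25 land on output bits 3, 6 and 10, giving 8 + 64 + 1024 = 1096, and bits 25 and 22 land on output bits 10 and 11, giving 1024 + 2048 = 3072. A standalone cross-check of that arithmetic, independent of the aare decoder:

#include <cassert>
#include <cstdint>
#include <vector>

// Reproduces the expected values above directly from the bit mapping the
// test encodes: input bit bitlist[i] maps to output bit i.
int main() {
    const std::vector<int> bitlist = {29, 19, 28, 18, 31, 21,
                                      27, 20, 24, 23, 25, 22};

    auto expected = [&](const std::vector<int> &set_bits) {
        uint16_t out = 0;
        for (std::size_t i = 0; i < bitlist.size(); ++i)
            for (int b : set_bits)
                if (bitlist[i] == b)
                    out |= static_cast<uint16_t>(1u << i);
        return out;
    };

    assert(expected({29, 19, 28}) == 7);    // output bits 0, 1, 2
    assert(expected({18, 27, 25}) == 1096); // 8 + 64 + 1024
    assert(expected({25, 22}) == 3072);     // 1024 + 2048
    return 0;
}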
src/utils/ifstream_helpers.cpp (Normal file, 18 lines)
@ -0,0 +1,18 @@
#include "aare/utils/ifstream_helpers.hpp"

namespace aare {

std::string ifstream_error_msg(std::ifstream &ifs) {
    std::ios_base::iostate state = ifs.rdstate();
    if (state & std::ios_base::eofbit) {
        return " End of file reached";
    } else if (state & std::ios_base::badbit) {
        return " Bad file stream";
    } else if (state & std::ios_base::failbit) {
        return " File read failed";
    }else{
        return " Unknown/no error";
    }
}

} // namespace aare
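This helper is consumed by the new failure checks in RawSubFile::read_into earlier in the diff. A minimal sketch of the same pattern, with the LOCATION macro (whose definition is not part of this diff) replaced by a plain string:

#include <fstream>
#include <stdexcept>

#include "aare/utils/ifstream_helpers.hpp"

// Reads one byte and converts any stream failure into a descriptive
// exception, mirroring how RawSubFile::read_into uses the helper above.
char read_one_byte(std::ifstream &ifs) {
    char c{};
    ifs.read(&c, 1);
    if (ifs.fail()) {
        // RawSubFile prefixes the message with LOCATION; a literal stands in
        // for it here.
        throw std::runtime_error("read_one_byte:" + aare::ifstream_error_msg(ifs));
    }
    return c;
}

The leading space in each returned message is deliberate: callers concatenate it directly after their own location prefix.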