Compare commits

..

2 Commits

Author  SHA1        Message  Date

        055feb2290  Rewrote the multithreading to fix frame loss  2026-01-06 14:09:51 +01:00
                    Some checks failed:
                    Build on RHEL8 / build (push): Failing after 3m21s
                    Build on RHEL9 / build (push): Failing after 3m32s

root    c035491b63  Added ability to save a cluster larger than we search for  2025-11-25 14:20:55 +01:00
32 changed files with 412 additions and 940 deletions

View File

@@ -1,22 +1,16 @@
# Release notes # Release notes
### 2025.8.22 ### head
Features: Features:
- Apply calibration works in G0 if passed a 2D calibration and pedestal - Apply calibration works in G0 if passed a 2D calibration and pedestal
- count pixels that switch - count pixels that switch
- calculate pedestal (also g0 version) - calculate pedestal (also g0 version)
- NDArray::view() needs an lvalue to reduce issues with the view outliving the array
Bugfixes: ### 2025.07.18
- Now using glibc 2.17 in conda builds (was using the host)
- Fixed shifted pixels in clusters close to the edge of a frame
### 2025.7.18
Features: Features:
@@ -30,7 +24,7 @@ Bugfixes:
- Removed unused file: ClusterFile.cpp - Removed unused file: ClusterFile.cpp
### 2025.5.22 ### 2025.05.22
Features: Features:
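The release-note item about NDArray::view() needing an lvalue refers to lvalue-ref-qualifying the member function, so a view cannot be taken from a temporary that is destroyed at the end of the statement. A minimal sketch of the idea, using hypothetical Owner/View types rather than the aare classes:

```cpp
#include <cstddef>

// Hypothetical Owner/View pair (not the aare types) illustrating the
// lvalue-ref-qualified view() mentioned in the release note above.
struct View {
    const int *data;
    std::size_t size;
};

class Owner {
    int m_data[16] = {};

  public:
    // The trailing '&' means view() can only be called on lvalues, so a view
    // cannot be taken from a temporary that dies at the end of the full expression.
    View view() & { return View{m_data, 16}; }
};

int main() {
    Owner o;
    View v = o.view();            // fine: 'o' outlives 'v'
    // View bad = Owner{}.view(); // rejected by the compiler thanks to '&'
    (void)v;
    return 0;
}
```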

View File

@@ -1 +1 @@
2025.8.22 2025.7.18

View File

@@ -15,7 +15,7 @@ FetchContent_MakeAvailable(benchmark)
add_executable(benchmarks) add_executable(benchmarks)
target_sources(benchmarks PRIVATE ndarray_benchmark.cpp calculateeta_benchmark.cpp reduce_benchmark.cpp) target_sources(benchmarks PRIVATE ndarray_benchmark.cpp calculateeta_benchmark.cpp)
# Link Google Benchmark and other necessary libraries # Link Google Benchmark and other necessary libraries
target_link_libraries(benchmarks PRIVATE benchmark::benchmark aare_core aare_compiler_flags) target_link_libraries(benchmarks PRIVATE benchmark::benchmark aare_core aare_compiler_flags)

View File

@@ -1,168 +0,0 @@
#include "aare/Cluster.hpp"
#include <benchmark/benchmark.h>
using namespace aare;
class ClustersForReduceFixture : public benchmark::Fixture {
public:
Cluster<int, 5, 5> cluster_5x5{};
Cluster<int, 3, 3> cluster_3x3{};
private:
using benchmark::Fixture::SetUp;
void SetUp([[maybe_unused]] const benchmark::State &state) override {
int temp_data[25] = {1, 1, 1, 1, 1, 1, 1, 2, 1, 1,
1, 2, 3, 1, 2, 1, 1, 1, 1, 2};
std::copy(std::begin(temp_data), std::end(temp_data),
std::begin(cluster_5x5.data));
cluster_5x5.x = 5;
cluster_5x5.y = 5;
int temp_data2[9] = {1, 1, 1, 2, 3, 1, 2, 2, 1};
std::copy(std::begin(temp_data2), std::end(temp_data2),
std::begin(cluster_3x3.data));
cluster_3x3.x = 5;
cluster_3x3.y = 5;
}
// void TearDown(::benchmark::State& state) {
// }
};
template <typename T>
Cluster<T, 3, 3, int16_t> reduce_to_3x3(const Cluster<T, 5, 5, int16_t> &c) {
Cluster<T, 3, 3, int16_t> result;
// Write out the sums in the hope that the compiler can optimize this
std::array<T, 9> sum_3x3_subclusters;
// Write out the sums in the hope that the compiler can optimize this
sum_3x3_subclusters[0] = c.data[0] + c.data[1] + c.data[2] + c.data[5] +
c.data[6] + c.data[7] + c.data[10] + c.data[11] +
c.data[12];
sum_3x3_subclusters[1] = c.data[1] + c.data[2] + c.data[3] + c.data[6] +
c.data[7] + c.data[8] + c.data[11] + c.data[12] +
c.data[13];
sum_3x3_subclusters[2] = c.data[2] + c.data[3] + c.data[4] + c.data[7] +
c.data[8] + c.data[9] + c.data[12] + c.data[13] +
c.data[14];
sum_3x3_subclusters[3] = c.data[5] + c.data[6] + c.data[7] + c.data[10] +
c.data[11] + c.data[12] + c.data[15] + c.data[16] +
c.data[17];
sum_3x3_subclusters[4] = c.data[6] + c.data[7] + c.data[8] + c.data[11] +
c.data[12] + c.data[13] + c.data[16] + c.data[17] +
c.data[18];
sum_3x3_subclusters[5] = c.data[7] + c.data[8] + c.data[9] + c.data[12] +
c.data[13] + c.data[14] + c.data[17] + c.data[18] +
c.data[19];
sum_3x3_subclusters[6] = c.data[10] + c.data[11] + c.data[12] + c.data[15] +
c.data[16] + c.data[17] + c.data[20] + c.data[21] +
c.data[22];
sum_3x3_subclusters[7] = c.data[11] + c.data[12] + c.data[13] + c.data[16] +
c.data[17] + c.data[18] + c.data[21] + c.data[22] +
c.data[23];
sum_3x3_subclusters[8] = c.data[12] + c.data[13] + c.data[14] + c.data[17] +
c.data[18] + c.data[19] + c.data[22] + c.data[23] +
c.data[24];
auto index = std::max_element(sum_3x3_subclusters.begin(),
sum_3x3_subclusters.end()) -
sum_3x3_subclusters.begin();
switch (index) {
case 0:
result.x = c.x - 1;
result.y = c.y + 1;
result.data = {c.data[0], c.data[1], c.data[2], c.data[5], c.data[6],
c.data[7], c.data[10], c.data[11], c.data[12]};
break;
case 1:
result.x = c.x;
result.y = c.y + 1;
result.data = {c.data[1], c.data[2], c.data[3], c.data[6], c.data[7],
c.data[8], c.data[11], c.data[12], c.data[13]};
break;
case 2:
result.x = c.x + 1;
result.y = c.y + 1;
result.data = {c.data[2], c.data[3], c.data[4], c.data[7], c.data[8],
c.data[9], c.data[12], c.data[13], c.data[14]};
break;
case 3:
result.x = c.x - 1;
result.y = c.y;
result.data = {c.data[5], c.data[6], c.data[7],
c.data[10], c.data[11], c.data[12],
c.data[15], c.data[16], c.data[17]};
break;
case 4:
result.x = c.x + 1;
result.y = c.y;
result.data = {c.data[6], c.data[7], c.data[8],
c.data[11], c.data[12], c.data[13],
c.data[16], c.data[17], c.data[18]};
break;
case 5:
result.x = c.x + 1;
result.y = c.y;
result.data = {c.data[7], c.data[8], c.data[9],
c.data[12], c.data[13], c.data[14],
c.data[17], c.data[18], c.data[19]};
break;
case 6:
result.x = c.x + 1;
result.y = c.y - 1;
result.data = {c.data[10], c.data[11], c.data[12],
c.data[15], c.data[16], c.data[17],
c.data[20], c.data[21], c.data[22]};
break;
case 7:
result.x = c.x + 1;
result.y = c.y - 1;
result.data = {c.data[11], c.data[12], c.data[13],
c.data[16], c.data[17], c.data[18],
c.data[21], c.data[22], c.data[23]};
break;
case 8:
result.x = c.x + 1;
result.y = c.y - 1;
result.data = {c.data[12], c.data[13], c.data[14],
c.data[17], c.data[18], c.data[19],
c.data[22], c.data[23], c.data[24]};
break;
}
return result;
}
BENCHMARK_F(ClustersForReduceFixture, Reduce2x2)(benchmark::State &st) {
for (auto _ : st) {
// This code gets timed
benchmark::DoNotOptimize(reduce_to_2x2<int, 3, 3, int16_t>(
cluster_3x3)); // make sure compiler evaluates the expression
}
}
BENCHMARK_F(ClustersForReduceFixture, SpecificReduce2x2)(benchmark::State &st) {
for (auto _ : st) {
// This code gets timed
benchmark::DoNotOptimize(reduce_to_2x2<int>(cluster_3x3));
}
}
BENCHMARK_F(ClustersForReduceFixture, Reduce3x3)(benchmark::State &st) {
for (auto _ : st) {
// This code gets timed
benchmark::DoNotOptimize(
reduce_to_3x3<int, 5, 5, int16_t>(cluster_5x5));
}
}
BENCHMARK_F(ClustersForReduceFixture, SpecificReduce3x3)(benchmark::State &st) {
for (auto _ : st) {
// This code gets timed
benchmark::DoNotOptimize(reduce_to_3x3<int>(cluster_5x5));
}
}

View File

@@ -3,14 +3,3 @@ python:
- 3.12 - 3.12
- 3.13 - 3.13
c_compiler:
- gcc # [linux]
c_stdlib:
- sysroot # [linux]
cxx_compiler:
- gxx # [linux]
c_stdlib_version: # [linux]
- 2.17 # [linux]

View File

@@ -16,8 +16,6 @@ build:
requirements: requirements:
build: build:
- {{ compiler('c') }}
- {{ stdlib("c") }}
- {{ compiler('cxx') }} - {{ compiler('cxx') }}
- cmake - cmake
- ninja - ninja

View File

@@ -12,11 +12,4 @@ ClusterVector
:members: :members:
:undoc-members: :undoc-members:
:private-members: :private-members:
**Free Functions:**
.. doxygenfunction:: aare::reduce_to_3x3(const ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>>&)
.. doxygenfunction:: aare::reduce_to_2x2(const ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>>&)

View File

@@ -33,17 +33,4 @@ C++ functions that support the ClusterVector or to view it as a numpy array.
:members: :members:
:undoc-members: :undoc-members:
:show-inheritance: :show-inheritance:
:inherited-members: :inherited-members:
**Free Functions:**
.. autofunction:: reduce_to_3x3
:noindex:
Reduce a single Cluster to 3x3 by taking the 3x3 subcluster with highest photon energy.
.. autofunction:: reduce_to_2x2
:noindex:
Reduce a single Cluster to 2x2 by taking the 2x2 subcluster with highest photon energy.

View File

@@ -0,0 +1,59 @@
#pragma once
#include <atomic>
#include <cstdint>
#include <memory>
#include <thread>
#include <vector>
#include <condition_variable>
#include <mutex>
#include <deque>
#include "aare/ClusterFinder.hpp"
#include "aare/NDArray.hpp"
#include "aare/logger.hpp"
template <typename T>
class BlockingQueue {
std::mutex mtx;
std::condition_variable cv_push, cv_pop;
std::deque<T> queue;
size_t max_size;
bool closed = false;
public:
BlockingQueue(size_t capacity) : max_size(capacity) {}
void push(T item) {
std::unique_lock lock(mtx);
cv_push.wait(lock, [this] { return queue.size() < max_size; });
queue.push_back(std::move(item));
cv_pop.notify_one();
}
T pop() {
std::unique_lock lock(mtx);
cv_pop.wait(lock, [this] { return !queue.empty(); });
T item = std::move(queue.front());
queue.pop_front();
cv_push.notify_one();
return item;
}
void close() {
std::lock_guard lock(mtx);
closed = true;
cv_pop.notify_all();
cv_push.notify_all();
}
bool empty() {
std::lock_guard lock(mtx);
return queue.empty();
}
void write(T item) {push(item);}
bool isEmpty() {return empty();}
T frontPtr() {return pop();}
T popFront() {return pop();}
};
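As written, push() blocks while the queue is full and pop() waits only for the queue to become non-empty (the closed flag is not part of the pop() predicate), so the surrounding pipeline unblocks consumers at shutdown with sentinel ("poison") items rather than close(). A minimal usage sketch with an int payload and -1 as the sentinel, assuming this header is included:

```cpp
#include <iostream>
#include <thread>

int main() {
    BlockingQueue<int> q(4); // capacity 4: push() blocks while the queue is full

    std::thread consumer([&q] {
        while (true) {
            int v = q.pop();     // blocks until an item is available
            if (v == -1)         // sentinel ("poison") item ends the loop
                break;
            std::cout << "got " << v << '\n';
        }
    });

    for (int i = 0; i < 8; ++i)
        q.push(i);               // blocks whenever the consumer falls behind
    q.push(-1);                  // unblock and terminate the consumer

    consumer.join();
    return 0;
}
```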

View File

@@ -28,7 +28,7 @@ enum class pixel : int {
template <typename T> struct Eta2 { template <typename T> struct Eta2 {
double x; double x;
double y; double y;
int c{0}; int c;
T sum; T sum;
}; };
@@ -70,8 +70,6 @@ calculate_eta2(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cl) {
size_t index_bottom_left_max_2x2_subcluster = size_t index_bottom_left_max_2x2_subcluster =
(int(c / (ClusterSizeX - 1))) * ClusterSizeX + c % (ClusterSizeX - 1); (int(c / (ClusterSizeX - 1))) * ClusterSizeX + c % (ClusterSizeX - 1);
// calculate direction of gradient
// check that cluster center is in max subcluster // check that cluster center is in max subcluster
if (cluster_center_index != index_bottom_left_max_2x2_subcluster && if (cluster_center_index != index_bottom_left_max_2x2_subcluster &&
cluster_center_index != index_bottom_left_max_2x2_subcluster + 1 && cluster_center_index != index_bottom_left_max_2x2_subcluster + 1 &&
@@ -130,15 +128,12 @@ Eta2<T> calculate_eta2(const Cluster<T, 2, 2, int16_t> &cl) {
Eta2<T> eta{}; Eta2<T> eta{};
if ((cl.data[0] + cl.data[1]) != 0) if ((cl.data[0] + cl.data[1]) != 0)
eta.x = static_cast<double>(cl.data[1]) / eta.x = static_cast<double>(cl.data[1]) / (cl.data[0] + cl.data[1]);
(cl.data[0] + cl.data[1]); // between (0,1) the closer to zero
// left value probably larger
if ((cl.data[0] + cl.data[2]) != 0) if ((cl.data[0] + cl.data[2]) != 0)
eta.y = static_cast<double>(cl.data[2]) / eta.y = static_cast<double>(cl.data[2]) / (cl.data[0] + cl.data[2]);
(cl.data[0] + cl.data[2]); // between (0,1) the closer to zero
// bottom value probably larger
eta.sum = cl.sum(); eta.sum = cl.sum();
eta.c = static_cast<int>(corner::cBottomLeft); // TODO! This is not correct,
// but need to put something
return eta; return eta;
} }
@@ -155,11 +150,13 @@ template <typename T> Eta2<T> calculate_eta3(const Cluster<T, 3, 3> &cl) {
eta.sum = sum; eta.sum = sum;
eta.c = corner::cBottomLeft;
if ((cl.data[3] + cl.data[4] + cl.data[5]) != 0) if ((cl.data[3] + cl.data[4] + cl.data[5]) != 0)
eta.x = static_cast<double>(-cl.data[3] + cl.data[3 + 2]) / eta.x = static_cast<double>(-cl.data[3] + cl.data[3 + 2]) /
(cl.data[3] + cl.data[4] + cl.data[5]); // (-1,1) (cl.data[3] + cl.data[4] + cl.data[5]);
if ((cl.data[1] + cl.data[4] + cl.data[7]) != 0) if ((cl.data[1] + cl.data[4] + cl.data[7]) != 0)
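As a quick numeric check of the 2x2 specialization above (plain arithmetic with made-up values, not the aare types):

```cpp
#include <cstdio>

// eta for a 2x2 cluster, following the formulas in the hunk above:
//   eta.x = data[1] / (data[0] + data[1]),  eta.y = data[2] / (data[0] + data[2])
int main() {
    double data[4] = {10, 30, 20, 40};            // made-up charge values
    double eta_x = data[1] / (data[0] + data[1]); // 30 / 40 = 0.75
    double eta_y = data[2] / (data[0] + data[2]); // 20 / 30 ~= 0.667
    std::printf("eta.x = %.3f, eta.y = %.3f\n", eta_x, eta_y);
    return 0;
}
```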

include/aare/Cluster.hpp (158 changed lines; Executable file → Normal file)
View File

@@ -8,7 +8,6 @@
#pragma once #pragma once
#include "logger.hpp"
#include <algorithm> #include <algorithm>
#include <array> #include <array>
#include <cstdint> #include <cstdint>
@@ -75,163 +74,6 @@ struct Cluster {
} }
}; };
/**
* @brief Reduce a cluster to a 2x2 cluster by selecting the 2x2 block with the
* highest sum.
* @param c Cluster to reduce
* @return reduced cluster
*/
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
typename CoordType = int16_t>
Cluster<T, 2, 2, CoordType>
reduce_to_2x2(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &c) {
static_assert(ClusterSizeX >= 2 && ClusterSizeY >= 2,
"Cluster sizes must be at least 2x2 for reduction to 2x2");
// TODO maybe add sanity check and check that center is in max subcluster
Cluster<T, 2, 2, CoordType> result;
auto [sum, index] = c.max_sum_2x2();
int16_t cluster_center_index =
(ClusterSizeX / 2) + (ClusterSizeY / 2) * ClusterSizeX;
int16_t index_bottom_left_max_2x2_subcluster =
(int(index / (ClusterSizeX - 1))) * ClusterSizeX +
index % (ClusterSizeX - 1);
result.x =
c.x + (index_bottom_left_max_2x2_subcluster - cluster_center_index) %
ClusterSizeX;
result.y =
c.y - (index_bottom_left_max_2x2_subcluster - cluster_center_index) /
ClusterSizeX;
result.data = {
c.data[index_bottom_left_max_2x2_subcluster],
c.data[index_bottom_left_max_2x2_subcluster + 1],
c.data[index_bottom_left_max_2x2_subcluster + ClusterSizeX],
c.data[index_bottom_left_max_2x2_subcluster + ClusterSizeX + 1]};
return result;
}
template <typename T>
Cluster<T, 2, 2, int16_t> reduce_to_2x2(const Cluster<T, 3, 3, int16_t> &c) {
Cluster<T, 2, 2, int16_t> result;
auto [s, i] = c.max_sum_2x2();
switch (i) {
case 0:
result.x = c.x - 1;
result.y = c.y + 1;
result.data = {c.data[0], c.data[1], c.data[3], c.data[4]};
break;
case 1:
result.x = c.x;
result.y = c.y + 1;
result.data = {c.data[1], c.data[2], c.data[4], c.data[5]};
break;
case 2:
result.x = c.x - 1;
result.y = c.y;
result.data = {c.data[3], c.data[4], c.data[6], c.data[7]};
break;
case 3:
result.x = c.x;
result.y = c.y;
result.data = {c.data[4], c.data[5], c.data[7], c.data[8]};
break;
}
return result;
}
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
typename CoordType = int16_t>
inline std::pair<T, uint16_t>
max_3x3_sum(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cluster) {
if constexpr (ClusterSizeX == 3 && ClusterSizeY == 3) {
return std::make_pair(cluster.sum(), 0);
} else {
size_t index = 0;
T max_3x3_subcluster_sum = 0;
for (size_t i = 0; i < ClusterSizeY - 2; ++i) {
for (size_t j = 0; j < ClusterSizeX - 2; ++j) {
T sum = cluster.data[i * ClusterSizeX + j] +
cluster.data[i * ClusterSizeX + j + 1] +
cluster.data[i * ClusterSizeX + j + 2] +
cluster.data[(i + 1) * ClusterSizeX + j] +
cluster.data[(i + 1) * ClusterSizeX + j + 1] +
cluster.data[(i + 1) * ClusterSizeX + j + 2] +
cluster.data[(i + 2) * ClusterSizeX + j] +
cluster.data[(i + 2) * ClusterSizeX + j + 1] +
cluster.data[(i + 2) * ClusterSizeX + j + 2];
if (sum > max_3x3_subcluster_sum) {
max_3x3_subcluster_sum = sum;
index = i * (ClusterSizeX - 2) + j;
}
}
}
return std::make_pair(max_3x3_subcluster_sum, index);
}
}
/**
* @brief Reduce a cluster to a 3x3 cluster by selecting the 3x3 block with the
* highest sum.
* @param c Cluster to reduce
* @return reduced cluster
*/
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
typename CoordType = int16_t>
Cluster<T, 3, 3, CoordType>
reduce_to_3x3(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &c) {
static_assert(ClusterSizeX >= 3 && ClusterSizeY >= 3,
"Cluster sizes must be at least 3x3 for reduction to 3x3");
Cluster<T, 3, 3, CoordType> result;
// TODO maybe add sanity check and check that center is in max subcluster
auto [sum, index] = max_3x3_sum(c);
int16_t cluster_center_index =
(ClusterSizeX / 2) + (ClusterSizeY / 2) * ClusterSizeX;
int16_t index_center_max_3x3_subcluster =
(int(index / (ClusterSizeX - 2))) * ClusterSizeX + ClusterSizeX +
index % (ClusterSizeX - 2) + 1;
int16_t index_3x3_subcluster_cluster_center =
int((cluster_center_index - 1 - ClusterSizeX) / ClusterSizeX) *
(ClusterSizeX - 2) +
(cluster_center_index - 1 - ClusterSizeX) % ClusterSizeX;
result.x =
c.x + (index % (ClusterSizeX - 2) -
(index_3x3_subcluster_cluster_center % (ClusterSizeX - 2)));
result.y =
c.y - (index / (ClusterSizeX - 2) -
(index_3x3_subcluster_cluster_center / (ClusterSizeX - 2)));
result.data = {c.data[index_center_max_3x3_subcluster - ClusterSizeX - 1],
c.data[index_center_max_3x3_subcluster - ClusterSizeX],
c.data[index_center_max_3x3_subcluster - ClusterSizeX + 1],
c.data[index_center_max_3x3_subcluster - 1],
c.data[index_center_max_3x3_subcluster],
c.data[index_center_max_3x3_subcluster + 1],
c.data[index_center_max_3x3_subcluster + ClusterSizeX - 1],
c.data[index_center_max_3x3_subcluster + ClusterSizeX],
c.data[index_center_max_3x3_subcluster + ClusterSizeX + 1]};
return result;
}
// Type Traits for is_cluster_type // Type Traits for is_cluster_type
template <typename T> template <typename T>
struct is_cluster : std::false_type {}; // Default case: Not a Cluster struct is_cluster : std::false_type {}; // Default case: Not a Cluster
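For readers of the left-hand (removed) side: the behaviour of reduce_to_2x2 can be read off one of the test cases that this compare also deletes (see the Cluster test diff further down), restated here as a worked example:

```cpp
// Worked example of the removed reduce_to_2x2, taken from a deleted test case:
//
//   input:  Cluster<int, 3, 3>{x = 5, y = 5, data = {1, 1, 1,
//                                                    2, 3, 1,
//                                                    2, 2, 1}}
//
//   2x2 block sums: {1,1,2,3} = 7, {1,1,3,1} = 6, {2,3,2,2} = 9, {3,1,2,1} = 7
//   The block covering data[3], data[4], data[6], data[7] has the highest sum,
//   so the coordinates shift by one pixel in x and the result is
//
//   output: Cluster<int, 2, 2>{x = 4, y = 5, data = {2, 3, 2, 2}}
```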

View File

@@ -5,13 +5,15 @@
#include "aare/ClusterFinderMT.hpp" #include "aare/ClusterFinderMT.hpp"
#include "aare/ClusterVector.hpp" #include "aare/ClusterVector.hpp"
#include "aare/ProducerConsumerQueue.hpp" #include "aare/ProducerConsumerQueue.hpp"
#include "aare/BlockingQueue.hpp"
namespace aare { namespace aare {
template <typename ClusterType, template <typename ClusterType,
typename = std::enable_if_t<is_cluster_v<ClusterType>>> typename = std::enable_if_t<is_cluster_v<ClusterType>>>
class ClusterCollector { class ClusterCollector {
ProducerConsumerQueue<ClusterVector<ClusterType>> *m_source; // ProducerConsumerQueue<ClusterVector<ClusterType>> *m_source;
BlockingQueue<ClusterVector<ClusterType>> *m_source;
std::atomic<bool> m_stop_requested{false}; std::atomic<bool> m_stop_requested{false};
std::atomic<bool> m_stopped{true}; std::atomic<bool> m_stopped{true};
std::chrono::milliseconds m_default_wait{1}; std::chrono::milliseconds m_default_wait{1};
@@ -19,19 +21,47 @@ class ClusterCollector {
std::vector<ClusterVector<ClusterType>> m_clusters; std::vector<ClusterVector<ClusterType>> m_clusters;
void process() { void process() {
// m_stopped = false;
// fmt::print("ClusterCollector started\n");
// // while (!m_stop_requested || !m_source->isEmpty()) {
// while (true) {
// if (clusters.frame_number() == -1)
// break;
// ClusterVector<ClusterType> clusters = m_source->pop();
// m_clusters.push_back(std::move(clusters));
// // if (ClusterVector<ClusterType> *clusters = m_source->frontPtr();
// // clusters != nullptr) {
// // m_clusters.push_back(std::move(*clusters));
// // m_source->popFront();
// // } else {
// // std::this_thread::sleep_for(m_default_wait);
// // }
// }
// fmt::print("ClusterCollector stopped\n");
// m_stopped = true;
m_stopped = false; m_stopped = false;
fmt::print("ClusterCollector started\n"); fmt::print("ClusterCollector started\n");
while (!m_stop_requested || !m_source->isEmpty()) {
if (ClusterVector<ClusterType> *clusters = m_source->frontPtr(); while (true) {
clusters != nullptr) { // pop blocks until there is data
m_clusters.push_back(std::move(*clusters)); ClusterVector<ClusterType> clusters = m_source->pop();
m_source->popFront();
} else { // POISON DETECTION
std::this_thread::sleep_for(m_default_wait); if (clusters.frame_number() == -1) {
fmt::print("ClusterCollector received poison frame, stopping\n");
break; // exit loop cleanly
} }
// NORMAL DATA: store or process
m_clusters.push_back(std::move(clusters));
} }
fmt::print("ClusterCollector stopped\n"); fmt::print("ClusterCollector stopped\n");
m_stopped = true; m_stopped = true;
} }
public: public:

View File

@@ -6,13 +6,15 @@
#include "aare/ClusterFinderMT.hpp" #include "aare/ClusterFinderMT.hpp"
#include "aare/ClusterVector.hpp" #include "aare/ClusterVector.hpp"
#include "aare/ProducerConsumerQueue.hpp" #include "aare/ProducerConsumerQueue.hpp"
#include "aare/BlockingQueue.hpp"
namespace aare { namespace aare {
template <typename ClusterType, template <typename ClusterType,
typename = std::enable_if_t<is_cluster_v<ClusterType>>> typename = std::enable_if_t<is_cluster_v<ClusterType>>>
class ClusterFileSink { class ClusterFileSink {
ProducerConsumerQueue<ClusterVector<ClusterType>> *m_source; // ProducerConsumerQueue<ClusterVector<ClusterType>> *m_source;
BlockingQueue<ClusterVector<ClusterType>> *m_source;
std::atomic<bool> m_stop_requested{false}; std::atomic<bool> m_stop_requested{false};
std::atomic<bool> m_stopped{true}; std::atomic<bool> m_stopped{true};
std::chrono::milliseconds m_default_wait{1}; std::chrono::milliseconds m_default_wait{1};
@@ -20,29 +22,60 @@ class ClusterFileSink {
std::ofstream m_file; std::ofstream m_file;
void process() { void process() {
m_stopped = false; // m_stopped = false;
// LOG(logDEBUG) << "ClusterFileSink started";
// while (!m_stop_requested || !m_source->isEmpty()) {
// // if (ClusterVector<ClusterType> *clusters = m_source->pop(); m_source->frontPtr();
// // clusters != nullptr) {
// {
// ClusterVector<ClusterType> clusters = m_source->pop();
// // Write clusters to file
// int32_t frame_number =
// clusters->frame_number(); // TODO! Should we store frame
// // number already as int?
// uint32_t num_clusters = clusters.size();
// if (frame_number >= 9910 && frame_number <= 9930)
// std::cout << "process: frame_number = " << frame_number << std::endl;
// m_file.write(reinterpret_cast<const char *>(&frame_number),
// sizeof(frame_number));
// m_file.write(reinterpret_cast<const char *>(&num_clusters),
// sizeof(num_clusters));
// m_file.write(reinterpret_cast<const char *>(clusters.data()),
// clusters.size() * clusters.item_size());
// m_source->popFront();
// }
// // else {
// // std::this_thread::sleep_for(m_default_wait);
// // }
// }
// LOG(logDEBUG) << "ClusterFileSink stopped";
// m_stopped = true;
LOG(logDEBUG) << "ClusterFileSink started"; LOG(logDEBUG) << "ClusterFileSink started";
while (!m_stop_requested || !m_source->isEmpty()) {
if (ClusterVector<ClusterType> *clusters = m_source->frontPtr(); while (true) {
clusters != nullptr) { ClusterVector<ClusterType> clusters = m_source->pop(); // blocks
// Write clusters to file
int32_t frame_number = // POISON PILL CHECK
clusters->frame_number(); // TODO! Should we store frame if (clusters.frame_number() == -1) {
// number already as int? LOG(logDEBUG) << "ClusterFileSink received poison pill";
uint32_t num_clusters = clusters->size(); break;
m_file.write(reinterpret_cast<const char *>(&frame_number),
sizeof(frame_number));
m_file.write(reinterpret_cast<const char *>(&num_clusters),
sizeof(num_clusters));
m_file.write(reinterpret_cast<const char *>(clusters->data()),
clusters->size() * clusters->item_size());
m_source->popFront();
} else {
std::this_thread::sleep_for(m_default_wait);
} }
int32_t frame_number = clusters.frame_number();
uint32_t num_clusters = clusters.size();
m_file.write(reinterpret_cast<const char*>(&frame_number),
sizeof(frame_number));
m_file.write(reinterpret_cast<const char*>(&num_clusters),
sizeof(num_clusters));
m_file.write(reinterpret_cast<const char*>(clusters.data()),
clusters.size() * clusters.item_size());
} }
LOG(logDEBUG) << "ClusterFileSink stopped"; LOG(logDEBUG) << "ClusterFileSink stopped";
m_stopped = true;
} }
public: public:

View File

@@ -19,9 +19,11 @@ class ClusterFinder {
const PEDESTAL_TYPE c3; const PEDESTAL_TYPE c3;
Pedestal<PEDESTAL_TYPE> m_pedestal; Pedestal<PEDESTAL_TYPE> m_pedestal;
ClusterVector<ClusterType> m_clusters; ClusterVector<ClusterType> m_clusters;
const uint32_t ClusterSizeX;
const uint32_t ClusterSizeY;
static const uint8_t ClusterSizeX = ClusterType::cluster_size_x; static const uint8_t SavedClusterSizeX = ClusterType::cluster_size_x;
static const uint8_t ClusterSizeY = ClusterType::cluster_size_y; static const uint8_t SavedClusterSizeY = ClusterType::cluster_size_y;
using CT = typename ClusterType::value_type; using CT = typename ClusterType::value_type;
public: public:
@@ -34,10 +36,12 @@ class ClusterFinder {
* *
*/ */
ClusterFinder(Shape<2> image_size, PEDESTAL_TYPE nSigma = 5.0, ClusterFinder(Shape<2> image_size, PEDESTAL_TYPE nSigma = 5.0,
size_t capacity = 1000000) size_t capacity = 1000000,
uint32_t cluster_size_x = 3, uint32_t cluster_size_y = 3)
: m_image_size(image_size), m_nSigma(nSigma), : m_image_size(image_size), m_nSigma(nSigma),
c2(sqrt((ClusterSizeY + 1) / 2 * (ClusterSizeX + 1) / 2)), c2(sqrt((cluster_size_y + 1) / 2 * (cluster_size_x + 1) / 2)),
c3(sqrt(ClusterSizeX * ClusterSizeY)), c3(sqrt(cluster_size_x * cluster_size_y)),
ClusterSizeX(cluster_size_x), ClusterSizeY(cluster_size_y),
m_pedestal(image_size[0], image_size[1]), m_clusters(capacity) { m_pedestal(image_size[0], image_size[1]), m_clusters(capacity) {
LOG(logDEBUG) << "ClusterFinder: " LOG(logDEBUG) << "ClusterFinder: "
<< "image_size: " << image_size[0] << "x" << image_size[1] << "image_size: " << image_size[0] << "x" << image_size[1]
@@ -74,12 +78,20 @@ class ClusterFinder {
// // 4,4 -> +/- 2 // // 4,4 -> +/- 2
int dy = ClusterSizeY / 2; int dy = ClusterSizeY / 2;
int dx = ClusterSizeX / 2; int dx = ClusterSizeX / 2;
int dy2 = SavedClusterSizeY / 2;
int dx2 = SavedClusterSizeX / 2;
int has_center_pixel_x = int has_center_pixel_x =
ClusterSizeX % ClusterSizeX %
2; // for even sized clusters there is no proper cluster center and 2; // for even sized clusters there is no proper cluster center and
// even amount of pixels around the center // even amount of pixels around the center
int has_center_pixel_y = ClusterSizeY % 2; int has_center_pixel_y = ClusterSizeY % 2;
// if (frame_number >= 8000 && frame_number <= 9000)
// // std::cout << "find_clusters: frame_number = " << frame_number << std::endl;
// std::cout << frame_number << std::endl;
m_clusters.set_frame_number(frame_number); m_clusters.set_frame_number(frame_number);
for (int iy = 0; iy < frame.shape(0); iy++) { for (int iy = 0; iy < frame.shape(0); iy++) {
for (int ix = 0; ix < frame.shape(1); ix++) { for (int ix = 0; ix < frame.shape(1); ix++) {
@@ -135,16 +147,14 @@ class ClusterFinder {
// It's worth redoing the loop since most of the time we // It's worth redoing the loop since most of the time we
// don't have a photon // don't have a photon
int i = 0; int i = 0;
for (int ir = -dy; ir < dy + has_center_pixel_y; ir++) { for (int ir = -dy2; ir < dy2 + has_center_pixel_y; ir++) {
for (int ic = -dx; ic < dx + has_center_pixel_y; ic++) { for (int ic = -dx2; ic < dx2 + has_center_pixel_y; ic++) {
if (ix + ic >= 0 && ix + ic < frame.shape(1) && if (ix + ic >= 0 && ix + ic < frame.shape(1) &&
iy + ir >= 0 && iy + ir < frame.shape(0)) { iy + ir >= 0 && iy + ir < frame.shape(0)) {
CT tmp =
static_cast<CT>(frame(iy + ir, ix + ic)) - CT tmp = static_cast<CT>(frame(iy + ir, ix + ic)) - static_cast<CT>(m_pedestal.mean(iy + ir, ix + ic));
static_cast<CT>( cluster.data[i] = tmp; // Watch for out of bounds access
m_pedestal.mean(iy + ir, ix + ic));
cluster.data[i] =
tmp; // Watch for out of bounds access
} }
i++; i++;
} }
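To make the new split between compile-time and run-time sizes concrete: the ClusterFinder template parameter still fixes the size of the clusters that are saved, while the new constructor arguments set the (possibly smaller) neighbourhood that is actually searched, which is what the second commit message refers to. A rough usage sketch; the exact template arguments and Shape construction are assumptions, since they are not fully shown in this diff:

```cpp
// Hypothetical usage sketch; template arguments and Shape init are assumptions.
using SavedCluster = aare::Cluster<int32_t, 5, 5>;       // 5x5 clusters are written out
aare::ClusterFinder<SavedCluster, uint16_t, double> finder(
    {400, 400},    // image_size
    5.0,           // n_sigma
    1'000'000,     // capacity
    3, 3);         // cluster_size_x / cluster_size_y: the 3x3 neighbourhood searched
```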

View File

@@ -4,10 +4,14 @@
#include <memory> #include <memory>
#include <thread> #include <thread>
#include <vector> #include <vector>
#include <condition_variable>
#include <mutex>
#include <deque>
#include "aare/ClusterFinder.hpp" #include "aare/ClusterFinder.hpp"
#include "aare/NDArray.hpp" #include "aare/NDArray.hpp"
#include "aare/ProducerConsumerQueue.hpp" #include "aare/ProducerConsumerQueue.hpp"
#include "aare/BlockingQueue.hpp"
#include "aare/logger.hpp" #include "aare/logger.hpp"
namespace aare { namespace aare {
@@ -23,6 +27,14 @@ struct FrameWrapper {
NDArray<uint16_t, 2> data; NDArray<uint16_t, 2> data;
}; };
static FrameWrapper make_poison_frame() {
return FrameWrapper{FrameType::DATA, UINT64_MAX, NDArray<uint16_t,2>()};
}
static bool is_poison(const FrameWrapper& f) {
return f.frame_number == UINT64_MAX;
}
/** /**
* @brief ClusterFinderMT is a multi-threaded version of ClusterFinder. It uses * @brief ClusterFinderMT is a multi-threaded version of ClusterFinder. It uses
* a producer-consumer queue to distribute the frames to the threads. The * a producer-consumer queue to distribute the frames to the threads. The
@@ -37,11 +49,15 @@ class ClusterFinderMT {
protected: protected:
using CT = typename ClusterType::value_type; using CT = typename ClusterType::value_type;
size_t m_current_thread{0}; // size_t m_current_thread{0};
std::atomic<size_t> m_current_thread{0};
size_t m_n_threads{0}; size_t m_n_threads{0};
using Finder = ClusterFinder<ClusterType, FRAME_TYPE, PEDESTAL_TYPE>; using Finder = ClusterFinder<ClusterType, FRAME_TYPE, PEDESTAL_TYPE>;
using InputQueue = ProducerConsumerQueue<FrameWrapper>; // using InputQueue = ProducerConsumerQueue<FrameWrapper>;
using OutputQueue = ProducerConsumerQueue<ClusterVector<ClusterType>>; // using OutputQueue = ProducerConsumerQueue<ClusterVector<ClusterType>>;
using InputQueue = BlockingQueue<FrameWrapper>;
using OutputQueue = BlockingQueue<ClusterVector<ClusterType>>;
std::vector<std::unique_ptr<InputQueue>> m_input_queues; std::vector<std::unique_ptr<InputQueue>> m_input_queues;
std::vector<std::unique_ptr<OutputQueue>> m_output_queues; std::vector<std::unique_ptr<OutputQueue>> m_output_queues;
@@ -50,41 +66,72 @@ class ClusterFinderMT {
std::vector<std::unique_ptr<Finder>> m_cluster_finders; std::vector<std::unique_ptr<Finder>> m_cluster_finders;
std::vector<std::thread> m_threads; std::vector<std::thread> m_threads;
std::thread m_collect_thread; std::thread m_collect_thread;
std::chrono::milliseconds m_default_wait{1}; std::chrono::milliseconds m_default_wait{1};
private: private:
std::atomic<bool> m_stop_requested{false}; std::atomic<bool> m_stop_requested{false};
std::atomic<bool> m_processing_threads_stopped{true}; std::atomic<bool> m_processing_threads_stopped{true};
static ClusterVector<ClusterType> make_poison_cluster() {
ClusterVector<ClusterType> v;
v.set_frame_number(-1);
return v;
}
/** /**
* @brief Function called by the processing threads. It reads the frames * @brief Function called by the processing threads. It reads the frames
* from the input queue and processes them. * from the input queue and processes them.
*/ */
void process(int thread_id) { void process(int thread_id) {
// auto cf = m_cluster_finders[thread_id].get();
// auto q = m_input_queues[thread_id].get();
// bool realloc_same_capacity = true;
// while (!m_stop_requested || !q->isEmpty()) {
// if (FrameWrapper *frame = q->frontPtr(); frame != nullptr) {
// switch (frame->type) {
// case FrameType::DATA:
// cf->find_clusters(frame->data.view(), frame->frame_number);
// m_output_queues[thread_id]->write(
// cf->steal_clusters(realloc_same_capacity));
// break;
// case FrameType::PEDESTAL:
// m_cluster_finders[thread_id]->push_pedestal_frame(
// frame->data.view());
// break;
// }
// // frame is processed now discard it
// m_input_queues[thread_id]->popFront();
// } else {
// std::this_thread::sleep_for(m_default_wait);
// }
// }
auto cf = m_cluster_finders[thread_id].get(); auto cf = m_cluster_finders[thread_id].get();
auto q = m_input_queues[thread_id].get(); auto q = m_input_queues[thread_id].get();
bool realloc_same_capacity = true;
while (!m_stop_requested || !q->isEmpty()) { while (true) {
if (FrameWrapper *frame = q->frontPtr(); frame != nullptr) { FrameWrapper frame = q->pop(); // blocks
switch (frame->type) { if (is_poison(frame))
break;
switch (frame.type) {
case FrameType::DATA: case FrameType::DATA:
cf->find_clusters(frame->data.view(), frame->frame_number); cf->find_clusters(frame.data.view(), frame.frame_number);
m_output_queues[thread_id]->write( m_output_queues[thread_id]->push(cf->steal_clusters());
cf->steal_clusters(realloc_same_capacity));
break; break;
case FrameType::PEDESTAL: case FrameType::PEDESTAL:
m_cluster_finders[thread_id]->push_pedestal_frame( cf->push_pedestal_frame(frame.data.view());
frame->data.view());
break; break;
}
// frame is processed now discard it
m_input_queues[thread_id]->popFront();
} else {
std::this_thread::sleep_for(m_default_wait);
} }
} }
} }
@@ -94,20 +141,66 @@ class ClusterFinderMT {
* the sink * the sink
*/ */
void collect() { void collect() {
bool empty = true; // std::ofstream frame_log("/mnt/datapool/JMulvey/Data_Analysis/aare_testing/Read_Frame_Bug/test2.txt");
while (!m_stop_requested || !empty || !m_processing_threads_stopped) {
empty = true;
for (auto &queue : m_output_queues) {
if (!queue->isEmpty()) {
while (!m_sink.write(std::move(*queue->frontPtr()))) { // bool empty = true;
std::this_thread::sleep_for(m_default_wait); // while (!m_stop_requested || !all_output_queues_empty() || !all_input_queues_empty()) {
// // while (!m_stop_requested || !empty || !m_processing_threads_stopped) {
// empty = true;
// for (auto &queue : m_output_queues) {
// if (!queue->isEmpty()) {
// // auto item = std::move(*queue->frontPtr()); //For Debug
// // while (!m_sink.write(item)) {
// // std::this_thread::sleep_for(m_default_wait);
// // }
// // frame_log << item.frame_number() << '\n'; //For Debug
// // queue->popFront();
// // empty = false;
// auto& item = *queue->frontPtr(); // use reference
// while (!m_sink.write(std::move(item))) {
// std::this_thread::sleep_for(m_default_wait);
// }
// frame_log << item.frame_number() << '\n'; // log frame number
// queue->popFront();
// empty = false;
// }
// }
// }
// frame_log.close();
std::ofstream frame_log("/mnt/datapool/JMulvey/Data_Analysis/aare_testing/Read_Frame_Bug/test2.txt");
size_t poison_count = 0;
while (true) {
for (auto& queue : m_output_queues) {
auto item = queue->pop(); // BLOCKS
if (item.frame_number() == -1) {
poison_count++;
if (poison_count == m_n_threads) {
// all workers finished
m_sink.push(make_poison_cluster());
return;
} }
queue->popFront(); continue;
empty = false;
} }
m_sink.push(std::move(item));
frame_log << item.frame_number() << '\n';
} }
} }
frame_log.close();
} }
public: public:
@@ -121,7 +214,8 @@ class ClusterFinderMT {
* @param n_threads number of threads to use * @param n_threads number of threads to use
*/ */
ClusterFinderMT(Shape<2> image_size, PEDESTAL_TYPE nSigma = 5.0, ClusterFinderMT(Shape<2> image_size, PEDESTAL_TYPE nSigma = 5.0,
size_t capacity = 2000, size_t n_threads = 3) size_t capacity = 2000, size_t n_threads = 3,
uint32_t cluster_size_x = 3, uint32_t cluster_size_y = 3)
: m_n_threads(n_threads) { : m_n_threads(n_threads) {
LOG(logDEBUG1) << "ClusterFinderMT: " LOG(logDEBUG1) << "ClusterFinderMT: "
@@ -134,7 +228,7 @@ class ClusterFinderMT {
m_cluster_finders.push_back( m_cluster_finders.push_back(
std::make_unique< std::make_unique<
ClusterFinder<ClusterType, FRAME_TYPE, PEDESTAL_TYPE>>( ClusterFinder<ClusterType, FRAME_TYPE, PEDESTAL_TYPE>>(
image_size, nSigma, capacity)); image_size, nSigma, capacity, cluster_size_x, cluster_size_y));
} }
for (size_t i = 0; i < n_threads; i++) { for (size_t i = 0; i < n_threads; i++) {
m_input_queues.emplace_back(std::make_unique<InputQueue>(200)); m_input_queues.emplace_back(std::make_unique<InputQueue>(200));
@@ -149,7 +243,8 @@ class ClusterFinderMT {
* @warning You need to empty this queue otherwise the cluster finder will * @warning You need to empty this queue otherwise the cluster finder will
* wait forever * wait forever
*/ */
ProducerConsumerQueue<ClusterVector<ClusterType>> *sink() { BlockingQueue<ClusterVector<ClusterType>> *sink() {
//ProducerConsumerQueue<ClusterVector<ClusterType>> *sink() {
return &m_sink; return &m_sink;
} }
@@ -172,14 +267,31 @@ class ClusterFinderMT {
* @brief Stop all processing threads * @brief Stop all processing threads
*/ */
void stop() { void stop() {
m_stop_requested = true; // m_stop_requested = true;
for (auto &thread : m_threads) { // for (auto &thread : m_threads) {
thread.join(); // thread.join();
} // }
// m_threads.clear();
// m_processing_threads_stopped = true;
// m_collect_thread.join();
// 1. Send poison to ALL worker input queues
for (auto& q : m_input_queues)
q->push(make_poison_frame());
// 2. Wait for worker threads
for (auto& t : m_threads)
t.join();
m_threads.clear(); m_threads.clear();
m_processing_threads_stopped = true; // 3. Send poison clusters from workers to collector
for (auto& q : m_output_queues)
q->push(make_poison_cluster());
// 4. Wait for collector
m_collect_thread.join(); m_collect_thread.join();
} }
@@ -211,9 +323,10 @@ class ClusterFinderMT {
NDArray(frame)}; // TODO! copies the data! NDArray(frame)}; // TODO! copies the data!
for (auto &queue : m_input_queues) { for (auto &queue : m_input_queues) {
while (!queue->write(fw)) { queue->push(fw);
std::this_thread::sleep_for(m_default_wait); // while (!queue->write(fw)) {
} // std::this_thread::sleep_for(m_default_wait);
// }
} }
} }
@@ -223,12 +336,18 @@ class ClusterFinderMT {
* @note Spin locks with a default wait if the queue is full. * @note Spin locks with a default wait if the queue is full.
*/ */
void find_clusters(NDView<FRAME_TYPE, 2> frame, uint64_t frame_number = 0) { void find_clusters(NDView<FRAME_TYPE, 2> frame, uint64_t frame_number = 0) {
FrameWrapper fw{FrameType::DATA, frame_number, // FrameWrapper fw{FrameType::DATA, frame_number,
NDArray(frame)}; // TODO! copies the data! // NDArray(frame)}; // TODO! copies the data!
while (!m_input_queues[m_current_thread % m_n_threads]->write(fw)) { // size_t thread_idx = m_current_thread.fetch_add(1) % m_n_threads;
std::this_thread::sleep_for(m_default_wait); // while (!m_input_queues[thread_idx]->write(fw)) {
} // // while (!m_input_queues[m_current_thread % m_n_threads]->write(fw)) {
m_current_thread++; // std::this_thread::sleep_for(m_default_wait);
// }
// // m_current_thread++;
FrameWrapper fw{FrameType::DATA, frame_number, NDArray(frame)};
size_t thread_idx = m_current_thread.fetch_add(1) % m_n_threads;
m_input_queues[thread_idx]->push(std::move(fw)); // blocks if full
} }
void clear_pedestal() { void clear_pedestal() {
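The core of the frame-loss fix is the ordering in stop() together with the poison counting in collect(): poison every input queue, join the workers, poison every output queue, then join the collector, which forwards a single poison item to the sink once it has seen one per worker. A self-contained sketch of the same pattern, using the BlockingQueue introduced above with plain int payloads standing in for frames and cluster vectors (-1 plays the role of both poison items):

```cpp
#include <iostream>
#include <memory>
#include <thread>
#include <vector>

// Shutdown sketch mirroring the new stop()/collect() logic; assumes the
// BlockingQueue from the header added in this compare is available.
int main() {
    constexpr size_t n_threads = 2;
    std::vector<std::unique_ptr<BlockingQueue<int>>> inputs, outputs;
    BlockingQueue<int> sink(100);
    for (size_t i = 0; i < n_threads; ++i) {
        inputs.emplace_back(std::make_unique<BlockingQueue<int>>(10));
        outputs.emplace_back(std::make_unique<BlockingQueue<int>>(10));
    }

    // Workers: pop frames until the poison frame arrives, push one result per frame.
    std::vector<std::thread> workers;
    for (size_t i = 0; i < n_threads; ++i)
        workers.emplace_back([&, i] {
            while (true) {
                int frame = inputs[i]->pop();
                if (frame == -1) break;        // poison frame
                outputs[i]->push(frame * 10);  // stand-in for steal_clusters()
            }
        });

    // Collector: round-robin over the output queues, counting poison items.
    std::thread collector([&] {
        size_t poison_count = 0;
        while (true) {
            for (auto &q : outputs) {
                int item = q->pop();
                if (item == -1) {
                    if (++poison_count == n_threads) {
                        sink.push(-1); // forward a single poison item to the sink
                        return;
                    }
                    continue;
                }
                sink.push(item);
            }
        }
    });

    for (int f = 0; f < 6; ++f)
        inputs[f % n_threads]->push(f); // distribute "frames" round-robin

    // stop(): poison inputs, join workers, poison outputs, join the collector.
    for (auto &q : inputs) q->push(-1);
    for (auto &t : workers) t.join();
    for (auto &q : outputs) q->push(-1);
    collector.join();

    for (int v = sink.pop(); v != -1; v = sink.pop())
        std::cout << v << '\n'; // every frame's result arrives exactly once
    return 0;
}
```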

View File

@@ -32,7 +32,8 @@ class ClusterVector; // Forward declaration
*/ */
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY, template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
typename CoordType> typename CoordType>
class ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> { class ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>>
{
std::vector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> m_data{}; std::vector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> m_data{};
int32_t m_frame_number{0}; // TODO! Check frame number size and type int32_t m_frame_number{0}; // TODO! Check frame number size and type
@@ -172,40 +173,4 @@ class ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> {
} }
}; };
/**
* @brief Reduce a cluster to a 2x2 cluster by selecting the 2x2 block with the
* highest sum.
* @param cv Clustervector containing clusters to reduce
* @return Clustervector with reduced clusters
*/
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
typename CoordType = uint16_t>
ClusterVector<Cluster<T, 2, 2, CoordType>> reduce_to_2x2(
const ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>>
&cv) {
ClusterVector<Cluster<T, 2, 2, CoordType>> result;
for (const auto &c : cv) {
result.push_back(reduce_to_2x2(c));
}
return result;
}
/**
* @brief Reduce a cluster to a 3x3 cluster by selecting the 3x3 block with the
* highest sum.
* @param cv Clustervector containing clusters to reduce
* @return Clustervector with reduced clusters
*/
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
typename CoordType = uint16_t>
ClusterVector<Cluster<T, 3, 3, CoordType>> reduce_to_3x3(
const ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>>
&cv) {
ClusterVector<Cluster<T, 3, 3, CoordType>> result;
for (const auto &c : cv) {
result.push_back(reduce_to_3x3(c));
}
return result;
}
} // namespace aare } // namespace aare

View File

@@ -105,7 +105,7 @@ class Frame {
* @tparam T type of the pixels * @tparam T type of the pixels
* @return NDView<T, 2> * @return NDView<T, 2>
*/ */
template <typename T> NDView<T, 2> view() & { template <typename T> NDView<T, 2> view() {
std::array<ssize_t, 2> shape = {static_cast<ssize_t>(m_rows), std::array<ssize_t, 2> shape = {static_cast<ssize_t>(m_rows),
static_cast<ssize_t>(m_cols)}; static_cast<ssize_t>(m_cols)};
T *data = reinterpret_cast<T *>(m_data); T *data = reinterpret_cast<T *>(m_data);

View File

@@ -45,63 +45,19 @@ template <class T> struct ProducerConsumerQueue {
ProducerConsumerQueue(const ProducerConsumerQueue &) = delete; ProducerConsumerQueue(const ProducerConsumerQueue &) = delete;
ProducerConsumerQueue &operator=(const ProducerConsumerQueue &) = delete; ProducerConsumerQueue &operator=(const ProducerConsumerQueue &) = delete;
// ProducerConsumerQueue(ProducerConsumerQueue &&other) { ProducerConsumerQueue(ProducerConsumerQueue &&other) {
// size_ = other.size_;
// records_ = other.records_;
// other.records_ = nullptr;
// readIndex_ = other.readIndex_.load(std::memory_order_acquire);
// writeIndex_ = other.writeIndex_.load(std::memory_order_acquire);
// }
ProducerConsumerQueue(ProducerConsumerQueue&& other) noexcept {
size_ = other.size_; size_ = other.size_;
records_ = other.records_; records_ = other.records_;
readIndex_.store(other.readIndex_.load(std::memory_order_acquire),
std::memory_order_relaxed);
writeIndex_.store(other.writeIndex_.load(std::memory_order_acquire),
std::memory_order_relaxed);
other.records_ = nullptr; other.records_ = nullptr;
other.size_ = 0; readIndex_ = other.readIndex_.load(std::memory_order_acquire);
other.readIndex_.store(0, std::memory_order_relaxed); writeIndex_ = other.writeIndex_.load(std::memory_order_acquire);
other.writeIndex_.store(0, std::memory_order_relaxed);
} }
ProducerConsumerQueue &operator=(ProducerConsumerQueue &&other) {
// ProducerConsumerQueue &operator=(ProducerConsumerQueue &&other) {
// size_ = other.size_;
// records_ = other.records_;
// other.records_ = nullptr;
// readIndex_ = other.readIndex_.load(std::memory_order_acquire);
// writeIndex_ = other.writeIndex_.load(std::memory_order_acquire);
// return *this;
// }
ProducerConsumerQueue& operator=(ProducerConsumerQueue&& other) {
if (this == &other) return *this;
//Destroy existing elements and free old storage
if (records_ && !std::is_trivially_destructible<T>::value) {
size_t r = readIndex_.load(std::memory_order_relaxed);
size_t w = writeIndex_.load(std::memory_order_relaxed);
while (r != w) {
records_[r].~T();
if (++r == size_) r = 0;
}
}
std::free(records_);
//Steal other's state
size_ = other.size_; size_ = other.size_;
records_ = other.records_; records_ = other.records_;
readIndex_.store( other.readIndex_.load(std::memory_order_acquire), std::memory_order_relaxed );
writeIndex_.store( other.writeIndex_.load(std::memory_order_acquire), std::memory_order_relaxed );
//leave 'other' empty and harmless
other.records_ = nullptr; other.records_ = nullptr;
other.size_ = 0; readIndex_ = other.readIndex_.load(std::memory_order_acquire);
other.readIndex_.store(0, std::memory_order_relaxed); writeIndex_ = other.writeIndex_.load(std::memory_order_acquire);
other.writeIndex_.store(0, std::memory_order_relaxed);
return *this; return *this;
} }

View File

@@ -26,33 +26,34 @@ def _get_class(name, cluster_size, dtype):
def ClusterFinder(image_size, cluster_size, n_sigma=5, dtype = np.int32, capacity = 1024): def ClusterFinder(image_size, saved_cluster_size, checked_cluster_size, n_sigma=5, dtype = np.int32, capacity = 1024):
""" """
Factory function to create a ClusterFinder object. Provides a cleaner syntax for Factory function to create a ClusterFinder object. Provides a cleaner syntax for
the templated ClusterFinder in C++. the templated ClusterFinder in C++.
""" """
cls = _get_class("ClusterFinder", cluster_size, dtype) cls = _get_class("ClusterFinder", saved_cluster_size, dtype)
return cls(image_size, n_sigma=n_sigma, capacity=capacity) return cls(image_size, n_sigma=n_sigma, capacity=capacity, cluster_size_x=checked_cluster_size[0], cluster_size_y=checked_cluster_size[1])
def ClusterFinderMT(image_size, cluster_size = (3,3), dtype=np.int32, n_sigma=5, capacity = 1024, n_threads = 3): def ClusterFinderMT(image_size, saved_cluster_size = (3,3), checked_cluster_size = (3,3), dtype=np.int32, n_sigma=5, capacity = 1024, n_threads = 3):
""" """
Factory function to create a ClusterFinderMT object. Provides a cleaner syntax for Factory function to create a ClusterFinderMT object. Provides a cleaner syntax for
the templated ClusterFinderMT in C++. the templated ClusterFinderMT in C++.
""" """
cls = _get_class("ClusterFinderMT", cluster_size, dtype) cls = _get_class("ClusterFinderMT", saved_cluster_size, dtype)
return cls(image_size, n_sigma=n_sigma, capacity=capacity, n_threads=n_threads) return cls(image_size, n_sigma=n_sigma, capacity=capacity, n_threads=n_threads, cluster_size_x=checked_cluster_size[0], cluster_size_y=checked_cluster_size[1])
def ClusterCollector(clusterfindermt, dtype=np.int32):
def ClusterCollector(clusterfindermt, cluster_size = (3,3), dtype=np.int32):
""" """
Factory function to create a ClusterCollector object. Provides a cleaner syntax for Factory function to create a ClusterCollector object. Provides a cleaner syntax for
the templated ClusterCollector in C++. the templated ClusterCollector in C++.
""" """
cls = _get_class("ClusterCollector", clusterfindermt.cluster_size, dtype) cls = _get_class("ClusterCollector", cluster_size, dtype)
return cls(clusterfindermt) return cls(clusterfindermt)
def ClusterFileSink(clusterfindermt, cluster_file, dtype=np.int32): def ClusterFileSink(clusterfindermt, cluster_file, dtype=np.int32):

View File

@@ -17,7 +17,7 @@ from .ClusterVector import ClusterVector
from ._aare import fit_gaus, fit_pol1, fit_scurve, fit_scurve2 from ._aare import fit_gaus, fit_pol1, fit_scurve, fit_scurve2
from ._aare import Interpolator from ._aare import Interpolator
from ._aare import calculate_eta2 from ._aare import calculate_eta2
from ._aare import reduce_to_2x2, reduce_to_3x3
from ._aare import apply_custom_weights from ._aare import apply_custom_weights

View File

@@ -24,8 +24,7 @@ void define_Cluster(py::module &m, const std::string &typestr) {
py::class_<Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>>( py::class_<Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>>(
m, class_name.c_str(), py::buffer_protocol()) m, class_name.c_str(), py::buffer_protocol())
.def(py::init([](uint8_t x, uint8_t y, .def(py::init([](uint8_t x, uint8_t y, py::array_t<Type> data) {
py::array_t<Type, py::array::forcecast> data) {
py::buffer_info buf_info = data.request(); py::buffer_info buf_info = data.request();
Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType> cluster; Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType> cluster;
cluster.x = x; cluster.x = x;
@@ -35,58 +34,31 @@ void define_Cluster(py::module &m, const std::string &typestr) {
cluster.data[i] = r(i); cluster.data[i] = r(i);
} }
return cluster; return cluster;
})) }));
// TODO! Review if to keep or not /*
.def_property_readonly( //TODO! Review if to keep or not
"data", .def_property(
[](Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType> &c) "data",
-> py::array { [](ClusterType &c) -> py::array {
return py::array(py::buffer_info( return py::array(py::buffer_info(
c.data.data(), sizeof(Type), c.data, sizeof(Type),
py::format_descriptor<Type>::format(), // Type py::format_descriptor<Type>::format(), // Type
// format // format
2, // Number of dimensions 1, // Number of dimensions
{static_cast<ssize_t>(ClusterSizeX), {static_cast<ssize_t>(ClusterSizeX *
static_cast<ssize_t>(ClusterSizeY)}, // Shape (flattened) ClusterSizeY)}, // Shape (flattened)
{sizeof(Type) * ClusterSizeY, sizeof(Type)} {sizeof(Type)} // Stride (step size between elements)
// Stride (step size between elements) ));
));
})
.def_readonly("x",
&Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>::x)
.def_readonly("y",
&Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>::y);
}
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
typename CoordType = int16_t>
void reduce_to_3x3(py::module &m) {
m.def(
"reduce_to_3x3",
[](const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cl) {
return reduce_to_3x3(cl);
}, },
py::return_value_policy::move, [](ClusterType &c, py::array_t<Type> arr) {
"Reduce cluster to 3x3 subcluster by taking the 3x3 subcluster with " py::buffer_info buf_info = arr.request();
"the highest photon energy."); Type *ptr = static_cast<Type *>(buf_info.ptr);
} std::copy(ptr, ptr + ClusterSizeX * ClusterSizeY,
c.data); // TODO dont iterate over centers!!!
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY, });
typename CoordType = int16_t> */
void reduce_to_2x2(py::module &m) {
m.def(
"reduce_to_2x2",
[](const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cl) {
return reduce_to_2x2(cl);
},
py::return_value_policy::move,
"Reduce cluster to 2x2 subcluster by taking the 2x2 subcluster with "
"the highest photon energy.");
} }
#pragma GCC diagnostic pop #pragma GCC diagnostic pop

View File

@@ -30,8 +30,9 @@ void define_ClusterFinder(py::module &m, const std::string &typestr) {
py::class_<ClusterFinder<ClusterType, uint16_t, pd_type>>( py::class_<ClusterFinder<ClusterType, uint16_t, pd_type>>(
m, class_name.c_str()) m, class_name.c_str())
.def(py::init<Shape<2>, pd_type, size_t>(), py::arg("image_size"), .def(py::init<Shape<2>, pd_type, size_t, uint32_t, uint32_t>(), py::arg("image_size"),
py::arg("n_sigma") = 5.0, py::arg("capacity") = 1'000'000) py::arg("n_sigma") = 5.0, py::arg("capacity") = 1'000'000,
py::arg("cluster_size_x") = 3, py::arg("cluster_size_y") = 3)
.def("push_pedestal_frame", .def("push_pedestal_frame",
[](ClusterFinder<ClusterType, uint16_t, pd_type> &self, [](ClusterFinder<ClusterType, uint16_t, pd_type> &self,
py::array_t<uint16_t> frame) { py::array_t<uint16_t> frame) {

View File

@@ -30,9 +30,10 @@ void define_ClusterFinderMT(py::module &m, const std::string &typestr) {
py::class_<ClusterFinderMT<ClusterType, uint16_t, pd_type>>( py::class_<ClusterFinderMT<ClusterType, uint16_t, pd_type>>(
m, class_name.c_str()) m, class_name.c_str())
.def(py::init<Shape<2>, pd_type, size_t, size_t>(), .def(py::init<Shape<2>, pd_type, size_t, size_t, uint32_t, uint32_t>(),
py::arg("image_size"), py::arg("n_sigma") = 5.0, py::arg("image_size"), py::arg("n_sigma") = 5.0,
py::arg("capacity") = 2048, py::arg("n_threads") = 3) py::arg("capacity") = 2048, py::arg("n_threads") = 3,
py::arg("cluster_size_x") = 3, py::arg("cluster_size_y") = 3)
.def("push_pedestal_frame", .def("push_pedestal_frame",
[](ClusterFinderMT<ClusterType, uint16_t, pd_type> &self, [](ClusterFinderMT<ClusterType, uint16_t, pd_type> &self,
py::array_t<uint16_t> frame) { py::array_t<uint16_t> frame) {

View File

@@ -104,47 +104,4 @@ void define_ClusterVector(py::module &m, const std::string &typestr) {
}); });
} }
template <typename Type, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
typename CoordType = uint16_t>
void define_2x2_reduction(py::module &m) {
m.def(
"reduce_to_2x2",
[](const ClusterVector<
Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>> &cv) {
return new ClusterVector<Cluster<Type, 2, 2, CoordType>>(
reduce_to_2x2(cv));
},
R"(
Reduce cluster to 2x2 subcluster by taking the 2x2 subcluster with
the highest photon energy."
Parameters
----------
cv : ClusterVector
)",
py::arg("clustervector"));
}
template <typename Type, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
typename CoordType = uint16_t>
void define_3x3_reduction(py::module &m) {
m.def(
"reduce_to_3x3",
[](const ClusterVector<
Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>> &cv) {
return new ClusterVector<Cluster<Type, 3, 3, CoordType>>(
reduce_to_3x3(cv));
},
R"(
Reduce cluster to 3x3 subcluster by taking the 3x3 subcluster with
the highest photon energy."
Parameters
----------
cv : ClusterVector
)",
py::arg("clustervector"));
}
#pragma GCC diagnostic pop #pragma GCC diagnostic pop

View File

@@ -47,9 +47,7 @@ double, 'f' for float)
define_ClusterFileSink<T, N, M, U>(m, "Cluster" #N "x" #M #TYPE_CODE); \ define_ClusterFileSink<T, N, M, U>(m, "Cluster" #N "x" #M #TYPE_CODE); \
define_ClusterCollector<T, N, M, U>(m, "Cluster" #N "x" #M #TYPE_CODE); \ define_ClusterCollector<T, N, M, U>(m, "Cluster" #N "x" #M #TYPE_CODE); \
define_Cluster<T, N, M, U>(m, #N "x" #M #TYPE_CODE); \ define_Cluster<T, N, M, U>(m, #N "x" #M #TYPE_CODE); \
register_calculate_eta<T, N, M, U>(m); \ register_calculate_eta<T, N, M, U>(m);
define_2x2_reduction<T, N, M, U>(m); \
reduce_to_2x2<T, N, M, U>(m);
PYBIND11_MODULE(_aare, m) { PYBIND11_MODULE(_aare, m) {
define_file_io_bindings(m); define_file_io_bindings(m);
@@ -87,29 +85,8 @@ PYBIND11_MODULE(_aare, m) {
DEFINE_CLUSTER_BINDINGS(double, 9, 9, uint16_t, d); DEFINE_CLUSTER_BINDINGS(double, 9, 9, uint16_t, d);
DEFINE_CLUSTER_BINDINGS(float, 9, 9, uint16_t, f); DEFINE_CLUSTER_BINDINGS(float, 9, 9, uint16_t, f);
define_3x3_reduction<int, 3, 3, uint16_t>(m); DEFINE_CLUSTER_BINDINGS(int, 21, 21, uint16_t, i);
define_3x3_reduction<double, 3, 3, uint16_t>(m); DEFINE_CLUSTER_BINDINGS(double, 21, 21, uint16_t, d);
define_3x3_reduction<float, 3, 3, uint16_t>(m); DEFINE_CLUSTER_BINDINGS(float, 21, 21, uint16_t, f);
define_3x3_reduction<int, 5, 5, uint16_t>(m);
define_3x3_reduction<double, 5, 5, uint16_t>(m);
define_3x3_reduction<float, 5, 5, uint16_t>(m);
define_3x3_reduction<int, 7, 7, uint16_t>(m);
define_3x3_reduction<double, 7, 7, uint16_t>(m);
define_3x3_reduction<float, 7, 7, uint16_t>(m);
define_3x3_reduction<int, 9, 9, uint16_t>(m);
define_3x3_reduction<double, 9, 9, uint16_t>(m);
define_3x3_reduction<float, 9, 9, uint16_t>(m);
reduce_to_3x3<int, 3, 3, uint16_t>(m);
reduce_to_3x3<double, 3, 3, uint16_t>(m);
reduce_to_3x3<float, 3, 3, uint16_t>(m);
reduce_to_3x3<int, 5, 5, uint16_t>(m);
reduce_to_3x3<double, 5, 5, uint16_t>(m);
reduce_to_3x3<float, 5, 5, uint16_t>(m);
reduce_to_3x3<int, 7, 7, uint16_t>(m);
reduce_to_3x3<double, 7, 7, uint16_t>(m);
reduce_to_3x3<float, 7, 7, uint16_t>(m);
reduce_to_3x3<int, 9, 9, uint16_t>(m);
reduce_to_3x3<double, 9, 9, uint16_t>(m);
reduce_to_3x3<float, 9, 9, uint16_t>(m);
} }

View File

@@ -101,27 +101,6 @@ def test_cluster_finder():
assert clusters.size == 0 assert clusters.size == 0
def test_2x2_reduction():
"""Test 2x2 Reduction"""
cluster = _aare.Cluster3x3i(5,5,np.array([1, 1, 1, 2, 3, 1, 2, 2, 1], dtype=np.int32))
reduced_cluster = _aare.reduce_to_2x2(cluster)
assert reduced_cluster.x == 4
assert reduced_cluster.y == 5
assert (reduced_cluster.data == np.array([[2, 3], [2, 2]], dtype=np.int32)).all()
def test_3x3_reduction():
"""Test 3x3 Reduction"""
cluster = _aare.Cluster5x5d(5,5,np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 2.0, 2.0, 3.0,
1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], dtype=np.double))
reduced_cluster = _aare.reduce_to_3x3(cluster)
assert reduced_cluster.x == 4
assert reduced_cluster.y == 5
assert (reduced_cluster.data == np.array([[1.0, 2.0, 1.0], [2.0, 2.0, 3.0], [1.0, 2.0, 1.0]], dtype=np.double)).all()

View File

@@ -5,7 +5,7 @@ import time
from pathlib import Path from pathlib import Path
import pickle import pickle
from aare import ClusterFile, ClusterVector from aare import ClusterFile
from aare import _aare from aare import _aare
from conftest import test_data_path from conftest import test_data_path
@@ -51,36 +51,4 @@ def test_make_a_hitmap_from_cluster_vector():
# print(img) # print(img)
# print(ref) # print(ref)
assert (img == ref).all() assert (img == ref).all()
def test_2x2_reduction():
cv = ClusterVector((3,3))
cv.push_back(_aare.Cluster3x3i(5, 5, np.array([1, 1, 1, 2, 3, 1, 2, 2, 1], dtype=np.int32)))
cv.push_back(_aare.Cluster3x3i(5, 5, np.array([2, 2, 1, 2, 3, 1, 1, 1, 1], dtype=np.int32)))
reduced_cv = np.array(_aare.reduce_to_2x2(cv), copy=False)
assert reduced_cv.size == 2
assert reduced_cv[0]["x"] == 4
assert reduced_cv[0]["y"] == 5
assert (reduced_cv[0]["data"] == np.array([[2, 3], [2, 2]], dtype=np.int32)).all()
assert reduced_cv[1]["x"] == 4
assert reduced_cv[1]["y"] == 6
assert (reduced_cv[1]["data"] == np.array([[2, 2], [2, 3]], dtype=np.int32)).all()
def test_3x3_reduction():
cv = _aare.ClusterVector_Cluster5x5d()
cv.push_back(_aare.Cluster5x5d(5,5,np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 2.0, 2.0, 3.0,
1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], dtype=np.double)))
cv.push_back(_aare.Cluster5x5d(5,5,np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 2.0, 2.0, 3.0,
1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], dtype=np.double)))
reduced_cv = np.array(_aare.reduce_to_3x3(cv), copy=False)
assert reduced_cv.size == 2
assert reduced_cv[0]["x"] == 4
assert reduced_cv[0]["y"] == 5
assert (reduced_cv[0]["data"] == np.array([[1.0, 2.0, 1.0], [2.0, 2.0, 3.0], [1.0, 2.0, 1.0]], dtype=np.double)).all()

View File

@@ -18,86 +18,4 @@ TEST_CASE("Test sum of Cluster", "[.cluster]") {
Cluster<int, 2, 2> cluster{0, 0, {1, 2, 3, 4}}; Cluster<int, 2, 2> cluster{0, 0, {1, 2, 3, 4}};
CHECK(cluster.sum() == 10); CHECK(cluster.sum() == 10);
}
using ClusterTypes = std::variant<Cluster<int, 2, 2>, Cluster<int, 3, 3>,
Cluster<int, 5, 5>, Cluster<int, 2, 3>>;
using ClusterTypesLargerThan2x2 =
std::variant<Cluster<int, 3, 3>, Cluster<int, 4, 4>, Cluster<int, 5, 5>>;
TEST_CASE("Test reduce to 2x2 Cluster", "[.cluster]") {
auto [cluster, expected_reduced_cluster] = GENERATE(
std::make_tuple(ClusterTypes{Cluster<int, 2, 2>{5, 5, {1, 2, 3, 4}}},
Cluster<int, 2, 2>{4, 6, {1, 2, 3, 4}}),
std::make_tuple(
ClusterTypes{Cluster<int, 3, 3>{5, 5, {1, 1, 1, 1, 3, 2, 1, 2, 2}}},
Cluster<int, 2, 2>{5, 5, {3, 2, 2, 2}}),
std::make_tuple(
ClusterTypes{Cluster<int, 3, 3>{5, 5, {1, 1, 1, 2, 3, 1, 2, 2, 1}}},
Cluster<int, 2, 2>{4, 5, {2, 3, 2, 2}}),
std::make_tuple(
ClusterTypes{Cluster<int, 3, 3>{5, 5, {2, 2, 1, 2, 3, 1, 1, 1, 1}}},
Cluster<int, 2, 2>{4, 6, {2, 2, 2, 3}}),
std::make_tuple(
ClusterTypes{Cluster<int, 3, 3>{5, 5, {1, 2, 2, 1, 3, 2, 1, 1, 1}}},
Cluster<int, 2, 2>{5, 6, {2, 2, 3, 2}}),
std::make_tuple(ClusterTypes{Cluster<int, 5, 5>{
5, 5, {1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 3,
2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}}},
Cluster<int, 2, 2>{5, 6, {2, 2, 3, 2}}),
std::make_tuple(ClusterTypes{Cluster<int, 5, 5>{
5, 5, {1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 2, 3,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}}},
Cluster<int, 2, 2>{4, 6, {2, 2, 2, 3}}),
std::make_tuple(
ClusterTypes{Cluster<int, 2, 3>{5, 5, {2, 2, 3, 2, 1, 1}}},
Cluster<int, 2, 2>{4, 6, {2, 2, 3, 2}}));
auto reduced_cluster = std::visit(
[](const auto &clustertype) { return reduce_to_2x2(clustertype); },
cluster);
CHECK(reduced_cluster.x == expected_reduced_cluster.x);
CHECK(reduced_cluster.y == expected_reduced_cluster.y);
CHECK(std::equal(reduced_cluster.data.begin(),
reduced_cluster.data.begin() + 4,
expected_reduced_cluster.data.begin()));
}
TEST_CASE("Test reduce to 3x3 Cluster", "[.cluster]") {
auto [cluster, expected_reduced_cluster] = GENERATE(
std::make_tuple(ClusterTypesLargerThan2x2{Cluster<int, 3, 3>{
5, 5, {1, 1, 1, 1, 3, 1, 1, 1, 1}}},
Cluster<int, 3, 3>{5, 5, {1, 1, 1, 1, 3, 1, 1, 1, 1}}),
std::make_tuple(
ClusterTypesLargerThan2x2{Cluster<int, 4, 4>{
5, 5, {2, 2, 1, 1, 2, 2, 1, 1, 1, 1, 3, 1, 1, 1, 1, 1}}},
Cluster<int, 3, 3>{4, 6, {2, 2, 1, 2, 2, 1, 1, 1, 3}}),
std::make_tuple(
ClusterTypesLargerThan2x2{Cluster<int, 4, 4>{
5, 5, {1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 3, 1, 1, 1, 1, 1}}},
Cluster<int, 3, 3>{5, 6, {1, 2, 2, 1, 2, 2, 1, 3, 1}}),
std::make_tuple(
ClusterTypesLargerThan2x2{Cluster<int, 4, 4>{
5, 5, {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 2, 1, 1, 2, 2}}},
Cluster<int, 3, 3>{5, 5, {1, 1, 1, 1, 3, 2, 1, 2, 2}}),
std::make_tuple(
ClusterTypesLargerThan2x2{Cluster<int, 4, 4>{
5, 5, {1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 1, 2, 2, 1, 1}}},
Cluster<int, 3, 3>{4, 5, {1, 1, 1, 2, 2, 3, 2, 2, 1}}),
std::make_tuple(ClusterTypesLargerThan2x2{Cluster<int, 5, 5>{
5, 5, {1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 2, 2, 3,
1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1}}},
Cluster<int, 3, 3>{4, 5, {1, 2, 1, 2, 2, 3, 1, 2, 1}}));
auto reduced_cluster = std::visit(
[](const auto &clustertype) { return reduce_to_3x3(clustertype); },
cluster);
CHECK(reduced_cluster.x == expected_reduced_cluster.x);
CHECK(reduced_cluster.y == expected_reduced_cluster.y);
CHECK(std::equal(reduced_cluster.data.begin(),
reduced_cluster.data.begin() + 9,
expected_reduced_cluster.data.begin()));
} }

View File

@@ -57,7 +57,6 @@ class ClusterFinderMTWrapper
size_t m_sink_size() const { return this->m_sink.sizeGuess(); } size_t m_sink_size() const { return this->m_sink.sizeGuess(); }
}; };
TEST_CASE("multithreaded cluster finder", "[.with-data]") { TEST_CASE("multithreaded cluster finder", "[.with-data]") {
auto fpath = auto fpath =
test_data_path() / "raw/moench03/cu_half_speed_master_4.json"; test_data_path() / "raw/moench03/cu_half_speed_master_4.json";
@@ -82,8 +81,7 @@ TEST_CASE("multithreaded cluster finder", "[.with-data]") {
CHECK(cf.m_input_queues_are_empty() == true); CHECK(cf.m_input_queues_are_empty() == true);
for (size_t i = 0; i < n_frames_pd; ++i) { for (size_t i = 0; i < n_frames_pd; ++i) {
auto frame = file.read_frame(); cf.find_clusters(file.read_frame().view<uint16_t>());
cf.find_clusters(frame.view<uint16_t>());
} }
cf.stop(); cf.stop();

View File

@@ -1,20 +0,0 @@
# Makefile
CXX := g++
CXXFLAGS := -std=c++17 -O0 -g
INCLUDE := -I../include
SRC := ProducerConsumerQueue.test.cpp
BIN := test_pcq
.PHONY: all clean run
all: $(BIN)
$(BIN): $(SRC)
$(CXX) $(CXXFLAGS) $(INCLUDE) $< -o $@
run: $(BIN)
./$(BIN)
clean:
$(RM) $(BIN)

View File

@@ -1,83 +0,0 @@
#include <iostream>
#include <atomic>
#include <string>
#include <vector>
#include "aare/ProducerConsumerQueue.hpp"
struct Tracker {
static std::atomic<int> ctors;
static std::atomic<int> dtors;
static std::atomic<int> moves;
static std::atomic<int> live;
std::string tag;
std::vector<int> buf;
Tracker() = delete;
explicit Tracker(int id)
: tag("T" + std::to_string(id)), buf(1 << 18, id)
{
++ctors; ++live;
}
Tracker(Tracker&& other) noexcept
: tag(std::move(other.tag)), buf(std::move(other.buf))
{
++moves;
++ctors;
++live;
}
Tracker& operator=(Tracker&&) = delete;
Tracker(const Tracker&) = delete;
Tracker& operator=(const Tracker&) = delete;
~Tracker()
{
++dtors; --live;
}
};
std::atomic<int> Tracker::ctors{0};
std::atomic<int> Tracker::dtors{0};
std::atomic<int> Tracker::moves{0};
std::atomic<int> Tracker::live{0};
int main() {
using Queue = aare::ProducerConsumerQueue<Tracker>;
// Scope to make sure destructors have run before we check the counters.
{
Queue q1(8);
Queue q2(8);
for (int i = 0; i < 3; ++i) q2.write(Tracker(100 + i));
for (int i = 0; i < 5; ++i) q1.write(Tracker(200 + i));
q2 = std::move(q1);
Tracker tmp(9999);
if (auto* p = q2.frontPtr())
{
(void)p;
}
}
std::cout << "ctors=" << Tracker::ctors.load()
<< " dtors=" << Tracker::dtors.load()
<< " moves=" << Tracker::moves.load()
<< " live=" << Tracker::live.load()
<< "\n";
bool ok = (Tracker::ctors.load() == Tracker::dtors.load()) && (Tracker::live.load() == 0);
if (!ok)
{
std::cerr << "Leak or skipped destructors detected (move-assignment bug)\n";
return 1;
}
std::cout << "No leaks; move-assignment cleans up correctly\n";
return 0;
}

View File

@@ -7,7 +7,6 @@ Script to update VERSION file with semantic versioning if provided as an argumen
import sys import sys
import os import os
import re import re
from datetime import datetime
from packaging.version import Version, InvalidVersion from packaging.version import Version, InvalidVersion
@@ -27,9 +26,9 @@ def get_version():
# Check at least one argument is passed # Check at least one argument is passed
if len(sys.argv) < 2: if len(sys.argv) < 2:
version = datetime.today().strftime('%Y.%-m.%-d') return "0.0.0"
else:
version = sys.argv[1] version = sys.argv[1]
try: try:
v = Version(version) # normalize check if version follows PEP 440 specification v = Version(version) # normalize check if version follows PEP 440 specification
@@ -55,4 +54,4 @@ def write_version_to_file(version):
if __name__ == "__main__": if __name__ == "__main__":
version = get_version() version = get_version()
write_version_to_file(version) write_version_to_file(version)