Changed push_back to take a Cluster as input argument

Author: Mazzoleni Alice Francesca, 2025-04-01 15:30:10 +02:00
parent 508adf5016
commit 745d09fbe9
6 changed files with 54 additions and 47 deletions
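
For orientation, a minimal before/after sketch of the call-site change this commit introduces. It uses the Cluster and ClusterVector templates shown in the diffs below; the function name and include path are illustrative assumptions.

#include <cstdint>
#include "aare/ClusterVector.hpp" // include path assumed

using aare::Cluster;
using aare::ClusterVector;

void example() {
    ClusterVector<Cluster<int32_t, 2, 2>> cv(4);
    Cluster<int32_t, 2, 2> c = {1, 2, {3, 4, 5, 6}};

    // Before: coordinates and a raw byte pointer were passed separately.
    // cv.push_back(c.x, c.y, reinterpret_cast<std::byte *>(&c.data[0]));

    // After: the whole Cluster is passed and serialized by push_back itself.
    cv.push_back(c);
}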

View File

@@ -81,13 +81,13 @@ set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
if(AARE_FETCH_LMFIT)
#TODO! Should we fetch lmfit from the web or include a tar.gz in the repo?
#set(lmfit_patch git apply ${CMAKE_CURRENT_SOURCE_DIR}/patches/lmfit.patch)
set(lmfit_patch git apply ${CMAKE_CURRENT_SOURCE_DIR}/patches/lmfit.patch)
FetchContent_Declare(
lmfit
GIT_REPOSITORY https://jugit.fz-juelich.de/mlz/lmfit.git
GIT_TAG main
#PATCH_COMMAND ${lmfit_patch}
#UPDATE_DISCONNECTED 1
PATCH_COMMAND ${lmfit_patch}
UPDATE_DISCONNECTED 1
#EXCLUDE_FROM_ALL 1
)
#Disable what we don't need from lmfit
@@ -358,7 +358,7 @@ set(SourceFiles
add_library(aare_core STATIC ${SourceFiles})
target_include_directories(aare_core PUBLIC
"$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>"
"$<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>" PRIVATE ${lmfit_SOURCE_DIR}/lib
"$<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>"
)
target_link_libraries(
@@ -369,7 +369,7 @@ target_link_libraries(
${STD_FS_LIB} # from helpers.cmake
PRIVATE
aare_compiler_flags
"$<BUILD_INTERFACE:lmfit>"
$<BUILD_INTERFACE:lmfit>
)

View File

@@ -243,8 +243,9 @@ ClusterFile<ClusterType, Enable>::read_clusters(size_t n_clusters, ROI roi) {
fread(&tmp, sizeof(tmp), 1, fp);
if (tmp.x >= roi.xmin && tmp.x <= roi.xmax && tmp.y >= roi.ymin &&
tmp.y <= roi.ymax) {
clusters.push_back(tmp.x, tmp.y,
reinterpret_cast<std::byte *>(tmp.data));
// clusters.push_back(tmp.x, tmp.y,
// reinterpret_cast<std::byte *>(tmp.data));
clusters.push_back(tmp);
nph_read++;
}
}
@@ -268,9 +269,10 @@ ClusterFile<ClusterType, Enable>::read_clusters(size_t n_clusters, ROI roi) {
fread(&tmp, sizeof(tmp), 1, fp);
if (tmp.x >= roi.xmin && tmp.x <= roi.xmax &&
tmp.y >= roi.ymin && tmp.y <= roi.ymax) {
clusters.push_back(
tmp.x, tmp.y,
reinterpret_cast<std::byte *>(tmp.data));
// clusters.push_back(
// tmp.x, tmp.y,
// reinterpret_cast<std::byte *>(tmp.data));
clusters.push_back(tmp);
nph_read++;
}
}

View File

@@ -140,9 +140,14 @@ class ClusterFinder {
}
// Add the cluster to the output ClusterVector
/*
m_clusters.push_back(
ix, iy,
reinterpret_cast<std::byte *>(cluster_data.data()));
*/
m_clusters.push_back(
Cluster<CT, ClusterSizeX, ClusterSizeY>{
ix, iy, cluster_data.data()});
}
}
}

View File

@@ -100,25 +100,22 @@ class ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> {
/**
* @brief Add a cluster to the vector
* @param x x-coordinate of the cluster
* @param y y-coordinate of the cluster
* @param data pointer to the data of the cluster
* @warning The data pointer must point to a buffer of size cluster_size_x *
* cluster_size_y * sizeof(T)
*/
void push_back(CoordType x, CoordType y, const std::byte *data) {
void push_back(const ClusterType &cluster) {
if (m_size == m_capacity) {
allocate_buffer(m_capacity * 2);
}
std::byte *ptr = element_ptr(m_size);
*reinterpret_cast<CoordType *>(ptr) = x;
*reinterpret_cast<CoordType *>(ptr) = cluster.x;
ptr += sizeof(CoordType);
*reinterpret_cast<CoordType *>(ptr) = y;
*reinterpret_cast<CoordType *>(ptr) = cluster.y;
ptr += sizeof(CoordType);
std::copy(data, data + ClusterSizeX * ClusterSizeY * sizeof(T), ptr);
std::memcpy(ptr, cluster.data, ClusterSizeX * ClusterSizeY * sizeof(T));
m_size++;
}
ClusterVector &operator+=(const ClusterVector &other) {
if (m_size + other.m_size > m_capacity) {
allocate_buffer(m_capacity + other.m_size);
@@ -154,10 +151,9 @@ class ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> {
* @throws std::runtime_error if the cluster size is not 3x3
* @warning Only 3x3 clusters are supported for the 2x2 sum.
*/
/* only needed to calculate eta
std::vector<T> sum_2x2() {
std::vector<T> sums(m_size);
const size_t stride = item_size();
/* only needed to calculate eta
TODO: a sum calculation was already added in a previous PR
std::vector<T> sum_2x2() {
    std::vector<T> sums(m_size);
    const size_t stride = item_size();
if (ClusterSizeX != 3 || ClusterSizeY != 3) {
throw std::runtime_error(

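The push_back rewrite above serializes each cluster into the flat buffer as x, then y, then the ClusterSizeX * ClusterSizeY pixel payload, mirroring the member order of Cluster itself. Below is a minimal read-back sketch in the style of the tests in the next file; it assumes, as those tests do, that Cluster has no internal padding so a serialized element can be reinterpreted as a Cluster (function name is illustrative).

#include <cassert>
#include <cstdint>

using aare::Cluster;
using aare::ClusterVector;

void layout_check() {
    ClusterVector<Cluster<int32_t, 2, 2>> cv(4);
    cv.push_back(Cluster<int32_t, 2, 2>{1, 2, {3, 4, 5, 6}});

    // data() exposes the flat buffer; each element is laid out as {x, y, data[4]}.
    auto *ptr = reinterpret_cast<Cluster<int32_t, 2, 2> *>(cv.data());
    assert(ptr[0].x == 1 && ptr[0].y == 2);
    assert(ptr[0].data[3] == 6);
}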
View File

@@ -7,7 +7,8 @@
using aare::Cluster;
using aare::ClusterVector;
TEST_CASE("ClusterVector 2x2 int32_t capacity 4, push back then read") {
TEST_CASE("ClusterVector 2x2 int32_t capacity 4, push back then read",
"[.ClusterVector]") {
ClusterVector<Cluster<int32_t, 2, 2>> cv(4);
REQUIRE(cv.capacity() == 4);
@@ -19,7 +20,7 @@ TEST_CASE("ClusterVector 2x2 int32_t capacity 4, push back then read") {
// Create a cluster and push back into the vector
Cluster<int32_t, 2, 2> c1 = {1, 2, {3, 4, 5, 6}};
cv.push_back(c1.x, c1.y, reinterpret_cast<std::byte *>(&c1.data[0]));
cv.push_back(c1);
REQUIRE(cv.size() == 1);
REQUIRE(cv.capacity() == 4);
@@ -36,7 +37,7 @@ TEST_CASE("ClusterVector 2x2 int32_t capacity 4, push back then read") {
}
}
TEST_CASE("Summing 3x1 clusters of int64") {
TEST_CASE("Summing 3x1 clusters of int64", "[.ClusterVector]") {
ClusterVector<Cluster<int32_t, 3, 1>> cv(2);
REQUIRE(cv.capacity() == 2);
REQUIRE(cv.size() == 0);
@@ -45,17 +46,17 @@ TEST_CASE("Summing 3x1 clusters of int64") {
// Create a cluster and push back into the vector
Cluster<int32_t, 3, 1> c1 = {1, 2, {3, 4, 5}};
cv.push_back(c1.x, c1.y, reinterpret_cast<std::byte *>(&c1.data[0]));
cv.push_back(c1);
REQUIRE(cv.capacity() == 2);
REQUIRE(cv.size() == 1);
Cluster<int32_t, 3, 1> c2 = {6, 7, {8, 9, 10}};
cv.push_back(c2.x, c2.y, reinterpret_cast<std::byte *>(&c2.data[0]));
cv.push_back(c2);
REQUIRE(cv.capacity() == 2);
REQUIRE(cv.size() == 2);
Cluster<int32_t, 3, 1> c3 = {11, 12, {13, 14, 15}};
cv.push_back(c3.x, c3.y, reinterpret_cast<std::byte *>(&c3.data[0]));
cv.push_back(c3);
REQUIRE(cv.capacity() == 4);
REQUIRE(cv.size() == 3);
@@ -66,7 +67,7 @@ TEST_CASE("Summing 3x1 clusters of int64") {
REQUIRE(sums[2] == 42);
}
TEST_CASE("Storing floats") {
TEST_CASE("Storing floats", "[.ClusterVector]") {
ClusterVector<Cluster<float, 2, 4>> cv(10);
REQUIRE(cv.capacity() == 10);
REQUIRE(cv.size() == 0);
@@ -75,13 +76,13 @@ TEST_CASE("Storing floats") {
// Create a cluster and push back into the vector
Cluster<float, 2, 4> c1 = {1, 2, {3.0, 4.0, 5.0, 6.0, 3.0, 4.0, 5.0, 6.0}};
cv.push_back(c1.x, c1.y, reinterpret_cast<std::byte *>(&c1.data[0]));
cv.push_back(c1);
REQUIRE(cv.capacity() == 10);
REQUIRE(cv.size() == 1);
Cluster<float, 2, 4> c2 = {
6, 7, {8.0, 9.0, 10.0, 11.0, 8.0, 9.0, 10.0, 11.0}};
cv.push_back(c2.x, c2.y, reinterpret_cast<std::byte *>(&c2.data[0]));
cv.push_back(c2);
REQUIRE(cv.capacity() == 10);
REQUIRE(cv.size() == 2);
@@ -91,22 +92,22 @@ TEST_CASE("Storing floats") {
REQUIRE_THAT(sums[1], Catch::Matchers::WithinAbs(76.0, 1e-6));
}
TEST_CASE("Push back more than initial capacity") {
TEST_CASE("Push back more than initial capacity", "[.ClusterVector]") {
ClusterVector<Cluster<int32_t, 2, 2>> cv(2);
auto initial_data = cv.data();
Cluster<int32_t, 2, 2> c1 = {1, 2, {3, 4, 5, 6}};
cv.push_back(c1.x, c1.y, reinterpret_cast<std::byte *>(&c1.data[0]));
cv.push_back(c1);
REQUIRE(cv.size() == 1);
REQUIRE(cv.capacity() == 2);
Cluster<int32_t, 2, 2> c2 = {6, 7, {8, 9, 10, 11}};
cv.push_back(c2.x, c2.y, reinterpret_cast<std::byte *>(&c2.data[0]));
cv.push_back(c2);
REQUIRE(cv.size() == 2);
REQUIRE(cv.capacity() == 2);
Cluster<int32_t, 2, 2> c3 = {11, 12, {13, 14, 15, 16}};
cv.push_back(c3.x, c3.y, reinterpret_cast<std::byte *>(&c3.data[0]));
cv.push_back(c3);
REQUIRE(cv.size() == 3);
REQUIRE(cv.capacity() == 4);
@@ -124,19 +125,19 @@ TEST_CASE("Push back more than initial capacity") {
REQUIRE(initial_data != cv.data());
}
TEST_CASE(
"Concatenate two cluster vectors where the first has enough capacity") {
TEST_CASE("Concatenate two cluster vectors where the first has enough capacity",
"[.ClusterVector]") {
ClusterVector<Cluster<int32_t, 2, 2>> cv1(12);
Cluster<int32_t, 2, 2> c1 = {1, 2, {3, 4, 5, 6}};
cv1.push_back(c1.x, c1.y, reinterpret_cast<std::byte *>(&c1.data[0]));
cv1.push_back(c1);
Cluster<int32_t, 2, 2> c2 = {6, 7, {8, 9, 10, 11}};
cv1.push_back(c2.x, c2.y, reinterpret_cast<std::byte *>(&c2.data[0]));
cv1.push_back(c2);
ClusterVector<Cluster<int32_t, 2, 2>> cv2(2);
Cluster<int32_t, 2, 2> c3 = {11, 12, {13, 14, 15, 16}};
cv2.push_back(c3.x, c3.y, reinterpret_cast<std::byte *>(&c3.data[0]));
cv2.push_back(c3);
Cluster<int32_t, 2, 2> c4 = {16, 17, {18, 19, 20, 21}};
cv2.push_back(c4.x, c4.y, reinterpret_cast<std::byte *>(&c4.data[0]));
cv2.push_back(c4);
cv1 += cv2;
REQUIRE(cv1.size() == 4);
@@ -154,18 +155,19 @@ TEST_CASE(
REQUIRE(ptr[3].y == 17);
}
TEST_CASE("Concatenate two cluster vectors where we need to allocate") {
TEST_CASE("Concatenate two cluster vectors where we need to allocate",
"[.ClusterVector]") {
ClusterVector<Cluster<int32_t, 2, 2>> cv1(2);
Cluster<int32_t, 2, 2> c1 = {1, 2, {3, 4, 5, 6}};
cv1.push_back(c1.x, c1.y, reinterpret_cast<std::byte *>(&c1.data[0]));
cv1.push_back(c1);
Cluster<int32_t, 2, 2> c2 = {6, 7, {8, 9, 10, 11}};
cv1.push_back(c2.x, c2.y, reinterpret_cast<std::byte *>(&c2.data[0]));
cv1.push_back(c2);
ClusterVector<Cluster<int32_t, 2, 2>> cv2(2);
Cluster<int32_t, 2, 2> c3 = {11, 12, {13, 14, 15, 16}};
cv2.push_back(c3.x, c3.y, reinterpret_cast<std::byte *>(&c3.data[0]));
cv2.push_back(c3);
Cluster<int32_t, 2, 2> c4 = {16, 17, {18, 19, 20, 21}};
cv2.push_back(c4.x, c4.y, reinterpret_cast<std::byte *>(&c4.data[0]));
cv2.push_back(c4);
cv1 += cv2;
REQUIRE(cv1.size() == 4);

View File

@@ -55,6 +55,8 @@ Interpolator::Interpolator(NDView<double, 3> etacube, NDView<double, 1> xbins,
}
}
// TODO: generalize to support any cluster type; otherwise add std::enable_if_t
// to only take Cluster2x2 and Cluster3x3
template <typename ClusterType,
typename = std::enable_if_t<is_cluster_v<ClusterType>>>
std::vector<Photon>