21 Commits

Author SHA1 Message Date
Erik Fröjdh
5dfaba53c2 pkgs for python 3.14 2025-10-17 17:33:39 +02:00
45f506b473 Fix/adapt and test interpolation (#231)
Some checks failed
Build on RHEL8 / build (push) Failing after 3m9s
Build on RHEL9 / build (push) Failing after 3m18s
Adapted eta interpolation.

## Issues with the previous interpolation

### Eta calculation
- The photon hit was previously assumed to be in the bottom-left pixel of the cluster; it is now assumed to be in the bottom-right pixel.
- Clusters are filled from top left to bottom right (previously assumed: bottom left to top right).

### Actual interpolation
- Photon hits are given in pixel coordinates, whereas the previous interpolation assumed Euclidean coordinates (e.g. a positive distance along y becomes a negative distance in row pixels); see the index sketch below.
- Removed the factor of 2 applied to the calculated distance.

## General adaptation
- max_sum_2x2 now returns the 2x2 subcluster index relative to the cluster center, e.g. bottom-left, bottom-right.

## Added a proper test case
- Simulated a photon hit with a normal (Gaussian) energy distribution.
- Note: the test case for 2x2 clusters fails - a uniform photon hit distribution probably cannot be modeled by a normalized eta distribution for 2x2 clusters.
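A minimal index sketch (not part of this PR) of the row-major convention described above, shown for a 3x3 cluster; the names are illustrative and only the center-index formula is taken from the change itself:

```cpp
// Minimal sketch, not part of this PR: row-major layout of a cluster filled
// from top left to bottom right, data[row * ClusterSizeX + col].
constexpr int ClusterSizeX = 3, ClusterSizeY = 3;
constexpr int center = (ClusterSizeX / 2) + (ClusterSizeY / 2) * ClusterSizeX; // index 4
constexpr int above_center = center - ClusterSizeX; // index 1: one row up, i.e. +1 in Euclidean y
constexpr int below_center = center + ClusterSizeX; // index 7: one row down, i.e. -1 in Euclidean y
```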
2025-10-17 10:44:08 +02:00
6f10afbcdc Merge branch 'main' into fix/adapt_and_test_interpolation 2025-10-17 10:03:26 +02:00
e418986fd2 fix/roi_max (#237)
All checks were successful
Build on RHEL8 / build (push) Successful in 3m3s
Build on RHEL9 / build (push) Successful in 3m13s
ROI max should be incremented by 1 for all versions of the file.
2025-10-16 16:08:10 +02:00
723c8dd013 add nlohmann_json to support CMake find_package after 3.12 split 2025-10-16 15:30:43 +02:00
351f4626b3 roi max should be incremented by 1 for all versions of the file 2025-10-16 12:26:30 +02:00
516ef88d10 adresses SonarQube comments 2025-10-08 18:19:17 +02:00
5329be816e removed times 2 in calculated photon center distance 2025-10-08 17:01:38 +02:00
72a2604ca5 test for interpolation with simulated normal energy distribution 2025-10-08 16:35:52 +02:00
c78a73ebaf changed default CoordType in Cluster constructor in python bindings to uint16_t 2025-10-07 16:49:06 +02:00
77a9788891 changed eta interpolation to take into account photon center 2025-10-07 16:48:14 +02:00
c0ee17275e Bug/aare file reading (#230)
All checks were successful
Build on RHEL8 / build (push) Successful in 3m10s
Build on RHEL9 / build (push) Successful in 3m12s
MasterFile supports reading the new JSON master file format (backwards compatible
with older versions).
Multiple ROIs are not supported yet.
2025-10-02 10:05:11 +02:00
ad3ef88607 changed default DAC value in ScanParameters 2025-10-01 20:37:40 +02:00
f814b3f4e7 updated release notes 2025-10-01 20:30:25 +02:00
1f46266183 clang-format 2025-10-01 20:25:27 +02:00
d3d9f760b3 updated parse_json to parse new master json file 2025-10-01 20:17:37 +02:00
0891ffb1ee compile with POSITION_INDEPENDANT_CODE=On (#228)
All checks were successful
Build on RHEL9 / build (push) Successful in 3m17s
Build on RHEL8 / build (push) Successful in 3m20s
The Python bindings build a shared library and I can't link against
static libraries. Apparently I have to build with
CMAKE_POSITION_INDEPENDENT_CODE=On.
2025-09-30 17:39:43 +02:00
0b74bc25d5 enabled position independant code only for aare_core 2025-09-30 16:29:42 +02:00
3ec40fa809 Merge branch 'main' into fix/cmake_fix_compile_width_position_independent_code
All checks were successful
Build on RHEL8 / build (push) Successful in 3m18s
Build on RHEL9 / build (push) Successful in 3m49s
2025-09-30 10:58:35 +02:00
bce8e9d5fc Merge branch 'main' into fix/cmake_fix_compile_width_position_independent_code 2025-09-05 14:11:33 +02:00
4c1e276e2c compile with POSITION_INDEPENDANT_CODE=On 2025-09-05 14:02:26 +02:00
22 changed files with 1403 additions and 145 deletions

View File

@@ -412,6 +412,8 @@ target_link_libraries(
)
set_property(TARGET aare_core PROPERTY POSITION_INDEPENDENT_CODE ON)
if(AARE_TESTS)
target_compile_definitions(aare_core PRIVATE AARE_TESTS)
endif()
@@ -431,10 +433,6 @@ set_target_properties(aare_core PROPERTIES
PUBLIC_HEADER "${PUBLICHEADERS}"
)
if (AARE_PYTHON_BINDINGS)
set_property(TARGET aare_core PROPERTY POSITION_INDEPENDENT_CODE ON)
endif()
if(AARE_TESTS)
set(TestSources
${CMAKE_CURRENT_SOURCE_DIR}/src/algorithm.test.cpp
@@ -465,6 +463,7 @@ if(AARE_TESTS)
target_sources(tests PRIVATE ${TestSources} )
endif()
if(AARE_MASTER_PROJECT)
install(TARGETS aare_core aare_compiler_flags
EXPORT "${TARGETS_EXPORT_NAME}"
@@ -474,7 +473,6 @@ if(AARE_MASTER_PROJECT)
)
endif()
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
set(CMAKE_INSTALL_RPATH $ORIGIN)
set(CMAKE_BUILD_WITH_INSTALL_RPATH FALSE)

View File

@@ -1,5 +1,10 @@
# Release notes
### 2025.10.1
Bugfixes:
- File supports reading new master json file format (multiple ROI's not supported yet)
### 2025.8.22
@@ -43,3 +48,6 @@ Bugfixes:

View File

@@ -1,7 +1,8 @@
python:
- 3.11
- 3.12
- 3.13
# - 3.11
# - 3.12
# - 3.13
- 3.14
c_compiler:
- gcc # [linux]

View File

@@ -25,7 +25,7 @@ requirements:
host:
- python
- pip
- numpy=2.1
- numpy=2.3
- scikit-build-core
- pybind11 >=2.13.0
- matplotlib # needed in host to solve the environment for run
@@ -42,11 +42,11 @@ test:
- aare
requires:
- pytest
- boost-histogram
# - boost-histogram
source_files:
- python/tests
# - python/tests
commands:
- python -m pytest python/tests
# - python -m pytest python/tests
about:
summary: Data analysis library for hybrid pixel detectors from PSI

View File

@@ -13,4 +13,5 @@ dependencies:
- pybind11
- numpy
- matplotlib
- nlohmann_json

View File

@@ -7,10 +7,10 @@
namespace aare {
enum class corner : int {
cBottomLeft = 0,
cBottomRight = 1,
cTopLeft = 2,
cTopRight = 3
cTopLeft = 0,
cTopRight = 1,
cBottomLeft = 2,
cBottomRight = 3
};
enum class pixel : int {
@@ -58,90 +58,126 @@ template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
typename CoordType>
Eta2<T>
calculate_eta2(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cl) {
Eta2<T> eta{};
auto max_sum = cl.max_sum_2x2();
eta.sum = max_sum.first;
auto c = max_sum.second;
static_assert(ClusterSizeX > 1 && ClusterSizeY > 1);
Eta2<T> eta{};
size_t cluster_center_index =
(ClusterSizeX / 2) + (ClusterSizeY / 2) * ClusterSizeX;
size_t index_bottom_left_max_2x2_subcluster =
(int(c / (ClusterSizeX - 1))) * ClusterSizeX + c % (ClusterSizeX - 1);
auto max_sum = cl.max_sum_2x2();
eta.sum = max_sum.first;
int c = max_sum.second;
// calculate direction of gradient
// check that cluster center is in max subcluster
if (cluster_center_index != index_bottom_left_max_2x2_subcluster &&
cluster_center_index != index_bottom_left_max_2x2_subcluster + 1 &&
cluster_center_index !=
index_bottom_left_max_2x2_subcluster + ClusterSizeX &&
cluster_center_index !=
index_bottom_left_max_2x2_subcluster + ClusterSizeX + 1)
throw std::runtime_error("Photon center is not in max 2x2_subcluster");
if ((cluster_center_index - index_bottom_left_max_2x2_subcluster) %
ClusterSizeX ==
0) {
if ((cl.data[cluster_center_index + 1] +
// subcluster top right from center
switch (static_cast<corner>(c)) {
case (corner::cTopLeft):
if ((cl.data[cluster_center_index - 1] +
cl.data[cluster_center_index]) != 0)
eta.x = static_cast<double>(cl.data[cluster_center_index - 1]) /
static_cast<double>(cl.data[cluster_center_index - 1] +
cl.data[cluster_center_index]);
if ((cl.data[cluster_center_index - ClusterSizeX] +
cl.data[cluster_center_index]) != 0)
eta.y = static_cast<double>(
cl.data[cluster_center_index - ClusterSizeX]) /
static_cast<double>(
cl.data[cluster_center_index - ClusterSizeX] +
cl.data[cluster_center_index]);
eta.x = static_cast<double>(cl.data[cluster_center_index + 1]) /
static_cast<double>((cl.data[cluster_center_index + 1] +
cl.data[cluster_center_index]));
} else {
if ((cl.data[cluster_center_index] +
cl.data[cluster_center_index - 1]) != 0)
// dx = 0
// dy = 0
break;
case (corner::cTopRight):
if (cl.data[cluster_center_index] + cl.data[cluster_center_index + 1] !=
0)
eta.x = static_cast<double>(cl.data[cluster_center_index]) /
static_cast<double>((cl.data[cluster_center_index - 1] +
cl.data[cluster_center_index]));
}
if ((cluster_center_index - index_bottom_left_max_2x2_subcluster) /
ClusterSizeX <
1) {
assert(cluster_center_index + ClusterSizeX <
ClusterSizeX * ClusterSizeY); // suppress warning
static_cast<double>(cl.data[cluster_center_index] +
cl.data[cluster_center_index + 1]);
if ((cl.data[cluster_center_index - ClusterSizeX] +
cl.data[cluster_center_index]) != 0)
eta.y = static_cast<double>(
cl.data[cluster_center_index - ClusterSizeX]) /
static_cast<double>(
cl.data[cluster_center_index - ClusterSizeX] +
cl.data[cluster_center_index]);
// dx = 1
// dy = 0
break;
case (corner::cBottomLeft):
if ((cl.data[cluster_center_index - 1] +
cl.data[cluster_center_index]) != 0)
eta.x = static_cast<double>(cl.data[cluster_center_index - 1]) /
static_cast<double>(cl.data[cluster_center_index - 1] +
cl.data[cluster_center_index]);
if ((cl.data[cluster_center_index] +
cl.data[cluster_center_index + ClusterSizeX]) != 0)
eta.y = static_cast<double>(
cl.data[cluster_center_index + ClusterSizeX]) /
static_cast<double>(
(cl.data[cluster_center_index] +
cl.data[cluster_center_index + ClusterSizeX]));
} else {
if ((cl.data[cluster_center_index] +
cl.data[cluster_center_index - ClusterSizeX]) != 0)
eta.y = static_cast<double>(cl.data[cluster_center_index]) /
static_cast<double>(
(cl.data[cluster_center_index] +
cl.data[cluster_center_index - ClusterSizeX]));
cl.data[cluster_center_index] +
cl.data[cluster_center_index + ClusterSizeX]);
// dx = 0
// dy = 1
break;
case (corner::cBottomRight):
if (cl.data[cluster_center_index] + cl.data[cluster_center_index + 1] !=
0)
eta.x = static_cast<double>(cl.data[cluster_center_index]) /
static_cast<double>(cl.data[cluster_center_index] +
cl.data[cluster_center_index + 1]);
if ((cl.data[cluster_center_index] +
cl.data[cluster_center_index + ClusterSizeX]) != 0)
eta.y = static_cast<double>(cl.data[cluster_center_index]) /
static_cast<double>(
cl.data[cluster_center_index] +
cl.data[cluster_center_index + ClusterSizeX]);
// dx = 1
// dy = 1
break;
}
eta.c = c; // TODO only supported for 2x2 and 3x3 clusters -> at least no
// underyling enum class
eta.c = c;
return eta;
}
// TODO! Look up eta2 calculation - photon center should be top right corner
// TODO! Look up eta2 calculation - photon center should be bottom right corner
template <typename T>
Eta2<T> calculate_eta2(const Cluster<T, 2, 2, int16_t> &cl) {
Eta2<T> eta{};
if ((cl.data[0] + cl.data[1]) != 0)
eta.x = static_cast<double>(cl.data[1]) /
(cl.data[0] + cl.data[1]); // between (0,1) the closer to zero
eta.x = static_cast<double>(cl.data[2]) /
(cl.data[2] + cl.data[3]); // between (0,1) the closer to zero
// left value probably larger
if ((cl.data[0] + cl.data[2]) != 0)
eta.y = static_cast<double>(cl.data[2]) /
(cl.data[0] + cl.data[2]); // between (0,1) the closer to zero
eta.y = static_cast<double>(cl.data[1]) /
(cl.data[1] + cl.data[3]); // between (0,1) the closer to zero
// bottom value probably larger
eta.sum = cl.sum();
return eta;
}
// TODO generalize
template <typename T>
Eta2<T> calculate_eta2(const Cluster<T, 1, 2, int16_t> &cl) {
Eta2<T> eta{};
eta.x = 0;
eta.y = static_cast<double>(cl.data[0]) / cl.data[1];
eta.sum = cl.sum();
}
template <typename T>
Eta2<T> calculate_eta2(const Cluster<T, 2, 1, int16_t> &cl) {
Eta2<T> eta{};
eta.x = static_cast<double>(cl.data[0]) / cl.data[1];
eta.y = 0;
eta.sum = cl.sum();
}
// calculates Eta3 for 3x3 cluster based on code from analyze_cluster
// TODO only supported for 3x3 Clusters
template <typename T> Eta2<T> calculate_eta3(const Cluster<T, 3, 3> &cl) {
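A hedged numeric illustration of the updated 2x2 specialization above; the cluster is filled from top left to bottom right, so data = {top-left, top-right, bottom-left, bottom-right}, and the values below are illustrative rather than taken from the diff:
// Illustrative sketch, not part of the diff.
double data[4] = {1.0, 2.0, 3.0, 4.0};        // {top-left, top-right, bottom-left, bottom-right}
double eta_x = data[2] / (data[2] + data[3]); // bottom row: 3 / (3 + 4)
double eta_y = data[1] / (data[1] + data[3]); // right column: 2 / (2 + 4)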

View File

@@ -8,7 +8,6 @@
#pragma once
#include "logger.hpp"
#include <algorithm>
#include <array>
#include <cstdint>
@@ -19,7 +18,7 @@ namespace aare {
// requires clause c++20 maybe update
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
typename CoordType = int16_t>
typename CoordType = uint16_t>
struct Cluster {
static_assert(std::is_arithmetic_v<T>, "T needs to be an arithmetic type");
@@ -39,6 +38,13 @@ struct Cluster {
T sum() const { return std::accumulate(data.begin(), data.end(), T{}); }
// TODO: handle 1 dimensional clusters
// TODO: change int to corner
/**
* @brief sum of 2x2 subcluster with highest energy
* @return photon energy of subcluster, 2x2 subcluster index relative to
* cluster center
*/
std::pair<T, int> max_sum_2x2() const {
if constexpr (cluster_size_x == 3 && cluster_size_y == 3) {
@@ -54,17 +60,38 @@ struct Cluster {
} else if constexpr (cluster_size_x == 2 && cluster_size_y == 2) {
return std::make_pair(data[0] + data[1] + data[2] + data[3], 0);
} else {
constexpr size_t num_2x2_subclusters =
(ClusterSizeX - 1) * (ClusterSizeY - 1);
constexpr size_t cluster_center_index =
(ClusterSizeX / 2) + (ClusterSizeY / 2) * ClusterSizeX;
std::array<T, num_2x2_subclusters> sum_2x2_subcluster;
for (size_t i = 0; i < ClusterSizeY - 1; ++i) {
for (size_t j = 0; j < ClusterSizeX - 1; ++j)
sum_2x2_subcluster[i * (ClusterSizeX - 1) + j] =
data[i * ClusterSizeX + j] +
data[i * ClusterSizeX + j + 1] +
data[(i + 1) * ClusterSizeX + j] +
data[(i + 1) * ClusterSizeX + j + 1];
std::array<T, 4> sum_2x2_subcluster{0};
// subcluster top left from center
sum_2x2_subcluster[0] =
data[cluster_center_index] + data[cluster_center_index - 1] +
data[cluster_center_index - ClusterSizeX] +
data[cluster_center_index - 1 - ClusterSizeX];
// subcluster top right from center
if (ClusterSizeX > 2) {
sum_2x2_subcluster[1] =
data[cluster_center_index] +
data[cluster_center_index + 1] +
data[cluster_center_index - ClusterSizeX] +
data[cluster_center_index - ClusterSizeX + 1];
}
// subcluster bottom left from center
if (ClusterSizeY > 2) {
sum_2x2_subcluster[2] =
data[cluster_center_index] +
data[cluster_center_index - 1] +
data[cluster_center_index + ClusterSizeX] +
data[cluster_center_index + ClusterSizeX - 1];
}
// subcluster bottom right from center
if (ClusterSizeX > 2 && ClusterSizeY > 2) {
sum_2x2_subcluster[3] =
data[cluster_center_index] +
data[cluster_center_index + 1] +
data[cluster_center_index + ClusterSizeX] +
data[cluster_center_index + ClusterSizeX + 1];
}
int index = std::max_element(sum_2x2_subcluster.begin(),
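A hedged worked example of the center-relative sums above, assuming a 3x3 cluster (so ClusterSizeX = ClusterSizeY = 3 and cluster_center_index = 4); the data values 0..8 are illustrative:
// Illustrative sketch, not part of the diff: the four 2x2 sums around the center.
int data[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};                // row-major 3x3 cluster
int top_left     = data[4] + data[3] + data[1] + data[0]; // subcluster top left of center: 8
int top_right    = data[4] + data[5] + data[1] + data[2]; // subcluster top right of center: 12
int bottom_left  = data[4] + data[3] + data[7] + data[6]; // subcluster bottom left of center: 20
int bottom_right = data[4] + data[5] + data[7] + data[8]; // subcluster bottom right of center: 24
// max_sum_2x2() would return {24, 3}, i.e. the bottom-right subcluster (corner::cBottomRight).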

View File

@@ -136,7 +136,7 @@ class ClusterFinder {
// don't have a photon
int i = 0;
for (int ir = -dy; ir < dy + has_center_pixel_y; ir++) {
for (int ic = -dx; ic < dx + has_center_pixel_y; ic++) {
for (int ic = -dx; ic < dx + has_center_pixel_x; ic++) {
if (ix + ic >= 0 && ix + ic < frame.shape(1) &&
iy + ir >= 0 && iy + ir < frame.shape(0)) {
CT tmp =
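A hedged sketch of the corrected loop bounds; the fix presumably only matters for clusters whose x and y sizes have different parity, since only then do the two has_center_pixel flags differ. For a 3x3 cluster (dx = dy = 1, both flags 1) both loops cover the offsets -1, 0, 1:
// Illustrative sketch, not part of the diff.
int dx = 1, dy = 1, has_center_pixel_x = 1, has_center_pixel_y = 1;
for (int ir = -dy; ir < dy + has_center_pixel_y; ir++) {     // rows    -1, 0, 1
    for (int ic = -dx; ic < dx + has_center_pixel_x; ic++) { // columns -1, 0, 1 (previously used the y flag)
    }
}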

View File

@@ -69,26 +69,27 @@ Interpolator::interpolate(const ClusterVector<ClusterType> &clusters) {
// cBottomRight = 1,
// cTopLeft = 2,
// cTopRight = 3
// TODO: could also chaneg the sign of the eta calculation
switch (static_cast<corner>(eta.c)) {
case corner::cTopLeft:
dX = -1.;
dY = 0;
dX = 0.0;
dY = 0.0;
break;
case corner::cTopRight:;
dX = 0;
dY = 0;
dX = 1.0;
dY = 0.0;
break;
case corner::cBottomLeft:
dX = -1.;
dY = -1.;
dX = 0.0;
dY = 1.0;
break;
case corner::cBottomRight:
dX = 0.;
dY = -1.;
dX = 1.0;
dY = 1.0;
break;
}
photon.x += m_ietax(ix, iy, ie) * 2 + dX;
photon.y += m_ietay(ix, iy, ie) * 2 + dY;
photon.x -= m_ietax(ix, iy, ie) - dX;
photon.y -= m_ietay(ix, iy, ie) - dY;
photons.push_back(photon);
}
} else if (clusters.cluster_size_x() == 2 ||
@@ -112,10 +113,11 @@ Interpolator::interpolate(const ClusterVector<ClusterType> &clusters) {
auto ix = last_smaller(m_etabinsx, eta.x);
auto iy = last_smaller(m_etabinsy, eta.y);
photon.x += m_ietax(ix, iy, ie) *
2; // eta goes between 0 and 1 but we could move the hit
// anywhere in the 2x2
photon.y += m_ietay(ix, iy, ie) * 2;
// TODO: why 2?
photon.x -=
m_ietax(ix, iy, ie); // eta goes between 0 and 1 but we could
// move the hit anywhere in the 2x2
photon.y -= m_ietay(ix, iy, ie);
photons.push_back(photon);
}
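A hedged arithmetic sketch of the new offset handling above; the coordinates and the eta lookup values (x, y, ietax, ietay) are assumed for illustration, not taken from the diff:
// Illustrative sketch, not part of the diff: corner::cBottomRight case.
double x = 10.0, y = 20.0;       // assumed cluster position in pixel coordinates
double ietax = 0.3, ietay = 0.7; // assumed values looked up from the inverted eta maps
double dX = 1.0, dY = 1.0;       // offsets from the switch above
x -= ietax - dX;                 // 10.0 - (0.3 - 1.0) = 10.7
y -= ietay - dY;                 // 20.0 - (0.7 - 1.0) = 20.3
// i.e. the hit moves toward the bottom-right subcluster by the complement of eta.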

View File

@@ -42,14 +42,16 @@ class RawFileNameComponents {
class ScanParameters {
bool m_enabled = false;
std::string m_dac;
DACIndex m_dac{};
int m_start = 0;
int m_stop = 0;
int m_step = 0;
// TODO! add settleTime, requires string to time conversion
int64_t m_settleTime = 0; // [ns]
public:
ScanParameters(const std::string &par);
ScanParameters(const bool enabled, const DACIndex dac, const int start,
const int stop, const int step, const int64_t settleTime);
ScanParameters() = default;
ScanParameters(const ScanParameters &) = default;
ScanParameters &operator=(const ScanParameters &) = default;
@@ -57,8 +59,9 @@ class ScanParameters {
int start() const;
int stop() const;
int step() const;
const std::string &dac() const;
DACIndex dac() const;
bool enabled() const;
int64_t settleTime() const;
void increment_stop();
};
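A hedged usage sketch of the two construction paths after this change; the object names are illustrative, and the 100000 ns settle time is an assumed conversion of "100us" (the string constructor does not parse the settle time yet, per the TODO above):
// Illustrative sketch, not part of the diff.
ScanParameters from_string("[enabled\ndac dac 4\nstart 500\nstop 2200\nstep 5\nsettleTime 100us\n]");
ScanParameters from_fields(true, DACIndex::DAC_4, 500, 2200, 5, 100000); // settle time in ns (assumed)
// from_string.dac() == DACIndex::DAC_4; from_string.settleTime() is presumably still 0 (not parsed yet).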

View File

@@ -215,6 +215,122 @@ enum class DetectorType {
Unknown
};
/**
* @brief Enum class to define the Digital to Analog converter
* The values are the same as in slsDetectorPackage
*/
enum DACIndex {
DAC_0,
DAC_1,
DAC_2,
DAC_3,
DAC_4,
DAC_5,
DAC_6,
DAC_7,
DAC_8,
DAC_9,
DAC_10,
DAC_11,
DAC_12,
DAC_13,
DAC_14,
DAC_15,
DAC_16,
DAC_17,
VSVP,
VTRIM,
VRPREAMP,
VRSHAPER,
VSVN,
VTGSTV,
VCMP_LL,
VCMP_LR,
VCAL,
VCMP_RL,
RXB_RB,
RXB_LB,
VCMP_RR,
VCP,
VCN,
VISHAPER,
VTHRESHOLD,
IO_DELAY,
VREF_DS,
VOUT_CM,
VIN_CM,
VREF_COMP,
VB_COMP,
VDD_PROT,
VIN_COM,
VREF_PRECH,
VB_PIXBUF,
VB_DS,
VREF_H_ADC,
VB_COMP_FE,
VB_COMP_ADC,
VCOM_CDS,
VREF_RSTORE,
VB_OPA_1ST,
VREF_COMP_FE,
VCOM_ADC1,
VREF_L_ADC,
VREF_CDS,
VB_CS,
VB_OPA_FD,
VCOM_ADC2,
VCASSH,
VTH2,
VRSHAPER_N,
VIPRE_OUT,
VTH3,
VTH1,
VICIN,
VCAS,
VCAL_N,
VIPRE,
VCAL_P,
VDCSH,
VBP_COLBUF,
VB_SDA,
VCASC_SFP,
VIPRE_CDS,
IBIAS_SFP,
ADC_VPP,
HIGH_VOLTAGE,
TEMPERATURE_ADC,
TEMPERATURE_FPGA,
TEMPERATURE_FPGAEXT,
TEMPERATURE_10GE,
TEMPERATURE_DCDC,
TEMPERATURE_SODL,
TEMPERATURE_SODR,
TEMPERATURE_FPGA2,
TEMPERATURE_FPGA3,
TRIMBIT_SCAN,
V_POWER_A = 100,
V_POWER_B = 101,
V_POWER_C = 102,
V_POWER_D = 103,
V_POWER_IO = 104,
V_POWER_CHIP = 105,
I_POWER_A = 106,
I_POWER_B = 107,
I_POWER_C = 108,
I_POWER_D = 109,
I_POWER_IO = 110,
V_LIMIT = 111,
SLOW_ADC0 = 1000,
SLOW_ADC1,
SLOW_ADC2,
SLOW_ADC3,
SLOW_ADC4,
SLOW_ADC5,
SLOW_ADC6,
SLOW_ADC7,
SLOW_ADC_TEMP
};
enum class TimingMode { Auto, Trigger };
enum class FrameDiscardPolicy { NoDiscard, Discard, DiscardPartial };
@@ -231,6 +347,15 @@ template <> FrameDiscardPolicy StringTo(const std::string & /*mode*/);
using DataTypeVariants = std::variant<uint16_t, uint32_t>;
constexpr uint16_t ADC_MASK = 0x3FFF; // used to mask out the gain bits in Jungfrau
constexpr uint16_t ADC_MASK =
0x3FFF; // used to mask out the gain bits in Jungfrau
/**
* @brief Convert a string to a DACIndex
* @param arg string representation of the dacIndex
* @return DACIndex
* @throw invalid argument error if the string does not match any DACIndex
*/
template <> DACIndex StringTo(const std::string &arg);
} // namespace aare
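A hedged usage sketch of the conversion declared above; the variable names are illustrative, and the string spellings are taken from the implementation shown further down in this change set:
// Illustrative sketch, not part of the diff.
DACIndex vth = StringTo<DACIndex>("vthreshold"); // DACIndex::VTHRESHOLD
DACIndex hv = StringTo<DACIndex>("highvoltage"); // DACIndex::HIGH_VOLTAGE
// StringTo<DACIndex>("not_a_dac") would throw std::invalid_argument.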

View File

@@ -24,7 +24,7 @@ void define_Cluster(py::module &m, const std::string &typestr) {
py::class_<Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType>>(
m, class_name.c_str(), py::buffer_protocol())
.def(py::init([](uint8_t x, uint8_t y,
.def(py::init([](CoordType x, CoordType y,
py::array_t<Type, py::array::forcecast> data) {
py::buffer_info buf_info = data.request();
Cluster<Type, ClusterSizeX, ClusterSizeY, CoordType> cluster;

View File

@@ -44,14 +44,14 @@ void define_ClusterFile(py::module &m, const std::string &typestr) {
auto v = new ClusterVector<ClusterType>(self.read_frame());
return v;
})
.def("set_roi", &ClusterFile<ClusterType>::set_roi,
py::arg("roi"))
.def("set_roi", &ClusterFile<ClusterType>::set_roi, py::arg("roi"))
.def(
"set_noise_map",
[](ClusterFile<ClusterType> &self, py::array_t<int32_t> noise_map) {
auto view = make_view_2d(noise_map);
self.set_noise_map(view);
}, py::arg("noise_map"))
},
py::arg("noise_map"))
.def("set_gain_map",
[](ClusterFile<ClusterType> &self, py::array_t<double> gain_map) {
@@ -84,11 +84,19 @@ template <typename Type, uint8_t CoordSizeX, uint8_t CoordSizeY,
typename CoordType = uint16_t>
void register_calculate_eta(py::module &m) {
using ClusterType = Cluster<Type, CoordSizeX, CoordSizeY, CoordType>;
m.def("calculate_eta2",
[](const aare::ClusterVector<ClusterType> &clusters) {
auto eta2 = new NDArray<double, 2>(calculate_eta2(clusters));
return return_image_data(eta2);
});
m.def("calculate_eta2", [](const aare::Cluster<Type, CoordSizeX, CoordSizeY,
CoordType> &cluster) {
auto eta2 = calculate_eta2(cluster);
// TODO return proper eta class
return py::make_tuple(eta2.x, eta2.y, eta2.sum);
});
}
#pragma GCC diagnostic pop

View File

@@ -87,6 +87,8 @@ PYBIND11_MODULE(_aare, m) {
DEFINE_CLUSTER_BINDINGS(double, 9, 9, uint16_t, d);
DEFINE_CLUSTER_BINDINGS(float, 9, 9, uint16_t, f);
// DEFINE_CLUSTER_BINDINGS(double, 2, 1, uint16_t, d);
define_3x3_reduction<int, 3, 3, uint16_t>(m);
define_3x3_reduction<double, 3, 3, uint16_t>(m);
define_3x3_reduction<float, 3, 3, uint16_t>(m);

View File

@@ -53,8 +53,8 @@ def test_Interpolator():
assert interpolated_photons.size == 1
assert interpolated_photons[0]["x"] == -1
assert interpolated_photons[0]["y"] == -1
assert interpolated_photons[0]["x"] == 0
assert interpolated_photons[0]["y"] == 0
assert interpolated_photons[0]["energy"] == 4 #eta_sum = 4, dx, dy = -1,-1 m_ietax = 0, m_ietay = 0
clustervector = _aare.ClusterVector_Cluster2x2i()
@@ -84,7 +84,7 @@ def test_calculate_eta():
assert eta2[0,0] == 0.5
assert eta2[0,1] == 0.5
assert eta2[1,0] == 0.5
assert eta2[1,1] == 0.6 #1/5
assert eta2[1,1] == 0.4 #2/5
def test_cluster_finder():
"""Test ClusterFinder"""

View File

@@ -0,0 +1,148 @@
import pytest
import numpy as np
import boost_histogram as bh
import pickle
from scipy.stats import multivariate_normal
from aare import Interpolator, calculate_eta2
from aare._aare import ClusterVector_Cluster2x2d, Cluster2x2d, Cluster3x3d, ClusterVector_Cluster3x3d
from conftest import test_data_path
pixel_width = 1e-4
values = np.arange(0.5*pixel_width, 0.1, pixel_width)
num_pixels = values.size
X, Y = np.meshgrid(values, values)
data_points = np.stack([X.ravel(), Y.ravel()], axis=1)
variance = 10*pixel_width
covariance_matrix = np.array([[variance, 0],[0, variance]])
def create_photon_hit_with_gaussian_distribution(mean, covariance_matrix, data_points):
gaussian = multivariate_normal(mean=mean, cov=covariance_matrix)
probability_values = gaussian.pdf(data_points)
return (probability_values.reshape(X.shape)).round() #python bindings only support frame types of uint16_t
def photon_hit_in_euclidean_space(cluster_center, pixels_per_superpixel, photon_hit):
scaled_photon_hit_x = cluster_center - (1 - photon_hit[0][0])*pixels_per_superpixel*pixel_width
scaled_photon_hit_y = cluster_center - (1 - photon_hit[0][1])*pixels_per_superpixel*pixel_width
return (scaled_photon_hit_x, scaled_photon_hit_y)
def create_2x2cluster_from_frame(frame, pixels_per_superpixel):
return Cluster2x2d(1, 1, np.array([frame[0:pixels_per_superpixel, 0:pixels_per_superpixel].sum(),
frame[0:pixels_per_superpixel, pixels_per_superpixel:2*pixels_per_superpixel].sum(),
frame[pixels_per_superpixel:2*pixels_per_superpixel, 0:pixels_per_superpixel].sum(),
frame[pixels_per_superpixel:2*pixels_per_superpixel, pixels_per_superpixel:2*pixels_per_superpixel].sum()], dtype=np.float64))
def create_3x3cluster_from_frame(frame, pixels_per_superpixel):
return Cluster3x3d(1, 1, np.array([frame[0:pixels_per_superpixel, 0:pixels_per_superpixel].sum(),
frame[0:pixels_per_superpixel, pixels_per_superpixel:2*pixels_per_superpixel].sum(),
frame[0:pixels_per_superpixel, 2*pixels_per_superpixel:3*pixels_per_superpixel].sum(),
frame[pixels_per_superpixel:2*pixels_per_superpixel, 0:pixels_per_superpixel].sum(),
frame[pixels_per_superpixel:2*pixels_per_superpixel, pixels_per_superpixel:2*pixels_per_superpixel].sum(),
frame[pixels_per_superpixel:2*pixels_per_superpixel, 2*pixels_per_superpixel:3*pixels_per_superpixel].sum(),
frame[2*pixels_per_superpixel:3*pixels_per_superpixel, 0:pixels_per_superpixel].sum(),
frame[2*pixels_per_superpixel:3*pixels_per_superpixel, pixels_per_superpixel:2*pixels_per_superpixel].sum(),
frame[2*pixels_per_superpixel:3*pixels_per_superpixel, 2*pixels_per_superpixel:3*pixels_per_superpixel].sum()], dtype=np.float64))
def calculate_eta_distribution(num_frames, pixels_per_superpixel, random_number_generator, cluster_2x2 = True):
hist = bh.Histogram(
bh.axis.Regular(100, -0.2, 1.2),
bh.axis.Regular(100, -0.2, 1.2), bh.axis.Regular(1, 0, num_pixels*num_pixels*1/(variance*2*np.pi)))
for _ in range(0, num_frames):
mean_x = random_number_generator.uniform(pixels_per_superpixel*pixel_width, 2*pixels_per_superpixel*pixel_width)
mean_y = random_number_generator.uniform(pixels_per_superpixel*pixel_width, 2*pixels_per_superpixel*pixel_width)
frame = create_photon_hit_with_gaussian_distribution(np.array([mean_x, mean_y]), variance, data_points)
cluster = None
if cluster_2x2:
cluster = create_2x2cluster_from_frame(frame, pixels_per_superpixel)
else:
cluster = create_3x3cluster_from_frame(frame, pixels_per_superpixel)
eta2 = calculate_eta2(cluster)
hist.fill(eta2[0], eta2[1], eta2[2])
return hist
@pytest.mark.withdata
def test_interpolation_of_2x2_cluster(test_data_path):
"""Test Interpolation of 2x2 cluster from Photon hit with Gaussian Distribution"""
#TODO maybe better to compute in test instead of loading - depends on eta
"""
filename = test_data_path/"eta_distributions"/"eta_distribution_2x2cluster_gaussian.pkl"
with open(filename, "rb") as f:
eta_distribution = pickle.load(f)
"""
num_frames = 1000
pixels_per_superpixel = int(num_pixels*0.5)
random_number_generator = np.random.default_rng(42)
eta_distribution = calculate_eta_distribution(num_frames, pixels_per_superpixel, random_number_generator)
interpolation = Interpolator(eta_distribution, eta_distribution.axes[0].edges[:-1], eta_distribution.axes[1].edges[:-1], eta_distribution.axes[2].edges[:-1])
#actual photon hit
mean = 1.2*pixels_per_superpixel*pixel_width
mean = np.array([mean, mean])
frame = create_photon_hit_with_gaussian_distribution(mean, covariance_matrix, data_points)
cluster = create_2x2cluster_from_frame(frame, pixels_per_superpixel)
clustervec = ClusterVector_Cluster2x2d()
clustervec.push_back(cluster)
interpolated_photon = interpolation.interpolate(clustervec)
assert interpolated_photon.size == 1
cluster_center = 1.5*pixels_per_superpixel*pixel_width
scaled_photon_hit = photon_hit_in_euclidean_space(cluster_center, pixels_per_superpixel, interpolated_photon)
assert (np.linalg.norm(scaled_photon_hit - mean) < np.linalg.norm(np.array([cluster_center, cluster_center] - mean)))
@pytest.mark.withdata
def test_interpolation_of_3x3_cluster(test_data_path):
"""Test Interpolation of 3x3 Cluster from Photon hit with Gaussian Distribution"""
#TODO maybe better to compute in test instead of loading - depends on eta
"""
filename = test_data_path/"eta_distributions"/"eta_distribution_3x3cluster_gaussian.pkl"
with open(filename, "rb") as f:
eta_distribution = pickle.load(f)
"""
num_frames = 1000
pixels_per_superpixel = int(num_pixels/3)
random_number_generator = np.random.default_rng(42)
eta_distribution = calculate_eta_distribution(num_frames, pixels_per_superpixel, random_number_generator, False)
interpolation = Interpolator(eta_distribution, eta_distribution.axes[0].edges[:-1], eta_distribution.axes[1].edges[:-1], eta_distribution.axes[2].edges[:-1])
#actual photon hit
mean = 1.2*pixels_per_superpixel*pixel_width
mean = np.array([mean, mean])
frame = create_photon_hit_with_gaussian_distribution(mean, covariance_matrix, data_points)
cluster = create_3x3cluster_from_frame(frame, pixels_per_superpixel)
clustervec = ClusterVector_Cluster3x3d()
clustervec.push_back(cluster)
interpolated_photon = interpolation.interpolate(clustervec)
assert interpolated_photon.size == 1
cluster_center = 1.5*pixels_per_superpixel*pixel_width
scaled_photon_hit = photon_hit_in_euclidean_space(cluster_center, pixels_per_superpixel, interpolated_photon)
assert (np.linalg.norm(scaled_photon_hit - mean) < np.linalg.norm(np.array([cluster_center, cluster_center] - mean)))

File diff suppressed because one or more lines are too long

View File

@@ -279,7 +279,8 @@ TEST_CASE("Read cluster from multiple frame file", "[.with-data]") {
}
}
TEST_CASE("Write cluster with potential padding", "[.with-data][.ClusterFile]") {
TEST_CASE("Write cluster with potential padding",
"[.with-data][.ClusterFile]") {
using ClusterType = Cluster<double, 3, 3>;
@@ -290,7 +291,7 @@ TEST_CASE("Write cluster with potential padding", "[.with-data][.ClusterFile]")
ClusterFile<ClusterType> file(fpath, 1000, "w");
ClusterVector<ClusterType> clustervec(2);
int16_t coordinate = 5;
uint16_t coordinate = 5;
clustervec.push_back(ClusterType{
coordinate, coordinate, {0., 0., 0., 0., 0., 0., 0., 0., 0.}});
clustervec.push_back(ClusterType{

View File

@@ -99,7 +99,8 @@ TEST_CASE("Read data from a jungfrau 500k single port raw file",
}
TEST_CASE("Read frame numbers from a raw file", "[.with-data]") {
auto fpath = test_data_path() / "raw/eiger" / "eiger_500k_16bit_master_0.json";
auto fpath =
test_data_path() / "raw/eiger" / "eiger_500k_16bit_master_0.json";
REQUIRE(std::filesystem::exists(fpath));
// we know this file has 3 frames with frame numbers 14, 15, 16
@@ -288,8 +289,7 @@ TEST_CASE("check find_geometry", "[.with-data]") {
}
}
TEST_CASE("Open multi module file with ROI",
"[.with-data]") {
TEST_CASE("Open multi module file with ROI", "[.with-data]") {
auto fpath = test_data_path() / "raw/SingleChipROI/Data_master_0.json";
REQUIRE(std::filesystem::exists(fpath));

View File

@@ -64,6 +64,12 @@ const std::string &RawFileNameComponents::base_name() const {
const std::string &RawFileNameComponents::ext() const { return m_ext; }
int RawFileNameComponents::file_index() const { return m_file_index; }
ScanParameters::ScanParameters(const bool enabled, const DACIndex dac,
const int start, const int stop, const int step,
const int64_t settleTime)
: m_enabled(enabled), m_dac(dac), m_start(start), m_stop(stop),
m_step(step), m_settleTime(settleTime){};
// "[enabled\ndac dac 4\nstart 500\nstop 2200\nstep 5\nsettleTime 100us\n]"
ScanParameters::ScanParameters(const std::string &par) {
std::istringstream iss(par.substr(1, par.size() - 2));
@@ -72,7 +78,7 @@ ScanParameters::ScanParameters(const std::string &par) {
if (line == "enabled") {
m_enabled = true;
} else if (line.find("dac") != std::string::npos) {
m_dac = line.substr(4);
m_dac = StringTo<DACIndex>(line.substr(4));
} else if (line.find("start") != std::string::npos) {
m_start = std::stoi(line.substr(6));
} else if (line.find("stop") != std::string::npos) {
@@ -87,8 +93,9 @@ int ScanParameters::start() const { return m_start; }
int ScanParameters::stop() const { return m_stop; }
void ScanParameters::increment_stop() { m_stop += 1; }
int ScanParameters::step() const { return m_step; }
const std::string &ScanParameters::dac() const { return m_dac; }
DACIndex ScanParameters::dac() const { return m_dac; }
bool ScanParameters::enabled() const { return m_enabled; }
int64_t ScanParameters::settleTime() const { return m_settleTime; }
RawMasterFile::RawMasterFile(const std::filesystem::path &fpath)
: m_fnc(fpath) {
@@ -170,6 +177,7 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) {
std::ifstream ifs(fpath);
json j;
ifs >> j;
double v = j["Version"];
m_version = fmt::format("{:.1f}", v);
@@ -181,7 +189,9 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) {
j["Geometry"]["x"]}; // TODO: isnt it only available for version > 7.1?
// - try block default should be 1x1
m_image_size_in_bytes = j["Image Size in bytes"];
m_image_size_in_bytes =
v < 8.0 ? j["Image Size in bytes"] : j["Image Size"];
m_frames_in_file = j["Frames in File"];
m_pixels_y = j["Pixels"]["y"];
m_pixels_x = j["Pixels"]["x"];
@@ -206,7 +216,6 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) {
} catch (const json::out_of_range &e) {
// keep the optional empty
}
// ----------------------------------------------------------------
// Special treatment of analog flag because of Moench03
try {
@@ -227,7 +236,6 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) {
m_analog_flag = 0;
}
//-----------------------------------------------------------------
try {
m_quad = j.at("Quad");
} catch (const json::out_of_range &e) {
@@ -239,7 +247,6 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) {
// }catch (const json::out_of_range &e) {
// m_adc_mask = 0;
// }
try {
int digital_flag = j.at("Digital Flag");
if (digital_flag) {
@@ -248,7 +255,6 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) {
} catch (const json::out_of_range &e) {
// keep the optional empty
}
try {
m_transceiver_flag = j.at("Transceiver Flag");
if (m_transceiver_flag) {
@@ -257,10 +263,20 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) {
} catch (const json::out_of_range &e) {
// keep the optional empty
}
try {
if (v < 8.0) {
std::string scan_parameters = j.at("Scan Parameters");
m_scan_parameters = ScanParameters(scan_parameters);
} else {
auto json_obj = j.at("Scan Parameters");
m_scan_parameters = ScanParameters(
json_obj.at("enable").get<int>(),
static_cast<DACIndex>(json_obj.at("dacInd").get<int>()),
json_obj.at("start offset").get<int>(),
json_obj.at("stop offset").get<int>(),
json_obj.at("step size").get<int>(),
json_obj.at("dac settle time ns").get<int>());
}
if (v < 7.21) {
m_scan_parameters
.increment_stop(); // adjust for endpoint being included
@@ -268,6 +284,7 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) {
} catch (const json::out_of_range &e) {
// not a scan
}
try {
m_udp_interfaces_per_module = {j.at("Number of UDP Interfaces"), 1};
} catch (const json::out_of_range &e) {
@@ -277,35 +294,36 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) {
m_udp_interfaces_per_module = {1, 2};
}
}
try {
ROI tmp_roi;
if (v < 8.0) {
auto obj = j.at("Receiver Roi");
tmp_roi.xmin = obj.at("xmin");
tmp_roi.xmax = obj.at("xmax");
tmp_roi.ymin = obj.at("ymin");
tmp_roi.ymax = obj.at("ymax");
} else {
// TODO: for now only handle single ROI
auto obj = j.at("Receiver Rois");
tmp_roi.xmin = obj[0].at("xmin");
tmp_roi.xmax = obj[0].at("xmax");
tmp_roi.ymin = obj[0].at("ymin");
tmp_roi.ymax = obj[0].at("ymax");
}
// if any of the values are set update the roi
if (tmp_roi.xmin != 4294967295 || tmp_roi.xmax != 4294967295 ||
tmp_roi.ymin != 4294967295 || tmp_roi.ymax != 4294967295) {
if (v < 7.21) {
tmp_roi.xmax++; // why is it updated
tmp_roi.xmax++;
tmp_roi.ymax++;
}
m_roi = tmp_roi;
}
} catch (const json::out_of_range &e) {
std::cout << e.what() << std::endl;
LOG(TLogLevel::logERROR) << e.what() << std::endl;
// leave the optional empty
}
// if we have an roi we need to update the geometry for the subfiles
if (m_roi) {
}
// Update detector type for Moench
// TODO! How does this work with old .raw master files?
#ifdef AARE_VERBOSE

View File

@@ -51,7 +51,7 @@ TEST_CASE("Parse scan parameters") {
ScanParameters s("[enabled\ndac dac 4\nstart 500\nstop 2200\nstep "
"5\nsettleTime 100us\n]");
REQUIRE(s.enabled());
REQUIRE(s.dac() == "dac 4");
REQUIRE(s.dac() == DACIndex::DAC_4);
REQUIRE(s.start() == 500);
REQUIRE(s.stop() == 2200);
REQUIRE(s.step() == 5);
@@ -60,7 +60,7 @@ TEST_CASE("Parse scan parameters") {
TEST_CASE("A disabled scan") {
ScanParameters s("[disabled]");
REQUIRE_FALSE(s.enabled());
REQUIRE(s.dac() == "");
REQUIRE(s.dac() == DACIndex::DAC_0);
REQUIRE(s.start() == 0);
REQUIRE(s.stop() == 0);
REQUIRE(s.step() == 0);
@@ -68,7 +68,7 @@ TEST_CASE("A disabled scan") {
TEST_CASE("Parse a master file in .json format", "[.integration]") {
auto fpath =
test_data_path() / "jungfrau" / "jungfrau_single_master_0.json";
test_data_path() / "raw" / "jungfrau" / "jungfrau_single_master_0.json";
REQUIRE(std::filesystem::exists(fpath));
RawMasterFile f(fpath);
@@ -224,6 +224,41 @@ TEST_CASE("Parse a master file in .raw format", "[.integration]") {
// Packets Caught Mask : 64 bytes
}
TEST_CASE("Parse a master file in new .json format",
"[.integration][.width-data]") {
auto file_path =
test_data_path() / "raw" / "newmythen03" / "run_87_master_0.json";
REQUIRE(std::filesystem::exists(file_path));
RawMasterFile f(file_path);
// Version : 8.0
REQUIRE(f.version() == "8.0");
REQUIRE(f.detector_type() == DetectorType::Mythen3);
// Timing Mode : auto
REQUIRE(f.timing_mode() == TimingMode::Auto);
// Geometry : [2, 1]
REQUIRE(f.geometry().col == 2);
REQUIRE(f.geometry().row == 1);
// Image Size : 5120 bytes
REQUIRE(f.image_size_in_bytes() == 5120);
REQUIRE(f.scan_parameters().enabled() == false);
REQUIRE(f.scan_parameters().dac() == DACIndex::DAC_0);
REQUIRE(f.scan_parameters().start() == 0);
REQUIRE(f.scan_parameters().stop() == 0);
REQUIRE(f.scan_parameters().step() == 0);
REQUIRE(f.scan_parameters().settleTime() == 0);
auto roi = f.roi().value();
REQUIRE(roi.xmin == 0);
REQUIRE(roi.xmax == 2559);
REQUIRE(roi.ymin == -1);
REQUIRE(roi.ymax == -1);
}
TEST_CASE("Read eiger master file", "[.integration]") {
auto fpath = test_data_path() / "eiger" / "eiger_500k_32bit_master_0.json";
REQUIRE(std::filesystem::exists(fpath));

View File

@@ -115,4 +115,186 @@ template <> FrameDiscardPolicy StringTo(const std::string &arg) {
// template <> TimingMode StringTo<TimingMode>(std::string mode);
template <> DACIndex StringTo(const std::string &arg) {
if (arg == "dac 0")
return DACIndex::DAC_0;
else if (arg == "dac 1")
return DACIndex::DAC_1;
else if (arg == "dac 2")
return DACIndex::DAC_2;
else if (arg == "dac 3")
return DACIndex::DAC_3;
else if (arg == "dac 4")
return DACIndex::DAC_4;
else if (arg == "dac 5")
return DACIndex::DAC_5;
else if (arg == "dac 6")
return DACIndex::DAC_6;
else if (arg == "dac 7")
return DACIndex::DAC_7;
else if (arg == "dac 8")
return DACIndex::DAC_8;
else if (arg == "dac 9")
return DACIndex::DAC_9;
else if (arg == "dac 10")
return DACIndex::DAC_10;
else if (arg == "dac 11")
return DACIndex::DAC_11;
else if (arg == "dac 12")
return DACIndex::DAC_12;
else if (arg == "dac 13")
return DACIndex::DAC_13;
else if (arg == "dac 14")
return DACIndex::DAC_14;
else if (arg == "dac 15")
return DACIndex::DAC_15;
else if (arg == "dac 16")
return DACIndex::DAC_16;
else if (arg == "dac 17")
return DACIndex::DAC_17;
else if (arg == "vsvp")
return DACIndex::VSVP;
else if (arg == "vtrim")
return DACIndex::VTRIM;
else if (arg == "vrpreamp")
return DACIndex::VRPREAMP;
else if (arg == "vrshaper")
return DACIndex::VRSHAPER;
else if (arg == "vsvn")
return DACIndex::VSVN;
else if (arg == "vtgstv")
return DACIndex::VTGSTV;
else if (arg == "vcmp_ll")
return DACIndex::VCMP_LL;
else if (arg == "vcmp_lr")
return DACIndex::VCMP_LR;
else if (arg == "vcal")
return DACIndex::VCAL;
else if (arg == "vcmp_rl")
return DACIndex::VCMP_RL;
else if (arg == "rxb_rb")
return DACIndex::RXB_RB;
else if (arg == "rxb_lb")
return DACIndex::RXB_LB;
else if (arg == "vcmp_rr")
return DACIndex::VCMP_RR;
else if (arg == "vcp")
return DACIndex::VCP;
else if (arg == "vcn")
return DACIndex::VCN;
else if (arg == "vishaper")
return DACIndex::VISHAPER;
else if (arg == "vthreshold")
return DACIndex::VTHRESHOLD;
else if (arg == "vref_ds")
return DACIndex::VREF_DS;
else if (arg == "vout_cm")
return DACIndex::VOUT_CM;
else if (arg == "vin_cm")
return DACIndex::VIN_CM;
else if (arg == "vref_comp")
return DACIndex::VREF_COMP;
else if (arg == "vb_comp")
return DACIndex::VB_COMP;
else if (arg == "vdd_prot")
return DACIndex::VDD_PROT;
else if (arg == "vin_com")
return DACIndex::VIN_COM;
else if (arg == "vref_prech")
return DACIndex::VREF_PRECH;
else if (arg == "vb_pixbuf")
return DACIndex::VB_PIXBUF;
else if (arg == "vb_ds")
return DACIndex::VB_DS;
else if (arg == "vref_h_adc")
return DACIndex::VREF_H_ADC;
else if (arg == "vb_comp_fe")
return DACIndex::VB_COMP_FE;
else if (arg == "vb_comp_adc")
return DACIndex::VB_COMP_ADC;
else if (arg == "vcom_cds")
return DACIndex::VCOM_CDS;
else if (arg == "vref_rstore")
return DACIndex::VREF_RSTORE;
else if (arg == "vb_opa_1st")
return DACIndex::VB_OPA_1ST;
else if (arg == "vref_comp_fe")
return DACIndex::VREF_COMP_FE;
else if (arg == "vcom_adc1")
return DACIndex::VCOM_ADC1;
else if (arg == "vref_l_adc")
return DACIndex::VREF_L_ADC;
else if (arg == "vref_cds")
return DACIndex::VREF_CDS;
else if (arg == "vb_cs")
return DACIndex::VB_CS;
else if (arg == "vb_opa_fd")
return DACIndex::VB_OPA_FD;
else if (arg == "vcom_adc2")
return DACIndex::VCOM_ADC2;
else if (arg == "vcassh")
return DACIndex::VCASSH;
else if (arg == "vth2")
return DACIndex::VTH2;
else if (arg == "vrshaper_n")
return DACIndex::VRSHAPER_N;
else if (arg == "vipre_out")
return DACIndex::VIPRE_OUT;
else if (arg == "vth3")
return DACIndex::VTH3;
else if (arg == "vth1")
return DACIndex::VTH1;
else if (arg == "vicin")
return DACIndex::VICIN;
else if (arg == "vcas")
return DACIndex::VCAS;
else if (arg == "vcal_n")
return DACIndex::VCAL_N;
else if (arg == "vipre")
return DACIndex::VIPRE;
else if (arg == "vcal_p")
return DACIndex::VCAL_P;
else if (arg == "vdcsh")
return DACIndex::VDCSH;
else if (arg == "vbp_colbuf")
return DACIndex::VBP_COLBUF;
else if (arg == "vb_sda")
return DACIndex::VB_SDA;
else if (arg == "vcasc_sfp")
return DACIndex::VCASC_SFP;
else if (arg == "vipre_cds")
return DACIndex::VIPRE_CDS;
else if (arg == "ibias_sfp")
return DACIndex::IBIAS_SFP;
else if (arg == "trimbits")
return DACIndex::TRIMBIT_SCAN;
else if (arg == "highvoltage")
return DACIndex::HIGH_VOLTAGE;
else if (arg == "iodelay")
return DACIndex::IO_DELAY;
else if (arg == "temp_adc")
return DACIndex::TEMPERATURE_ADC;
else if (arg == "temp_fpga")
return DACIndex::TEMPERATURE_FPGA;
else if (arg == "temp_fpgaext")
return DACIndex::TEMPERATURE_FPGAEXT;
else if (arg == "temp_10ge")
return DACIndex::TEMPERATURE_10GE;
else if (arg == "temp_dcdc")
return DACIndex::TEMPERATURE_DCDC;
else if (arg == "temp_sodl")
return DACIndex::TEMPERATURE_SODL;
else if (arg == "temp_sodr")
return DACIndex::TEMPERATURE_SODR;
else if (arg == "temp_fpgafl")
return DACIndex::TEMPERATURE_FPGA2;
else if (arg == "temp_fpgafr")
return DACIndex::TEMPERATURE_FPGA3;
else if (arg == "temp_slowadc")
return DACIndex::SLOW_ADC_TEMP;
else
throw std::invalid_argument("Could not decode DACIndex from: \"" + arg +
"\"");
}
} // namespace aare