Compare commits


1 Commit

Author SHA1 Message Date
Erik Fröjdh
9bf21244b0 added custom io for my302 2025-11-27 17:17:43 +01:00
33 changed files with 252 additions and 430 deletions

View File

@@ -1,11 +1,5 @@
# Release notes
## head
### New Features:
- Expanding 24 to 32 bit data
- Decoding digital data from Mythen 302
### 2025.11.21

View File

@@ -12,19 +12,15 @@ set(SPHINX_SOURCE ${CMAKE_CURRENT_SOURCE_DIR}/src)
set(SPHINX_BUILD ${CMAKE_CURRENT_BINARY_DIR})
file(GLOB_RECURSE SPHINX_SOURCE_FILES
CONFIGURE_DEPENDS
RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}/src"
"${CMAKE_CURRENT_SOURCE_DIR}/src/*.rst"
)
file(GLOB SPHINX_SOURCE_FILES CONFIGURE_DEPENDS "src/*.rst")
foreach(relpath IN LISTS SPHINX_SOURCE_FILES)
set(src "${CMAKE_CURRENT_SOURCE_DIR}/src/${relpath}")
set(dst "${SPHINX_BUILD}/src/${relpath}")
message(STATUS "Copying ${src} to ${dst}")
configure_file("${src}" "${dst}" COPYONLY)
endforeach()
foreach(filename ${SPHINX_SOURCE_FILES})
get_filename_component(fname ${filename} NAME)
message(STATUS "Copying ${filename} to ${SPHINX_BUILD}/src/${fname}")
configure_file(${filename} "${SPHINX_BUILD}/src/${fname}")
endforeach(filename ${SPHINX_SOURCE_FILES})
configure_file(
"${CMAKE_CURRENT_SOURCE_DIR}/conf.py.in"

View File

@@ -22,14 +22,21 @@ AARE
.. toctree::
   :caption: Python API
   :maxdepth: 3
   :hidden:
   pycalibration
   python/cluster/index
   python/file/index
   pyFit
   :maxdepth: 1
   pyFile
   pycalibration
   pyCtbRawFile
   pyClusterFile
   pyClusterVector
   pyCluster
   pyInterpolation
   pyJungfrauDataFile
   pyRawFile
   pyRawMasterFile
   pyVarClusterFinder
   pyFit
.. toctree::

docs/src/pyCtbRawFile.rst (new file, 11 lines)
View File

@@ -0,0 +1,11 @@
CtbRawFile
============
.. py:currentmodule:: aare
.. autoclass:: CtbRawFile
   :members:
   :undoc-members:
   :show-inheritance:
   :inherited-members:

View File

@@ -21,8 +21,8 @@ Supported are the following :math:`\eta`-functions:
.. py:currentmodule:: aare
.. image:: ../../../figures/Eta2x2.png
   :target: ../../../figures/Eta2x2.png
.. image:: ../figures/Eta2x2.png
   :target: ../figures/Eta2x2.png
   :width: 650px
   :align: center
   :alt: Eta2x2
@@ -35,8 +35,8 @@ Supported are the following :math:`\eta`-functions:
.. autofunction:: calculate_eta2
.. image:: ../../../figures/Eta2x2Full.png
   :target: ../../../figures/Eta2x2Full.png
.. image:: ../figures/Eta2x2Full.png
   :target: ../figures/Eta2x2Full.png
   :width: 650px
   :align: center
   :alt: Eta2x2 Full
@@ -49,8 +49,8 @@ Supported are the following :math:`\eta`-functions:
.. autofunction:: calculate_full_eta2
.. image:: ../../../figures/Eta3x3.png
   :target: ../../../figures/Eta3x3.png
.. image:: ../figures/Eta3x3.png
   :target: ../figures/Eta3x3.png
   :width: 650px
   :align: center
   :alt: Eta3x3
@@ -63,8 +63,8 @@ Supported are the following :math:`\eta`-functions:
.. autofunction:: calculate_eta3
.. image:: ../../../figures/Eta3x3Cross.png
   :target: ../../../figures/Eta3x3Cross.png
.. image:: ../figures/Eta3x3Cross.png
   :target: ../figures/Eta3x3Cross.png
   :width: 650px
   :align: center
   :alt: Cross Eta3x3
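A minimal sketch of calling one of these functions, following the calls used in the test suite (the import path and cluster values are assumptions for illustration):

.. code:: python

    import numpy as np
    from aare import calculate_eta2
    from aare._aare import Cluster3x3i

    cluster = Cluster3x3i(1, 1, np.ones(9, dtype=np.int32))
    eta2 = calculate_eta2(cluster)
    print(eta2.x, eta2.y, eta2.sum)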

View File

@@ -1,11 +0,0 @@
Cluster & Interpolation
==========================
.. toctree::
   :caption: Cluster & Interpolation
   :maxdepth: 1
   pyCluster
   pyClusterVector
   pyInterpolation
   pyVarClusterFinder

View File

@@ -1,14 +0,0 @@
File I/O
===================
.. toctree::
   :caption: File I/O
   :maxdepth: 1
   pyClusterFile
   pyCtbRawFile
   pyFile
   pyJungfrauDataFile
   pyRawFile
   pyRawMasterFile
   pyTransform

View File

@@ -1,25 +0,0 @@
CtbRawFile
============
Read analog, digital and transceiver samples from a raw file containing
data from the Chip Test Board. Uses :mod:`aare.transform` to decode the
data into a format that the user can work with.
.. code:: python

    import aare
    from aare.transform import Mythen302Transform

    my302 = Mythen302Transform(offset = 4)
    with aare.CtbRawFile(fname, transform = my302) as f:
        for header, data in f:
            ...  # do something with the data

.. py:currentmodule:: aare
.. autoclass:: CtbRawFile
   :members:
   :undoc-members:
   :show-inheritance:
   :inherited-members:

View File

@@ -1,27 +0,0 @@
Transform
===================
The transform module takes data read by :class:`aare.CtbRawFile` and decodes it
into a useful image format. Depending on the detector, it supports both analog
and digital samples.

For convenience, the following transform objects are defined with short names:
.. code:: python

    moench05 = Moench05Transform()
    moench05_1g = Moench05Transform1g()
    moench05_old = Moench05TransformOld()
    matterhorn02 = Matterhorn02Transform()
    adc_sar_04_64to16 = AdcSar04Transform64to16()
    adc_sar_05_64to16 = AdcSar05Transform64to16()
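These objects can be passed directly to :class:`aare.CtbRawFile`. A minimal sketch, assuming the module-level names above and a CTB raw file path ``fname`` for the matching detector:

.. code:: python

    import aare
    from aare.transform import matterhorn02

    with aare.CtbRawFile(fname, transform = matterhorn02) as f:
        for header, image in f:
            ...  # image is the decoded frame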
.. py:currentmodule:: aare
.. automodule:: aare.transform
   :members:
   :undoc-members:
   :private-members:
   :special-members: __call__
   :show-inheritance:
   :inherited-members:

View File

@@ -1,12 +1,11 @@
// SPDX-License-Identifier: MPL-2.0
#pragma once
#include "aare/defs.hpp"
#include <aare/NDView.hpp>
#include <cstdint>
#include <vector>
namespace aare {
uint16_t adc_sar_05_decode64to16(uint64_t input);
uint16_t adc_sar_04_decode64to16(uint64_t input);
void adc_sar_05_decode64to16(NDView<uint64_t, 2> input,
@@ -14,25 +13,6 @@ void adc_sar_05_decode64to16(NDView<uint64_t, 2> input,
void adc_sar_04_decode64to16(NDView<uint64_t, 2> input,
NDView<uint16_t, 2> output);
/**
* @brief Shift a 32-bit unsigned integer right by offset and return
* the lower 24 bits as a 32-bit integer
* @param input 32-bit input value
* @param offset (should be in range 0-7 to allow for full 24 bits)
* @return uint32_t
*/
uint32_t mask32to24bits(uint32_t input, BitOffset offset={});
/**
* @brief Expand 24-bit values in an 8-bit buffer to 32-bit unsigned integers.
* Used for detectors with 24-bit counters in combination with the CTB
*
* @param input View of the 24-bit data as uint8_t (no native 24-bit data type exists)
* @param output Destination of the expanded data (32-bit, unsigned)
* @param offset Offset within the first byte to where the data starts (0-7 bits)
*/
void expand24to32bit(NDView<uint8_t,1> input, NDView<uint32_t,1> output, BitOffset offset={});
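A short usage sketch, mirroring the unit tests below (buffer values are illustrative; the trailing extra byte is required because a non-zero offset is used):

uint8_t buffer[] = {0x00, 0x00, 0xFF,
                    0xFF, 0xFF, 0x00,
                    0x00, 0xFF, 0x00, 0x00};
aare::NDView<uint8_t, 1> input(&buffer[0], {10});
aare::NDArray<uint32_t, 1> out({3});
aare::expand24to32bit(input, out.view(), aare::BitOffset(4)); // out(0) == 0xFFF000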
/**
* @brief Apply custom weights to a 16-bit input value. Will sum up
* weights[i]**i for each bit i that is set in the input value.

View File

@@ -372,15 +372,4 @@ constexpr uint16_t ADC_MASK =
*/
template <> DACIndex StringTo(const std::string &arg);
class BitOffset{
uint8_t m_offset{};
public:
BitOffset() = default;
explicit BitOffset(uint32_t offset);
uint8_t value() const {return m_offset;}
bool operator==(const BitOffset& other) const;
bool operator<(const BitOffset& other) const;
};
} // namespace aare

View File

@@ -39,6 +39,8 @@ set( PYTHON_FILES
aare/transform.py
aare/ScanParameters.py
aare/utils.py
aare/experimental/__init__.py
aare/experimental/custom_io.py
)

View File

View File

@@ -0,0 +1,58 @@
import numpy as np
n_counters = 64*3
bitfield_size = 64
header_dt = [('frameNumber',np.uint64),
('expLength',np.uint32),
('packetNumber', np.uint32),
('bunchId', np.uint64),
('timestamp', np.uint64),
('modId', np.uint16),
('row', np.uint16),
('col', np.uint16),
('reserved', np.uint16),
('debug', np.uint32),
('roundRNumber', np.uint16),
('detType', np.uint8),
('version', np.uint8)]
def ExtractBits(raw_data, dr=24, bits = (17,6)):
    bits = np.uint64(bits)
    data = np.zeros(0, dtype = np.uint64)
    for bit in bits:
        tmp = (raw_data >> bit) & np.uint64(1)
        data = np.hstack((data, tmp))
    #Shift the bits to the right place
    for i in np.arange(dr, dtype = np.uint64):
        data[i::dr] = data[i::dr] << i
    data = data.reshape(data.size//dr, dr)
    return data.sum(axis = 1)

def read_my302_file(fname, dr=24, bits = (17,6),
                    offset=48, tail = 72, n_frames=1):
    header = np.zeros(n_frames, header_dt)
    data = np.zeros((n_frames, n_counters), dtype = np.uint64)
    with open(fname, 'rb') as f:
        for i in range(n_frames):
            header[i], raw_data = _read_my302_frame(f, offset, tail, dr)
            data[i] = ExtractBits(raw_data, dr=dr, bits = bits)
    return header, data

def _read_my302_frame(f, offset, tail, dr):
    header = np.fromfile(f, count=1, dtype = header_dt)
    f.seek(bitfield_size+offset, 1)
    data = np.fromfile(f, count = int(n_counters*dr/2), dtype = np.uint64)
    f.seek(tail, 1)
    return header, data
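A minimal usage sketch of the new helper (the file name and frame count are invented for illustration; the 64x3 reshape assumes the same channel/counter ordering as Mythen302Transform):

header, data = read_my302_file("my302_run_0.raw", n_frames=10)
counters = data.reshape(-1, 64, 3)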

View File

@@ -49,43 +49,6 @@ class Matterhorn02Transform:
else:
return np.take(data.view(np.uint16), self.pixel_map[0:counters])
class Mythen302Transform:
    """
    Transform Mythen 302 test chip data from a buffer of bytes (uint8_t)
    to a uint32 numpy array of [64,3] representing channels and counters.
    Assumes data taken with rx_dbitlist 17 6, rx_dbitreorder 1 and Digital
    Samples = 2310 [(64x3x24)/2 + some extra]

    .. note::
        The offset is in number of bits 0-7
    """

    _n_channels = 64
    _n_counters = 3

    def __init__(self, offset=4):
        self.offset = offset

    def __call__(self, data : np.ndarray):
        """
        Transform buffer of data to a [64,3] np.ndarray of uint32.

        Parameters
        ----------
        data : np.ndarray
            Expected dtype: uint8

        Returns
        -------
        image : np.ndarray
            uint32 array of shape (64, 3)
        """
        res = _aare.decode_my302(data, self.offset)
        res = res.reshape(
            Mythen302Transform._n_channels, Mythen302Transform._n_counters
        )
        return res
#on import generate the pixel maps to avoid doing it every time
moench05 = Moench05Transform()

View File

@@ -96,69 +96,6 @@ void define_ctb_raw_file_io_bindings(py::module &m) {
return output;
});
m.def("expand24to32bit",
[](py::array_t<uint8_t, py::array::c_style | py::array::forcecast>
&input, uint32_t offset){
aare::BitOffset bitoff(offset);
py::buffer_info buf = input.request();
constexpr uint32_t bytes_per_channel = 3; //24 bit
py::array_t<uint32_t> output(buf.size/bytes_per_channel);
NDView<uint8_t, 1> input_view(input.mutable_data(),
{input.size()});
NDView<uint32_t, 1> output_view(output.mutable_data(),
{output.size()});
aare::expand24to32bit(input_view, output_view, bitoff);
return output;
});
m.def("decode_my302",
[](py::array_t<uint8_t, py::array::c_style | py::array::forcecast>
&input, uint32_t offset){
// Physical layout of the chip
constexpr size_t channels = 64;
constexpr size_t counters = 3;
constexpr size_t bytes_per_channel = 3; //24 bit
constexpr int n_outputs = 2;
ssize_t expected_size = channels*counters*bytes_per_channel;
//If we have an offset we need one extra byte per output
aare::BitOffset bitoff(offset);
if(bitoff.value())
expected_size += n_outputs;
if (input.size() != expected_size) {
throw std::runtime_error(
fmt::format("{} Expected an input size of {} bytes. Called "
"with input size of {}",
LOCATION, expected_size, input.size()));
}
py::buffer_info buf = input.request();
py::array_t<uint32_t> output(channels * counters);
for (int i = 0; i!=n_outputs; ++i){
auto step = input.size()/n_outputs;
auto out_step = output.size()/n_outputs;
NDView<uint8_t, 1> input_view(input.mutable_data()+step*i,
{input.size()/n_outputs});
NDView<uint32_t, 1> output_view(output.mutable_data()+out_step*i,
{output.size()/n_outputs});
aare::expand24to32bit(input_view, output_view, bitoff);
}
return output;
});
py::class_<CtbRawFile>(m, "CtbRawFile")
.def(py::init<const std::filesystem::path &>())
.def("read_frame",

View File

@@ -44,8 +44,8 @@ def test_Interpolator():
etacube = np.zeros(shape=[29, 29, 19], dtype=np.float64)
interpolator = _aare.Interpolator(etacube, xbins, ybins, ebins)
assert interpolator.get_ietax().shape == (29,29,19)
assert interpolator.get_ietay().shape == (29,29,19)
assert interpolator.get_ietax().shape == (30,30,20)
assert interpolator.get_ietay().shape == (30,30,20)
clustervector = _aare.ClusterVector_Cluster3x3i()
cluster = _aare.Cluster3x3i(1,1, np.ones(9, dtype=np.int32))

View File

@@ -0,0 +1,146 @@
# SPDX-License-Identifier: MPL-2.0
import pytest
import numpy as np
import boost_histogram as bh
import pickle
from scipy.stats import multivariate_normal
from aare import Interpolator, calculate_eta2
from aare._aare import ClusterVector_Cluster2x2d, Cluster2x2d, Cluster3x3d, ClusterVector_Cluster3x3d
from conftest import test_data_path
pixel_width = 1e-4
values = np.arange(0.5*pixel_width, 0.1, pixel_width)
num_pixels = values.size
X, Y = np.meshgrid(values, values)
data_points = np.stack([X.ravel(), Y.ravel()], axis=1)
variance = 10*pixel_width
covariance_matrix = np.array([[variance, 0],[0, variance]])
def create_photon_hit_with_gaussian_distribution(mean, covariance_matrix, data_points):
    gaussian = multivariate_normal(mean=mean, cov=covariance_matrix)
    probability_values = gaussian.pdf(data_points)
    return (probability_values.reshape(X.shape)).round() #python bindings only support frame types of uint16_t

def create_2x2cluster_from_frame(frame, pixels_per_superpixel):
    return Cluster2x2d(1, 1, np.array([frame[0:pixels_per_superpixel, 0:pixels_per_superpixel].sum(),
                                       frame[0:pixels_per_superpixel, pixels_per_superpixel:2*pixels_per_superpixel].sum(),
                                       frame[pixels_per_superpixel:2*pixels_per_superpixel, 0:pixels_per_superpixel].sum(),
                                       frame[pixels_per_superpixel:2*pixels_per_superpixel, pixels_per_superpixel:2*pixels_per_superpixel].sum()], dtype=np.float64))

def create_3x3cluster_from_frame(frame, pixels_per_superpixel):
    return Cluster3x3d(1, 1, np.array([frame[0:pixels_per_superpixel, 0:pixels_per_superpixel].sum(),
                                       frame[0:pixels_per_superpixel, pixels_per_superpixel:2*pixels_per_superpixel].sum(),
                                       frame[0:pixels_per_superpixel, 2*pixels_per_superpixel:3*pixels_per_superpixel].sum(),
                                       frame[pixels_per_superpixel:2*pixels_per_superpixel, 0:pixels_per_superpixel].sum(),
                                       frame[pixels_per_superpixel:2*pixels_per_superpixel, pixels_per_superpixel:2*pixels_per_superpixel].sum(),
                                       frame[pixels_per_superpixel:2*pixels_per_superpixel, 2*pixels_per_superpixel:3*pixels_per_superpixel].sum(),
                                       frame[2*pixels_per_superpixel:3*pixels_per_superpixel, 0:pixels_per_superpixel].sum(),
                                       frame[2*pixels_per_superpixel:3*pixels_per_superpixel, pixels_per_superpixel:2*pixels_per_superpixel].sum(),
                                       frame[2*pixels_per_superpixel:3*pixels_per_superpixel, 2*pixels_per_superpixel:3*pixels_per_superpixel].sum()], dtype=np.float64))

def calculate_eta_distribution(num_frames, pixels_per_superpixel, random_number_generator, bin_edges_x = bh.axis.Regular(100, -0.2, 1.2), bin_edges_y = bh.axis.Regular(100, -0.2, 1.2), cluster_2x2 = True):
    hist = bh.Histogram(
        bin_edges_x,
        bin_edges_y, bh.axis.Regular(1, 0, num_pixels*num_pixels*1/(variance*2*np.pi)))
    for _ in range(0, num_frames):
        mean_x = random_number_generator.uniform(pixels_per_superpixel*pixel_width, 2*pixels_per_superpixel*pixel_width)
        mean_y = random_number_generator.uniform(pixels_per_superpixel*pixel_width, 2*pixels_per_superpixel*pixel_width)
        frame = create_photon_hit_with_gaussian_distribution(np.array([mean_x, mean_y]), variance, data_points)
        cluster = None
        if cluster_2x2:
            cluster = create_2x2cluster_from_frame(frame, pixels_per_superpixel)
        else:
            cluster = create_3x3cluster_from_frame(frame, pixels_per_superpixel)
        eta2 = calculate_eta2(cluster)
        hist.fill(eta2.x, eta2.y, eta2.sum)
    return hist
@pytest.mark.withdata
def test_interpolation_of_2x2_cluster(test_data_path):
    """Test Interpolation of 2x2 cluster from Photon hit with Gaussian Distribution"""
    #TODO maybe better to compute in test instead of loading - depends on eta
    """
    filename = test_data_path/"eta_distributions"/"eta_distribution_2x2cluster_gaussian.pkl"
    with open(filename, "rb") as f:
        eta_distribution = pickle.load(f)
    """
    num_frames = 1000
    pixels_per_superpixel = int(num_pixels*0.5)
    random_number_generator = np.random.default_rng(42)
    eta_distribution = calculate_eta_distribution(num_frames, pixels_per_superpixel, random_number_generator, bin_edges_x = bh.axis.Regular(100, -0.1, 0.6), bin_edges_y = bh.axis.Regular(100, -0.1, 0.6))
    interpolation = Interpolator(eta_distribution, eta_distribution.axes[0].edges, eta_distribution.axes[1].edges, eta_distribution.axes[2].edges)
    #actual photon hit
    mean = 1.2*pixels_per_superpixel*pixel_width
    mean = np.array([mean, mean])
    frame = create_photon_hit_with_gaussian_distribution(mean, covariance_matrix, data_points)
    cluster = create_2x2cluster_from_frame(frame, pixels_per_superpixel)
    clustervec = ClusterVector_Cluster2x2d()
    clustervec.push_back(cluster)
    interpolated_photon = interpolation.interpolate(clustervec)
    assert interpolated_photon.size == 1
    cluster_center = 1.5*pixels_per_superpixel*pixel_width
    scaled_photon_hit = (interpolated_photon[0][0]*pixels_per_superpixel*pixel_width, interpolated_photon[0][1]*pixels_per_superpixel*pixel_width)
    assert (np.linalg.norm(scaled_photon_hit - mean) < np.linalg.norm(np.array([cluster_center, cluster_center]) - mean))
@pytest.mark.withdata
def test_interpolation_of_3x3_cluster(test_data_path):
    """Test Interpolation of 3x3 Cluster from Photon hit with Gaussian Distribution"""
    #TODO maybe better to compute in test instead of loading - depends on eta
    """
    filename = test_data_path/"eta_distributions"/"eta_distribution_3x3cluster_gaussian.pkl"
    with open(filename, "rb") as f:
        eta_distribution = pickle.load(f)
    """
    num_frames = 1000
    pixels_per_superpixel = int(num_pixels/3)
    random_number_generator = np.random.default_rng(42)
    eta_distribution = calculate_eta_distribution(num_frames, pixels_per_superpixel, random_number_generator, bin_edges_x = bh.axis.Regular(100, -0.1, 1.1), bin_edges_y = bh.axis.Regular(100, -0.1, 1.1), cluster_2x2 = False)
    interpolation = Interpolator(eta_distribution, eta_distribution.axes[0].edges, eta_distribution.axes[1].edges, eta_distribution.axes[2].edges)
    #actual photon hit
    mean_x = (1 + 0.8)*pixels_per_superpixel*pixel_width
    mean_y = (1 + 0.2)*pixels_per_superpixel*pixel_width
    mean = np.array([mean_x, mean_y])
    frame = create_photon_hit_with_gaussian_distribution(mean, covariance_matrix, data_points)
    cluster = create_3x3cluster_from_frame(frame, pixels_per_superpixel)
    clustervec = ClusterVector_Cluster3x3d()
    clustervec.push_back(cluster)
    interpolated_photon = interpolation.interpolate(clustervec)
    assert interpolated_photon.size == 1
    cluster_center = 1.5*pixels_per_superpixel*pixel_width
    scaled_photon_hit = (interpolated_photon[0][0]*pixels_per_superpixel*pixel_width, interpolated_photon[0][1]*pixels_per_superpixel*pixel_width)
    assert (np.linalg.norm(scaled_photon_hit - mean) < np.linalg.norm(np.array([cluster_center, cluster_center]) - mean))

View File

@@ -5,6 +5,8 @@ import pytest
import pytest_check as check
import numpy as np
import boost_histogram as bh
import pickle
from scipy.stats import multivariate_normal
from aare import Interpolator, calculate_eta2, calculate_cross_eta3, calculate_full_eta2, calculate_eta3
from aare import ClusterFile

View File

@@ -61,6 +61,7 @@ void CtbRawFile::find_subfiles() {
while (std::filesystem::exists(m_master.data_fname(0, m_num_subfiles)))
m_num_subfiles++;
fmt::print("Found {} subfiles\n", m_num_subfiles);
}
void CtbRawFile::open_data_file(size_t subfile_index) {

View File

@@ -5,7 +5,6 @@
#include <iostream>
#include <numeric>
#include <vector>
#include <cstddef>
using aare::NDView;
using aare::Shape;
@@ -260,12 +259,4 @@ TEST_CASE("Create a view over a vector") {
REQUIRE(v.shape()[0] == 12);
REQUIRE(v[0] == 0);
REQUIRE(v[11] == 11);
}
TEST_CASE("NDView over byte"){
std::vector<std::byte> buf(5);
auto v = aare::make_view(buf);
REQUIRE(v.shape()[0] == 5);
REQUIRE(v[0] == std::byte{0});
}

View File

@@ -1,6 +1,5 @@
// SPDX-License-Identifier: MPL-2.0
#include "aare/decode.hpp"
#include <fmt/format.h>
#include <cmath>
namespace aare {
@@ -106,49 +105,4 @@ void apply_custom_weights(NDView<uint16_t, 1> input, NDView<double, 1> output,
}
}
uint32_t mask32to24bits(uint32_t input, BitOffset offset){
constexpr uint32_t mask24bits{0xFFFFFF};
return (input >> offset.value()) & mask24bits;
}
void expand24to32bit(NDView<uint8_t,1> input, NDView<uint32_t,1> output, BitOffset bit_offset){
ssize_t bytes_per_channel = 3; //24bit
ssize_t min_input_size = output.size()*bytes_per_channel;
//if we have an offset we need one more byte in the input data
if (bit_offset.value())
min_input_size += 1;
if (input.size() < min_input_size)
throw std::runtime_error(fmt::format(
"{} Mismatch between input and output size. Output "
"size of {} with bit offset {} requires an input of at least {} "
"bytes. Called with input size: {} output size: {}",
LOCATION, output.size(), bit_offset.value(), min_input_size, input.size(), output.size()));
auto* in = input.data();
if(bit_offset.value()){
//If there is a bit_offset we copy 4 bytes and then
//mask out the correct ones.
for (auto& v : output){
uint32_t val{};
std::memcpy(&val, in, sizeof(val));
v = mask32to24bits(val, bit_offset);
in += bytes_per_channel;
}
}else{
//If there is no offset we can directly copy the bits
//without masking
for (auto& v : output){
uint32_t val{};
std::memcpy(&val, in, 3);
v = val;
in += bytes_per_channel;
}
}
}
} // namespace aare

View File

@@ -7,8 +7,6 @@
using Catch::Matchers::WithinAbs;
#include <vector>
using aare::BitOffset;
TEST_CASE("test_adc_sar_05_decode64to16") {
uint64_t input = 0;
uint16_t output = aare::adc_sar_05_decode64to16(input);
@@ -73,94 +71,4 @@ TEST_CASE("test_apply_custom_weights") {
input = 0b111;
output = aare::apply_custom_weights(input, weights);
CHECK_THAT(output, WithinAbs(6.34, 0.001));
}
TEST_CASE("Mask 32 bit unsigned integer to 24 bit"){
//any number less than 2**24 (16777216) should be the same
CHECK(aare::mask32to24bits(0)==0);
CHECK(aare::mask32to24bits(19)==19);
CHECK(aare::mask32to24bits(29875)==29875);
CHECK(aare::mask32to24bits(1092177)==1092177);
CHECK(aare::mask32to24bits(0xFFFF)==0xFFFF);
CHECK(aare::mask32to24bits(0xFFFFFFFF)==0xFFFFFF);
// The offset specifies how many of the lowest bits (0-7)
// should be discarded before masking
CHECK(aare::mask32to24bits(0xFFFF, BitOffset(4))==0xFFF);
CHECK(aare::mask32to24bits(0xFF0000d9)==0xd9);
CHECK(aare::mask32to24bits(0xFF000d9F, BitOffset(4))==0xF000d9);
CHECK(aare::mask32to24bits(16777217)==1);
CHECK(aare::mask32to24bits(15,BitOffset(7))==0);
//Highest bit and lowest 4 bits set: the offset shifts out the low bits
//and the 24-bit mask drops the high bit, so the result is 0
CHECK(aare::mask32to24bits(0x8000000f,BitOffset(7))==0);
}
TEST_CASE("Expand container with 24 bit data to 32"){
{
uint8_t buffer[] = {
0x00, 0x00, 0x00,
0x00, 0x00, 0x00,
0x00, 0x00, 0x00,
};
aare::NDView<uint8_t, 1> input(&buffer[0], {9});
aare::NDArray<uint32_t, 1> out({3});
aare::expand24to32bit(input, out.view());
CHECK(out(0) == 0);
CHECK(out(1) == 0);
CHECK(out(2) == 0);
}
{
uint8_t buffer[] = {
0x0F, 0x00, 0x00,
0xFF, 0x00, 0x00,
0xFF, 0xFF, 0xFF,
};
aare::NDView<uint8_t, 1> input(&buffer[0], {9});
aare::NDArray<uint32_t, 1> out({3});
aare::expand24to32bit(input, out.view());
CHECK(out(0) == 0xF);
CHECK(out(1) == 0xFF);
CHECK(out(2) == 0xFFFFFF);
}
{
uint8_t buffer[] = {
0x00, 0x00, 0xFF,
0xFF, 0xFF, 0x00,
0x00, 0xFF, 0x00,
};
aare::NDView<uint8_t, 1> input(&buffer[0], {9});
aare::NDArray<uint32_t, 1> out({3});
aare::expand24to32bit(input, out.view());
CHECK(out(0) == 0xFF0000);
CHECK(out(1) == 0xFFFF);
CHECK(out(2) == 0xFF00);
REQUIRE_THROWS(aare::expand24to32bit(input, out.view(), BitOffset(4)));
}
{
//For use with offset we need an extra byte
uint8_t buffer[] = {
0x00, 0x00, 0xFF,
0xFF, 0xFF, 0x00,
0x00, 0xFF, 0x00, 0x00
};
aare::NDView<uint8_t, 1> input(&buffer[0], {10});
aare::NDArray<uint32_t, 1> out({3}); //still output.size == 3
aare::expand24to32bit(input, out.view(), BitOffset(4));
CHECK(out(0) == 0xFFF000);
CHECK(out(1) == 0xFFF);
CHECK(out(2) == 0xFF0);
}
}

View File

@@ -298,22 +298,4 @@ template <> DACIndex StringTo(const std::string &arg) {
"\"");
}
BitOffset::BitOffset(uint32_t offset){
if (offset>7)
throw std::runtime_error(fmt::format("{} BitOffset needs to be <8: Called with {}", LOCATION, offset));
m_offset = static_cast<uint8_t>(offset);
}
bool BitOffset::operator==(const BitOffset& other) const {
return m_offset == other.m_offset;
}
bool BitOffset::operator<(const BitOffset& other) const {
return m_offset < other.m_offset;
}
} // namespace aare

View File

@@ -83,28 +83,6 @@ TEST_CASE("DynamicCluster creation") {
REQUIRE(c2.data() != nullptr);
}
TEST_CASE("Basic ops on BitOffset"){
REQUIRE_THROWS(aare::BitOffset(10));
aare::BitOffset offset(5);
REQUIRE(offset.value()==5);
aare::BitOffset offset2;
REQUIRE(offset2.value()==0);
aare::BitOffset offset3(offset);
REQUIRE(offset3.value()==5);
REQUIRE(offset==offset3);
//Now assign offset to offset2 which should get the value 5
offset2 = offset;
REQUIRE(offset2.value()==5);
REQUIRE(offset2==offset);
}
// TEST_CASE("cluster set and get data") {
// aare::DynamicCluster c2(33, 44, aare::Dtype(typeid(double)));