From fd0196f2fd5cb3f58e2044a1d898d0e38428c0fb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Tue, 22 Apr 2025 16:41:48 +0200 Subject: [PATCH 01/13] Developer (#164) - State before merging the new cluster vector API --------- Co-authored-by: Patrick Co-authored-by: JulianHeymes Co-authored-by: Dhanya Thattil Co-authored-by: Xiangyu Xie <45243914+xiangyuxie@users.noreply.github.com> Co-authored-by: xiangyu.xie Co-authored-by: siebsi --- .gitea/workflows/cmake_build.yml | 18 +- .gitea/workflows/rh8-native.yml | 36 ++++ .gitea/workflows/rh9-native.yml | 31 +++ .github/workflows/build_docs.yml | 12 +- .github/workflows/build_wheel.yml | 64 +++++++ .gitignore | 3 +- CMakeLists.txt | 18 +- conda-recipe/meta.yaml | 4 +- docs/src/JungfrauDataFile.rst | 25 +++ docs/src/Tests.rst | 47 +++++ docs/src/algorithm.rst | 5 + docs/src/index.rst | 12 +- docs/src/pyJungfrauDataFile.rst | 10 + etc/dev-env.yml | 15 ++ include/aare/ClusterFile.hpp | 2 +- include/aare/FilePtr.hpp | 30 +++ include/aare/JungfrauDataFile.hpp | 106 +++++++++++ include/aare/NDArray.hpp | 2 +- include/aare/NDView.hpp | 9 +- include/aare/RawSubFile.hpp | 5 +- include/aare/VarClusterFinder.hpp | 4 +- include/aare/algorithm.hpp | 62 +++++- include/aare/decode.hpp | 15 +- include/aare/utils/ifstream_helpers.hpp | 12 ++ pyproject.toml | 25 ++- python/CMakeLists.txt | 15 +- python/aare/__init__.py | 8 +- python/aare/utils.py | 11 +- python/src/ctb_raw_file.hpp | 71 ++++--- python/src/file.hpp | 36 +--- python/src/jungfrau_data_file.hpp | 116 ++++++++++++ python/src/module.cpp | 5 + python/src/raw_sub_file.hpp | 110 +++++++++++ python/tests/conftest.py | 29 +++ python/tests/test_RawSubFile.py | 36 ++++ python/tests/test_jungfrau_dat_files.py | 92 +++++++++ src/ClusterFile.cpp | 6 + src/ClusterFile.test.cpp | 16 +- src/File.cpp | 3 + src/FilePtr.cpp | 44 +++++ src/Fit.cpp | 8 +- src/Interpolator.cpp | 9 +- src/JungfrauDataFile.cpp | 238 ++++++++++++++++++++++++ src/JungfrauDataFile.test.cpp | 114 ++++++++++++ src/NDArray.test.cpp | 4 +- src/NDView.test.cpp | 47 +++-- src/RawSubFile.cpp | 31 ++- src/algorithm.test.cpp | 98 +++++++++- src/decode.cpp | 43 ++++- src/decode.test.cpp | 80 ++++++++ src/utils/ifstream_helpers.cpp | 18 ++ 51 files changed, 1706 insertions(+), 154 deletions(-) create mode 100644 .gitea/workflows/rh8-native.yml create mode 100644 .gitea/workflows/rh9-native.yml create mode 100644 .github/workflows/build_wheel.yml create mode 100644 docs/src/JungfrauDataFile.rst create mode 100644 docs/src/Tests.rst create mode 100644 docs/src/algorithm.rst create mode 100644 docs/src/pyJungfrauDataFile.rst create mode 100644 etc/dev-env.yml create mode 100644 include/aare/FilePtr.hpp create mode 100644 include/aare/JungfrauDataFile.hpp create mode 100644 include/aare/utils/ifstream_helpers.hpp create mode 100644 python/src/jungfrau_data_file.hpp create mode 100644 python/src/raw_sub_file.hpp create mode 100644 python/tests/conftest.py create mode 100644 python/tests/test_RawSubFile.py create mode 100644 python/tests/test_jungfrau_dat_files.py create mode 100644 src/FilePtr.cpp create mode 100644 src/JungfrauDataFile.cpp create mode 100644 src/JungfrauDataFile.test.cpp create mode 100644 src/decode.test.cpp create mode 100644 src/utils/ifstream_helpers.cpp diff --git a/.gitea/workflows/cmake_build.yml b/.gitea/workflows/cmake_build.yml index 43a0181..aa7a297 100644 --- a/.gitea/workflows/cmake_build.yml +++ b/.gitea/workflows/cmake_build.yml @@ -2,9 +2,8 @@ name: Build the package using cmake then documentation on: 
workflow_dispatch: - push: - + permissions: contents: read @@ -16,12 +15,12 @@ jobs: strategy: fail-fast: false matrix: - platform: [ubuntu-latest, ] # macos-12, windows-2019] - python-version: ["3.12",] + platform: [ubuntu-latest, ] + python-version: ["3.12", ] runs-on: ${{ matrix.platform }} - # The setup-miniconda action needs this to activate miniconda + defaults: run: shell: "bash -l {0}" @@ -35,13 +34,13 @@ jobs: sudo apt-get -y install cmake gcc g++ - name: Get conda - uses: conda-incubator/setup-miniconda@v3.0.4 + uses: conda-incubator/setup-miniconda@v3 with: python-version: ${{ matrix.python-version }} + environment-file: etc/dev-env.yml + miniforge-version: latest channels: conda-forge - - - name: Prepare - run: conda install doxygen sphinx=7.1.2 breathe pybind11 sphinx_rtd_theme furo nlohmann_json zeromq fmt numpy + conda-remove-defaults: "true" - name: Build library run: | @@ -56,3 +55,4 @@ jobs: + diff --git a/.gitea/workflows/rh8-native.yml b/.gitea/workflows/rh8-native.yml new file mode 100644 index 0000000..1c64161 --- /dev/null +++ b/.gitea/workflows/rh8-native.yml @@ -0,0 +1,36 @@ +name: Build on RHEL8 + +on: + push: + workflow_dispatch: + +permissions: + contents: read + +jobs: + build: + runs-on: "ubuntu-latest" + container: + image: gitea.psi.ch/images/rhel8-developer-gitea-actions + steps: + # workaround until actions/checkout@v4 is available for RH8 + # - uses: actions/checkout@v4 + - name: Clone repository + run: | + echo Cloning ${{ github.ref_name }} + git clone https://${{secrets.GITHUB_TOKEN}}@gitea.psi.ch/${{ github.repository }}.git --branch=${{ github.ref_name }} . + + + - name: Install dependencies + run: | + dnf install -y cmake python3.12 python3.12-devel python3.12-pip + + - name: Build library + run: | + mkdir build && cd build + cmake .. -DAARE_PYTHON_BINDINGS=ON -DAARE_TESTS=ON -DPython_FIND_VIRTUALENV=FIRST + make -j 2 + + - name: C++ unit tests + working-directory: ${{gitea.workspace}}/build + run: ctest \ No newline at end of file diff --git a/.gitea/workflows/rh9-native.yml b/.gitea/workflows/rh9-native.yml new file mode 100644 index 0000000..5027365 --- /dev/null +++ b/.gitea/workflows/rh9-native.yml @@ -0,0 +1,31 @@ +name: Build on RHEL9 + +on: + push: + workflow_dispatch: + +permissions: + contents: read + +jobs: + build: + runs-on: "ubuntu-latest" + container: + image: gitea.psi.ch/images/rhel9-developer-gitea-actions + steps: + - uses: actions/checkout@v4 + + + - name: Install dependencies + run: | + dnf install -y cmake python3.12 python3.12-devel python3.12-pip + + - name: Build library + run: | + mkdir build && cd build + cmake .. 
-DAARE_PYTHON_BINDINGS=ON -DAARE_TESTS=ON + make -j 2 + + - name: C++ unit tests + working-directory: ${{gitea.workspace}}/build + run: ctest \ No newline at end of file diff --git a/.github/workflows/build_docs.yml b/.github/workflows/build_docs.yml index 959ab70..24050a3 100644 --- a/.github/workflows/build_docs.yml +++ b/.github/workflows/build_docs.yml @@ -5,7 +5,6 @@ on: push: - permissions: contents: read pages: write @@ -16,12 +15,11 @@ jobs: strategy: fail-fast: false matrix: - platform: [ubuntu-latest, ] # macos-12, windows-2019] + platform: [ubuntu-latest, ] python-version: ["3.12",] runs-on: ${{ matrix.platform }} - # The setup-miniconda action needs this to activate miniconda defaults: run: shell: "bash -l {0}" @@ -30,13 +28,13 @@ jobs: - uses: actions/checkout@v4 - name: Get conda - uses: conda-incubator/setup-miniconda@v3.0.4 + uses: conda-incubator/setup-miniconda@v3 with: python-version: ${{ matrix.python-version }} + environment-file: etc/dev-env.yml + miniforge-version: latest channels: conda-forge - - - name: Prepare - run: conda install doxygen sphinx=7.1.2 breathe pybind11 sphinx_rtd_theme furo nlohmann_json zeromq fmt numpy + conda-remove-defaults: "true" - name: Build library run: | diff --git a/.github/workflows/build_wheel.yml b/.github/workflows/build_wheel.yml new file mode 100644 index 0000000..f131e77 --- /dev/null +++ b/.github/workflows/build_wheel.yml @@ -0,0 +1,64 @@ +name: Build wheel + +on: + workflow_dispatch: + pull_request: + push: + branches: + - main + release: + types: + - published + + +jobs: + build_wheels: + name: Build wheels on ${{ matrix.os }} + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest,] + + steps: + - uses: actions/checkout@v4 + + - name: Build wheels + run: pipx run cibuildwheel==2.23.0 + + - uses: actions/upload-artifact@v4 + with: + name: cibw-wheels-${{ matrix.os }}-${{ strategy.job-index }} + path: ./wheelhouse/*.whl + + build_sdist: + name: Build source distribution + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Build sdist + run: pipx run build --sdist + + - uses: actions/upload-artifact@v4 + with: + name: cibw-sdist + path: dist/*.tar.gz + + upload_pypi: + needs: [build_wheels, build_sdist] + runs-on: ubuntu-latest + environment: pypi + permissions: + id-token: write + if: github.event_name == 'release' && github.event.action == 'published' + # or, alternatively, upload to PyPI on every tag starting with 'v' (remove on: release above to use this) + # if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v') + steps: + - uses: actions/download-artifact@v4 + with: + # unpacks all CIBW artifacts into dist/ + pattern: cibw-* + path: dist + merge-multiple: true + + - uses: pypa/gh-action-pypi-publish@release/v1 diff --git a/.gitignore b/.gitignore index af3e3b7..5982f7f 100644 --- a/.gitignore +++ b/.gitignore @@ -17,7 +17,8 @@ Testing/ ctbDict.cpp ctbDict.h - +wheelhouse/ +dist/ *.pyc */__pycache__/* diff --git a/CMakeLists.txt b/CMakeLists.txt index 804b2f6..b3d7377 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.14) +cmake_minimum_required(VERSION 3.15) project(aare VERSION 1.0.0 @@ -11,6 +11,14 @@ set(CMAKE_CXX_STANDARD 17) set(CMAKE_CXX_STANDARD_REQUIRED ON) set(CMAKE_CXX_EXTENSIONS OFF) +execute_process( + COMMAND git log -1 --format=%h + WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} + OUTPUT_VARIABLE GIT_HASH + OUTPUT_STRIP_TRAILING_WHITESPACE + ) +message(STATUS "Building from git hash: ${GIT_HASH}") + if 
(${CMAKE_VERSION} VERSION_GREATER "3.24") cmake_policy(SET CMP0135 NEW) #Fetch content download timestamp endif() @@ -342,8 +350,10 @@ set(PUBLICHEADERS include/aare/File.hpp include/aare/Fit.hpp include/aare/FileInterface.hpp + include/aare/FilePtr.hpp include/aare/Frame.hpp include/aare/geo_helpers.hpp + include/aare/JungfrauDataFile.hpp include/aare/NDArray.hpp include/aare/NDView.hpp include/aare/NumpyFile.hpp @@ -367,8 +377,10 @@ set(SourceFiles ${CMAKE_CURRENT_SOURCE_DIR}/src/decode.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Frame.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/File.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/FilePtr.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Fit.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/geo_helpers.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/JungfrauDataFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyHelpers.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Interpolator.cpp @@ -376,7 +388,9 @@ set(SourceFiles ${CMAKE_CURRENT_SOURCE_DIR}/src/RawFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/RawSubFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/RawMasterFile.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/utils/task.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/utils/ifstream_helpers.cpp ) @@ -413,6 +427,7 @@ if(AARE_TESTS) set(TestSources ${CMAKE_CURRENT_SOURCE_DIR}/src/algorithm.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/defs.test.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/decode.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Dtype.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Frame.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/geo_helpers.test.cpp @@ -423,6 +438,7 @@ if(AARE_TESTS) ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterVector.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFile.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Pedestal.test.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/JungfrauDataFile.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyHelpers.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/RawFile.test.cpp diff --git a/conda-recipe/meta.yaml b/conda-recipe/meta.yaml index 560e831..12c6ca0 100644 --- a/conda-recipe/meta.yaml +++ b/conda-recipe/meta.yaml @@ -1,6 +1,7 @@ package: name: aare - version: 2025.4.1 #TODO! how to not duplicate this? + version: 2025.4.22 #TODO! how to not duplicate this? + @@ -38,6 +39,7 @@ requirements: run: - python {{python}} - numpy {{ numpy }} + - matplotlib test: diff --git a/docs/src/JungfrauDataFile.rst b/docs/src/JungfrauDataFile.rst new file mode 100644 index 0000000..78d473f --- /dev/null +++ b/docs/src/JungfrauDataFile.rst @@ -0,0 +1,25 @@ +JungfrauDataFile +================== + +JungfrauDataFile is a class to read the .dat files that are produced by Aldo's receiver. +It is mostly used for calibration. + +The structure of the file is: + +* JungfrauDataHeader +* Binary data (256x256, 256x1024 or 512x1024) +* JungfrauDataHeader +* ... + +There is no metadata indicating number of frames or the size of the image, but this +will be infered by this reader. + +.. doxygenstruct:: aare::JungfrauDataHeader + :members: + :undoc-members: + :private-members: + +.. doxygenclass:: aare::JungfrauDataFile + :members: + :undoc-members: + :private-members: \ No newline at end of file diff --git a/docs/src/Tests.rst b/docs/src/Tests.rst new file mode 100644 index 0000000..da98001 --- /dev/null +++ b/docs/src/Tests.rst @@ -0,0 +1,47 @@ +**************** +Tests +**************** + +We test the code both from the C++ and Python API. By default only tests that does not require image data is run. + +C++ +~~~~~~~~~~~~~~~~~~ + +.. 
code-block:: bash + + mkdir build + cd build + cmake .. -DAARE_TESTS=ON + make -j 4 + + export AARE_TEST_DATA=/path/to/test/data + ./run_test [.files] #or using ctest, [.files] is the option to include tests needing data + + + +Python +~~~~~~~~~~~~~~~~~~ + +.. code-block:: bash + + #From the root dir of the library + python -m pytest python/tests --files # passing --files will run the tests needing data + + + +Getting the test data +~~~~~~~~~~~~~~~~~~~~~~~~ + +.. attention :: + + The tests needing the test data are not run by default. To make the data available, you need to set the environment variable + AARE_TEST_DATA to the path of the test data directory. Then pass either [.files] for the C++ tests or --files for Python + +The image files needed for the test are large and are not included in the repository. They are stored +using GIT LFS in a separate repository. To get the test data, you need to clone the repository. +To do this, you need to have GIT LFS installed. You can find instructions on how to install it here: https://git-lfs.github.com/ +Once you have GIT LFS installed, you can clone the repository like any normal repo using: + +.. code-block:: bash + + git clone https://gitea.psi.ch/detectors/aare-test-data.git diff --git a/docs/src/algorithm.rst b/docs/src/algorithm.rst new file mode 100644 index 0000000..9b11857 --- /dev/null +++ b/docs/src/algorithm.rst @@ -0,0 +1,5 @@ +algorithm +============= + +.. doxygenfile:: algorithm.hpp + diff --git a/docs/src/index.rst b/docs/src/index.rst index 905caea..af5e99a 100644 --- a/docs/src/index.rst +++ b/docs/src/index.rst @@ -20,9 +20,6 @@ AARE Requirements Consume - - - .. toctree:: :caption: Python API :maxdepth: 1 @@ -31,6 +28,7 @@ AARE pyCtbRawFile pyClusterFile pyClusterVector + pyJungfrauDataFile pyRawFile pyRawMasterFile pyVarClusterFinder @@ -42,6 +40,7 @@ AARE :caption: C++ API :maxdepth: 1 + algorithm NDArray NDView Frame @@ -51,6 +50,7 @@ AARE ClusterFinderMT ClusterFile ClusterVector + JungfrauDataFile Pedestal RawFile RawSubFile @@ -59,4 +59,8 @@ AARE - +.. toctree:: + :caption: Developer + :maxdepth: 3 + + Tests \ No newline at end of file diff --git a/docs/src/pyJungfrauDataFile.rst b/docs/src/pyJungfrauDataFile.rst new file mode 100644 index 0000000..2173adf --- /dev/null +++ b/docs/src/pyJungfrauDataFile.rst @@ -0,0 +1,10 @@ +JungfrauDataFile +=================== + +.. py:currentmodule:: aare + +.. autoclass:: JungfrauDataFile + :members: + :undoc-members: + :show-inheritance: + :inherited-members: \ No newline at end of file diff --git a/etc/dev-env.yml b/etc/dev-env.yml new file mode 100644 index 0000000..25038ee --- /dev/null +++ b/etc/dev-env.yml @@ -0,0 +1,15 @@ +name: dev-environment +channels: + - conda-forge +dependencies: + - anaconda-client + - doxygen + - sphinx=7.1.2 + - breathe + - pybind11 + - sphinx_rtd_theme + - furo + - nlohmann_json + - zeromq + - fmt + - numpy diff --git a/include/aare/ClusterFile.hpp b/include/aare/ClusterFile.hpp index bea9f48..b47a1d5 100644 --- a/include/aare/ClusterFile.hpp +++ b/include/aare/ClusterFile.hpp @@ -124,7 +124,7 @@ class ClusterFile { /** * @brief Set the gain map to use when reading clusters. If set the gain map will be applied - * to the clusters that pass ROI and noise_map selection. + * to the clusters that pass ROI and noise_map selection. The gain map is expected to be in ADU/energy. 
*/ void set_gain_map(const NDView gain_map); diff --git a/include/aare/FilePtr.hpp b/include/aare/FilePtr.hpp new file mode 100644 index 0000000..4c88ecb --- /dev/null +++ b/include/aare/FilePtr.hpp @@ -0,0 +1,30 @@ +#pragma once +#include +#include + +namespace aare { + +/** + * \brief RAII wrapper for FILE pointer + */ +class FilePtr { + FILE *fp_{nullptr}; + + public: + FilePtr() = default; + FilePtr(const std::filesystem::path& fname, const std::string& mode); + FilePtr(const FilePtr &) = delete; // we don't want a copy + FilePtr &operator=(const FilePtr &) = delete; // since we handle a resource + FilePtr(FilePtr &&other); + FilePtr &operator=(FilePtr &&other); + FILE *get(); + int64_t tell(); + void seek(int64_t offset, int whence = SEEK_SET) { + if (fseek(fp_, offset, whence) != 0) + throw std::runtime_error("Error seeking in file"); + } + std::string error_msg(); + ~FilePtr(); +}; + +} // namespace aare \ No newline at end of file diff --git a/include/aare/JungfrauDataFile.hpp b/include/aare/JungfrauDataFile.hpp new file mode 100644 index 0000000..9b1bc48 --- /dev/null +++ b/include/aare/JungfrauDataFile.hpp @@ -0,0 +1,106 @@ +#pragma once +#include +#include +#include + +#include "aare/FilePtr.hpp" +#include "aare/defs.hpp" +#include "aare/NDArray.hpp" +#include "aare/FileInterface.hpp" +namespace aare { + + +struct JungfrauDataHeader{ + uint64_t framenum; + uint64_t bunchid; +}; + +class JungfrauDataFile : public FileInterface { + + size_t m_rows{}; //!< number of rows in the image, from find_frame_size(); + size_t m_cols{}; //!< number of columns in the image, from find_frame_size(); + size_t m_bytes_per_frame{}; //!< number of bytes per frame excluding header + size_t m_total_frames{}; //!< total number of frames in the series of files + size_t m_offset{}; //!< file index of the first file, allow starting at non zero file + size_t m_current_file_index{}; //!< The index of the open file + size_t m_current_frame_index{}; //!< The index of the current frame (with reference to all files) + + std::vector m_last_frame_in_file{}; //!< Used for seeking to the correct file + std::filesystem::path m_path; //!< path to the files + std::string m_base_name; //!< base name used for formatting file names + + FilePtr m_fp; //!< RAII wrapper for a FILE* + + + using pixel_type = uint16_t; + static constexpr size_t header_size = sizeof(JungfrauDataHeader); + static constexpr size_t n_digits_in_file_index = 6; //!< to format file names + + public: + JungfrauDataFile(const std::filesystem::path &fname); + + std::string base_name() const; //!< get the base name of the file (without path and extension) + size_t bytes_per_frame() override; + size_t pixels_per_frame() override; + size_t bytes_per_pixel() const; + size_t bitdepth() const override; + void seek(size_t frame_index) override; //!< seek to the given frame index (note not byte offset) + size_t tell() override; //!< get the frame index of the file pointer + size_t total_frames() const override; + size_t rows() const override; + size_t cols() const override; + std::array shape() const; + size_t n_files() const; //!< get the number of files in the series. 
+ + // Extra functions needed for FileInterface + Frame read_frame() override; + Frame read_frame(size_t frame_number) override; + std::vector read_n(size_t n_frames=0) override; + void read_into(std::byte *image_buf) override; + void read_into(std::byte *image_buf, size_t n_frames) override; + size_t frame_number(size_t frame_index) override; + DetectorType detector_type() const override; + + /** + * @brief Read a single frame from the file into the given buffer. + * @param image_buf buffer to read the frame into. (Note the caller is responsible for allocating the buffer) + * @param header pointer to a JungfrauDataHeader or nullptr to skip header) + */ + void read_into(std::byte *image_buf, JungfrauDataHeader *header = nullptr); + + /** + * @brief Read a multiple frames from the file into the given buffer. + * @param image_buf buffer to read the frame into. (Note the caller is responsible for allocating the buffer) + * @param n_frames number of frames to read + * @param header pointer to a JungfrauDataHeader or nullptr to skip header) + */ + void read_into(std::byte *image_buf, size_t n_frames, JungfrauDataHeader *header = nullptr); + + /** + * @brief Read a single frame from the file into the given NDArray + * @param image NDArray to read the frame into. + */ + void read_into(NDArray* image, JungfrauDataHeader* header = nullptr); + + JungfrauDataHeader read_header(); + std::filesystem::path current_file() const { return fpath(m_current_file_index+m_offset); } + + + private: + /** + * @brief Find the size of the frame in the file. (256x256, 256x1024, 512x1024) + * @param fname path to the file + * @throws std::runtime_error if the file is empty or the size cannot be determined + */ + void find_frame_size(const std::filesystem::path &fname); + + + void parse_fname(const std::filesystem::path &fname); + void scan_files(); + void open_file(size_t file_index); + std::filesystem::path fpath(size_t frame_index) const; + + + }; + +} // namespace aare \ No newline at end of file diff --git a/include/aare/NDArray.hpp b/include/aare/NDArray.hpp index 45d3a83..ceb1e0b 100644 --- a/include/aare/NDArray.hpp +++ b/include/aare/NDArray.hpp @@ -194,7 +194,7 @@ class NDArray : public ArrayExpr, Ndim> { T *data() { return data_; } std::byte *buffer() { return reinterpret_cast(data_); } - size_t size() const { return size_; } + ssize_t size() const { return static_cast(size_); } size_t total_bytes() const { return size_ * sizeof(T); } std::array shape() const noexcept { return shape_; } int64_t shape(int64_t i) const noexcept { return shape_[i]; } diff --git a/include/aare/NDView.hpp b/include/aare/NDView.hpp index f53f758..ddb5d1c 100644 --- a/include/aare/NDView.hpp +++ b/include/aare/NDView.hpp @@ -71,7 +71,7 @@ template class NDView : public ArrayExpr(size_); } size_t total_bytes() const { return size_ * sizeof(T); } std::array strides() const noexcept { return strides_; } @@ -102,7 +102,7 @@ template class NDView : public ArrayExpr NDView& operator=(const std::array &arr) { - if(size() != arr.size()) + if(size() != static_cast(arr.size())) throw std::runtime_error(LOCATION + "Array and NDView size mismatch"); std::copy(arr.begin(), arr.end(), begin()); return *this; @@ -184,4 +184,9 @@ std::ostream& operator <<(std::ostream& os, const NDView& arr){ } +template +NDView make_view(std::vector& vec){ + return NDView(vec.data(), {static_cast(vec.size())}); +} + } // namespace aare \ No newline at end of file diff --git a/include/aare/RawSubFile.hpp b/include/aare/RawSubFile.hpp index 1d554e8..350a475 100644 
--- a/include/aare/RawSubFile.hpp +++ b/include/aare/RawSubFile.hpp @@ -22,7 +22,7 @@ class RawSubFile { size_t m_rows{}; size_t m_cols{}; size_t m_bytes_per_frame{}; - size_t n_frames{}; + size_t m_num_frames{}; uint32_t m_pos_row{}; uint32_t m_pos_col{}; @@ -53,6 +53,7 @@ class RawSubFile { size_t tell(); void read_into(std::byte *image_buf, DetectorHeader *header = nullptr); + void read_into(std::byte *image_buf, size_t n_frames, DetectorHeader *header= nullptr); void get_part(std::byte *buffer, size_t frame_index); void read_header(DetectorHeader *header); @@ -66,6 +67,8 @@ class RawSubFile { size_t pixels_per_frame() const { return m_rows * m_cols; } size_t bytes_per_pixel() const { return m_bitdepth / bits_per_byte; } + size_t frames_in_file() const { return m_num_frames; } + private: template void read_with_map(std::byte *image_buf); diff --git a/include/aare/VarClusterFinder.hpp b/include/aare/VarClusterFinder.hpp index ea62a9d..161941a 100644 --- a/include/aare/VarClusterFinder.hpp +++ b/include/aare/VarClusterFinder.hpp @@ -226,7 +226,7 @@ template void VarClusterFinder::single_pass(NDView img) { template void VarClusterFinder::first_pass() { - for (size_t i = 0; i < original_.size(); ++i) { + for (ssize_t i = 0; i < original_.size(); ++i) { if (use_noise_map) threshold_ = 5 * noiseMap(i); binary_(i) = (original_(i) > threshold_); @@ -250,7 +250,7 @@ template void VarClusterFinder::first_pass() { template void VarClusterFinder::second_pass() { - for (size_t i = 0; i != labeled_.size(); ++i) { + for (ssize_t i = 0; i != labeled_.size(); ++i) { auto cl = labeled_(i); if (cl != 0) { auto it = child.find(cl); diff --git a/include/aare/algorithm.hpp b/include/aare/algorithm.hpp index 5d6dc57..fc7d51f 100644 --- a/include/aare/algorithm.hpp +++ b/include/aare/algorithm.hpp @@ -7,13 +7,20 @@ namespace aare { /** - * @brief Find the index of the last element smaller than val - * assume a sorted array + * @brief Index of the last element that is smaller than val. + * Requires a sorted array. Uses >= for ordering. If all elements + * are smaller it returns the last element and if all elements are + * larger it returns the first element. + * @param first iterator to the first element + * @param last iterator to the last element + * @param val value to compare + * @return index of the last element that is smaller than val + * */ template size_t last_smaller(const T* first, const T* last, T val) { for (auto iter = first+1; iter != last; ++iter) { - if (*iter > val) { + if (*iter >= val) { return std::distance(first, iter-1); } } @@ -25,7 +32,49 @@ size_t last_smaller(const NDArray& arr, T val) { return last_smaller(arr.begin(), arr.end(), val); } +template +size_t last_smaller(const std::vector& vec, T val) { + return last_smaller(vec.data(), vec.data()+vec.size(), val); +} +/** + * @brief Index of the first element that is larger than val. + * Requires a sorted array. Uses > for ordering. If all elements + * are larger it returns the first element and if all elements are + * smaller it returns the last element. 
+ * @param first iterator to the first element + * @param last iterator to the last element + * @param val value to compare + * @return index of the first element that is larger than val + */ +template +size_t first_larger(const T* first, const T* last, T val) { + for (auto iter = first; iter != last; ++iter) { + if (*iter > val) { + return std::distance(first, iter); + } + } + return std::distance(first, last-1); +} + +template +size_t first_larger(const NDArray& arr, T val) { + return first_larger(arr.begin(), arr.end(), val); +} + +template +size_t first_larger(const std::vector& vec, T val) { + return first_larger(vec.data(), vec.data()+vec.size(), val); +} + +/** + * @brief Index of the nearest element to val. + * Requires a sorted array. If there is no difference it takes the first element. + * @param first iterator to the first element + * @param last iterator to the last element + * @param val value to compare + * @return index of the nearest element + */ template size_t nearest_index(const T* first, const T* last, T val) { auto iter = std::min_element(first, last, @@ -50,6 +99,13 @@ size_t nearest_index(const std::array& arr, T val) { return nearest_index(arr.data(), arr.data()+arr.size(), val); } +template +std::vector cumsum(const std::vector& vec) { + std::vector result(vec.size()); + std::partial_sum(vec.begin(), vec.end(), result.begin()); + return result; +} + } // namespace aare \ No newline at end of file diff --git a/include/aare/decode.hpp b/include/aare/decode.hpp index 1c3c479..e784c4a 100644 --- a/include/aare/decode.hpp +++ b/include/aare/decode.hpp @@ -1,6 +1,7 @@ #pragma once #include +#include #include namespace aare { @@ -10,4 +11,16 @@ uint16_t adc_sar_04_decode64to16(uint64_t input); void adc_sar_05_decode64to16(NDView input, NDView output); void adc_sar_04_decode64to16(NDView input, NDView output); -} // namespace aare \ No newline at end of file + +/** + * @brief Apply custom weights to a 16-bit input value. Will sum up weights[i]**i + * for each bit i that is set in the input value. 
+ * @throws std::out_of_range if weights.size() < 16 + * @param input 16-bit input value + * @param weights vector of weights, size must be less than or equal to 16 + */ +double apply_custom_weights(uint16_t input, const NDView weights); + +void apply_custom_weights(NDView input, NDView output, const NDView weights); + +} // namespace aare diff --git a/include/aare/utils/ifstream_helpers.hpp b/include/aare/utils/ifstream_helpers.hpp new file mode 100644 index 0000000..0a842ed --- /dev/null +++ b/include/aare/utils/ifstream_helpers.hpp @@ -0,0 +1,12 @@ +#pragma once + +#include +#include +namespace aare { + +/** + * @brief Get the error message from an ifstream object +*/ +std::string ifstream_error_msg(std::ifstream &ifs); + +} // namespace aare \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 60128c9..7415062 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,15 +4,32 @@ build-backend = "scikit_build_core.build" [project] name = "aare" -version = "2025.4.1" +version = "2025.4.22" +requires-python = ">=3.11" +dependencies = [ + "numpy", + "matplotlib", +] + + +[tool.cibuildwheel] + +build = "cp{311,312,313}-manylinux_x86_64" [tool.scikit-build] -cmake.verbose = true +build.verbose = true +cmake.build-type = "Release" +install.components = ["python"] [tool.scikit-build.cmake.define] AARE_PYTHON_BINDINGS = "ON" -AARE_SYSTEM_LIBRARIES = "ON" -AARE_INSTALL_PYTHONEXT = "ON" \ No newline at end of file +AARE_INSTALL_PYTHONEXT = "ON" + + +[tool.pytest.ini_options] +markers = [ + "files: marks tests that need additional data (deselect with '-m \"not files\"')", +] \ No newline at end of file diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index 09de736..549205a 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -1,12 +1,13 @@ -find_package (Python 3.10 COMPONENTS Interpreter Development REQUIRED) +find_package (Python 3.10 COMPONENTS Interpreter Development.Module REQUIRED) +set(PYBIND11_FINDPYTHON ON) # Needed for RH8 # Download or find pybind11 depending on configuration if(AARE_FETCH_PYBIND11) FetchContent_Declare( pybind11 GIT_REPOSITORY https://github.com/pybind/pybind11 - GIT_TAG v2.13.0 + GIT_TAG v2.13.6 ) FetchContent_MakeAvailable(pybind11) else() @@ -58,10 +59,16 @@ endforeach(FILE ${PYTHON_EXAMPLES}) if(AARE_INSTALL_PYTHONEXT) - install(TARGETS _aare + install( + TARGETS _aare EXPORT "${TARGETS_EXPORT_NAME}" LIBRARY DESTINATION aare + COMPONENT python ) - install(FILES ${PYTHON_FILES} DESTINATION aare) + install( + FILES ${PYTHON_FILES} + DESTINATION aare + COMPONENT python + ) endif() \ No newline at end of file diff --git a/python/aare/__init__.py b/python/aare/__init__.py index 058d7cf..db9672f 100644 --- a/python/aare/__init__.py +++ b/python/aare/__init__.py @@ -2,7 +2,7 @@ from . 
import _aare -from ._aare import File, RawMasterFile, RawSubFile +from ._aare import File, RawMasterFile, RawSubFile, JungfrauDataFile from ._aare import Pedestal_d, Pedestal_f, ClusterFinder, VarClusterFinder from ._aare import DetectorType from ._aare import ClusterFile @@ -13,11 +13,15 @@ from ._aare import ClusterFinderMT, ClusterCollector, ClusterFileSink, ClusterVe from ._aare import fit_gaus, fit_pol1 from ._aare import Interpolator + + +from ._aare import apply_custom_weights + from .CtbRawFile import CtbRawFile from .RawFile import RawFile from .ScanParameters import ScanParameters -from .utils import random_pixels, random_pixel, flat_list +from .utils import random_pixels, random_pixel, flat_list, add_colorbar #make functions available in the top level API diff --git a/python/aare/utils.py b/python/aare/utils.py index 4708921..a10f54c 100644 --- a/python/aare/utils.py +++ b/python/aare/utils.py @@ -1,4 +1,6 @@ import numpy as np +import matplotlib.pyplot as plt +from mpl_toolkits.axes_grid1 import make_axes_locatable def random_pixels(n_pixels, xmin=0, xmax=512, ymin=0, ymax=1024): """Return a list of random pixels. @@ -24,4 +26,11 @@ def random_pixel(xmin=0, xmax=512, ymin=0, ymax=1024): def flat_list(xss): """Flatten a list of lists.""" - return [x for xs in xss for x in xs] \ No newline at end of file + return [x for xs in xss for x in xs] + +def add_colorbar(ax, im, size="5%", pad=0.05): + """Add a colorbar with the same height as the image.""" + divider = make_axes_locatable(ax) + cax = divider.append_axes("right", size=size, pad=pad) + plt.colorbar(im, cax=cax) + return ax, im, cax \ No newline at end of file diff --git a/python/src/ctb_raw_file.hpp b/python/src/ctb_raw_file.hpp index 56e571b..a88a9d1 100644 --- a/python/src/ctb_raw_file.hpp +++ b/python/src/ctb_raw_file.hpp @@ -10,6 +10,8 @@ #include "aare/decode.hpp" // #include "aare/fClusterFileV2.hpp" +#include "np_helper.hpp" + #include #include #include @@ -65,35 +67,54 @@ m.def("adc_sar_04_decode64to16", [](py::array_t input) { return output; }); - py::class_(m, "CtbRawFile") - .def(py::init()) - .def("read_frame", - [](CtbRawFile &self) { - size_t image_size = self.image_size_in_bytes(); - py::array image; - std::vector shape; - shape.reserve(2); - shape.push_back(1); - shape.push_back(image_size); +m.def( + "apply_custom_weights", + [](py::array_t &input, + py::array_t + &weights) { + - py::array_t header(1); + // Create new array with same shape as the input array (uninitialized values) + py::buffer_info buf = input.request(); + py::array_t output(buf.shape); - // always read bytes - image = py::array_t(shape); + // Use NDViews to call into the C++ library + auto weights_view = make_view_1d(weights); + NDView input_view(input.mutable_data(), {input.size()}); + NDView output_view(output.mutable_data(), {output.size()}); - self.read_into( - reinterpret_cast(image.mutable_data()), - header.mutable_data()); + apply_custom_weights(input_view, output_view, weights_view); + return output; + }); - return py::make_tuple(header, image); - }) - .def("seek", &CtbRawFile::seek) - .def("tell", &CtbRawFile::tell) - .def("master", &CtbRawFile::master) +py::class_(m, "CtbRawFile") + .def(py::init()) + .def("read_frame", + [](CtbRawFile &self) { + size_t image_size = self.image_size_in_bytes(); + py::array image; + std::vector shape; + shape.reserve(2); + shape.push_back(1); + shape.push_back(image_size); - .def_property_readonly("image_size_in_bytes", - &CtbRawFile::image_size_in_bytes) + py::array_t header(1); - 
.def_property_readonly("frames_in_file", &CtbRawFile::frames_in_file); + // always read bytes + image = py::array_t(shape); -} \ No newline at end of file + self.read_into(reinterpret_cast(image.mutable_data()), + header.mutable_data()); + + return py::make_tuple(header, image); + }) + .def("seek", &CtbRawFile::seek) + .def("tell", &CtbRawFile::tell) + .def("master", &CtbRawFile::master) + + .def_property_readonly("image_size_in_bytes", + &CtbRawFile::image_size_in_bytes) + + .def_property_readonly("frames_in_file", &CtbRawFile::frames_in_file); + +} diff --git a/python/src/file.hpp b/python/src/file.hpp index 0d64e16..2d0f53e 100644 --- a/python/src/file.hpp +++ b/python/src/file.hpp @@ -20,6 +20,9 @@ namespace py = pybind11; using namespace ::aare; + + + //Disable warnings for unused parameters, as we ignore some //in the __exit__ method #pragma GCC diagnostic push @@ -214,36 +217,9 @@ void define_file_io_bindings(py::module &m) { - py::class_(m, "RawSubFile") - .def(py::init()) - .def_property_readonly("bytes_per_frame", &RawSubFile::bytes_per_frame) - .def_property_readonly("pixels_per_frame", - &RawSubFile::pixels_per_frame) - .def("seek", &RawSubFile::seek) - .def("tell", &RawSubFile::tell) - .def_property_readonly("rows", &RawSubFile::rows) - .def_property_readonly("cols", &RawSubFile::cols) - .def("read_frame", - [](RawSubFile &self) { - const uint8_t item_size = self.bytes_per_pixel(); - py::array image; - std::vector shape; - shape.reserve(2); - shape.push_back(self.rows()); - shape.push_back(self.cols()); - if (item_size == 1) { - image = py::array_t(shape); - } else if (item_size == 2) { - image = py::array_t(shape); - } else if (item_size == 4) { - image = py::array_t(shape); - } - fmt::print("item_size: {} rows: {} cols: {}\n", item_size, self.rows(), self.cols()); - self.read_into( - reinterpret_cast(image.mutable_data())); - return image; - }); + + + #pragma GCC diagnostic pop // py::class_(m, "ClusterHeader") diff --git a/python/src/jungfrau_data_file.hpp b/python/src/jungfrau_data_file.hpp new file mode 100644 index 0000000..942f6a6 --- /dev/null +++ b/python/src/jungfrau_data_file.hpp @@ -0,0 +1,116 @@ + +#include "aare/JungfrauDataFile.hpp" +#include "aare/defs.hpp" + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace py = pybind11; +using namespace ::aare; + +// Disable warnings for unused parameters, as we ignore some +// in the __exit__ method +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" + +auto read_dat_frame(JungfrauDataFile &self) { + py::array_t header(1); + py::array_t image({ + self.rows(), + self.cols() + }); + + self.read_into(reinterpret_cast(image.mutable_data()), + header.mutable_data()); + + return py::make_tuple(header, image); +} + +auto read_n_dat_frames(JungfrauDataFile &self, size_t n_frames) { + // adjust for actual frames left in the file + n_frames = std::min(n_frames, self.total_frames() - self.tell()); + if (n_frames == 0) { + throw std::runtime_error("No frames left in file"); + } + + py::array_t header(n_frames); + py::array_t image({ + n_frames, self.rows(), + self.cols()}); + + self.read_into(reinterpret_cast(image.mutable_data()), + n_frames, header.mutable_data()); + + return py::make_tuple(header, image); +} + +void define_jungfrau_data_file_io_bindings(py::module &m) { + // Make the JungfrauDataHeader usable from numpy + PYBIND11_NUMPY_DTYPE(JungfrauDataHeader, framenum, bunchid); + + py::class_(m, "JungfrauDataFile") + .def(py::init()) + .def("seek", 
&JungfrauDataFile::seek, + R"( + Seek to the given frame index. + )") + .def("tell", &JungfrauDataFile::tell, + R"( + Get the current frame index. + )") + .def_property_readonly("rows", &JungfrauDataFile::rows) + .def_property_readonly("cols", &JungfrauDataFile::cols) + .def_property_readonly("base_name", &JungfrauDataFile::base_name) + .def_property_readonly("bytes_per_frame", + &JungfrauDataFile::bytes_per_frame) + .def_property_readonly("pixels_per_frame", + &JungfrauDataFile::pixels_per_frame) + .def_property_readonly("bytes_per_pixel", + &JungfrauDataFile::bytes_per_pixel) + .def_property_readonly("bitdepth", &JungfrauDataFile::bitdepth) + .def_property_readonly("current_file", &JungfrauDataFile::current_file) + .def_property_readonly("total_frames", &JungfrauDataFile::total_frames) + .def_property_readonly("n_files", &JungfrauDataFile::n_files) + .def("read_frame", &read_dat_frame, + R"( + Read a single frame from the file. + )") + .def("read_n", &read_n_dat_frames, + R"( + Read maximum n_frames frames from the file. + )") + .def( + "read", + [](JungfrauDataFile &self) { + self.seek(0); + auto n_frames = self.total_frames(); + return read_n_dat_frames(self, n_frames); + }, + R"( + Read all frames from the file. Seeks to the beginning before reading. + )") + .def("__enter__", [](JungfrauDataFile &self) { return &self; }) + .def("__exit__", + [](JungfrauDataFile &self, + const std::optional &exc_type, + const std::optional &exc_value, + const std::optional &traceback) { + // self.close(); + }) + .def("__iter__", [](JungfrauDataFile &self) { return &self; }) + .def("__next__", [](JungfrauDataFile &self) { + try { + return read_dat_frame(self); + } catch (std::runtime_error &e) { + throw py::stop_iteration(); + } + }); +} + +#pragma GCC diagnostic pop \ No newline at end of file diff --git a/python/src/module.cpp b/python/src/module.cpp index 43f48ba..75fe237 100644 --- a/python/src/module.cpp +++ b/python/src/module.cpp @@ -10,6 +10,9 @@ #include "cluster_file.hpp" #include "fit.hpp" #include "interpolation.hpp" +#include "raw_sub_file.hpp" + +#include "jungfrau_data_file.hpp" //Pybind stuff #include @@ -20,6 +23,7 @@ namespace py = pybind11; PYBIND11_MODULE(_aare, m) { define_file_io_bindings(m); define_raw_file_io_bindings(m); + define_raw_sub_file_io_bindings(m); define_ctb_raw_file_io_bindings(m); define_raw_master_file_bindings(m); define_var_cluster_finder_bindings(m); @@ -33,5 +37,6 @@ PYBIND11_MODULE(_aare, m) { define_cluster_file_sink_bindings(m); define_fit_bindings(m); define_interpolation_bindings(m); + define_jungfrau_data_file_io_bindings(m); } \ No newline at end of file diff --git a/python/src/raw_sub_file.hpp b/python/src/raw_sub_file.hpp new file mode 100644 index 0000000..2cb83fc --- /dev/null +++ b/python/src/raw_sub_file.hpp @@ -0,0 +1,110 @@ +#include "aare/CtbRawFile.hpp" +#include "aare/File.hpp" +#include "aare/Frame.hpp" +#include "aare/RawFile.hpp" +#include "aare/RawMasterFile.hpp" +#include "aare/RawSubFile.hpp" + +#include "aare/defs.hpp" +// #include "aare/fClusterFileV2.hpp" + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace py = pybind11; +using namespace ::aare; + +auto read_frame_from_RawSubFile(RawSubFile &self) { + py::array_t header(1); + const uint8_t item_size = self.bytes_per_pixel(); + std::vector shape{static_cast(self.rows()), + static_cast(self.cols())}; + + py::array image; + if (item_size == 1) { + image = py::array_t(shape); + } else if (item_size == 2) { + image = py::array_t(shape); + } else 
if (item_size == 4) { + image = py::array_t(shape); + } + self.read_into(reinterpret_cast(image.mutable_data()), + header.mutable_data()); + + return py::make_tuple(header, image); +} + +auto read_n_frames_from_RawSubFile(RawSubFile &self, size_t n_frames) { + py::array_t header(n_frames); + const uint8_t item_size = self.bytes_per_pixel(); + std::vector shape{ + static_cast(n_frames), + static_cast(self.rows()), + static_cast(self.cols()) + }; + + py::array image; + if (item_size == 1) { + image = py::array_t(shape); + } else if (item_size == 2) { + image = py::array_t(shape); + } else if (item_size == 4) { + image = py::array_t(shape); + } + self.read_into(reinterpret_cast(image.mutable_data()), n_frames, + header.mutable_data()); + + return py::make_tuple(header, image); +} + + +//Disable warnings for unused parameters, as we ignore some +//in the __exit__ method +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" + +void define_raw_sub_file_io_bindings(py::module &m) { + py::class_(m, "RawSubFile") + .def(py::init()) + .def_property_readonly("bytes_per_frame", &RawSubFile::bytes_per_frame) + .def_property_readonly("pixels_per_frame", + &RawSubFile::pixels_per_frame) + .def_property_readonly("bytes_per_pixel", &RawSubFile::bytes_per_pixel) + .def("seek", &RawSubFile::seek) + .def("tell", &RawSubFile::tell) + .def_property_readonly("rows", &RawSubFile::rows) + .def_property_readonly("cols", &RawSubFile::cols) + .def_property_readonly("frames_in_file", &RawSubFile::frames_in_file) + .def("read_frame", &read_frame_from_RawSubFile) + .def("read_n", &read_n_frames_from_RawSubFile) + .def("read", [](RawSubFile &self){ + self.seek(0); + auto n_frames = self.frames_in_file(); + return read_n_frames_from_RawSubFile(self, n_frames); + }) + .def("__enter__", [](RawSubFile &self) { return &self; }) + .def("__exit__", + [](RawSubFile &self, + const std::optional &exc_type, + const std::optional &exc_value, + const std::optional &traceback) { + }) + .def("__iter__", [](RawSubFile &self) { return &self; }) + .def("__next__", [](RawSubFile &self) { + try { + return read_frame_from_RawSubFile(self); + } catch (std::runtime_error &e) { + throw py::stop_iteration(); + } + }); + +} + +#pragma GCC diagnostic pop \ No newline at end of file diff --git a/python/tests/conftest.py b/python/tests/conftest.py new file mode 100644 index 0000000..5badf13 --- /dev/null +++ b/python/tests/conftest.py @@ -0,0 +1,29 @@ +import os +from pathlib import Path +import pytest + + + +def pytest_addoption(parser): + parser.addoption( + "--files", action="store_true", default=False, help="run slow tests" + ) + + +def pytest_configure(config): + config.addinivalue_line("markers", "files: mark test as needing image files to run") + + +def pytest_collection_modifyitems(config, items): + if config.getoption("--files"): + return + skip = pytest.mark.skip(reason="need --files option to run") + for item in items: + if "files" in item.keywords: + item.add_marker(skip) + + +@pytest.fixture +def test_data_path(): + return Path(os.environ["AARE_TEST_DATA"]) + diff --git a/python/tests/test_RawSubFile.py b/python/tests/test_RawSubFile.py new file mode 100644 index 0000000..a5eea91 --- /dev/null +++ b/python/tests/test_RawSubFile.py @@ -0,0 +1,36 @@ +import pytest +import numpy as np +from aare import RawSubFile, DetectorType + + +@pytest.mark.files +def test_read_a_jungfrau_RawSubFile(test_data_path): + with RawSubFile(test_data_path / "raw/jungfrau/jungfrau_single_d0_f1_0.raw", DetectorType.Jungfrau, 512, 
1024, 16) as f: + assert f.frames_in_file == 3 + + headers, frames = f.read() + + assert headers.size == 3 + assert frames.shape == (3, 512, 1024) + + # Frame numbers in this file should be 4, 5, 6 + for i,h in zip(range(4,7,1), headers): + assert h["frameNumber"] == i + + # Compare to canned data using numpy + data = np.load(test_data_path / "raw/jungfrau/jungfrau_single_0.npy") + assert np.all(data[3:6] == frames) + +@pytest.mark.files +def test_iterate_over_a_jungfrau_RawSubFile(test_data_path): + + data = np.load(test_data_path / "raw/jungfrau/jungfrau_single_0.npy") + + with RawSubFile(test_data_path / "raw/jungfrau/jungfrau_single_d0_f0_0.raw", DetectorType.Jungfrau, 512, 1024, 16) as f: + i = 0 + for header, frame in f: + assert header["frameNumber"] == i+1 + assert np.all(frame == data[i]) + i += 1 + assert i == 3 + assert header["frameNumber"] == 3 \ No newline at end of file diff --git a/python/tests/test_jungfrau_dat_files.py b/python/tests/test_jungfrau_dat_files.py new file mode 100644 index 0000000..5d3fdf8 --- /dev/null +++ b/python/tests/test_jungfrau_dat_files.py @@ -0,0 +1,92 @@ +import pytest +import numpy as np +from aare import JungfrauDataFile + +@pytest.mark.files +def test_jfungfrau_dat_read_number_of_frames(test_data_path): + with JungfrauDataFile(test_data_path / "dat/AldoJF500k_000000.dat") as dat_file: + assert dat_file.total_frames == 24 + + with JungfrauDataFile(test_data_path / "dat/AldoJF250k_000000.dat") as dat_file: + assert dat_file.total_frames == 53 + + with JungfrauDataFile(test_data_path / "dat/AldoJF65k_000000.dat") as dat_file: + assert dat_file.total_frames == 113 + + +@pytest.mark.files +def test_jfungfrau_dat_read_number_of_file(test_data_path): + with JungfrauDataFile(test_data_path / "dat/AldoJF500k_000000.dat") as dat_file: + assert dat_file.n_files == 4 + + with JungfrauDataFile(test_data_path / "dat/AldoJF250k_000000.dat") as dat_file: + assert dat_file.n_files == 7 + + with JungfrauDataFile(test_data_path / "dat/AldoJF65k_000000.dat") as dat_file: + assert dat_file.n_files == 7 + + +@pytest.mark.files +def test_read_module(test_data_path): + """ + Read all frames from the series of .dat files. Compare to canned data in npz format. 
+ """ + + # Read all frames from the .dat file + with JungfrauDataFile(test_data_path / "dat/AldoJF500k_000000.dat") as f: + header, data = f.read() + + #Sanity check + n_frames = 24 + assert header.size == n_frames + assert data.shape == (n_frames, 512, 1024) + + # Read reference data using numpy + with np.load(test_data_path / "dat/AldoJF500k.npz") as f: + ref_header = f["headers"] + ref_data = f["frames"] + + # Check that the data is the same + assert np.all(ref_header == header) + assert np.all(ref_data == data) + +@pytest.mark.files +def test_read_half_module(test_data_path): + + # Read all frames from the .dat file + with JungfrauDataFile(test_data_path / "dat/AldoJF250k_000000.dat") as f: + header, data = f.read() + + n_frames = 53 + assert header.size == n_frames + assert data.shape == (n_frames, 256, 1024) + + # Read reference data using numpy + with np.load(test_data_path / "dat/AldoJF250k.npz") as f: + ref_header = f["headers"] + ref_data = f["frames"] + + # Check that the data is the same + assert np.all(ref_header == header) + assert np.all(ref_data == data) + + +@pytest.mark.files +def test_read_single_chip(test_data_path): + + # Read all frames from the .dat file + with JungfrauDataFile(test_data_path / "dat/AldoJF65k_000000.dat") as f: + header, data = f.read() + + n_frames = 113 + assert header.size == n_frames + assert data.shape == (n_frames, 256, 256) + + # Read reference data using numpy + with np.load(test_data_path / "dat/AldoJF65k.npz") as f: + ref_header = f["headers"] + ref_data = f["frames"] + + # Check that the data is the same + assert np.all(ref_header == header) + assert np.all(ref_data == data) \ No newline at end of file diff --git a/src/ClusterFile.cpp b/src/ClusterFile.cpp index f77ac92..d24e803 100644 --- a/src/ClusterFile.cpp +++ b/src/ClusterFile.cpp @@ -41,6 +41,12 @@ void ClusterFile::set_noise_map(const NDView noise_map){ void ClusterFile::set_gain_map(const NDView gain_map){ m_gain_map = NDArray(gain_map); + + // Gain map is passed as ADU/keV to avoid dividing in when applying the gain + // map we invert it here + for (auto &item : m_gain_map->view()) { + item = 1.0 / item; + } } ClusterFile::~ClusterFile() { close(); } diff --git a/src/ClusterFile.test.cpp b/src/ClusterFile.test.cpp index a0eed04..4152ce0 100644 --- a/src/ClusterFile.test.cpp +++ b/src/ClusterFile.test.cpp @@ -11,9 +11,10 @@ using aare::ClusterFile; -TEST_CASE("Read one frame from a a cluster file", "[.integration]") { + +TEST_CASE("Read one frame from a a cluster file", "[.files]") { //We know that the frame has 97 clusters - auto fpath = test_data_path() / "clusters" / "single_frame_97_clustrers.clust"; + auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust"; REQUIRE(std::filesystem::exists(fpath)); ClusterFile f(fpath); @@ -22,9 +23,10 @@ TEST_CASE("Read one frame from a a cluster file", "[.integration]") { REQUIRE(clusters.frame_number() == 135); } -TEST_CASE("Read one frame using ROI", "[.integration]") { + +TEST_CASE("Read one frame using ROI", "[.files]") { //We know that the frame has 97 clusters - auto fpath = test_data_path() / "clusters" / "single_frame_97_clustrers.clust"; + auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust"; REQUIRE(std::filesystem::exists(fpath)); ClusterFile f(fpath); @@ -50,9 +52,11 @@ TEST_CASE("Read one frame using ROI", "[.integration]") { } -TEST_CASE("Read clusters from single frame file", "[.integration]") { - auto fpath = test_data_path() / "clusters" / "single_frame_97_clustrers.clust"; 
+TEST_CASE("Read clusters from single frame file", "[.files]") { + + auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust"; + REQUIRE(std::filesystem::exists(fpath)); SECTION("Read fewer clusters than available") { diff --git a/src/File.cpp b/src/File.cpp index 3c68eff..eb04893 100644 --- a/src/File.cpp +++ b/src/File.cpp @@ -1,4 +1,5 @@ #include "aare/File.hpp" +#include "aare/JungfrauDataFile.hpp" #include "aare/NumpyFile.hpp" #include "aare/RawFile.hpp" @@ -27,6 +28,8 @@ File::File(const std::filesystem::path &fname, const std::string &mode, else if (fname.extension() == ".npy") { // file_impl = new NumpyFile(fname, mode, cfg); file_impl = std::make_unique(fname, mode, cfg); + }else if(fname.extension() == ".dat"){ + file_impl = std::make_unique(fname); } else { throw std::runtime_error("Unsupported file type"); } diff --git a/src/FilePtr.cpp b/src/FilePtr.cpp new file mode 100644 index 0000000..4fed3d7 --- /dev/null +++ b/src/FilePtr.cpp @@ -0,0 +1,44 @@ + +#include "aare/FilePtr.hpp" +#include +#include +#include + +namespace aare { + +FilePtr::FilePtr(const std::filesystem::path& fname, const std::string& mode = "rb") { + fp_ = fopen(fname.c_str(), mode.c_str()); + if (!fp_) + throw std::runtime_error(fmt::format("Could not open: {}", fname.c_str())); +} + +FilePtr::FilePtr(FilePtr &&other) { std::swap(fp_, other.fp_); } + +FilePtr &FilePtr::operator=(FilePtr &&other) { + std::swap(fp_, other.fp_); + return *this; +} + +FILE *FilePtr::get() { return fp_; } + +int64_t FilePtr::tell() { + auto pos = ftell(fp_); + if (pos == -1) + throw std::runtime_error(fmt::format("Error getting file position: {}", error_msg())); + return pos; +} +FilePtr::~FilePtr() { + if (fp_) + fclose(fp_); // check? +} + +std::string FilePtr::error_msg(){ + if (feof(fp_)) { + return "End of file reached"; + } + if (ferror(fp_)) { + return fmt::format("Error reading file: {}", std::strerror(errno)); + } + return ""; +} +} // namespace aare diff --git a/src/Fit.cpp b/src/Fit.cpp index 3001efd..9126109 100644 --- a/src/Fit.cpp +++ b/src/Fit.cpp @@ -18,7 +18,7 @@ double gaus(const double x, const double *par) { NDArray gaus(NDView x, NDView par) { NDArray y({x.shape(0)}, 0); - for (size_t i = 0; i < x.size(); i++) { + for (ssize_t i = 0; i < x.size(); i++) { y(i) = gaus(x(i), par.data()); } return y; @@ -28,7 +28,7 @@ double pol1(const double x, const double *par) { return par[0] * x + par[1]; } NDArray pol1(NDView x, NDView par) { NDArray y({x.shape()}, 0); - for (size_t i = 0; i < x.size(); i++) { + for (ssize_t i = 0; i < x.size(); i++) { y(i) = pol1(x(i), par.data()); } return y; @@ -153,7 +153,7 @@ void fit_gaus(NDView x, NDView y, NDView y_err, // Calculate chi2 chi2 = 0; - for (size_t i = 0; i < y.size(); i++) { + for (ssize_t i = 0; i < y.size(); i++) { chi2 += std::pow((y(i) - func::gaus(x(i), par_out.data())) / y_err(i), 2); } } @@ -205,7 +205,7 @@ void fit_pol1(NDView x, NDView y, NDView y_err, // Calculate chi2 chi2 = 0; - for (size_t i = 0; i < y.size(); i++) { + for (ssize_t i = 0; i < y.size(); i++) { chi2 += std::pow((y(i) - func::pol1(x(i), par_out.data())) / y_err(i), 2); } } diff --git a/src/Interpolator.cpp b/src/Interpolator.cpp index 7f82533..7034a83 100644 --- a/src/Interpolator.cpp +++ b/src/Interpolator.cpp @@ -68,19 +68,14 @@ std::vector Interpolator::interpolate(const ClusterVector& clus photon.y = cluster.y; photon.energy = eta.sum; - // auto ie = nearest_index(m_energy_bins, photon.energy)-1; - // auto ix = nearest_index(m_etabinsx, eta.x)-1; - // auto iy = 
nearest_index(m_etabinsy, eta.y)-1; + //Finding the index of the last element that is smaller //should work fine as long as we have many bins auto ie = last_smaller(m_energy_bins, photon.energy); auto ix = last_smaller(m_etabinsx, eta.x); auto iy = last_smaller(m_etabinsy, eta.y); - - // fmt::print("ex: {}, ix: {}, iy: {}\n", ie, ix, iy); - double dX, dY; - int ex, ey; + double dX{}, dY{}; // cBottomLeft = 0, // cBottomRight = 1, // cTopLeft = 2, diff --git a/src/JungfrauDataFile.cpp b/src/JungfrauDataFile.cpp new file mode 100644 index 0000000..8f1f904 --- /dev/null +++ b/src/JungfrauDataFile.cpp @@ -0,0 +1,238 @@ +#include "aare/JungfrauDataFile.hpp" +#include "aare/algorithm.hpp" +#include "aare/defs.hpp" + +#include +#include + +namespace aare { + +JungfrauDataFile::JungfrauDataFile(const std::filesystem::path &fname) { + + if (!std::filesystem::exists(fname)) { + throw std::runtime_error(LOCATION + + "File does not exist: " + fname.string()); + } + find_frame_size(fname); + parse_fname(fname); + scan_files(); + open_file(m_current_file_index); +} + + +// FileInterface + +Frame JungfrauDataFile::read_frame(){ + Frame f(rows(), cols(), Dtype::UINT16); + read_into(reinterpret_cast(f.data()), nullptr); + return f; +} + +Frame JungfrauDataFile::read_frame(size_t frame_number){ + seek(frame_number); + Frame f(rows(), cols(), Dtype::UINT16); + read_into(reinterpret_cast(f.data()), nullptr); + return f; +} + +std::vector JungfrauDataFile::read_n(size_t n_frames) { + std::vector frames; + for(size_t i = 0; i < n_frames; ++i){ + frames.push_back(read_frame()); + } + return frames; +} + +void JungfrauDataFile::read_into(std::byte *image_buf) { + read_into(image_buf, nullptr); +} +void JungfrauDataFile::read_into(std::byte *image_buf, size_t n_frames) { + read_into(image_buf, n_frames, nullptr); +} + +size_t JungfrauDataFile::frame_number(size_t frame_index) { + seek(frame_index); + return read_header().framenum; +} + +std::array JungfrauDataFile::shape() const { + return {static_cast(rows()), static_cast(cols())}; +} + +DetectorType JungfrauDataFile::detector_type() const { return DetectorType::Jungfrau; } + +std::string JungfrauDataFile::base_name() const { return m_base_name; } + +size_t JungfrauDataFile::bytes_per_frame() { return m_bytes_per_frame; } + +size_t JungfrauDataFile::pixels_per_frame() { return m_rows * m_cols; } + +size_t JungfrauDataFile::bytes_per_pixel() const { return sizeof(pixel_type); } + +size_t JungfrauDataFile::bitdepth() const { + return bytes_per_pixel() * bits_per_byte; +} + +void JungfrauDataFile::seek(size_t frame_index) { + if (frame_index >= m_total_frames) { + throw std::runtime_error(LOCATION + "Frame index out of range: " + + std::to_string(frame_index)); + } + m_current_frame_index = frame_index; + auto file_index = first_larger(m_last_frame_in_file, frame_index); + + if (file_index != m_current_file_index) + open_file(file_index); + + auto frame_offset = (file_index) + ? 
frame_index - m_last_frame_in_file[file_index - 1] + : frame_index; + auto byte_offset = frame_offset * (m_bytes_per_frame + header_size); + m_fp.seek(byte_offset); +}; + +size_t JungfrauDataFile::tell() { return m_current_frame_index; } +size_t JungfrauDataFile::total_frames() const { return m_total_frames; } +size_t JungfrauDataFile::rows() const { return m_rows; } +size_t JungfrauDataFile::cols() const { return m_cols; } + +size_t JungfrauDataFile::n_files() const { return m_last_frame_in_file.size(); } + +void JungfrauDataFile::find_frame_size(const std::filesystem::path &fname) { + + static constexpr size_t module_data_size = + header_size + sizeof(pixel_type) * 512 * 1024; + static constexpr size_t half_data_size = + header_size + sizeof(pixel_type) * 256 * 1024; + static constexpr size_t chip_data_size = + header_size + sizeof(pixel_type) * 256 * 256; + + auto file_size = std::filesystem::file_size(fname); + if (file_size == 0) { + throw std::runtime_error(LOCATION + + "Cannot guess frame size: file is empty"); + } + + if (file_size % module_data_size == 0) { + m_rows = 512; + m_cols = 1024; + m_bytes_per_frame = module_data_size - header_size; + } else if (file_size % half_data_size == 0) { + m_rows = 256; + m_cols = 1024; + m_bytes_per_frame = half_data_size - header_size; + } else if (file_size % chip_data_size == 0) { + m_rows = 256; + m_cols = 256; + m_bytes_per_frame = chip_data_size - header_size; + } else { + throw std::runtime_error(LOCATION + + "Cannot find frame size: file size is not a " + "multiple of any known frame size"); + } +} + +void JungfrauDataFile::parse_fname(const std::filesystem::path &fname) { + m_path = fname.parent_path(); + m_base_name = fname.stem(); + + // find file index, then remove if from the base name + if (auto pos = m_base_name.find_last_of('_'); pos != std::string::npos) { + m_offset = std::stoul(m_base_name.substr(pos + 1)); + m_base_name.erase(pos); + } +} + +void JungfrauDataFile::scan_files() { + // find how many files we have and the number of frames in each file + m_last_frame_in_file.clear(); + size_t file_index = m_offset; + while (std::filesystem::exists(fpath(file_index))) { + auto n_frames = std::filesystem::file_size(fpath(file_index)) / + (m_bytes_per_frame + header_size); + m_last_frame_in_file.push_back(n_frames); + ++file_index; + } + + // find where we need to open the next file and total number of frames + m_last_frame_in_file = cumsum(m_last_frame_in_file); + m_total_frames = m_last_frame_in_file.back(); +} + +void JungfrauDataFile::read_into(std::byte *image_buf, + JungfrauDataHeader *header) { + + // read header if not passed nullptr + if (header) { + if (auto rc = fread(header, sizeof(JungfrauDataHeader), 1, m_fp.get()); + rc != 1) { + throw std::runtime_error( + LOCATION + + "Could not read header from file:" + m_fp.error_msg()); + } + } else { + m_fp.seek(header_size, SEEK_CUR); + } + + // read data + if (auto rc = fread(image_buf, 1, m_bytes_per_frame, m_fp.get()); + rc != m_bytes_per_frame) { + throw std::runtime_error(LOCATION + "Could not read image from file" + + m_fp.error_msg()); + } + + // prepare for next read + // if we are at the end of the file, open the next file + ++m_current_frame_index; + if (m_current_frame_index >= m_last_frame_in_file[m_current_file_index] && + (m_current_frame_index < m_total_frames)) { + ++m_current_file_index; + open_file(m_current_file_index); + } +} + +void JungfrauDataFile::read_into(std::byte *image_buf, size_t n_frames, + JungfrauDataHeader *header) { + if (header) { + for 
(size_t i = 0; i < n_frames; ++i) + read_into(image_buf + i * m_bytes_per_frame, header + i); + }else{ + for (size_t i = 0; i < n_frames; ++i) + read_into(image_buf + i * m_bytes_per_frame, nullptr); + } +} + +void JungfrauDataFile::read_into(NDArray* image, JungfrauDataHeader* header) { + if(image->shape()!=shape()){ + throw std::runtime_error(LOCATION + + "Image shape does not match file size: " + std::to_string(rows()) + "x" + std::to_string(cols())); + } + read_into(reinterpret_cast(image->data()), header); +} + + +JungfrauDataHeader JungfrauDataFile::read_header() { + JungfrauDataHeader header; + if (auto rc = fread(&header, 1, sizeof(header), m_fp.get()); + rc != sizeof(header)) { + throw std::runtime_error(LOCATION + "Could not read header from file" + + m_fp.error_msg()); + } + m_fp.seek(-header_size, SEEK_CUR); + return header; +} + +void JungfrauDataFile::open_file(size_t file_index) { + // fmt::print(stderr, "Opening file: {}\n", + // fpath(file_index+m_offset).string()); + m_fp = FilePtr(fpath(file_index + m_offset), "rb"); + m_current_file_index = file_index; +} + +std::filesystem::path JungfrauDataFile::fpath(size_t file_index) const { + auto fname = fmt::format("{}_{:0{}}.dat", m_base_name, file_index, + n_digits_in_file_index); + return m_path / fname; +} + +} // namespace aare \ No newline at end of file diff --git a/src/JungfrauDataFile.test.cpp b/src/JungfrauDataFile.test.cpp new file mode 100644 index 0000000..ce51168 --- /dev/null +++ b/src/JungfrauDataFile.test.cpp @@ -0,0 +1,114 @@ +#include "aare/JungfrauDataFile.hpp" + +#include +#include "test_config.hpp" + +using aare::JungfrauDataFile; +using aare::JungfrauDataHeader; +TEST_CASE("Open a Jungfrau data file", "[.files]") { + //we know we have 4 files with 7, 7, 7, and 3 frames + //firs frame number if 1 and the bunch id is frame_number**2 + //so we can check the header + auto fpath = test_data_path() / "dat" / "AldoJF500k_000000.dat"; + REQUIRE(std::filesystem::exists(fpath)); + + JungfrauDataFile f(fpath); + REQUIRE(f.rows() == 512); + REQUIRE(f.cols() == 1024); + REQUIRE(f.bytes_per_frame() == 1048576); + REQUIRE(f.pixels_per_frame() == 524288); + REQUIRE(f.bytes_per_pixel() == 2); + REQUIRE(f.bitdepth() == 16); + REQUIRE(f.base_name() == "AldoJF500k"); + REQUIRE(f.n_files() == 4); + REQUIRE(f.tell() == 0); + REQUIRE(f.total_frames() == 24); + REQUIRE(f.current_file() == fpath); + + //Check that the frame number and buch id is read correctly + for (size_t i = 0; i < 24; ++i) { + JungfrauDataHeader header; + aare::NDArray image(f.shape()); + f.read_into(&image, &header); + REQUIRE(header.framenum == i + 1); + REQUIRE(header.bunchid == (i + 1) * (i + 1)); + REQUIRE(image.shape(0) == 512); + REQUIRE(image.shape(1) == 1024); + } +} + +TEST_CASE("Seek in a JungfrauDataFile", "[.files]"){ + auto fpath = test_data_path() / "dat" / "AldoJF65k_000000.dat"; + REQUIRE(std::filesystem::exists(fpath)); + + JungfrauDataFile f(fpath); + + //The file should have 113 frames + f.seek(19); + REQUIRE(f.tell() == 19); + auto h = f.read_header(); + REQUIRE(h.framenum == 19+1); + + //Reading again does not change the file pointer + auto h2 = f.read_header(); + REQUIRE(h2.framenum == 19+1); + + f.seek(59); + REQUIRE(f.tell() == 59); + auto h3 = f.read_header(); + REQUIRE(h3.framenum == 59+1); + + JungfrauDataHeader h4; + aare::NDArray image(f.shape()); + f.read_into(&image, &h4); + REQUIRE(h4.framenum == 59+1); + + //now we should be on the next frame + REQUIRE(f.tell() == 60); + REQUIRE(f.read_header().framenum == 60+1); + + 
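    // For reference, the access pattern exercised above reduces to the loop
    // sketched here (illustrative only; the <uint16_t, 2> element type and 2D
    // rank are assumptions based on how the file is read elsewhere in this
    // patch, not part of the test):
    //
    //   JungfrauDataFile f(test_data_path() / "dat" / "AldoJF65k_000000.dat");
    //   aare::NDArray<uint16_t, 2> image(f.shape());
    //   JungfrauDataHeader header{};
    //   while (f.tell() < f.total_frames()) {
    //       f.read_into(&image, &header); // advances tell() and rolls over into the next file in the series
    //   }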
REQUIRE_THROWS(f.seek(86356)); //out of range +} + +TEST_CASE("Open a Jungfrau data file with non zero file index", "[.files]"){ + + auto fpath = test_data_path() / "dat" / "AldoJF65k_000003.dat"; + REQUIRE(std::filesystem::exists(fpath)); + + JungfrauDataFile f(fpath); + + //18 files per data file, opening the 3rd file we ignore the first 3 + REQUIRE(f.total_frames() == 113-18*3); + REQUIRE(f.tell() == 0); + + //Frame numbers start at 1 in the first file + REQUIRE(f.read_header().framenum == 18*3+1); + + // moving relative to the third file + f.seek(5); + REQUIRE(f.read_header().framenum == 18*3+1+5); + + // ignoring the first 3 files + REQUIRE(f.n_files() == 4); + + REQUIRE(f.current_file().stem() == "AldoJF65k_000003"); + +} + +TEST_CASE("Read into throws if size doesn't match", "[.files]"){ + auto fpath = test_data_path() / "dat" / "AldoJF65k_000000.dat"; + REQUIRE(std::filesystem::exists(fpath)); + + JungfrauDataFile f(fpath); + + aare::NDArray image({39, 85}); + JungfrauDataHeader header; + + REQUIRE_THROWS(f.read_into(&image, &header)); + REQUIRE_THROWS(f.read_into(&image, nullptr)); + REQUIRE_THROWS(f.read_into(&image)); + + REQUIRE(f.tell() == 0); + + +} \ No newline at end of file diff --git a/src/NDArray.test.cpp b/src/NDArray.test.cpp index eff3e2c..c37a285 100644 --- a/src/NDArray.test.cpp +++ b/src/NDArray.test.cpp @@ -183,14 +183,14 @@ TEST_CASE("Size and shape matches") { int64_t h = 75; std::array shape{w, h}; NDArray a{shape}; - REQUIRE(a.size() == static_cast(w * h)); + REQUIRE(a.size() == w * h); REQUIRE(a.shape() == shape); } TEST_CASE("Initial value matches for all elements") { double v = 4.35; NDArray a{{5, 5}, v}; - for (uint32_t i = 0; i < a.size(); ++i) { + for (int i = 0; i < a.size(); ++i) { REQUIRE(a(i) == v); } } diff --git a/src/NDView.test.cpp b/src/NDView.test.cpp index 3070de6..8750f3a 100644 --- a/src/NDView.test.cpp +++ b/src/NDView.test.cpp @@ -3,6 +3,7 @@ #include #include +#include using aare::NDView; using aare::Shape; @@ -21,10 +22,8 @@ TEST_CASE("Element reference 1D") { } TEST_CASE("Element reference 2D") { - std::vector vec; - for (int i = 0; i != 12; ++i) { - vec.push_back(i); - } + std::vector vec(12); + std::iota(vec.begin(), vec.end(), 0); NDView data(vec.data(), Shape<2>{3, 4}); REQUIRE(vec.size() == static_cast(data.size())); @@ -58,10 +57,8 @@ TEST_CASE("Element reference 3D") { } TEST_CASE("Plus and miuns with single value") { - std::vector vec; - for (int i = 0; i != 12; ++i) { - vec.push_back(i); - } + std::vector vec(12); + std::iota(vec.begin(), vec.end(), 0); NDView data(vec.data(), Shape<2>{3, 4}); data += 5; int i = 0; @@ -116,10 +113,8 @@ TEST_CASE("elementwise assign") { } TEST_CASE("iterators") { - std::vector vec; - for (int i = 0; i != 12; ++i) { - vec.push_back(i); - } + std::vector vec(12); + std::iota(vec.begin(), vec.end(), 0); NDView data(vec.data(), Shape<1>{12}); int i = 0; for (const auto item : data) { @@ -167,27 +162,31 @@ TEST_CASE("divide with another span") { } TEST_CASE("Retrieve shape") { - std::vector vec; - for (int i = 0; i != 12; ++i) { - vec.push_back(i); - } + std::vector vec(12); + std::iota(vec.begin(), vec.end(), 0); NDView data(vec.data(), Shape<2>{3, 4}); REQUIRE(data.shape()[0] == 3); REQUIRE(data.shape()[1] == 4); } TEST_CASE("compare two views") { - std::vector vec1; - for (int i = 0; i != 12; ++i) { - vec1.push_back(i); - } + std::vector vec1(12); + std::iota(vec1.begin(), vec1.end(), 0); NDView view1(vec1.data(), Shape<2>{3, 4}); - std::vector vec2; - for (int i = 0; i != 12; ++i) { - 
vec2.push_back(i); - } + std::vector vec2(12); + std::iota(vec2.begin(), vec2.end(), 0); NDView view2(vec2.data(), Shape<2>{3, 4}); REQUIRE((view1 == view2)); +} + + +TEST_CASE("Create a view over a vector"){ + std::vector vec(12); + std::iota(vec.begin(), vec.end(), 0); + auto v = aare::make_view(vec); + REQUIRE(v.shape()[0] == 12); + REQUIRE(v[0] == 0); + REQUIRE(v[11] == 11); } \ No newline at end of file diff --git a/src/RawSubFile.cpp b/src/RawSubFile.cpp index a3bb79c..9e7a421 100644 --- a/src/RawSubFile.cpp +++ b/src/RawSubFile.cpp @@ -1,9 +1,12 @@ #include "aare/RawSubFile.hpp" #include "aare/PixelMap.hpp" +#include "aare/utils/ifstream_helpers.hpp" #include // memcpy #include #include + + namespace aare { RawSubFile::RawSubFile(const std::filesystem::path &fname, @@ -20,7 +23,7 @@ RawSubFile::RawSubFile(const std::filesystem::path &fname, } if (std::filesystem::exists(fname)) { - n_frames = std::filesystem::file_size(fname) / + m_num_frames = std::filesystem::file_size(fname) / (sizeof(DetectorHeader) + rows * cols * bitdepth / 8); } else { throw std::runtime_error( @@ -35,7 +38,7 @@ RawSubFile::RawSubFile(const std::filesystem::path &fname, } #ifdef AARE_VERBOSE - fmt::print("Opened file: {} with {} frames\n", m_fname.string(), n_frames); + fmt::print("Opened file: {} with {} frames\n", m_fname.string(), m_num_frames); fmt::print("m_rows: {}, m_cols: {}, m_bitdepth: {}\n", m_rows, m_cols, m_bitdepth); fmt::print("file size: {}\n", std::filesystem::file_size(fname)); @@ -43,8 +46,8 @@ RawSubFile::RawSubFile(const std::filesystem::path &fname, } void RawSubFile::seek(size_t frame_index) { - if (frame_index >= n_frames) { - throw std::runtime_error(LOCATION + fmt::format("Frame index {} out of range in a file with {} frames", frame_index, n_frames)); + if (frame_index >= m_num_frames) { + throw std::runtime_error(LOCATION + fmt::format("Frame index {} out of range in a file with {} frames", frame_index, m_num_frames)); } m_file.seekg((sizeof(DetectorHeader) + bytes_per_frame()) * frame_index); } @@ -60,6 +63,10 @@ void RawSubFile::read_into(std::byte *image_buf, DetectorHeader *header) { m_file.seekg(sizeof(DetectorHeader), std::ios::cur); } + if (m_file.fail()){ + throw std::runtime_error(LOCATION + ifstream_error_msg(m_file)); + } + // TODO! 
expand support for different bitdepths if (m_pixel_map) { // read into a temporary buffer and then copy the data to the buffer @@ -79,8 +86,24 @@ void RawSubFile::read_into(std::byte *image_buf, DetectorHeader *header) { // read directly into the buffer m_file.read(reinterpret_cast(image_buf), bytes_per_frame()); } + + if (m_file.fail()){ + throw std::runtime_error(LOCATION + ifstream_error_msg(m_file)); + } } +void RawSubFile::read_into(std::byte *image_buf, size_t n_frames, DetectorHeader *header) { + for (size_t i = 0; i < n_frames; i++) { + read_into(image_buf, header); + image_buf += bytes_per_frame(); + if (header) { + ++header; + } + } +} + + + template void RawSubFile::read_with_map(std::byte *image_buf) { auto part_buffer = new std::byte[bytes_per_frame()]; diff --git a/src/algorithm.test.cpp b/src/algorithm.test.cpp index fcfa8d2..79541a1 100644 --- a/src/algorithm.test.cpp +++ b/src/algorithm.test.cpp @@ -6,7 +6,7 @@ TEST_CASE("Find the closed index in a 1D array", "[algorithm]") { aare::NDArray arr({5}); - for (size_t i = 0; i < arr.size(); i++) { + for (ssize_t i = 0; i < arr.size(); i++) { arr[i] = i; } // arr 0, 1, 2, 3, 4 @@ -19,7 +19,7 @@ TEST_CASE("Find the closed index in a 1D array", "[algorithm]") { TEST_CASE("Passing integers to nearest_index works", "[algorithm]"){ aare::NDArray arr({5}); - for (size_t i = 0; i < arr.size(); i++) { + for (ssize_t i = 0; i < arr.size(); i++) { arr[i] = i; } // arr 0, 1, 2, 3, 4 @@ -49,10 +49,20 @@ TEST_CASE("nearest index works with std::array", "[algorithm]"){ REQUIRE(aare::nearest_index(arr, -10.0) == 0); } +TEST_CASE("nearest index when there is no different uses the first element", "[algorithm]"){ + std::vector vec = {5, 5, 5, 5, 5}; + REQUIRE(aare::nearest_index(vec, 5) == 0); +} + +TEST_CASE("nearest index when there is no different uses the first element also when all smaller", "[algorithm]"){ + std::vector vec = {5, 5, 5, 5, 5}; + REQUIRE(aare::nearest_index(vec, 10) == 0); +} + TEST_CASE("last smaller", "[algorithm]"){ aare::NDArray arr({5}); - for (size_t i = 0; i < arr.size(); i++) { + for (ssize_t i = 0; i < arr.size(); i++) { arr[i] = i; } // arr 0, 1, 2, 3, 4 @@ -64,10 +74,86 @@ TEST_CASE("last smaller", "[algorithm]"){ TEST_CASE("returns last bin strictly smaller", "[algorithm]"){ aare::NDArray arr({5}); - for (size_t i = 0; i < arr.size(); i++) { + for (ssize_t i = 0; i < arr.size(); i++) { arr[i] = i; } // arr 0, 1, 2, 3, 4 - REQUIRE(aare::last_smaller(arr, 2.0) == 2); + REQUIRE(aare::last_smaller(arr, 2.0) == 1); + +} + +TEST_CASE("last_smaller with all elements smaller returns last element", "[algorithm]"){ + aare::NDArray arr({5}); + for (ssize_t i = 0; i < arr.size(); i++) { + arr[i] = i; + } + // arr 0, 1, 2, 3, 4 + REQUIRE(aare::last_smaller(arr, 50.) == 4); +} + +TEST_CASE("last_smaller with all elements bigger returns first element", "[algorithm]"){ + aare::NDArray arr({5}); + for (ssize_t i = 0; i < arr.size(); i++) { + arr[i] = i; + } + // arr 0, 1, 2, 3, 4 + REQUIRE(aare::last_smaller(arr, -50.) == 0); +} + +TEST_CASE("last smaller with all elements equal returns the first element", "[algorithm]"){ + std::vector vec = {5,5,5,5,5,5,5}; + REQUIRE(aare::last_smaller(vec, 5) == 0); +} + + +TEST_CASE("first_lager with vector", "[algorithm]"){ + std::vector vec = {0, 1, 2, 3, 4}; + REQUIRE(aare::first_larger(vec, 2.5) == 3); +} + +TEST_CASE("first_lager with all elements smaller returns last element", "[algorithm]"){ + std::vector vec = {0, 1, 2, 3, 4}; + REQUIRE(aare::first_larger(vec, 50.) 
== 4); +} + +TEST_CASE("first_lager with all elements bigger returns first element", "[algorithm]"){ + std::vector vec = {0, 1, 2, 3, 4}; + REQUIRE(aare::first_larger(vec, -50.) == 0); +} + +TEST_CASE("first_lager with all elements the same as the check returns last", "[algorithm]"){ + std::vector vec = {14, 14, 14, 14, 14}; + REQUIRE(aare::first_larger(vec, 14) == 4); +} + +TEST_CASE("first larger with the same element", "[algorithm]"){ + std::vector vec = {7,8,9,10,11}; + REQUIRE(aare::first_larger(vec, 9) == 3); +} + +TEST_CASE("cumsum works", "[algorithm]"){ + std::vector vec = {0, 1, 2, 3, 4}; + auto result = aare::cumsum(vec); + REQUIRE(result.size() == vec.size()); + REQUIRE(result[0] == 0); + REQUIRE(result[1] == 1); + REQUIRE(result[2] == 3); + REQUIRE(result[3] == 6); + REQUIRE(result[4] == 10); +} +TEST_CASE("cumsum works with empty vector", "[algorithm]"){ + std::vector vec = {}; + auto result = aare::cumsum(vec); + REQUIRE(result.size() == 0); +} +TEST_CASE("cumsum works with negative numbers", "[algorithm]"){ + std::vector vec = {0, -1, -2, -3, -4}; + auto result = aare::cumsum(vec); + REQUIRE(result.size() == vec.size()); + REQUIRE(result[0] == 0); + REQUIRE(result[1] == -1); + REQUIRE(result[2] == -3); + REQUIRE(result[3] == -6); + REQUIRE(result[4] == -10); +} -} \ No newline at end of file diff --git a/src/decode.cpp b/src/decode.cpp index 17c033d..8ac7bc0 100644 --- a/src/decode.cpp +++ b/src/decode.cpp @@ -1,5 +1,5 @@ #include "aare/decode.hpp" - +#include namespace aare { uint16_t adc_sar_05_decode64to16(uint64_t input){ @@ -22,6 +22,10 @@ uint16_t adc_sar_05_decode64to16(uint64_t input){ } void adc_sar_05_decode64to16(NDView input, NDView output){ + if(input.shape() != output.shape()){ + throw std::invalid_argument(LOCATION + " input and output shapes must match"); + } + for(int64_t i = 0; i < input.shape(0); i++){ for(int64_t j = 0; j < input.shape(1); j++){ output(i,j) = adc_sar_05_decode64to16(input(i,j)); @@ -49,6 +53,9 @@ uint16_t adc_sar_04_decode64to16(uint64_t input){ } void adc_sar_04_decode64to16(NDView input, NDView output){ + if(input.shape() != output.shape()){ + throw std::invalid_argument(LOCATION + " input and output shapes must match"); + } for(int64_t i = 0; i < input.shape(0); i++){ for(int64_t j = 0; j < input.shape(1); j++){ output(i,j) = adc_sar_04_decode64to16(input(i,j)); @@ -56,6 +63,40 @@ void adc_sar_04_decode64to16(NDView input, NDView outpu } } +double apply_custom_weights(uint16_t input, const NDView weights) { + if(weights.size() > 16){ + throw std::invalid_argument("weights size must be less than or equal to 16"); + } + + double result = 0.0; + for (ssize_t i = 0; i < weights.size(); ++i) { + result += ((input >> i) & 1) * std::pow(weights[i], i); + } + return result; + +} + +void apply_custom_weights(NDView input, NDView output, const NDView weights) { + if(input.shape() != output.shape()){ + throw std::invalid_argument(LOCATION + " input and output shapes must match"); + } + + //Calculate weights to avoid repeatedly calling std::pow + std::vector weights_powers(weights.size()); + for (ssize_t i = 0; i < weights.size(); ++i) { + weights_powers[i] = std::pow(weights[i], i); + } + + // Apply custom weights to each element in the input array + for (ssize_t i = 0; i < input.shape(0); i++) { + double result = 0.0; + for (size_t bit_index = 0; bit_index < weights_powers.size(); ++bit_index) { + result += ((input(i) >> bit_index) & 1) * weights_powers[bit_index]; + } + output(i) = result; + } +} + } // namespace aare diff --git 
a/src/decode.test.cpp b/src/decode.test.cpp new file mode 100644 index 0000000..1e4b2fc --- /dev/null +++ b/src/decode.test.cpp @@ -0,0 +1,80 @@ +#include "aare/decode.hpp" + +#include +#include +#include "aare/NDArray.hpp" +using Catch::Matchers::WithinAbs; +#include + +TEST_CASE("test_adc_sar_05_decode64to16"){ + uint64_t input = 0; + uint16_t output = aare::adc_sar_05_decode64to16(input); + CHECK(output == 0); + + + // bit 29 on th input is bit 0 on the output + input = 1UL << 29; + output = aare::adc_sar_05_decode64to16(input); + CHECK(output == 1); + + // test all bits by iteratting through the bitlist + std::vector bitlist = {29, 19, 28, 18, 31, 21, 27, 20, 24, 23, 25, 22}; + for (size_t i = 0; i < bitlist.size(); i++) { + input = 1UL << bitlist[i]; + output = aare::adc_sar_05_decode64to16(input); + CHECK(output == (1 << i)); + } + + + // test a few "random" values + input = 0; + input |= (1UL << 29); + input |= (1UL << 19); + input |= (1UL << 28); + output = aare::adc_sar_05_decode64to16(input); + CHECK(output == 7UL); + + + input = 0; + input |= (1UL << 18); + input |= (1UL << 27); + input |= (1UL << 25); + output = aare::adc_sar_05_decode64to16(input); + CHECK(output == 1096UL); + + input = 0; + input |= (1UL << 25); + input |= (1UL << 22); + output = aare::adc_sar_05_decode64to16(input); + CHECK(output == 3072UL); + } + + + TEST_CASE("test_apply_custom_weights") { + + uint16_t input = 1; + aare::NDArray weights_data({3}, 0.0); + weights_data(0) = 1.7; + weights_data(1) = 2.1; + weights_data(2) = 1.8; + + auto weights = weights_data.view(); + + + double output = aare::apply_custom_weights(input, weights); + CHECK_THAT(output, WithinAbs(1.0, 0.001)); + + input = 1 << 1; + output = aare::apply_custom_weights(input, weights); + CHECK_THAT(output, WithinAbs(2.1, 0.001)); + + + input = 1 << 2; + output = aare::apply_custom_weights(input, weights); + CHECK_THAT(output, WithinAbs(3.24, 0.001)); + + input = 0b111; + output = aare::apply_custom_weights(input, weights); + CHECK_THAT(output, WithinAbs(6.34, 0.001)); + + } \ No newline at end of file diff --git a/src/utils/ifstream_helpers.cpp b/src/utils/ifstream_helpers.cpp new file mode 100644 index 0000000..74c56f3 --- /dev/null +++ b/src/utils/ifstream_helpers.cpp @@ -0,0 +1,18 @@ +#include "aare/utils/ifstream_helpers.hpp" + +namespace aare { + +std::string ifstream_error_msg(std::ifstream &ifs) { + std::ios_base::iostate state = ifs.rdstate(); + if (state & std::ios_base::eofbit) { + return " End of file reached"; + } else if (state & std::ios_base::badbit) { + return " Bad file stream"; + } else if (state & std::ios_base::failbit) { + return " File read failed"; + }else{ + return " Unknown/no error"; + } +} + +} // namespace aare From a6eebbe9bd414ff71d8fdc25aa2a8effb42cc14a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Tue, 20 May 2025 15:27:38 +0200 Subject: [PATCH 02/13] removed extra const on return type, added cast (#177) Fixed warnings on apple clang: - removed extra const on return type - added cast to suppress a float to double conversion warning --- include/aare/ClusterVector.hpp | 4 ++-- include/aare/Interpolator.hpp | 4 ++-- src/Fit.cpp | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp index c8b1ea1..9d575d9 100644 --- a/include/aare/ClusterVector.hpp +++ b/include/aare/ClusterVector.hpp @@ -133,9 +133,9 @@ class ClusterVector> { */ size_t capacity() const { return m_data.capacity(); } - const auto begin() const { return 
m_data.begin(); } + auto begin() const { return m_data.begin(); } - const auto end() const { return m_data.end(); } + auto end() const { return m_data.end(); } /** * @brief Return the size in bytes of a single cluster diff --git a/include/aare/Interpolator.hpp b/include/aare/Interpolator.hpp index d2b2322..8e65f38 100644 --- a/include/aare/Interpolator.hpp +++ b/include/aare/Interpolator.hpp @@ -51,7 +51,7 @@ Interpolator::interpolate(const ClusterVector &clusters) { Photon photon; photon.x = cluster.x; photon.y = cluster.y; - photon.energy = eta.sum; + photon.energy = static_cast(eta.sum); // auto ie = nearest_index(m_energy_bins, photon.energy)-1; // auto ix = nearest_index(m_etabinsx, eta.x)-1; @@ -99,7 +99,7 @@ Interpolator::interpolate(const ClusterVector &clusters) { Photon photon; photon.x = cluster.x; photon.y = cluster.y; - photon.energy = eta.sum; + photon.energy = static_cast(eta.sum); // Now do some actual interpolation. // Find which energy bin the cluster is in diff --git a/src/Fit.cpp b/src/Fit.cpp index d104675..25000de 100644 --- a/src/Fit.cpp +++ b/src/Fit.cpp @@ -105,7 +105,7 @@ std::array gaus_init_par(const NDView x, const NDView *e / 2; }) * + [e](double val) { return val > *e / 2; }) * delta / 2.35; return start_par; From 9e1b8731b03673d4938f2f4915a5c2f3f01aa3ac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Thu, 22 May 2025 11:00:03 +0200 Subject: [PATCH 03/13] RawSubFile support multi file access (#173) This PR is a fix/improvement to a problem that Jonathan had. (#156) The original implementation opened all subfiles at once witch works for normal sized datasets but fails at a certain point (thousands of files). - This solution uses RawSubFile to manage the different file indicies and only opens the file we need - Added logger.h from slsDetectorPackage for debug printing (in production no messages should be visible) --- CMakeLists.txt | 5 ++ include/aare/RawFile.hpp | 22 +---- include/aare/RawMasterFile.hpp | 1 + include/aare/RawSubFile.hpp | 20 ++++- include/aare/algorithm.hpp | 11 +++ include/aare/defs.hpp | 2 + include/aare/logger.hpp | 139 ++++++++++++++++++++++++++++++++ python/examples/play.py | 126 ++++++++++++++++------------- python/src/raw_file.hpp | 8 +- python/tests/test_RawSubFile.py | 19 +++-- src/RawFile.cpp | 132 +++++++++--------------------- src/RawFile.test.cpp | 7 +- src/RawMasterFile.cpp | 4 + src/RawSubFile.cpp | 128 +++++++++++++++++++++++------ src/RawSubFile.test.cpp | 76 +++++++++++++++++ src/algorithm.test.cpp | 33 ++++++++ 16 files changed, 517 insertions(+), 216 deletions(-) create mode 100644 include/aare/logger.hpp create mode 100644 src/RawSubFile.test.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index fc51c14..dddb44b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -79,6 +79,9 @@ endif() if(AARE_VERBOSE) add_compile_definitions(AARE_VERBOSE) + add_compile_definitions(AARE_LOG_LEVEL=aare::logDEBUG5) +else() + add_compile_definitions(AARE_LOG_LEVEL=aare::logERROR) endif() if(AARE_CUSTOM_ASSERT) @@ -90,6 +93,7 @@ if(AARE_BENCHMARKS) endif() + set(CMAKE_EXPORT_COMPILE_COMMANDS ON) if(AARE_FETCH_LMFIT) @@ -452,6 +456,7 @@ if(AARE_TESTS) ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyHelpers.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/RawFile.test.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/RawSubFile.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/utils/task.test.cpp ) diff --git a/include/aare/RawFile.hpp b/include/aare/RawFile.hpp index f744ac2..1cca1fd 100644 --- 
a/include/aare/RawFile.hpp +++ b/include/aare/RawFile.hpp @@ -30,22 +30,11 @@ struct ModuleConfig { * Consider using that unless you need raw file specific functionality. */ class RawFile : public FileInterface { - size_t n_subfiles{}; //f0,f1...fn - size_t n_subfile_parts{}; // d0,d1...dn - //TODO! move to vector of SubFile instead of pointers - std::vector> subfiles; //subfiles[f0,f1...fn][d0,d1...dn] - // std::vector positions; - + std::vector> m_subfiles; ModuleConfig cfg{0, 0}; - RawMasterFile m_master; - size_t m_current_frame{}; - - // std::vector m_module_pixel_0; - // size_t m_rows{}; - // size_t m_cols{}; - + size_t m_current_subfile{}; DetectorGeometry m_geometry; public: @@ -56,7 +45,7 @@ class RawFile : public FileInterface { */ RawFile(const std::filesystem::path &fname, const std::string &mode = "r"); - virtual ~RawFile() override; + virtual ~RawFile() override = default; Frame read_frame() override; Frame read_frame(size_t frame_number) override; @@ -80,7 +69,7 @@ class RawFile : public FileInterface { size_t cols() const override; size_t bitdepth() const override; xy geometry(); - size_t n_mod() const; + size_t n_modules() const; RawMasterFile master() const; @@ -115,9 +104,6 @@ class RawFile : public FileInterface { */ static DetectorHeader read_header(const std::filesystem::path &fname); - // void update_geometry_with_roi(); - int find_number_of_subfiles(); - void open_subfiles(); void find_geometry(); }; diff --git a/include/aare/RawMasterFile.hpp b/include/aare/RawMasterFile.hpp index beaeb29..4d143a6 100644 --- a/include/aare/RawMasterFile.hpp +++ b/include/aare/RawMasterFile.hpp @@ -121,6 +121,7 @@ class RawMasterFile { size_t total_frames_expected() const; xy geometry() const; + size_t n_modules() const; std::optional analog_samples() const; std::optional digital_samples() const; diff --git a/include/aare/RawSubFile.hpp b/include/aare/RawSubFile.hpp index 350a475..1059843 100644 --- a/include/aare/RawSubFile.hpp +++ b/include/aare/RawSubFile.hpp @@ -18,11 +18,20 @@ class RawSubFile { std::ifstream m_file; DetectorType m_detector_type; size_t m_bitdepth; - std::filesystem::path m_fname; + std::filesystem::path m_path; //!< path to the subfile + std::string m_base_name; //!< base name used for formatting file names + size_t m_offset{}; //!< file index of the first file, allow starting at non zero file + size_t m_total_frames{}; //!< total number of frames in the series of files size_t m_rows{}; size_t m_cols{}; size_t m_bytes_per_frame{}; - size_t m_num_frames{}; + + + int m_module_index{}; + size_t m_current_file_index{}; //!< The index of the open file + size_t m_current_frame_index{}; //!< The index of the current frame (with reference to all files) + std::vector m_last_frame_in_file{}; //!< Used for seeking to the correct file + uint32_t m_pos_row{}; uint32_t m_pos_col{}; @@ -67,12 +76,17 @@ class RawSubFile { size_t pixels_per_frame() const { return m_rows * m_cols; } size_t bytes_per_pixel() const { return m_bitdepth / bits_per_byte; } - size_t frames_in_file() const { return m_num_frames; } + size_t frames_in_file() const { return m_total_frames; } private: template void read_with_map(std::byte *image_buf); + void parse_fname(const std::filesystem::path &fname); + void scan_files(); + void open_file(size_t file_index); + std::filesystem::path fpath(size_t file_index) const; + }; } // namespace aare \ No newline at end of file diff --git a/include/aare/algorithm.hpp b/include/aare/algorithm.hpp index fc7d51f..be2018f 100644 --- a/include/aare/algorithm.hpp +++ 
b/include/aare/algorithm.hpp @@ -107,5 +107,16 @@ std::vector cumsum(const std::vector& vec) { } +template bool all_equal(const Container &c) { + if (!c.empty() && + std::all_of(begin(c), end(c), + [c](const typename Container::value_type &element) { + return element == c.front(); + })) + return true; + return false; +} + + } // namespace aare \ No newline at end of file diff --git a/include/aare/defs.hpp b/include/aare/defs.hpp index 01d291b..ccf07a5 100644 --- a/include/aare/defs.hpp +++ b/include/aare/defs.hpp @@ -204,6 +204,8 @@ struct DetectorGeometry{ int module_gap_row{}; int module_gap_col{}; std::vector module_pixel_0; + + auto size() const { return module_pixel_0.size(); } }; struct ROI{ diff --git a/include/aare/logger.hpp b/include/aare/logger.hpp new file mode 100644 index 0000000..06e6feb --- /dev/null +++ b/include/aare/logger.hpp @@ -0,0 +1,139 @@ +#pragma once +/*Utility to log to console*/ + + +#include +#include +#include + +namespace aare { + +#define RED "\x1b[31m" +#define GREEN "\x1b[32m" +#define YELLOW "\x1b[33m" +#define BLUE "\x1b[34m" +#define MAGENTA "\x1b[35m" +#define CYAN "\x1b[36m" +#define GRAY "\x1b[37m" +#define DARKGRAY "\x1b[30m" + +#define BG_BLACK "\x1b[48;5;232m" +#define BG_RED "\x1b[41m" +#define BG_GREEN "\x1b[42m" +#define BG_YELLOW "\x1b[43m" +#define BG_BLUE "\x1b[44m" +#define BG_MAGENTA "\x1b[45m" +#define BG_CYAN "\x1b[46m" +#define RESET "\x1b[0m" +#define BOLD "\x1b[1m" + + +enum TLogLevel { + logERROR, + logWARNING, + logINFOBLUE, + logINFOGREEN, + logINFORED, + logINFOCYAN, + logINFOMAGENTA, + logINFO, + logDEBUG, + logDEBUG1, + logDEBUG2, + logDEBUG3, + logDEBUG4, + logDEBUG5 +}; + +// Compiler should optimize away anything below this value +#ifndef AARE_LOG_LEVEL +#define AARE_LOG_LEVEL "LOG LEVEL NOT SET IN CMAKE" //This is configured in the main CMakeLists.txt +#endif + +#define __AT__ \ + std::string(__FILE__) + std::string("::") + std::string(__func__) + \ + std::string("(): ") +#define __SHORT_FORM_OF_FILE__ \ + (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__) +#define __SHORT_AT__ \ + std::string(__SHORT_FORM_OF_FILE__) + std::string("::") + \ + std::string(__func__) + std::string("(): ") + +class Logger { + std::ostringstream os; + TLogLevel m_level = AARE_LOG_LEVEL; + + public: + Logger() = default; + explicit Logger(TLogLevel level) : m_level(level){}; + ~Logger() { + // output in the destructor to allow for << syntax + os << RESET << '\n'; + std::clog << os.str() << std::flush; // Single write + } + + static TLogLevel &ReportingLevel() { // singelton eeh TODO! Do we need a runtime option? 
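        // Intended call pattern for this logger (a sketch based on how it is
        // used later in this patch, e.g. in RawSubFile.cpp; not additional API):
        //
        //   LOG(logDEBUG) << "RawSubFile::seek(" << frame_index << ")";
        //
        // The LOG macro below discards anything more verbose than the
        // compile-time AARE_LOG_LEVEL, while ReportingLevel() supplies the
        // additional runtime filter checked by the same macro.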
+ static TLogLevel reportingLevel = logDEBUG5; + return reportingLevel; + } + + // Danger this buffer need as many elements as TLogLevel + static const char *Color(TLogLevel level) noexcept { + static const char *const colors[] = { + RED BOLD, YELLOW BOLD, BLUE, GREEN, RED, CYAN, MAGENTA, + RESET, RESET, RESET, RESET, RESET, RESET, RESET}; + // out of bounds + if (level < 0 || level >= sizeof(colors) / sizeof(colors[0])) { + return RESET; + } + return colors[level]; + } + + // Danger this buffer need as many elements as TLogLevel + static std::string ToString(TLogLevel level) { + static const char *const buffer[] = { + "ERROR", "WARNING", "INFO", "INFO", "INFO", + "INFO", "INFO", "INFO", "DEBUG", "DEBUG1", + "DEBUG2", "DEBUG3", "DEBUG4", "DEBUG5"}; + // out of bounds + if (level < 0 || level >= sizeof(buffer) / sizeof(buffer[0])) { + return "UNKNOWN"; + } + return buffer[level]; + } + + std::ostringstream &Get() { + os << Color(m_level) << "- " << Timestamp() << " " << ToString(m_level) + << ": "; + return os; + } + + static std::string Timestamp() { + constexpr size_t buffer_len = 12; + char buffer[buffer_len]; + time_t t; + ::time(&t); + tm r; + strftime(buffer, buffer_len, "%X", localtime_r(&t, &r)); + buffer[buffer_len - 1] = '\0'; + struct timeval tv; + gettimeofday(&tv, nullptr); + constexpr size_t result_len = 100; + char result[result_len]; + snprintf(result, result_len, "%s.%03ld", buffer, + static_cast(tv.tv_usec) / 1000); + result[result_len - 1] = '\0'; + return result; + } +}; + +// TODO! Do we need to keep the runtime option? +#define LOG(level) \ + if (level > AARE_LOG_LEVEL) \ + ; \ + else if (level > aare::Logger::ReportingLevel()) \ + ; \ + else \ + aare::Logger(level).Get() + +} // namespace aare diff --git a/python/examples/play.py b/python/examples/play.py index da469dc..0f4feca 100644 --- a/python/examples/play.py +++ b/python/examples/play.py @@ -1,79 +1,89 @@ import sys sys.path.append('/home/l_msdetect/erik/aare/build') -from aare._aare import ClusterVector_i, Interpolator -import pickle -import numpy as np -import matplotlib.pyplot as plt -import boost_histogram as bh -import torch -import math -import time +from aare import RawSubFile, DetectorType, RawFile + +from pathlib import Path +path = Path("/home/l_msdetect/erik/data/aare-test-data/raw/jungfrau/") +f = RawSubFile(path/"jungfrau_single_d0_f0_0.raw", DetectorType.Jungfrau, 512, 1024, 16) + +# f = RawFile(path/"jungfrau_single_master_0.json") + + +# from aare._aare import ClusterVector_i, Interpolator + +# import pickle +# import numpy as np +# import matplotlib.pyplot as plt +# import boost_histogram as bh +# import torch +# import math +# import time -def gaussian_2d(mx, my, sigma = 1, res=100, grid_size = 2): - """ - Generate a 2D gaussian as position mx, my, with sigma=sigma. - The gaussian is placed on a 2x2 pixel matrix with resolution - res in one dimesion. - """ - x = torch.linspace(0, pixel_size*grid_size, res) - x,y = torch.meshgrid(x,x, indexing="ij") - return 1 / (2*math.pi*sigma**2) * \ - torch.exp(-((x - my)**2 / (2*sigma**2) + (y - mx)**2 / (2*sigma**2))) +# def gaussian_2d(mx, my, sigma = 1, res=100, grid_size = 2): +# """ +# Generate a 2D gaussian as position mx, my, with sigma=sigma. +# The gaussian is placed on a 2x2 pixel matrix with resolution +# res in one dimesion. 
+# """ +# x = torch.linspace(0, pixel_size*grid_size, res) +# x,y = torch.meshgrid(x,x, indexing="ij") +# return 1 / (2*math.pi*sigma**2) * \ +# torch.exp(-((x - my)**2 / (2*sigma**2) + (y - mx)**2 / (2*sigma**2))) -scale = 1000 #Scale factor when converting to integer -pixel_size = 25 #um -grid = 2 -resolution = 100 -sigma_um = 10 -xa = np.linspace(0,grid*pixel_size,resolution) -ticks = [0, 25, 50] +# scale = 1000 #Scale factor when converting to integer +# pixel_size = 25 #um +# grid = 2 +# resolution = 100 +# sigma_um = 10 +# xa = np.linspace(0,grid*pixel_size,resolution) +# ticks = [0, 25, 50] -hit = np.array((20,20)) -etahist_fname = "/home/l_msdetect/erik/tmp/test_hist.pkl" +# hit = np.array((20,20)) +# etahist_fname = "/home/l_msdetect/erik/tmp/test_hist.pkl" -local_resolution = 99 -grid_size = 3 -xaxis = np.linspace(0,grid_size*pixel_size, local_resolution) -t = gaussian_2d(hit[0],hit[1], grid_size = grid_size, sigma = 10, res = local_resolution) -pixels = t.reshape(grid_size, t.shape[0] // grid_size, grid_size, t.shape[1] // grid_size).sum(axis = 3).sum(axis = 1) -pixels = pixels.numpy() -pixels = (pixels*scale).astype(np.int32) -v = ClusterVector_i(3,3) -v.push_back(1,1, pixels) +# local_resolution = 99 +# grid_size = 3 +# xaxis = np.linspace(0,grid_size*pixel_size, local_resolution) +# t = gaussian_2d(hit[0],hit[1], grid_size = grid_size, sigma = 10, res = local_resolution) +# pixels = t.reshape(grid_size, t.shape[0] // grid_size, grid_size, t.shape[1] // grid_size).sum(axis = 3).sum(axis = 1) +# pixels = pixels.numpy() +# pixels = (pixels*scale).astype(np.int32) +# v = ClusterVector_i(3,3) +# v.push_back(1,1, pixels) -with open(etahist_fname, "rb") as f: - hist = pickle.load(f) -eta = hist.view().copy() -etabinsx = np.array(hist.axes.edges.T[0].flat) -etabinsy = np.array(hist.axes.edges.T[1].flat) -ebins = np.array(hist.axes.edges.T[2].flat) -p = Interpolator(eta, etabinsx[0:-1], etabinsy[0:-1], ebins[0:-1]) +# with open(etahist_fname, "rb") as f: +# hist = pickle.load(f) +# eta = hist.view().copy() +# etabinsx = np.array(hist.axes.edges.T[0].flat) +# etabinsy = np.array(hist.axes.edges.T[1].flat) +# ebins = np.array(hist.axes.edges.T[2].flat) +# p = Interpolator(eta, etabinsx[0:-1], etabinsy[0:-1], ebins[0:-1]) -#Generate the hit +# #Generate the hit -tmp = p.interpolate(v) -print(f'tmp:{tmp}') -pos = np.array((tmp['x'], tmp['y']))*25 +# tmp = p.interpolate(v) +# print(f'tmp:{tmp}') +# pos = np.array((tmp['x'], tmp['y']))*25 -print(pixels) -fig, ax = plt.subplots(figsize = (7,7)) -ax.pcolormesh(xaxis, xaxis, t) -ax.plot(*pos, 'o') -ax.set_xticks([0,25,50,75]) -ax.set_yticks([0,25,50,75]) -ax.set_xlim(0,75) -ax.set_ylim(0,75) -ax.grid() -print(f'{hit=}') -print(f'{pos=}') \ No newline at end of file +# print(pixels) +# fig, ax = plt.subplots(figsize = (7,7)) +# ax.pcolormesh(xaxis, xaxis, t) +# ax.plot(*pos, 'o') +# ax.set_xticks([0,25,50,75]) +# ax.set_yticks([0,25,50,75]) +# ax.set_xlim(0,75) +# ax.set_ylim(0,75) +# ax.grid() +# print(f'{hit=}') +# print(f'{pos=}') \ No newline at end of file diff --git a/python/src/raw_file.hpp b/python/src/raw_file.hpp index 38b4896..8d72220 100644 --- a/python/src/raw_file.hpp +++ b/python/src/raw_file.hpp @@ -32,7 +32,7 @@ void define_raw_file_io_bindings(py::module &m) { shape.push_back(self.cols()); // return headers from all subfiles - py::array_t header(self.n_mod()); + py::array_t header(self.n_modules()); const uint8_t item_size = self.bytes_per_pixel(); if (item_size == 1) { @@ -61,10 +61,10 @@ void 
define_raw_file_io_bindings(py::module &m) { // return headers from all subfiles py::array_t header; - if (self.n_mod() == 1) { + if (self.n_modules() == 1) { header = py::array_t(n_frames); } else { - header = py::array_t({self.n_mod(), n_frames}); + header = py::array_t({self.n_modules(), n_frames}); } // py::array_t header({self.n_mod(), n_frames}); @@ -100,7 +100,7 @@ void define_raw_file_io_bindings(py::module &m) { .def_property_readonly("cols", &RawFile::cols) .def_property_readonly("bitdepth", &RawFile::bitdepth) .def_property_readonly("geometry", &RawFile::geometry) - .def_property_readonly("n_mod", &RawFile::n_mod) + .def_property_readonly("n_modules", &RawFile::n_modules) .def_property_readonly("detector_type", &RawFile::detector_type) .def_property_readonly("master", &RawFile::master); } \ No newline at end of file diff --git a/python/tests/test_RawSubFile.py b/python/tests/test_RawSubFile.py index a5eea91..cdde248 100644 --- a/python/tests/test_RawSubFile.py +++ b/python/tests/test_RawSubFile.py @@ -5,32 +5,35 @@ from aare import RawSubFile, DetectorType @pytest.mark.files def test_read_a_jungfrau_RawSubFile(test_data_path): + + # Starting with f1 there is now 7 frames left in the series of files with RawSubFile(test_data_path / "raw/jungfrau/jungfrau_single_d0_f1_0.raw", DetectorType.Jungfrau, 512, 1024, 16) as f: - assert f.frames_in_file == 3 + assert f.frames_in_file == 7 headers, frames = f.read() - assert headers.size == 3 - assert frames.shape == (3, 512, 1024) + assert headers.size == 7 + assert frames.shape == (7, 512, 1024) - # Frame numbers in this file should be 4, 5, 6 - for i,h in zip(range(4,7,1), headers): + + for i,h in zip(range(4,11,1), headers): assert h["frameNumber"] == i # Compare to canned data using numpy data = np.load(test_data_path / "raw/jungfrau/jungfrau_single_0.npy") - assert np.all(data[3:6] == frames) + assert np.all(data[3:] == frames) @pytest.mark.files def test_iterate_over_a_jungfrau_RawSubFile(test_data_path): data = np.load(test_data_path / "raw/jungfrau/jungfrau_single_0.npy") + # Given the first subfile in a series we can read all frames from f0, f1, f2...fN with RawSubFile(test_data_path / "raw/jungfrau/jungfrau_single_d0_f0_0.raw", DetectorType.Jungfrau, 512, 1024, 16) as f: i = 0 for header, frame in f: assert header["frameNumber"] == i+1 assert np.all(frame == data[i]) i += 1 - assert i == 3 - assert header["frameNumber"] == 3 \ No newline at end of file + assert i == 10 + assert header["frameNumber"] == 10 \ No newline at end of file diff --git a/src/RawFile.cpp b/src/RawFile.cpp index c576453..122cf96 100644 --- a/src/RawFile.cpp +++ b/src/RawFile.cpp @@ -1,6 +1,8 @@ #include "aare/RawFile.hpp" +#include "aare/algorithm.hpp" #include "aare/PixelMap.hpp" #include "aare/defs.hpp" +#include "aare/logger.hpp" #include "aare/geo_helpers.hpp" #include @@ -14,23 +16,14 @@ RawFile::RawFile(const std::filesystem::path &fname, const std::string &mode) : m_master(fname) { m_mode = mode; if (mode == "r") { - - n_subfiles = find_number_of_subfiles(); // f0,f1...fn - n_subfile_parts = - m_master.geometry().col * m_master.geometry().row; // d0,d1...dn - - - find_geometry(); - if (m_master.roi()){ m_geometry = update_geometry_with_roi(m_geometry, m_master.roi().value()); } - open_subfiles(); } else { throw std::runtime_error(LOCATION + - "Unsupported mode. Can only read RawFiles."); + " Unsupported mode. 
Can only read RawFiles."); } } @@ -67,12 +60,12 @@ void RawFile::read_into(std::byte *image_buf, size_t n_frames, DetectorHeader *h this->get_frame_into(m_current_frame++, image_buf, header); image_buf += bytes_per_frame(); if(header) - header+=n_mod(); + header+=n_modules(); } } -size_t RawFile::n_mod() const { return n_subfile_parts; } +size_t RawFile::n_modules() const { return m_master.n_modules(); } size_t RawFile::bytes_per_frame() { @@ -106,17 +99,11 @@ xy RawFile::geometry() { return m_master.geometry(); } void RawFile::open_subfiles() { if (m_mode == "r") - for (size_t i = 0; i != n_subfiles; ++i) { - auto v = std::vector(n_subfile_parts); - for (size_t j = 0; j != n_subfile_parts; ++j) { - auto pos = m_geometry.module_pixel_0[j]; - v[j] = new RawSubFile(m_master.data_fname(j, i), - m_master.detector_type(), pos.height, - pos.width, m_master.bitdepth(), - pos.row_index, pos.col_index); - - } - subfiles.push_back(v); + for (size_t i = 0; i != n_modules(); ++i) { + auto pos = m_geometry.module_pixel_0[i]; + m_subfiles.emplace_back(std::make_unique( + m_master.data_fname(i, 0), m_master.detector_type(), pos.height, + pos.width, m_master.bitdepth(), pos.row_index, pos.col_index)); } else { throw std::runtime_error(LOCATION + @@ -141,18 +128,6 @@ DetectorHeader RawFile::read_header(const std::filesystem::path &fname) { return h; } -int RawFile::find_number_of_subfiles() { - int n_files = 0; - // f0,f1...fn How many files is the data split into? - while (std::filesystem::exists(m_master.data_fname(0, n_files))) - n_files++; // increment after test - -#ifdef AARE_VERBOSE - fmt::print("Found: {} subfiles\n", n_files); -#endif - return n_files; - -} RawMasterFile RawFile::master() const { return m_master; } @@ -168,7 +143,7 @@ void RawFile::find_geometry() { uint16_t c{}; - for (size_t i = 0; i < n_subfile_parts; i++) { + for (size_t i = 0; i < n_modules(); i++) { auto h = read_header(m_master.data_fname(i, 0)); r = std::max(r, h.row); c = std::max(c, h.column); @@ -210,70 +185,58 @@ size_t RawFile::bytes_per_pixel() const { } void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, DetectorHeader *header) { + LOG(logDEBUG) << "RawFile::get_frame_into(" << frame_index << ")"; if (frame_index >= total_frames()) { throw std::runtime_error(LOCATION + "Frame number out of range"); } - std::vector frame_numbers(n_subfile_parts); - std::vector frame_indices(n_subfile_parts, frame_index); + std::vector frame_numbers(n_modules()); + std::vector frame_indices(n_modules(), frame_index); // sync the frame numbers - if (n_subfile_parts != 1) { - for (size_t part_idx = 0; part_idx != n_subfile_parts; ++part_idx) { - auto subfile_id = frame_index / m_master.max_frames_per_file(); - if (subfile_id >= subfiles.size()) { - throw std::runtime_error(LOCATION + - " Subfile out of range. Possible missing data."); - } - frame_numbers[part_idx] = - subfiles[subfile_id][part_idx]->frame_number( - frame_index % m_master.max_frames_per_file()); + if (n_modules() != 1) { //if we have more than one module + for (size_t part_idx = 0; part_idx != n_modules(); ++part_idx) { + frame_numbers[part_idx] = m_subfiles[part_idx]->frame_number(frame_index); } + // 1. if frame number vector is the same break - while (std::adjacent_find(frame_numbers.begin(), frame_numbers.end(), - std::not_equal_to<>()) != - frame_numbers.end()) { + while (!all_equal(frame_numbers)) { + // 2. 
find the index of the minimum frame number, auto min_frame_idx = std::distance( frame_numbers.begin(), std::min_element(frame_numbers.begin(), frame_numbers.end())); + // 3. increase its index and update its respective frame number frame_indices[min_frame_idx]++; + // 4. if we can't increase its index => throw error if (frame_indices[min_frame_idx] >= total_frames()) { throw std::runtime_error(LOCATION + "Frame number out of range"); } - auto subfile_id = - frame_indices[min_frame_idx] / m_master.max_frames_per_file(); + frame_numbers[min_frame_idx] = - subfiles[subfile_id][min_frame_idx]->frame_number( - frame_indices[min_frame_idx] % - m_master.max_frames_per_file()); + m_subfiles[min_frame_idx]->frame_number(frame_indices[min_frame_idx]); } } if (m_master.geometry().col == 1) { // get the part from each subfile and copy it to the frame - for (size_t part_idx = 0; part_idx != n_subfile_parts; ++part_idx) { + for (size_t part_idx = 0; part_idx != n_modules(); ++part_idx) { auto corrected_idx = frame_indices[part_idx]; - auto subfile_id = corrected_idx / m_master.max_frames_per_file(); - if (subfile_id >= subfiles.size()) { - throw std::runtime_error(LOCATION + - " Subfile out of range. Possible missing data."); - } - + // This is where we start writing auto offset = (m_geometry.module_pixel_0[part_idx].origin_y * m_geometry.pixels_x + m_geometry.module_pixel_0[part_idx].origin_x)*m_master.bitdepth()/8; if (m_geometry.module_pixel_0[part_idx].origin_x!=0) - throw std::runtime_error(LOCATION + "Implementation error. x pos not 0."); + throw std::runtime_error(LOCATION + " Implementation error. x pos not 0."); - //TODO! Risk for out of range access - subfiles[subfile_id][part_idx]->seek(corrected_idx % m_master.max_frames_per_file()); - subfiles[subfile_id][part_idx]->read_into(frame_buffer + offset, header); + //TODO! What if the files don't match? + m_subfiles[part_idx]->seek(corrected_idx); + m_subfiles[part_idx]->read_into(frame_buffer + offset, header); if (header) ++header; } @@ -282,26 +245,21 @@ void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, Detect //TODO! should we read row by row? // create a buffer large enough to hold a full module - auto bytes_per_part = m_master.pixels_y() * m_master.pixels_x() * m_master.bitdepth() / 8; // TODO! replace with image_size_in_bytes + auto *part_buffer = new std::byte[bytes_per_part]; // TODO! if we have many submodules we should reorder them on the module // level - for (size_t part_idx = 0; part_idx != n_subfile_parts; ++part_idx) { + for (size_t part_idx = 0; part_idx != n_modules(); ++part_idx) { auto pos = m_geometry.module_pixel_0[part_idx]; auto corrected_idx = frame_indices[part_idx]; - auto subfile_id = corrected_idx / m_master.max_frames_per_file(); - if (subfile_id >= subfiles.size()) { - throw std::runtime_error(LOCATION + - " Subfile out of range. 
Possible missing data."); - } - subfiles[subfile_id][part_idx]->seek(corrected_idx % m_master.max_frames_per_file()); - subfiles[subfile_id][part_idx]->read_into(part_buffer, header); + m_subfiles[part_idx]->seek(corrected_idx); + m_subfiles[part_idx]->read_into(part_buffer, header); if(header) ++header; @@ -321,6 +279,7 @@ void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, Detect } delete[] part_buffer; } + } std::vector RawFile::read_n(size_t n_frames) { @@ -337,27 +296,8 @@ size_t RawFile::frame_number(size_t frame_index) { if (frame_index >= m_master.frames_in_file()) { throw std::runtime_error(LOCATION + " Frame number out of range"); } - size_t subfile_id = frame_index / m_master.max_frames_per_file(); - if (subfile_id >= subfiles.size()) { - throw std::runtime_error( - LOCATION + " Subfile out of range. Possible missing data."); - } - return subfiles[subfile_id][0]->frame_number( - frame_index % m_master.max_frames_per_file()); + return m_subfiles[0]->frame_number(frame_index); } -RawFile::~RawFile() { - - // TODO! Fix this, for file closing - for (auto &vec : subfiles) { - for (auto *subfile : vec) { - delete subfile; - } - } -} - - - - } // namespace aare diff --git a/src/RawFile.test.cpp b/src/RawFile.test.cpp index 5f9b2e1..9109985 100644 --- a/src/RawFile.test.cpp +++ b/src/RawFile.test.cpp @@ -99,11 +99,11 @@ TEST_CASE("Read frame numbers from a raw file", "[.integration]") { } } -TEST_CASE("Compare reading from a numpy file with a raw file", "[.integration]") { - auto fpath_raw = test_data_path() / "jungfrau" / "jungfrau_single_master_0.json"; +TEST_CASE("Compare reading from a numpy file with a raw file", "[.files]") { + auto fpath_raw = test_data_path() / "raw/jungfrau" / "jungfrau_single_master_0.json"; REQUIRE(std::filesystem::exists(fpath_raw)); - auto fpath_npy = test_data_path() / "jungfrau" / "jungfrau_single_0.npy"; + auto fpath_npy = test_data_path() / "raw/jungfrau" / "jungfrau_single_0.npy"; REQUIRE(std::filesystem::exists(fpath_npy)); File raw(fpath_raw, "r"); @@ -113,6 +113,7 @@ TEST_CASE("Compare reading from a numpy file with a raw file", "[.integration]") CHECK(npy.total_frames() == 10); for (size_t i = 0; i < 10; ++i) { + CHECK(raw.tell() == i); auto raw_frame = raw.read_frame(); auto npy_frame = npy.read_frame(); CHECK((raw_frame.view() == npy_frame.view())); diff --git a/src/RawMasterFile.cpp b/src/RawMasterFile.cpp index 33807d4..8a2db87 100644 --- a/src/RawMasterFile.cpp +++ b/src/RawMasterFile.cpp @@ -140,6 +140,10 @@ std::optional RawMasterFile::number_of_rows() const { xy RawMasterFile::geometry() const { return m_geometry; } +size_t RawMasterFile::n_modules() const { + return m_geometry.row * m_geometry.col; +} + std::optional RawMasterFile::quad() const { return m_quad; } // optional values, these may or may not be present in the master file diff --git a/src/RawSubFile.cpp b/src/RawSubFile.cpp index 9e7a421..01ef48c 100644 --- a/src/RawSubFile.cpp +++ b/src/RawSubFile.cpp @@ -1,10 +1,14 @@ #include "aare/RawSubFile.hpp" #include "aare/PixelMap.hpp" +#include "aare/algorithm.hpp" #include "aare/utils/ifstream_helpers.hpp" +#include "aare/logger.hpp" + + #include // memcpy #include #include - +#include namespace aare { @@ -12,51 +16,51 @@ namespace aare { RawSubFile::RawSubFile(const std::filesystem::path &fname, DetectorType detector, size_t rows, size_t cols, size_t bitdepth, uint32_t pos_row, uint32_t pos_col) - : m_detector_type(detector), m_bitdepth(bitdepth), m_fname(fname), + : m_detector_type(detector), 
m_bitdepth(bitdepth), m_rows(rows), m_cols(cols), m_bytes_per_frame((m_bitdepth / 8) * m_rows * m_cols), m_pos_row(pos_row), m_pos_col(pos_col) { + + LOG(logDEBUG) << "RawSubFile::RawSubFile()"; if (m_detector_type == DetectorType::Moench03_old) { m_pixel_map = GenerateMoench03PixelMap(); } else if (m_detector_type == DetectorType::Eiger && m_pos_row % 2 == 0) { m_pixel_map = GenerateEigerFlipRowsPixelMap(); } - if (std::filesystem::exists(fname)) { - m_num_frames = std::filesystem::file_size(fname) / - (sizeof(DetectorHeader) + rows * cols * bitdepth / 8); - } else { - throw std::runtime_error( - LOCATION + fmt::format("File {} does not exist", m_fname.string())); - } - // fp = fopen(m_fname.string().c_str(), "rb"); - m_file.open(m_fname, std::ios::binary); - if (!m_file.is_open()) { - throw std::runtime_error( - LOCATION + fmt::format("Could not open file {}", m_fname.string())); - } - -#ifdef AARE_VERBOSE - fmt::print("Opened file: {} with {} frames\n", m_fname.string(), m_num_frames); - fmt::print("m_rows: {}, m_cols: {}, m_bitdepth: {}\n", m_rows, m_cols, - m_bitdepth); - fmt::print("file size: {}\n", std::filesystem::file_size(fname)); -#endif + parse_fname(fname); + scan_files(); + open_file(m_current_file_index); // open the first file } void RawSubFile::seek(size_t frame_index) { - if (frame_index >= m_num_frames) { - throw std::runtime_error(LOCATION + fmt::format("Frame index {} out of range in a file with {} frames", frame_index, m_num_frames)); + LOG(logDEBUG) << "RawSubFile::seek(" << frame_index << ")"; + if (frame_index >= m_total_frames) { + throw std::runtime_error(LOCATION + " Frame index out of range: " + + std::to_string(frame_index)); } - m_file.seekg((sizeof(DetectorHeader) + bytes_per_frame()) * frame_index); + m_current_frame_index = frame_index; + auto file_index = first_larger(m_last_frame_in_file, frame_index); + + if (file_index != m_current_file_index) + open_file(file_index); + + auto frame_offset = (file_index) + ? 
frame_index - m_last_frame_in_file[file_index - 1] + : frame_index; + auto byte_offset = frame_offset * (m_bytes_per_frame + sizeof(DetectorHeader)); + m_file.seekg(byte_offset); } size_t RawSubFile::tell() { - return m_file.tellg() / (sizeof(DetectorHeader) + bytes_per_frame()); + LOG(logDEBUG) << "RawSubFile::tell():" << m_current_frame_index; + return m_current_frame_index; } void RawSubFile::read_into(std::byte *image_buf, DetectorHeader *header) { + LOG(logDEBUG) << "RawSubFile::read_into()"; + if (header) { m_file.read(reinterpret_cast(header), sizeof(DetectorHeader)); } else { @@ -90,6 +94,13 @@ void RawSubFile::read_into(std::byte *image_buf, DetectorHeader *header) { if (m_file.fail()){ throw std::runtime_error(LOCATION + ifstream_error_msg(m_file)); } + + ++ m_current_frame_index; + if (m_current_frame_index >= m_last_frame_in_file[m_current_file_index] && + (m_current_frame_index < m_total_frames)) { + ++m_current_file_index; + open_file(m_current_file_index); + } } void RawSubFile::read_into(std::byte *image_buf, size_t n_frames, DetectorHeader *header) { @@ -130,4 +141,69 @@ size_t RawSubFile::frame_number(size_t frame_index) { return h.frameNumber; } +void RawSubFile::parse_fname(const std::filesystem::path &fname) { + LOG(logDEBUG) << "RawSubFile::parse_fname()"; + // data has the format: /path/too/data/jungfrau_single_d0_f1_0.raw + // d0 is the module index, will not change for this file + // f1 is the file index - thi is the one we need + // 0 is the measurement index, will not change + m_path = fname.parent_path(); + m_base_name = fname.filename(); + + // Regex to extract numbers after 'd' and 'f' + std::regex pattern(R"(^(.*_d)(\d+)(_f)(\d+)(_\d+\.raw)$)"); + std::smatch match; + + if (std::regex_match(m_base_name, match, pattern)) { + m_offset = std::stoi(match[4].str()); // find the first file index in case of a truncated series + m_base_name = match[1].str() + match[2].str() + match[3].str() + "{}" + match[5].str(); + LOG(logDEBUG) << "Base name: " << m_base_name; + LOG(logDEBUG) << "Offset: " << m_offset; + LOG(logDEBUG) << "Path: " << m_path.string(); + } else { + throw std::runtime_error( + LOCATION + fmt::format("Could not parse file name {}", fname.string())); + } +} + +std::filesystem::path RawSubFile::fpath(size_t file_index) const { + auto fname = fmt::format(m_base_name, file_index); + return m_path / fname; +} + +void RawSubFile::open_file(size_t file_index) { + m_file.close(); + auto fname = fpath(file_index+m_offset); + LOG(logDEBUG) << "RawSubFile::open_file(): " << fname.string(); + m_file.open(fname, std::ios::binary); + if (!m_file.is_open()) { + throw std::runtime_error( + LOCATION + fmt::format("Could not open file {}", fpath(file_index).string())); + } + m_current_file_index = file_index; +} + +void RawSubFile::scan_files() { + LOG(logDEBUG) << "RawSubFile::scan_files()"; + // find how many files we have and the number of frames in each file + m_last_frame_in_file.clear(); + size_t file_index = m_offset; + + while (std::filesystem::exists(fpath(file_index))) { + auto n_frames = std::filesystem::file_size(fpath(file_index)) / + (m_bytes_per_frame + sizeof(DetectorHeader)); + m_last_frame_in_file.push_back(n_frames); + LOG(logDEBUG) << "Found: " << n_frames << " frames in file: " << fpath(file_index).string(); + ++file_index; + } + + // find where we need to open the next file and total number of frames + m_last_frame_in_file = cumsum(m_last_frame_in_file); + if(m_last_frame_in_file.empty()){ + m_total_frames = 0; + }else{ + m_total_frames = 
m_last_frame_in_file.back(); + } +} + } // namespace aare \ No newline at end of file diff --git a/src/RawSubFile.test.cpp b/src/RawSubFile.test.cpp new file mode 100644 index 0000000..89cf858 --- /dev/null +++ b/src/RawSubFile.test.cpp @@ -0,0 +1,76 @@ +#include "aare/RawSubFile.hpp" +#include "aare/File.hpp" +#include "aare/NDArray.hpp" +#include +#include "test_config.hpp" + +using namespace aare; + +TEST_CASE("Read frames directly from a RawSubFile", "[.files]"){ + auto fpath_raw = test_data_path() / "raw/jungfrau" / "jungfrau_single_d0_f0_0.raw"; + REQUIRE(std::filesystem::exists(fpath_raw)); + + RawSubFile f(fpath_raw, DetectorType::Jungfrau, 512, 1024, 16); + REQUIRE(f.rows() == 512); + REQUIRE(f.cols() == 1024); + REQUIRE(f.pixels_per_frame() == 512 * 1024); + REQUIRE(f.bytes_per_frame() == 512 * 1024 * 2); + REQUIRE(f.bytes_per_pixel() == 2); + + + auto fpath_npy = test_data_path() / "raw/jungfrau" / "jungfrau_single_0.npy"; + REQUIRE(std::filesystem::exists(fpath_npy)); + + //Numpy file with the same data to use as reference + File npy(fpath_npy, "r"); + + CHECK(f.frames_in_file() == 10); + CHECK(npy.total_frames() == 10); + + + DetectorHeader header{}; + NDArray image({static_cast(f.rows()), static_cast(f.cols())}); + for (size_t i = 0; i < 10; ++i) { + CHECK(f.tell() == i); + f.read_into(image.buffer(), &header); + auto npy_frame = npy.read_frame(); + CHECK((image.view() == npy_frame.view())); + } +} + +TEST_CASE("Read frames directly from a RawSubFile starting at the second file", "[.files]"){ + // we know this file has 10 frames with frame numbers 1 to 10 + // f0 1,2,3 + // f1 4,5,6 <-- starting here + // f2 7,8,9 + // f3 10 + + auto fpath_raw = test_data_path() / "raw/jungfrau" / "jungfrau_single_d0_f1_0.raw"; + REQUIRE(std::filesystem::exists(fpath_raw)); + + RawSubFile f(fpath_raw, DetectorType::Jungfrau, 512, 1024, 16); + + + auto fpath_npy = test_data_path() / "raw/jungfrau" / "jungfrau_single_0.npy"; + REQUIRE(std::filesystem::exists(fpath_npy)); + + //Numpy file with the same data to use as reference + File npy(fpath_npy, "r"); + npy.seek(3); + + CHECK(f.frames_in_file() == 7); + CHECK(npy.total_frames() == 10); + + + DetectorHeader header{}; + NDArray image({static_cast(f.rows()), static_cast(f.cols())}); + for (size_t i = 0; i < 7; ++i) { + CHECK(f.tell() == i); + f.read_into(image.buffer(), &header); + // frame numbers start at 1 frame index at 0 + // adding 3 + 1 to verify the frame number + CHECK(header.frameNumber == i + 4); + auto npy_frame = npy.read_frame(); + CHECK((image.view() == npy_frame.view())); + } +} \ No newline at end of file diff --git a/src/algorithm.test.cpp b/src/algorithm.test.cpp index 5452fcf..6bd707b 100644 --- a/src/algorithm.test.cpp +++ b/src/algorithm.test.cpp @@ -160,3 +160,36 @@ TEST_CASE("cumsum works with negative numbers", "[algorithm]") { REQUIRE(result[3] == -6); REQUIRE(result[4] == -10); } + + +TEST_CASE("cumsum on an empty vector", "[algorithm]") { + std::vector vec = {}; + auto result = aare::cumsum(vec); + REQUIRE(result.size() == 0); + +} + +TEST_CASE("All equal on an empty vector is false", "[algorithm]") { + std::vector vec = {}; + REQUIRE(aare::all_equal(vec) == false); +} + +TEST_CASE("All equal on a vector with 1 element is true", "[algorithm]") { + std::vector vec = {1}; + REQUIRE(aare::all_equal(vec) == true); +} + +TEST_CASE("All equal on a vector with 2 elements is true", "[algorithm]") { + std::vector vec = {1, 1}; + REQUIRE(aare::all_equal(vec) == true); +} + +TEST_CASE("All equal on a vector with two different 
elements is false", "[algorithm]") { + std::vector vec = {1, 2}; + REQUIRE(aare::all_equal(vec) == false); +} + +TEST_CASE("Last element is different", "[algorithm]") { + std::vector vec = {1, 1, 1, 1, 2}; + REQUIRE(aare::all_equal(vec) == false); +} \ No newline at end of file From f2a024644be83f26e224dd7ceb9d10bd4072cd67 Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Thu, 22 May 2025 11:10:23 +0200 Subject: [PATCH 04/13] bumped version upload on release --- .github/workflows/build_and_deploy_conda.yml | 6 +++--- VERSION | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build_and_deploy_conda.yml b/.github/workflows/build_and_deploy_conda.yml index 65483c3..8917419 100644 --- a/.github/workflows/build_and_deploy_conda.yml +++ b/.github/workflows/build_and_deploy_conda.yml @@ -1,9 +1,9 @@ name: Build pkgs and deploy if on main on: - push: - branches: - - main + release: + types: + - published jobs: build: diff --git a/VERSION b/VERSION index bd52db8..ae365e4 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.0.0 \ No newline at end of file +2025.5.22 \ No newline at end of file From 94ac58b09e39bf0841f490aeb7390d0d2a9af86b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Thu, 22 May 2025 11:40:39 +0200 Subject: [PATCH 05/13] For 2025.5.22 release (#181) Co-authored-by: Patrick Co-authored-by: JulianHeymes Co-authored-by: Dhanya Thattil Co-authored-by: Xiangyu Xie <45243914+xiangyuxie@users.noreply.github.com> Co-authored-by: xiangyu.xie Co-authored-by: AliceMazzoleni99 Co-authored-by: Mazzoleni Alice Francesca Co-authored-by: siebsi --- .github/workflows/build_and_deploy_conda.yml | 14 +- .github/workflows/build_conda.yml | 9 +- CMakeLists.txt | 32 +- VERSION | 1 + benchmarks/CMakeLists.txt | 30 +- benchmarks/calculateeta_benchmark.cpp | 70 +++ conda-recipe/conda_build_config.yaml | 23 - conda-recipe/meta.yaml | 51 +- etc/dev-env.yml | 6 +- include/aare/ArrayExpr.hpp | 36 +- include/aare/CalculateEta.hpp | 170 +++++++ include/aare/Cluster.hpp | 94 +++- include/aare/ClusterCollector.hpp | 60 +-- include/aare/ClusterFile.hpp | 469 +++++++++++++++---- include/aare/ClusterFileSink.hpp | 58 ++- include/aare/ClusterFileV2.hpp | 148 ------ include/aare/ClusterFinder.hpp | 78 +-- include/aare/ClusterFinderMT.hpp | 35 +- include/aare/ClusterVector.hpp | 280 +++-------- include/aare/FilePtr.hpp | 4 +- include/aare/Fit.hpp | 29 +- include/aare/Frame.hpp | 4 +- include/aare/GainMap.hpp | 68 +++ include/aare/Interpolator.hpp | 121 ++++- include/aare/NDArray.hpp | 78 +-- include/aare/NDView.hpp | 44 +- include/aare/NumpyFile.hpp | 2 +- include/aare/Pedestal.hpp | 4 +- include/aare/RawFile.hpp | 22 +- include/aare/RawMasterFile.hpp | 1 + include/aare/RawSubFile.hpp | 20 +- include/aare/VarClusterFinder.hpp | 2 +- include/aare/algorithm.hpp | 11 + include/aare/defs.hpp | 18 +- include/aare/logger.hpp | 139 ++++++ pyproject.toml | 8 +- python/CMakeLists.txt | 4 + python/aare/ClusterFinder.py | 67 +++ python/aare/ClusterVector.py | 11 + python/aare/__init__.py | 13 +- python/aare/func.py | 2 +- python/examples/play.py | 126 ++--- python/src/bind_ClusterVector.hpp | 104 ++++ python/src/cluster.hpp | 292 ++++++------ python/src/cluster_file.hpp | 87 ++-- python/src/ctb_raw_file.hpp | 4 +- python/src/file.hpp | 2 +- python/src/fit.hpp | 215 +++++++++ python/src/interpolation.hpp | 68 ++- python/src/module.cpp | 86 +++- python/src/np_helper.hpp | 38 +- python/src/raw_file.hpp | 8 +- python/tests/conftest.py | 7 +- python/tests/test_Cluster.py | 110 +++++ 
python/tests/test_ClusterFile.py | 64 +++ python/tests/test_ClusterVector.py | 54 +++ python/tests/test_RawSubFile.py | 19 +- src/CalculateEta.test.cpp | 127 +++++ src/Cluster.test.cpp | 21 + src/ClusterFile.test.cpp | 301 +++++++++++- src/ClusterFinder.test.cpp | 24 +- src/ClusterFinderMT.test.cpp | 99 ++++ src/ClusterVector.test.cpp | 240 ++++++---- src/FilePtr.cpp | 2 +- src/Fit.cpp | 251 +++++++++- src/Interpolator.cpp | 87 +--- src/JungfrauDataFile.cpp | 4 +- src/NDArray.test.cpp | 20 +- src/NDView.test.cpp | 6 +- src/NumpyFile.cpp | 6 +- src/RawFile.cpp | 146 ++---- src/RawFile.test.cpp | 7 +- src/RawMasterFile.cpp | 8 +- src/RawSubFile.cpp | 128 ++++- src/RawSubFile.test.cpp | 76 +++ src/algorithm.test.cpp | 88 +++- src/decode.cpp | 8 +- update_version.py | 57 +++ 78 files changed, 3865 insertions(+), 1461 deletions(-) create mode 100644 VERSION create mode 100644 benchmarks/calculateeta_benchmark.cpp create mode 100644 include/aare/CalculateEta.hpp delete mode 100644 include/aare/ClusterFileV2.hpp create mode 100644 include/aare/GainMap.hpp create mode 100644 include/aare/logger.hpp create mode 100644 python/aare/ClusterFinder.py create mode 100644 python/aare/ClusterVector.py create mode 100644 python/src/bind_ClusterVector.hpp create mode 100644 python/tests/test_Cluster.py create mode 100644 python/tests/test_ClusterFile.py create mode 100644 python/tests/test_ClusterVector.py create mode 100644 src/CalculateEta.test.cpp create mode 100644 src/Cluster.test.cpp create mode 100644 src/ClusterFinderMT.test.cpp create mode 100644 src/RawSubFile.test.cpp create mode 100644 update_version.py diff --git a/.github/workflows/build_and_deploy_conda.yml b/.github/workflows/build_and_deploy_conda.yml index 90e75c1..8917419 100644 --- a/.github/workflows/build_and_deploy_conda.yml +++ b/.github/workflows/build_and_deploy_conda.yml @@ -1,9 +1,9 @@ name: Build pkgs and deploy if on main on: - push: - branches: - - main + release: + types: + - published jobs: build: @@ -24,13 +24,13 @@ jobs: - uses: actions/checkout@v4 - name: Get conda - uses: conda-incubator/setup-miniconda@v3.0.4 + uses: conda-incubator/setup-miniconda@v3 with: python-version: ${{ matrix.python-version }} + environment-file: etc/dev-env.yml + miniforge-version: latest channels: conda-forge - - - name: Prepare - run: conda install conda-build=24.9 conda-verify pytest anaconda-client + conda-remove-defaults: "true" - name: Enable upload run: conda config --set anaconda_upload yes diff --git a/.github/workflows/build_conda.yml b/.github/workflows/build_conda.yml index 0b3e55c..3bd465e 100644 --- a/.github/workflows/build_conda.yml +++ b/.github/workflows/build_conda.yml @@ -24,14 +24,15 @@ jobs: - uses: actions/checkout@v4 - name: Get conda - uses: conda-incubator/setup-miniconda@v3.0.4 + uses: conda-incubator/setup-miniconda@v3 with: python-version: ${{ matrix.python-version }} + environment-file: etc/dev-env.yml + miniforge-version: latest channels: conda-forge + conda-remove-defaults: "true" - - name: Prepare - run: conda install conda-build=24.9 conda-verify pytest anaconda-client - + - name: Disable upload run: conda config --set anaconda_upload no diff --git a/CMakeLists.txt b/CMakeLists.txt index b3d7377..fa9838e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,12 +1,17 @@ cmake_minimum_required(VERSION 3.15) project(aare - VERSION 1.0.0 DESCRIPTION "Data processing library for PSI detectors" HOMEPAGE_URL "https://github.com/slsdetectorgroup/aare" LANGUAGES C CXX ) +# Read VERSION file into project version 
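+# (the same VERSION file is read by conda-recipe/meta.yaml, so the project
+# version is defined in a single place)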
+set(VERSION_FILE "${CMAKE_CURRENT_SOURCE_DIR}/VERSION") +file(READ "${VERSION_FILE}" VERSION_CONTENT) +string(STRIP "${VERSION_CONTENT}" PROJECT_VERSION_STRING) +set(PROJECT_VERSION ${PROJECT_VERSION_STRING}) + set(CMAKE_CXX_STANDARD 17) set(CMAKE_CXX_STANDARD_REQUIRED ON) set(CMAKE_CXX_EXTENSIONS OFF) @@ -39,7 +44,7 @@ set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake" ${CMAKE_MODULE_PATH}) # General options -option(AARE_PYTHON_BINDINGS "Build python bindings" ON) +option(AARE_PYTHON_BINDINGS "Build python bindings" OFF) option(AARE_TESTS "Build tests" OFF) option(AARE_BENCHMARKS "Build benchmarks" OFF) option(AARE_EXAMPLES "Build examples" OFF) @@ -74,6 +79,9 @@ endif() if(AARE_VERBOSE) add_compile_definitions(AARE_VERBOSE) + add_compile_definitions(AARE_LOG_LEVEL=aare::logDEBUG5) +else() + add_compile_definitions(AARE_LOG_LEVEL=aare::logERROR) endif() if(AARE_CUSTOM_ASSERT) @@ -85,6 +93,7 @@ if(AARE_BENCHMARKS) endif() + set(CMAKE_EXPORT_COMPILE_COMMANDS ON) if(AARE_FETCH_LMFIT) @@ -340,6 +349,8 @@ endif() set(PUBLICHEADERS include/aare/ArrayExpr.hpp + include/aare/CalculateEta.hpp + include/aare/Cluster.hpp include/aare/ClusterFinder.hpp include/aare/ClusterFile.hpp include/aare/CtbRawFile.hpp @@ -352,6 +363,7 @@ set(PUBLICHEADERS include/aare/FileInterface.hpp include/aare/FilePtr.hpp include/aare/Frame.hpp + include/aare/GainMap.hpp include/aare/geo_helpers.hpp include/aare/JungfrauDataFile.hpp include/aare/NDArray.hpp @@ -365,13 +377,11 @@ set(PUBLICHEADERS include/aare/RawSubFile.hpp include/aare/VarClusterFinder.hpp include/aare/utils/task.hpp - ) set(SourceFiles ${CMAKE_CURRENT_SOURCE_DIR}/src/CtbRawFile.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/defs.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Dtype.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/decode.cpp @@ -388,19 +398,18 @@ set(SourceFiles ${CMAKE_CURRENT_SOURCE_DIR}/src/RawFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/RawSubFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/RawMasterFile.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/src/utils/task.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/utils/ifstream_helpers.cpp ) - add_library(aare_core STATIC ${SourceFiles}) target_include_directories(aare_core PUBLIC "$" - "$" + "$" ) - +set(THREADS_PREFER_PTHREAD_FLAG ON) +find_package(Threads REQUIRED) target_link_libraries( aare_core @@ -410,7 +419,8 @@ target_link_libraries( ${STD_FS_LIB} # from helpers.cmake PRIVATE aare_compiler_flags - "$" + Threads::Threads + $ ) @@ -436,12 +446,16 @@ if(AARE_TESTS) ${CMAKE_CURRENT_SOURCE_DIR}/src/NDView.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFinder.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterVector.test.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/Cluster.test.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/CalculateEta.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFile.test.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFinderMT.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Pedestal.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/JungfrauDataFile.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyHelpers.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/RawFile.test.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/RawSubFile.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/utils/task.test.cpp ) diff --git a/VERSION b/VERSION new file mode 100644 index 0000000..ae365e4 --- /dev/null +++ b/VERSION @@ -0,0 +1 @@ +2025.5.22 \ No newline at end of file diff --git a/benchmarks/CMakeLists.txt b/benchmarks/CMakeLists.txt index d083bab..699b4c6 100644 --- a/benchmarks/CMakeLists.txt +++ 
b/benchmarks/CMakeLists.txt @@ -1,11 +1,27 @@ -find_package(benchmark REQUIRED) -add_executable(ndarray_benchmark ndarray_benchmark.cpp) +include(FetchContent) -target_link_libraries(ndarray_benchmark benchmark::benchmark aare_core aare_compiler_flags) -# target_link_libraries(tests PRIVATE aare_core aare_compiler_flags) -set_target_properties(ndarray_benchmark PROPERTIES - RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR} - # OUTPUT_NAME run_tests +FetchContent_Declare( + benchmark + GIT_REPOSITORY https://github.com/google/benchmark.git + GIT_TAG v1.8.3 # Change to the latest version if needed +) + +# Ensure Google Benchmark is built correctly +set(BENCHMARK_ENABLE_TESTING OFF CACHE BOOL "" FORCE) + +FetchContent_MakeAvailable(benchmark) + +add_executable(benchmarks) + +target_sources(benchmarks PRIVATE ndarray_benchmark.cpp calculateeta_benchmark.cpp) + +# Link Google Benchmark and other necessary libraries +target_link_libraries(benchmarks PRIVATE benchmark::benchmark aare_core aare_compiler_flags) + +# Set output properties +set_target_properties(benchmarks PROPERTIES + RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR} + OUTPUT_NAME run_benchmarks ) \ No newline at end of file diff --git a/benchmarks/calculateeta_benchmark.cpp b/benchmarks/calculateeta_benchmark.cpp new file mode 100644 index 0000000..a320188 --- /dev/null +++ b/benchmarks/calculateeta_benchmark.cpp @@ -0,0 +1,70 @@ +#include "aare/CalculateEta.hpp" +#include "aare/ClusterFile.hpp" +#include + +using namespace aare; + +class ClusterFixture : public benchmark::Fixture { + public: + Cluster cluster_2x2{}; + Cluster cluster_3x3{}; + + private: + using benchmark::Fixture::SetUp; + + void SetUp([[maybe_unused]] const benchmark::State &state) override { + int temp_data[4] = {1, 2, 3, 1}; + std::copy(std::begin(temp_data), std::end(temp_data), + std::begin(cluster_2x2.data)); + + cluster_2x2.x = 0; + cluster_2x2.y = 0; + + int temp_data2[9] = {1, 2, 3, 1, 3, 4, 5, 1, 20}; + std::copy(std::begin(temp_data2), std::end(temp_data2), + std::begin(cluster_3x3.data)); + + cluster_3x3.x = 0; + cluster_3x3.y = 0; + } + + // void TearDown(::benchmark::State& state) { + // } +}; + +BENCHMARK_F(ClusterFixture, Calculate2x2Eta)(benchmark::State &st) { + for (auto _ : st) { + // This code gets timed + Eta2 eta = calculate_eta2(cluster_2x2); + benchmark::DoNotOptimize(eta); + } +} + +// almost takes double the time +BENCHMARK_F(ClusterFixture, + CalculateGeneralEtaFor2x2Cluster)(benchmark::State &st) { + for (auto _ : st) { + // This code gets timed + Eta2 eta = calculate_eta2(cluster_2x2); + benchmark::DoNotOptimize(eta); + } +} + +BENCHMARK_F(ClusterFixture, Calculate3x3Eta)(benchmark::State &st) { + for (auto _ : st) { + // This code gets timed + Eta2 eta = calculate_eta2(cluster_3x3); + benchmark::DoNotOptimize(eta); + } +} + +// almost takes double the time +BENCHMARK_F(ClusterFixture, + CalculateGeneralEtaFor3x3Cluster)(benchmark::State &st) { + for (auto _ : st) { + // This code gets timed + Eta2 eta = calculate_eta2(cluster_3x3); + benchmark::DoNotOptimize(eta); + } +} +// BENCHMARK_MAIN(); \ No newline at end of file diff --git a/conda-recipe/conda_build_config.yaml b/conda-recipe/conda_build_config.yaml index 36a7465..6d3d479 100644 --- a/conda-recipe/conda_build_config.yaml +++ b/conda-recipe/conda_build_config.yaml @@ -1,28 +1,5 @@ python: - 3.11 - - 3.11 - - 3.11 - - 3.12 - - 3.12 - 3.12 - 3.13 - - -numpy: - - 1.26 - - 2.0 - - 2.1 - - 1.26 - - 2.0 - - 2.1 - - 2.1 - - -zip_keys: - - python - - numpy - -pin_run_as_build: - numpy: x.x - 
python: x.x \ No newline at end of file diff --git a/conda-recipe/meta.yaml b/conda-recipe/meta.yaml index 12c6ca0..8fea745 100644 --- a/conda-recipe/meta.yaml +++ b/conda-recipe/meta.yaml @@ -1,11 +1,10 @@ +source: + path: ../ + +{% set version = load_file_regex(load_file = 'VERSION', regex_pattern = '(\d+(?:\.\d+)*(?:[\+\w\.]+))').group(1) %} package: name: aare - version: 2025.4.22 #TODO! how to not duplicate this? - - - - - + version: {{version}} source: path: .. @@ -13,45 +12,39 @@ source: build: number: 0 script: - - unset CMAKE_GENERATOR && {{ PYTHON }} -m pip install . -vv # [not win] - - {{ PYTHON }} -m pip install . -vv # [win] + - unset CMAKE_GENERATOR && {{ PYTHON }} -m pip install . -vv requirements: build: - - python {{python}} - - numpy {{ numpy }} - {{ compiler('cxx') }} - - - host: - cmake - ninja - - python {{python}} - - numpy {{ numpy }} + + host: + - python - pip + - numpy=2.1 - scikit-build-core - pybind11 >=2.13.0 - - fmt - - zeromq - - nlohmann_json - - catch2 + - matplotlib # needed in host to solve the environment for run run: - - python {{python}} - - numpy {{ numpy }} + - python + - {{ pin_compatible('numpy') }} - matplotlib + test: imports: - aare - # requires: - # - pytest - # source_files: - # - tests - # commands: - # - pytest tests + requires: + - pytest + - boost-histogram + source_files: + - python/tests + commands: + - python -m pytest python/tests about: - summary: An example project built with pybind11 and scikit-build. - # license_file: LICENSE \ No newline at end of file + summary: Data analysis library for hybrid pixel detectors from PSI diff --git a/etc/dev-env.yml b/etc/dev-env.yml index 25038ee..e580c81 100644 --- a/etc/dev-env.yml +++ b/etc/dev-env.yml @@ -3,13 +3,11 @@ channels: - conda-forge dependencies: - anaconda-client + - conda-build - doxygen - sphinx=7.1.2 - breathe - - pybind11 - sphinx_rtd_theme - furo - - nlohmann_json - zeromq - - fmt - - numpy + diff --git a/include/aare/ArrayExpr.hpp b/include/aare/ArrayExpr.hpp index 7f8015c..d326601 100644 --- a/include/aare/ArrayExpr.hpp +++ b/include/aare/ArrayExpr.hpp @@ -1,22 +1,24 @@ #pragma once -#include //int64_t -#include //size_t +#include +#include #include - #include +#include "aare/defs.hpp" + + namespace aare { -template class ArrayExpr { +template class ArrayExpr { public: static constexpr bool is_leaf = false; auto operator[](size_t i) const { return static_cast(*this)[i]; } auto operator()(size_t i) const { return static_cast(*this)[i]; } auto size() const { return static_cast(*this).size(); } - std::array shape() const { return static_cast(*this).shape(); } + std::array shape() const { return static_cast(*this).shape(); } }; -template +template class ArrayAdd : public ArrayExpr, Ndim> { const A &arr1_; const B &arr2_; @@ -27,10 +29,10 @@ class ArrayAdd : public ArrayExpr, Ndim> { } auto operator[](int i) const { return arr1_[i] + arr2_[i]; } size_t size() const { return arr1_.size(); } - std::array shape() const { return arr1_.shape(); } + std::array shape() const { return arr1_.shape(); } }; -template +template class ArraySub : public ArrayExpr, Ndim> { const A &arr1_; const B &arr2_; @@ -41,10 +43,10 @@ class ArraySub : public ArrayExpr, Ndim> { } auto operator[](int i) const { return arr1_[i] - arr2_[i]; } size_t size() const { return arr1_.size(); } - std::array shape() const { return arr1_.shape(); } + std::array shape() const { return arr1_.shape(); } }; -template +template class ArrayMul : public ArrayExpr,Ndim> { const A &arr1_; const B &arr2_; @@ -55,10 +57,10 @@ class 
ArrayMul : public ArrayExpr,Ndim> { } auto operator[](int i) const { return arr1_[i] * arr2_[i]; } size_t size() const { return arr1_.size(); } - std::array shape() const { return arr1_.shape(); } + std::array shape() const { return arr1_.shape(); } }; -template +template class ArrayDiv : public ArrayExpr, Ndim> { const A &arr1_; const B &arr2_; @@ -69,27 +71,27 @@ class ArrayDiv : public ArrayExpr, Ndim> { } auto operator[](int i) const { return arr1_[i] / arr2_[i]; } size_t size() const { return arr1_.size(); } - std::array shape() const { return arr1_.shape(); } + std::array shape() const { return arr1_.shape(); } }; -template +template auto operator+(const ArrayExpr &arr1, const ArrayExpr &arr2) { return ArrayAdd, ArrayExpr, Ndim>(arr1, arr2); } -template +template auto operator-(const ArrayExpr &arr1, const ArrayExpr &arr2) { return ArraySub, ArrayExpr, Ndim>(arr1, arr2); } -template +template auto operator*(const ArrayExpr &arr1, const ArrayExpr &arr2) { return ArrayMul, ArrayExpr, Ndim>(arr1, arr2); } -template +template auto operator/(const ArrayExpr &arr1, const ArrayExpr &arr2) { return ArrayDiv, ArrayExpr, Ndim>(arr1, arr2); } diff --git a/include/aare/CalculateEta.hpp b/include/aare/CalculateEta.hpp new file mode 100644 index 0000000..db17dad --- /dev/null +++ b/include/aare/CalculateEta.hpp @@ -0,0 +1,170 @@ +#pragma once + +#include "aare/Cluster.hpp" +#include "aare/ClusterVector.hpp" +#include "aare/NDArray.hpp" + +namespace aare { + +enum class corner : int { + cBottomLeft = 0, + cBottomRight = 1, + cTopLeft = 2, + cTopRight = 3 +}; + +enum class pixel : int { + pBottomLeft = 0, + pBottom = 1, + pBottomRight = 2, + pLeft = 3, + pCenter = 4, + pRight = 5, + pTopLeft = 6, + pTop = 7, + pTopRight = 8 +}; + +template struct Eta2 { + double x; + double y; + int c; + T sum; +}; + +/** + * @brief Calculate the eta2 values for all clusters in a Clustervector + */ +template >> +NDArray calculate_eta2(const ClusterVector &clusters) { + NDArray eta2({static_cast(clusters.size()), 2}); + + for (size_t i = 0; i < clusters.size(); i++) { + auto e = calculate_eta2(clusters[i]); + eta2(i, 0) = e.x; + eta2(i, 1) = e.y; + } + + return eta2; +} + +/** + * @brief Calculate the eta2 values for a generic sized cluster and return them + * in a Eta2 struct containing etay, etax and the index of the respective 2x2 + * subcluster. 
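+ *
+ * A minimal usage sketch (illustrative, not part of this diff; assumes the
+ * template parameter order Cluster<T, SizeX, SizeY, CoordType>):
+ * @code
+ * Cluster<int, 3, 3, int16_t> cl{10, 20, {1, 2, 3, 1, 3, 4, 5, 1, 20}};
+ * auto eta = calculate_eta2(cl);
+ * // eta.x and eta.y hold the eta2 coordinates, eta.sum the max 2x2 sum,
+ * // and eta.c the index of the max 2x2 subcluster
+ * @endcode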
+ */ +template +Eta2 +calculate_eta2(const Cluster &cl) { + Eta2 eta{}; + + auto max_sum = cl.max_sum_2x2(); + eta.sum = max_sum.first; + auto c = max_sum.second; + + size_t cluster_center_index = + (ClusterSizeX / 2) + (ClusterSizeY / 2) * ClusterSizeX; + + size_t index_bottom_left_max_2x2_subcluster = + (int(c / (ClusterSizeX - 1))) * ClusterSizeX + c % (ClusterSizeX - 1); + + // check that cluster center is in max subcluster + if (cluster_center_index != index_bottom_left_max_2x2_subcluster && + cluster_center_index != index_bottom_left_max_2x2_subcluster + 1 && + cluster_center_index != + index_bottom_left_max_2x2_subcluster + ClusterSizeX && + cluster_center_index != + index_bottom_left_max_2x2_subcluster + ClusterSizeX + 1) + throw std::runtime_error("Photon center is not in max 2x2_subcluster"); + + if ((cluster_center_index - index_bottom_left_max_2x2_subcluster) % + ClusterSizeX == + 0) { + if ((cl.data[cluster_center_index + 1] + + cl.data[cluster_center_index]) != 0) + + eta.x = static_cast(cl.data[cluster_center_index + 1]) / + static_cast((cl.data[cluster_center_index + 1] + + cl.data[cluster_center_index])); + } else { + if ((cl.data[cluster_center_index] + + cl.data[cluster_center_index - 1]) != 0) + + eta.x = static_cast(cl.data[cluster_center_index]) / + static_cast((cl.data[cluster_center_index - 1] + + cl.data[cluster_center_index])); + } + if ((cluster_center_index - index_bottom_left_max_2x2_subcluster) / + ClusterSizeX < + 1) { + assert(cluster_center_index + ClusterSizeX < + ClusterSizeX * ClusterSizeY); // suppress warning + if ((cl.data[cluster_center_index] + + cl.data[cluster_center_index + ClusterSizeX]) != 0) + eta.y = static_cast( + cl.data[cluster_center_index + ClusterSizeX]) / + static_cast( + (cl.data[cluster_center_index] + + cl.data[cluster_center_index + ClusterSizeX])); + } else { + if ((cl.data[cluster_center_index] + + cl.data[cluster_center_index - ClusterSizeX]) != 0) + eta.y = static_cast(cl.data[cluster_center_index]) / + static_cast( + (cl.data[cluster_center_index] + + cl.data[cluster_center_index - ClusterSizeX])); + } + + eta.c = c; // TODO only supported for 2x2 and 3x3 clusters -> at least no + // underyling enum class + return eta; +} + +// TODO! Look up eta2 calculation - photon center should be top right corner +template +Eta2 calculate_eta2(const Cluster &cl) { + Eta2 eta{}; + + if ((cl.data[0] + cl.data[1]) != 0) + eta.x = static_cast(cl.data[1]) / (cl.data[0] + cl.data[1]); + if ((cl.data[0] + cl.data[2]) != 0) + eta.y = static_cast(cl.data[2]) / (cl.data[0] + cl.data[2]); + eta.sum = cl.sum(); + eta.c = static_cast(corner::cBottomLeft); // TODO! 
This is not correct, + // but need to put something + return eta; +} + +// calculates Eta3 for 3x3 cluster based on code from analyze_cluster +// TODO only supported for 3x3 Clusters +template Eta2 calculate_eta3(const Cluster &cl) { + + Eta2 eta{}; + + T sum = 0; + + std::for_each(std::begin(cl.data), std::end(cl.data), + [&sum](T x) { sum += x; }); + + eta.sum = sum; + + eta.c = corner::cBottomLeft; + + if ((cl.data[3] + cl.data[4] + cl.data[5]) != 0) + + eta.x = static_cast(-cl.data[3] + cl.data[3 + 2]) / + + (cl.data[3] + cl.data[4] + cl.data[5]); + + if ((cl.data[1] + cl.data[4] + cl.data[7]) != 0) + + eta.y = static_cast(-cl.data[1] + cl.data[2 * 3 + 1]) / + + (cl.data[1] + cl.data[4] + cl.data[7]); + + return eta; +} + +} // namespace aare \ No newline at end of file diff --git a/include/aare/Cluster.hpp b/include/aare/Cluster.hpp index 48f9ef0..889593b 100644 --- a/include/aare/Cluster.hpp +++ b/include/aare/Cluster.hpp @@ -1,36 +1,86 @@ + +/************************************************ + * @file Cluster.hpp + * @short definition of cluster, where CoordType (x,y) give + * the cluster center coordinates and data the actual cluster data + * cluster size is given as template parameters + ***********************************************/ + #pragma once #include #include -#include #include #include +#include namespace aare { -//TODO! Template this? -struct Cluster3x3 { - int16_t x; - int16_t y; - int32_t data[9]; +// requires clause c++20 maybe update +template +struct Cluster { - int32_t sum_2x2() const{ - std::array total; - total[0] = data[0] + data[1] + data[3] + data[4]; - total[1] = data[1] + data[2] + data[4] + data[5]; - total[2] = data[3] + data[4] + data[6] + data[7]; - total[3] = data[4] + data[5] + data[7] + data[8]; - return *std::max_element(total.begin(), total.end()); - } + static_assert(std::is_arithmetic_v, "T needs to be an arithmetic type"); + static_assert(std::is_integral_v, + "CoordType needs to be an integral type"); + static_assert(ClusterSizeX > 0 && ClusterSizeY > 0, + "Cluster sizes must be bigger than zero"); - int32_t sum() const{ - return std::accumulate(data, data + 9, 0); + CoordType x; + CoordType y; + std::array data; + + static constexpr uint8_t cluster_size_x = ClusterSizeX; + static constexpr uint8_t cluster_size_y = ClusterSizeY; + using value_type = T; + using coord_type = CoordType; + + T sum() const { return std::accumulate(data.begin(), data.end(), T{}); } + + std::pair max_sum_2x2() const { + + if constexpr (cluster_size_x == 3 && cluster_size_y == 3) { + std::array sum_2x2_subclusters; + sum_2x2_subclusters[0] = data[0] + data[1] + data[3] + data[4]; + sum_2x2_subclusters[1] = data[1] + data[2] + data[4] + data[5]; + sum_2x2_subclusters[2] = data[3] + data[4] + data[6] + data[7]; + sum_2x2_subclusters[3] = data[4] + data[5] + data[7] + data[8]; + int index = std::max_element(sum_2x2_subclusters.begin(), + sum_2x2_subclusters.end()) - + sum_2x2_subclusters.begin(); + return std::make_pair(sum_2x2_subclusters[index], index); + } else if constexpr (cluster_size_x == 2 && cluster_size_y == 2) { + return std::make_pair(data[0] + data[1] + data[2] + data[3], 0); + } else { + constexpr size_t num_2x2_subclusters = + (ClusterSizeX - 1) * (ClusterSizeY - 1); + + std::array sum_2x2_subcluster; + for (size_t i = 0; i < ClusterSizeY - 1; ++i) { + for (size_t j = 0; j < ClusterSizeX - 1; ++j) + sum_2x2_subcluster[i * (ClusterSizeX - 1) + j] = + data[i * ClusterSizeX + j] + + data[i * ClusterSizeX + j + 1] + + data[(i + 1) * ClusterSizeX + j] + + data[(i 
+ 1) * ClusterSizeX + j + 1]; + } + + int index = std::max_element(sum_2x2_subcluster.begin(), + sum_2x2_subcluster.end()) - + sum_2x2_subcluster.begin(); + return std::make_pair(sum_2x2_subcluster[index], index); + } } }; -struct Cluster2x2 { - int16_t x; - int16_t y; - int32_t data[4]; -}; -} // namespace aare \ No newline at end of file +// Type Traits for is_cluster_type +template +struct is_cluster : std::false_type {}; // Default case: Not a Cluster + +template +struct is_cluster> : std::true_type {}; // Cluster + +template constexpr bool is_cluster_v = is_cluster::value; + +} // namespace aare diff --git a/include/aare/ClusterCollector.hpp b/include/aare/ClusterCollector.hpp index 0738062..ae78a8e 100644 --- a/include/aare/ClusterCollector.hpp +++ b/include/aare/ClusterCollector.hpp @@ -2,29 +2,31 @@ #include #include -#include "aare/ProducerConsumerQueue.hpp" -#include "aare/ClusterVector.hpp" #include "aare/ClusterFinderMT.hpp" +#include "aare/ClusterVector.hpp" +#include "aare/ProducerConsumerQueue.hpp" namespace aare { -class ClusterCollector{ - ProducerConsumerQueue>* m_source; - std::atomic m_stop_requested{false}; - std::atomic m_stopped{true}; - std::chrono::milliseconds m_default_wait{1}; - std::thread m_thread; - std::vector> m_clusters; +template >> +class ClusterCollector { + ProducerConsumerQueue> *m_source; + std::atomic m_stop_requested{false}; + std::atomic m_stopped{true}; + std::chrono::milliseconds m_default_wait{1}; + std::thread m_thread; + std::vector> m_clusters; - void process(){ + void process() { m_stopped = false; fmt::print("ClusterCollector started\n"); - while (!m_stop_requested || !m_source->isEmpty()) { - if (ClusterVector *clusters = m_source->frontPtr(); + while (!m_stop_requested || !m_source->isEmpty()) { + if (ClusterVector *clusters = m_source->frontPtr(); clusters != nullptr) { m_clusters.push_back(std::move(*clusters)); m_source->popFront(); - }else{ + } else { std::this_thread::sleep_for(m_default_wait); } } @@ -32,21 +34,25 @@ class ClusterCollector{ m_stopped = true; } - public: - ClusterCollector(ClusterFinderMT* source){ - m_source = source->sink(); - m_thread = std::thread(&ClusterCollector::process, this); - } - void stop(){ - m_stop_requested = true; - m_thread.join(); - } - std::vector> steal_clusters(){ - if(!m_stopped){ - throw std::runtime_error("ClusterCollector is still running"); - } - return std::move(m_clusters); + public: + ClusterCollector(ClusterFinderMT *source) { + m_source = source->sink(); + m_thread = + std::thread(&ClusterCollector::process, + this); // only one process does that so why isnt it + // automatically written to m_cluster in collect + // - instead of writing first to m_sink? + } + void stop() { + m_stop_requested = true; + m_thread.join(); + } + std::vector> steal_clusters() { + if (!m_stopped) { + throw std::runtime_error("ClusterCollector is still running"); } + return std::move(m_clusters); + } }; } // namespace aare \ No newline at end of file diff --git a/include/aare/ClusterFile.hpp b/include/aare/ClusterFile.hpp index b47a1d5..ef78874 100644 --- a/include/aare/ClusterFile.hpp +++ b/include/aare/ClusterFile.hpp @@ -2,6 +2,7 @@ #include "aare/Cluster.hpp" #include "aare/ClusterVector.hpp" +#include "aare/GainMap.hpp" #include "aare/NDArray.hpp" #include "aare/defs.hpp" #include @@ -10,43 +11,18 @@ namespace aare { +/* +Binary cluster file. 
Expects data to be layed out as: +int32_t frame_number +uint32_t number_of_clusters +int16_t x, int16_t y, int32_t data[9] x number_of_clusters +int32_t frame_number +uint32_t number_of_clusters +.... +*/ -//TODO! Legacy enums, migrate to enum class -typedef enum { - cBottomLeft = 0, - cBottomRight = 1, - cTopLeft = 2, - cTopRight = 3 -} corner; - -typedef enum { - pBottomLeft = 0, - pBottom = 1, - pBottomRight = 2, - pLeft = 3, - pCenter = 4, - pRight = 5, - pTopLeft = 6, - pTop = 7, - pTopRight = 8 -} pixel; - -struct Eta2 { - double x; - double y; - corner c; - int32_t sum; -}; - -struct ClusterAnalysis { - uint32_t c; - int32_t tot; - double etax; - double etay; -}; - - - +// TODO: change to support any type of clusters, e.g. header line with +// clsuter_size_x, cluster_size_y, /** * @brief Class to read and write cluster files * Expects data to be laid out as: @@ -59,14 +35,19 @@ struct ClusterAnalysis { * uint32_t number_of_clusters * etc. */ +template >> class ClusterFile { FILE *fp{}; - uint32_t m_num_left{}; /*Number of photons left in frame*/ - size_t m_chunk_size{}; /*Number of clusters to read at a time*/ - const std::string m_mode; /*Mode to open the file in*/ - std::optional m_roi; /*Region of interest, will be applied if set*/ - std::optional> m_noise_map; /*Noise map to cut photons, will be applied if set*/ - std::optional> m_gain_map; /*Gain map to apply to the clusters, will be applied if set*/ + const std::string m_filename{}; + uint32_t m_num_left{}; /*Number of photons left in frame*/ + size_t m_chunk_size{}; /*Number of clusters to read at a time*/ + std::string m_mode; /*Mode to open the file in*/ + std::optional m_roi; /*Region of interest, will be applied if set*/ + std::optional> + m_noise_map; /*Noise map to cut photons, will be applied if set*/ + std::optional m_gain_map; /*Gain map to apply to the + clusters, will be applied if set*/ public: /** @@ -79,74 +60,390 @@ class ClusterFile { * @throws std::runtime_error if the file could not be opened */ ClusterFile(const std::filesystem::path &fname, size_t chunk_size = 1000, - const std::string &mode = "r"); - - - ~ClusterFile(); + const std::string &mode = "r") + + : m_filename(fname.string()), m_chunk_size(chunk_size), m_mode(mode) { + + if (mode == "r") { + fp = fopen(m_filename.c_str(), "rb"); + if (!fp) { + throw std::runtime_error("Could not open file for reading: " + + m_filename); + } + } else if (mode == "w") { + fp = fopen(m_filename.c_str(), "wb"); + if (!fp) { + throw std::runtime_error("Could not open file for writing: " + + m_filename); + } + } else if (mode == "a") { + fp = fopen(m_filename.c_str(), "ab"); + if (!fp) { + throw std::runtime_error("Could not open file for appending: " + + m_filename); + } + } else { + throw std::runtime_error("Unsupported mode: " + mode); + } + } + + ~ClusterFile() { close(); } /** - * @brief Read n_clusters clusters from the file discarding frame numbers. - * If EOF is reached the returned vector will have less than n_clusters - * clusters + * @brief Read n_clusters clusters from the file discarding + * frame numbers. 
If EOF is reached the returned vector will + * have less than n_clusters clusters */ - ClusterVector read_clusters(size_t n_clusters); - - ClusterVector read_clusters(size_t n_clusters, ROI roi); + ClusterVector read_clusters(size_t n_clusters) { + if (m_mode != "r") { + throw std::runtime_error("File not opened for reading"); + } + if (m_noise_map || m_roi) { + return read_clusters_with_cut(n_clusters); + } else { + return read_clusters_without_cut(n_clusters); + } + } /** - * @brief Read a single frame from the file and return the clusters. The - * cluster vector will have the frame number set. - * @throws std::runtime_error if the file is not opened for reading or the file pointer not - * at the beginning of a frame + * @brief Read a single frame from the file and return the + * clusters. The cluster vector will have the frame number + * set. + * @throws std::runtime_error if the file is not opened for + * reading or the file pointer not at the beginning of a + * frame */ - ClusterVector read_frame(); + ClusterVector read_frame() { + if (m_mode != "r") { + throw std::runtime_error(LOCATION + "File not opened for reading"); + } + if (m_noise_map || m_roi) { + return read_frame_with_cut(); + } else { + return read_frame_without_cut(); + } + } + void write_frame(const ClusterVector &clusters) { + if (m_mode != "w" && m_mode != "a") { + throw std::runtime_error("File not opened for writing"); + } + + int32_t frame_number = clusters.frame_number(); + fwrite(&frame_number, sizeof(frame_number), 1, fp); + uint32_t n_clusters = clusters.size(); + fwrite(&n_clusters, sizeof(n_clusters), 1, fp); + fwrite(clusters.data(), clusters.item_size(), clusters.size(), fp); + } - void write_frame(const ClusterVector &clusters); - /** * @brief Return the chunk size */ size_t chunk_size() const { return m_chunk_size; } /** - * @brief Set the region of interest to use when reading clusters. If set only clusters within - * the ROI will be read. + * @brief Set the region of interest to use when reading + * clusters. If set only clusters within the ROI will be + * read. */ - void set_roi(ROI roi); + void set_roi(ROI roi) { m_roi = roi; } /** - * @brief Set the noise map to use when reading clusters. If set clusters below the noise - * level will be discarded. Selection criteria one of: Central pixel above noise, highest - * 2x2 sum above 2 * noise, total sum above 3 * noise. + * @brief Set the noise map to use when reading clusters. If + * set clusters below the noise level will be discarded. + * Selection criteria one of: Central pixel above noise, + * highest 2x2 sum above 2 * noise, total sum above 3 * + * noise. */ - void set_noise_map(const NDView noise_map); + void set_noise_map(const NDView noise_map) { + m_noise_map = NDArray(noise_map); + } /** - * @brief Set the gain map to use when reading clusters. If set the gain map will be applied - * to the clusters that pass ROI and noise_map selection. The gain map is expected to be in ADU/energy. + * @brief Set the gain map to use when reading clusters. If set the gain map + * will be applied to the clusters that pass ROI and noise_map selection. + * The gain map is expected to be in ADU/energy. */ - void set_gain_map(const NDView gain_map); - - - /** - * @brief Close the file. 
If not closed the file will be closed in the destructor - */ - void close(); + void set_gain_map(const NDView gain_map) { + m_gain_map = InvertedGainMap(gain_map); + } - private: - ClusterVector read_clusters_with_cut(size_t n_clusters); - ClusterVector read_clusters_without_cut(size_t n_clusters); - ClusterVector read_frame_with_cut(); - ClusterVector read_frame_without_cut(); - bool is_selected(Cluster3x3 &cl); - Cluster3x3 read_one_cluster(); + void set_gain_map(const InvertedGainMap &gain_map) { + m_gain_map = gain_map; + } + + void set_gain_map(const InvertedGainMap &&gain_map) { + m_gain_map = gain_map; + } + + /** + * @brief Close the file. If not closed the file will be + * closed in the destructor + */ + void close() { + if (fp) { + fclose(fp); + fp = nullptr; + } + } + + /** @brief Open the file in specific mode + * + */ + void open(const std::string &mode) { + if (fp) { + close(); + } + + if (mode == "r") { + fp = fopen(m_filename.c_str(), "rb"); + if (!fp) { + throw std::runtime_error("Could not open file for reading: " + + m_filename); + } + m_mode = "r"; + } else if (mode == "w") { + fp = fopen(m_filename.c_str(), "wb"); + if (!fp) { + throw std::runtime_error("Could not open file for writing: " + + m_filename); + } + m_mode = "w"; + } else if (mode == "a") { + fp = fopen(m_filename.c_str(), "ab"); + if (!fp) { + throw std::runtime_error("Could not open file for appending: " + + m_filename); + } + m_mode = "a"; + } else { + throw std::runtime_error("Unsupported mode: " + mode); + } + } + + private: + ClusterVector read_clusters_with_cut(size_t n_clusters); + ClusterVector read_clusters_without_cut(size_t n_clusters); + ClusterVector read_frame_with_cut(); + ClusterVector read_frame_without_cut(); + bool is_selected(ClusterType &cl); + ClusterType read_one_cluster(); }; -//TODO! helper functions that doesn't really belong here -NDArray calculate_eta2(ClusterVector &clusters); -Eta2 calculate_eta2(Cluster3x3 &cl); -Eta2 calculate_eta2(Cluster2x2 &cl); +template +ClusterVector +ClusterFile::read_clusters_without_cut(size_t n_clusters) { + if (m_mode != "r") { + throw std::runtime_error("File not opened for reading"); + } + ClusterVector clusters(n_clusters); + clusters.resize(n_clusters); + + int32_t iframe = 0; // frame number needs to be 4 bytes! + size_t nph_read = 0; + uint32_t nn = m_num_left; + uint32_t nph = m_num_left; // number of clusters in frame needs to be 4 + + auto buf = clusters.data(); + // if there are photons left from previous frame read them first + if (nph) { + if (nph > n_clusters) { + // if we have more photons left in the frame then photons to + // read we read directly the requested number + nn = n_clusters; + } else { + nn = nph; + } + nph_read += fread((buf + nph_read), clusters.item_size(), nn, fp); + m_num_left = nph - nn; // write back the number of photons left + } + + if (nph_read < n_clusters) { + // keep on reading frames and photons until reaching n_clusters + while (fread(&iframe, sizeof(iframe), 1, fp)) { + clusters.set_frame_number(iframe); + // read number of clusters in frame + if (fread(&nph, sizeof(nph), 1, fp)) { + if (nph > (n_clusters - nph_read)) + nn = n_clusters - nph_read; + else + nn = nph; + + nph_read += + fread((buf + nph_read), clusters.item_size(), nn, fp); + m_num_left = nph - nn; + } + if (nph_read >= n_clusters) + break; + } + } + + // Resize the vector to the number o f clusters. + // No new allocation, only change bounds. 
+ clusters.resize(nph_read); + if (m_gain_map) + m_gain_map->apply_gain_map(clusters); + return clusters; +} + +template +ClusterVector +ClusterFile::read_clusters_with_cut(size_t n_clusters) { + ClusterVector clusters; + clusters.reserve(n_clusters); + + // if there are photons left from previous frame read them first + if (m_num_left) { + while (m_num_left && clusters.size() < n_clusters) { + ClusterType c = read_one_cluster(); + if (is_selected(c)) { + clusters.push_back(c); + } + } + } + + // we did not have enough clusters left in the previous frame + // keep on reading frames until reaching n_clusters + if (clusters.size() < n_clusters) { + // sanity check + if (m_num_left) { + throw std::runtime_error( + LOCATION + "Entered second loop with clusters left\n"); + } + + int32_t frame_number = 0; // frame number needs to be 4 bytes! + while (fread(&frame_number, sizeof(frame_number), 1, fp)) { + if (fread(&m_num_left, sizeof(m_num_left), 1, fp)) { + clusters.set_frame_number( + frame_number); // cluster vector will hold the last + // frame number + while (m_num_left && clusters.size() < n_clusters) { + ClusterType c = read_one_cluster(); + if (is_selected(c)) { + clusters.push_back(c); + } + } + } + + // we have enough clusters, break out of the outer while loop + if (clusters.size() >= n_clusters) + break; + } + } + if (m_gain_map) + m_gain_map->apply_gain_map(clusters); + + return clusters; +} + +template +ClusterType ClusterFile::read_one_cluster() { + ClusterType c; + auto rc = fread(&c, sizeof(c), 1, fp); + if (rc != 1) { + throw std::runtime_error(LOCATION + "Could not read cluster"); + } + --m_num_left; + return c; +} + +template +ClusterVector +ClusterFile::read_frame_without_cut() { + if (m_mode != "r") { + throw std::runtime_error("File not opened for reading"); + } + if (m_num_left) { + throw std::runtime_error( + "There are still photons left in the last frame"); + } + int32_t frame_number; + if (fread(&frame_number, sizeof(frame_number), 1, fp) != 1) { + throw std::runtime_error(LOCATION + "Could not read frame number"); + } + + int32_t n_clusters; // Saved as 32bit integer in the cluster file + if (fread(&n_clusters, sizeof(n_clusters), 1, fp) != 1) { + throw std::runtime_error(LOCATION + + "Could not read number of clusters"); + } + + ClusterVector clusters(n_clusters); + clusters.set_frame_number(frame_number); + + clusters.resize(n_clusters); + + if (fread(clusters.data(), clusters.item_size(), n_clusters, fp) != + static_cast(n_clusters)) { + throw std::runtime_error(LOCATION + "Could not read clusters"); + } + + if (m_gain_map) + m_gain_map->apply_gain_map(clusters); + return clusters; +} + +template +ClusterVector +ClusterFile::read_frame_with_cut() { + if (m_mode != "r") { + throw std::runtime_error("File not opened for reading"); + } + if (m_num_left) { + throw std::runtime_error( + "There are still photons left in the last frame"); + } + int32_t frame_number; + if (fread(&frame_number, sizeof(frame_number), 1, fp) != 1) { + throw std::runtime_error("Could not read frame number"); + } + + if (fread(&m_num_left, sizeof(m_num_left), 1, fp) != 1) { + throw std::runtime_error("Could not read number of clusters"); + } + + ClusterVector clusters; + clusters.reserve(m_num_left); + clusters.set_frame_number(frame_number); + while (m_num_left) { + ClusterType c = read_one_cluster(); + if (is_selected(c)) { + clusters.push_back(c); + } + } + if (m_gain_map) + m_gain_map->apply_gain_map(clusters); + return clusters; +} + +template +bool ClusterFile::is_selected(ClusterType 
&cl) { + // Should fail fast + if (m_roi) { + if (!(m_roi->contains(cl.x, cl.y))) { + return false; + } + } + + size_t cluster_center_index = + (ClusterType::cluster_size_x / 2) + + (ClusterType::cluster_size_y / 2) * ClusterType::cluster_size_x; + + if (m_noise_map) { + auto sum_1x1 = cl.data[cluster_center_index]; // central pixel + auto sum_2x2 = cl.max_sum_2x2().first; // highest sum of 2x2 subclusters + auto total_sum = cl.sum(); // sum of all pixels + + auto noise = + (*m_noise_map)(cl.y, cl.x); // TODO! check if this is correct + if (sum_1x1 <= noise || sum_2x2 <= 2 * noise || + total_sum <= 3 * noise) { + return false; + } + } + // we passed all checks + return true; +} } // namespace aare diff --git a/include/aare/ClusterFileSink.hpp b/include/aare/ClusterFileSink.hpp index 158fdeb..810e63c 100644 --- a/include/aare/ClusterFileSink.hpp +++ b/include/aare/ClusterFileSink.hpp @@ -3,35 +3,41 @@ #include #include -#include "aare/ProducerConsumerQueue.hpp" -#include "aare/ClusterVector.hpp" #include "aare/ClusterFinderMT.hpp" +#include "aare/ClusterVector.hpp" +#include "aare/ProducerConsumerQueue.hpp" -namespace aare{ +namespace aare { -class ClusterFileSink{ - ProducerConsumerQueue>* m_source; +template >> +class ClusterFileSink { + ProducerConsumerQueue> *m_source; std::atomic m_stop_requested{false}; std::atomic m_stopped{true}; std::chrono::milliseconds m_default_wait{1}; std::thread m_thread; std::ofstream m_file; - - void process(){ + void process() { m_stopped = false; fmt::print("ClusterFileSink started\n"); - while (!m_stop_requested || !m_source->isEmpty()) { - if (ClusterVector *clusters = m_source->frontPtr(); + while (!m_stop_requested || !m_source->isEmpty()) { + if (ClusterVector *clusters = m_source->frontPtr(); clusters != nullptr) { // Write clusters to file - int32_t frame_number = clusters->frame_number(); //TODO! Should we store frame number already as int? + int32_t frame_number = + clusters->frame_number(); // TODO! Should we store frame + // number already as int? 
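+                // Record layout per frame, matching what ClusterFile expects:
+                //   int32_t  frame_number
+                //   uint32_t num_clusters
+                //   num_clusters * item_size() bytes of cluster data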
uint32_t num_clusters = clusters->size(); - m_file.write(reinterpret_cast(&frame_number), sizeof(frame_number)); - m_file.write(reinterpret_cast(&num_clusters), sizeof(num_clusters)); - m_file.write(reinterpret_cast(clusters->data()), clusters->size() * clusters->item_size()); + m_file.write(reinterpret_cast(&frame_number), + sizeof(frame_number)); + m_file.write(reinterpret_cast(&num_clusters), + sizeof(num_clusters)); + m_file.write(reinterpret_cast(clusters->data()), + clusters->size() * clusters->item_size()); m_source->popFront(); - }else{ + } else { std::this_thread::sleep_for(m_default_wait); } } @@ -39,18 +45,18 @@ class ClusterFileSink{ m_stopped = true; } - public: - ClusterFileSink(ClusterFinderMT* source, const std::filesystem::path& fname){ - m_source = source->sink(); - m_thread = std::thread(&ClusterFileSink::process, this); - m_file.open(fname, std::ios::binary); - } - void stop(){ - m_stop_requested = true; - m_thread.join(); - m_file.close(); - } + public: + ClusterFileSink(ClusterFinderMT *source, + const std::filesystem::path &fname) { + m_source = source->sink(); + m_thread = std::thread(&ClusterFileSink::process, this); + m_file.open(fname, std::ios::binary); + } + void stop() { + m_stop_requested = true; + m_thread.join(); + m_file.close(); + } }; - } // namespace aare \ No newline at end of file diff --git a/include/aare/ClusterFileV2.hpp b/include/aare/ClusterFileV2.hpp deleted file mode 100644 index 99f5976..0000000 --- a/include/aare/ClusterFileV2.hpp +++ /dev/null @@ -1,148 +0,0 @@ -#pragma once -#include "aare/core/defs.hpp" -#include -#include -#include - -namespace aare { -struct ClusterHeader { - int32_t frame_number; - int32_t n_clusters; - std::string to_string() const { - return "frame_number: " + std::to_string(frame_number) + ", n_clusters: " + std::to_string(n_clusters); - } -}; - -struct ClusterV2_ { - int16_t x; - int16_t y; - std::array data; - std::string to_string(bool detailed = false) const { - if (detailed) { - std::string data_str = "["; - for (auto &d : data) { - data_str += std::to_string(d) + ", "; - } - data_str += "]"; - return "x: " + std::to_string(x) + ", y: " + std::to_string(y) + ", data: " + data_str; - } - return "x: " + std::to_string(x) + ", y: " + std::to_string(y); - } -}; - -struct ClusterV2 { - ClusterV2_ cluster; - int32_t frame_number; - std::string to_string() const { - return "frame_number: " + std::to_string(frame_number) + ", " + cluster.to_string(); - } -}; - -/** - * @brief - * important not: fp always points to the clusters header and does not point to individual clusters - * - */ -class ClusterFileV2 { - std::filesystem::path m_fpath; - std::string m_mode; - FILE *fp{nullptr}; - - void check_open(){ - if (!fp) - throw std::runtime_error(fmt::format("File: {} not open", m_fpath.string())); - } - - public: - ClusterFileV2(std::filesystem::path const &fpath, std::string const &mode): m_fpath(fpath), m_mode(mode) { - if (m_mode != "r" && m_mode != "w") - throw std::invalid_argument("mode must be 'r' or 'w'"); - if (m_mode == "r" && !std::filesystem::exists(m_fpath)) - throw std::invalid_argument("File does not exist"); - if (mode == "r") { - fp = fopen(fpath.string().c_str(), "rb"); - } else if (mode == "w") { - if (std::filesystem::exists(fpath)) { - fp = fopen(fpath.string().c_str(), "r+b"); - } else { - fp = fopen(fpath.string().c_str(), "wb"); - } - } - if (fp == nullptr) { - throw std::runtime_error("Failed to open file"); - } - } - ~ClusterFileV2() { close(); } - std::vector read() { - check_open(); - - 
ClusterHeader header; - fread(&header, sizeof(ClusterHeader), 1, fp); - std::vector clusters_(header.n_clusters); - fread(clusters_.data(), sizeof(ClusterV2_), header.n_clusters, fp); - std::vector clusters; - for (auto &c : clusters_) { - ClusterV2 cluster; - cluster.cluster = std::move(c); - cluster.frame_number = header.frame_number; - clusters.push_back(cluster); - } - - return clusters; - } - std::vector> read(int n_frames) { - std::vector> clusters; - for (int i = 0; i < n_frames; i++) { - clusters.push_back(read()); - } - return clusters; - } - - size_t write(std::vector const &clusters) { - check_open(); - if (m_mode != "w") - throw std::runtime_error("File not opened in write mode"); - if (clusters.empty()) - return 0; - - ClusterHeader header; - header.frame_number = clusters[0].frame_number; - header.n_clusters = clusters.size(); - fwrite(&header, sizeof(ClusterHeader), 1, fp); - for (auto &c : clusters) { - fwrite(&c.cluster, sizeof(ClusterV2_), 1, fp); - } - return clusters.size(); - } - - size_t write(std::vector> const &clusters) { - check_open(); - if (m_mode != "w") - throw std::runtime_error("File not opened in write mode"); - - size_t n_clusters = 0; - for (auto &c : clusters) { - n_clusters += write(c); - } - return n_clusters; - } - - int seek_to_begin() { return fseek(fp, 0, SEEK_SET); } - int seek_to_end() { return fseek(fp, 0, SEEK_END); } - - int32_t frame_number() { - auto pos = ftell(fp); - ClusterHeader header; - fread(&header, sizeof(ClusterHeader), 1, fp); - fseek(fp, pos, SEEK_SET); - return header.frame_number; - } - - void close() { - if (fp) { - fclose(fp); - fp = nullptr; - } - } -}; -} // namespace aare \ No newline at end of file diff --git a/include/aare/ClusterFinder.hpp b/include/aare/ClusterFinder.hpp index 84b207b..ea11162 100644 --- a/include/aare/ClusterFinder.hpp +++ b/include/aare/ClusterFinder.hpp @@ -10,17 +10,19 @@ namespace aare { -template +template , + typename FRAME_TYPE = uint16_t, typename PEDESTAL_TYPE = double> class ClusterFinder { Shape<2> m_image_size; - const int m_cluster_sizeX; - const int m_cluster_sizeY; const PEDESTAL_TYPE m_nSigma; const PEDESTAL_TYPE c2; const PEDESTAL_TYPE c3; Pedestal m_pedestal; - ClusterVector m_clusters; + ClusterVector m_clusters; + + static const uint8_t ClusterSizeX = ClusterType::cluster_size_x; + static const uint8_t ClusterSizeY = ClusterType::cluster_size_y; + using CT = typename ClusterType::value_type; public: /** @@ -31,15 +33,12 @@ class ClusterFinder { * @param capacity initial capacity of the cluster vector * */ - ClusterFinder(Shape<2> image_size, Shape<2> cluster_size, - PEDESTAL_TYPE nSigma = 5.0, size_t capacity = 1000000) - : m_image_size(image_size), m_cluster_sizeX(cluster_size[0]), - m_cluster_sizeY(cluster_size[1]), - m_nSigma(nSigma), - c2(sqrt((m_cluster_sizeY + 1) / 2 * (m_cluster_sizeX + 1) / 2)), - c3(sqrt(m_cluster_sizeX * m_cluster_sizeY)), - m_pedestal(image_size[0], image_size[1]), - m_clusters(m_cluster_sizeX, m_cluster_sizeY, capacity) {}; + ClusterFinder(Shape<2> image_size, PEDESTAL_TYPE nSigma = 5.0, + size_t capacity = 1000000) + : m_image_size(image_size), m_nSigma(nSigma), + c2(sqrt((ClusterSizeY + 1) / 2 * (ClusterSizeX + 1) / 2)), + c3(sqrt(ClusterSizeX * ClusterSizeY)), + m_pedestal(image_size[0], image_size[1]), m_clusters(capacity) {}; void push_pedestal_frame(NDView frame) { m_pedestal.push(frame); @@ -56,23 +55,28 @@ class ClusterFinder { * same capacity as the old one * */ - ClusterVector steal_clusters(bool realloc_same_capacity = false) { - 
ClusterVector tmp = std::move(m_clusters); + ClusterVector + steal_clusters(bool realloc_same_capacity = false) { + ClusterVector tmp = std::move(m_clusters); if (realloc_same_capacity) - m_clusters = ClusterVector(m_cluster_sizeX, m_cluster_sizeY, - tmp.capacity()); + m_clusters = ClusterVector(tmp.capacity()); else - m_clusters = ClusterVector(m_cluster_sizeX, m_cluster_sizeY); + m_clusters = ClusterVector{}; return tmp; } void find_clusters(NDView frame, uint64_t frame_number = 0) { // // TODO! deal with even size clusters // // currently 3,3 -> +/- 1 // // 4,4 -> +/- 2 - int dy = m_cluster_sizeY / 2; - int dx = m_cluster_sizeX / 2; + int dy = ClusterSizeY / 2; + int dx = ClusterSizeX / 2; + int has_center_pixel_x = + ClusterSizeX % + 2; // for even sized clusters there is no proper cluster center and + // even amount of pixels around the center + int has_center_pixel_y = ClusterSizeY % 2; + m_clusters.set_frame_number(frame_number); - std::vector cluster_data(m_cluster_sizeX * m_cluster_sizeY); for (int iy = 0; iy < frame.shape(0); iy++) { for (int ix = 0; ix < frame.shape(1); ix++) { @@ -87,8 +91,8 @@ class ClusterFinder { continue; // NEGATIVE_PEDESTAL go to next pixel // TODO! No pedestal update??? - for (int ir = -dy; ir < dy + 1; ir++) { - for (int ic = -dx; ic < dx + 1; ic++) { + for (int ir = -dy; ir < dy + has_center_pixel_y; ir++) { + for (int ic = -dx; ic < dx + has_center_pixel_x; ic++) { if (ix + ic >= 0 && ix + ic < frame.shape(1) && iy + ir >= 0 && iy + ir < frame.shape(0)) { PEDESTAL_TYPE val = @@ -109,27 +113,33 @@ class ClusterFinder { // pass } else { // m_pedestal.push(iy, ix, frame(iy, ix)); // Safe option - m_pedestal.push_fast(iy, ix, frame(iy, ix)); // Assume we have reached n_samples in the pedestal, slight performance improvement - continue; // It was a pedestal value nothing to store + m_pedestal.push_fast( + iy, ix, + frame(iy, + ix)); // Assume we have reached n_samples in the + // pedestal, slight performance improvement + continue; // It was a pedestal value nothing to store } // Store cluster if (value == max) { - // Zero out the cluster data - std::fill(cluster_data.begin(), cluster_data.end(), 0); + ClusterType cluster{}; + cluster.x = ix; + cluster.y = iy; // Fill the cluster data since we have a photon to store // It's worth redoing the look since most of the time we // don't have a photon int i = 0; - for (int ir = -dy; ir < dy + 1; ir++) { - for (int ic = -dx; ic < dx + 1; ic++) { + for (int ir = -dy; ir < dy + has_center_pixel_y; ir++) { + for (int ic = -dx; ic < dx + has_center_pixel_y; ic++) { if (ix + ic >= 0 && ix + ic < frame.shape(1) && iy + ir >= 0 && iy + ir < frame.shape(0)) { CT tmp = static_cast(frame(iy + ir, ix + ic)) - - m_pedestal.mean(iy + ir, ix + ic); - cluster_data[i] = + static_cast( + m_pedestal.mean(iy + ir, ix + ic)); + cluster.data[i] = tmp; // Watch for out of bounds access i++; } @@ -137,9 +147,7 @@ class ClusterFinder { } // Add the cluster to the output ClusterVector - m_clusters.push_back( - ix, iy, - reinterpret_cast(cluster_data.data())); + m_clusters.push_back(cluster); } } } diff --git a/include/aare/ClusterFinderMT.hpp b/include/aare/ClusterFinderMT.hpp index 1efb843..2dfb279 100644 --- a/include/aare/ClusterFinderMT.hpp +++ b/include/aare/ClusterFinderMT.hpp @@ -30,14 +30,17 @@ struct FrameWrapper { * @tparam PEDESTAL_TYPE type of the pedestal data * @tparam CT type of the cluster data */ -template +template , + typename FRAME_TYPE = uint16_t, typename PEDESTAL_TYPE = double> class ClusterFinderMT { + + protected: 
+ using CT = typename ClusterType::value_type; size_t m_current_thread{0}; size_t m_n_threads{0}; - using Finder = ClusterFinder; + using Finder = ClusterFinder; using InputQueue = ProducerConsumerQueue; - using OutputQueue = ProducerConsumerQueue>; + using OutputQueue = ProducerConsumerQueue>; std::vector> m_input_queues; std::vector> m_output_queues; @@ -48,6 +51,7 @@ class ClusterFinderMT { std::thread m_collect_thread; std::chrono::milliseconds m_default_wait{1}; + private: std::atomic m_stop_requested{false}; std::atomic m_processing_threads_stopped{true}; @@ -66,7 +70,8 @@ class ClusterFinderMT { switch (frame->type) { case FrameType::DATA: cf->find_clusters(frame->data.view(), frame->frame_number); - m_output_queues[thread_id]->write(cf->steal_clusters(realloc_same_capacity)); + m_output_queues[thread_id]->write( + cf->steal_clusters(realloc_same_capacity)); break; case FrameType::PEDESTAL: @@ -114,28 +119,32 @@ class ClusterFinderMT { * expected number of clusters in a frame per frame. * @param n_threads number of threads to use */ - ClusterFinderMT(Shape<2> image_size, Shape<2> cluster_size, - PEDESTAL_TYPE nSigma = 5.0, size_t capacity = 2000, - size_t n_threads = 3) + ClusterFinderMT(Shape<2> image_size, PEDESTAL_TYPE nSigma = 5.0, + size_t capacity = 2000, size_t n_threads = 3) : m_n_threads(n_threads) { + for (size_t i = 0; i < n_threads; i++) { m_cluster_finders.push_back( - std::make_unique>( - image_size, cluster_size, nSigma, capacity)); + std::make_unique< + ClusterFinder>( + image_size, nSigma, capacity)); } for (size_t i = 0; i < n_threads; i++) { m_input_queues.emplace_back(std::make_unique(200)); m_output_queues.emplace_back(std::make_unique(200)); } - //TODO! Should we start automatically? + // TODO! Should we start automatically? start(); } /** * @brief Return the sink queue where all the clusters are collected - * @warning You need to empty this queue otherwise the cluster finder will wait forever + * @warning You need to empty this queue otherwise the cluster finder will + * wait forever */ - ProducerConsumerQueue> *sink() { return &m_sink; } + ProducerConsumerQueue> *sink() { + return &m_sink; + } /** * @brief Start all processing threads diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp index b91278c..9d575d9 100644 --- a/include/aare/ClusterVector.hpp +++ b/include/aare/ClusterVector.hpp @@ -1,4 +1,5 @@ #pragma once +#include "aare/Cluster.hpp" //TODO maybe store in seperate file !!! #include #include #include @@ -13,292 +14,157 @@ namespace aare { +template >> +class ClusterVector; // Forward declaration + /** - * @brief ClusterVector is a container for clusters of various sizes. It uses a - * contiguous memory buffer to store the clusters. It is templated on the data - * type and the coordinate type of the clusters. + * @brief ClusterVector is a container for clusters of various sizes. It + * uses a contiguous memory buffer to store the clusters. It is templated on + * the data type and the coordinate type of the clusters. * @note push_back can invalidate pointers to elements in the container - * @warning ClusterVector is currently move only to catch unintended copies, but - * this might change since there are probably use cases where copying is needed. + * @warning ClusterVector is currently move only to catch unintended copies, + * but this might change since there are probably use cases where copying is + * needed. 
* @tparam T data type of the pixels in the cluster * @tparam CoordType data type of the x and y coordinates of the cluster * (normally int16_t) */ -template class ClusterVector { - using value_type = T; - size_t m_cluster_size_x; - size_t m_cluster_size_y; - std::byte *m_data{}; - size_t m_size{0}; - size_t m_capacity; - uint64_t m_frame_number{0}; // TODO! Check frame number size and type - /* - Format string used in the python bindings to create a numpy - array from the buffer - = - native byte order - h - short - d - double - i - int - */ - constexpr static char m_fmt_base[] = "=h:x:\nh:y:\n({},{}){}:data:"; +template +class ClusterVector> { + + std::vector> m_data{}; + int32_t m_frame_number{0}; // TODO! Check frame number size and type public: + using value_type = T; + using ClusterType = Cluster; + /** * @brief Construct a new ClusterVector object - * @param cluster_size_x size of the cluster in x direction - * @param cluster_size_y size of the cluster in y direction * @param capacity initial capacity of the buffer in number of clusters * @param frame_number frame number of the clusters. Default is 0, which is * also used to indicate that the clusters come from many frames */ - ClusterVector(size_t cluster_size_x = 3, size_t cluster_size_y = 3, - size_t capacity = 1024, uint64_t frame_number = 0) - : m_cluster_size_x(cluster_size_x), m_cluster_size_y(cluster_size_y), - m_capacity(capacity), m_frame_number(frame_number) { - allocate_buffer(capacity); + ClusterVector(size_t capacity = 1024, uint64_t frame_number = 0) + : m_frame_number(frame_number) { + m_data.reserve(capacity); } - ~ClusterVector() { delete[] m_data; } - // Move constructor ClusterVector(ClusterVector &&other) noexcept - : m_cluster_size_x(other.m_cluster_size_x), - m_cluster_size_y(other.m_cluster_size_y), m_data(other.m_data), - m_size(other.m_size), m_capacity(other.m_capacity), - m_frame_number(other.m_frame_number) { - other.m_data = nullptr; - other.m_size = 0; - other.m_capacity = 0; + : m_data(other.m_data), m_frame_number(other.m_frame_number) { + other.m_data.clear(); } // Move assignment operator ClusterVector &operator=(ClusterVector &&other) noexcept { if (this != &other) { - delete[] m_data; - m_cluster_size_x = other.m_cluster_size_x; - m_cluster_size_y = other.m_cluster_size_y; m_data = other.m_data; - m_size = other.m_size; - m_capacity = other.m_capacity; m_frame_number = other.m_frame_number; - other.m_data = nullptr; - other.m_size = 0; - other.m_capacity = 0; + other.m_data.clear(); other.m_frame_number = 0; } return *this; } - /** - * @brief Reserve space for at least capacity clusters - * @param capacity number of clusters to reserve space for - * @note If capacity is less than the current capacity, the function does - * nothing. 
- */ - void reserve(size_t capacity) { - if (capacity > m_capacity) { - allocate_buffer(capacity); - } - } - - /** - * @brief Add a cluster to the vector - * @param x x-coordinate of the cluster - * @param y y-coordinate of the cluster - * @param data pointer to the data of the cluster - * @warning The data pointer must point to a buffer of size cluster_size_x * - * cluster_size_y * sizeof(T) - */ - void push_back(CoordType x, CoordType y, const std::byte *data) { - if (m_size == m_capacity) { - allocate_buffer(m_capacity * 2); - } - std::byte *ptr = element_ptr(m_size); - *reinterpret_cast(ptr) = x; - ptr += sizeof(CoordType); - *reinterpret_cast(ptr) = y; - ptr += sizeof(CoordType); - - std::copy(data, data + m_cluster_size_x * m_cluster_size_y * sizeof(T), - ptr); - m_size++; - } - ClusterVector &operator+=(const ClusterVector &other) { - if (m_size + other.m_size > m_capacity) { - allocate_buffer(m_capacity + other.m_size); - } - std::copy(other.m_data, other.m_data + other.m_size * item_size(), - m_data + m_size * item_size()); - m_size += other.m_size; - return *this; - } - /** * @brief Sum the pixels in each cluster * @return std::vector vector of sums for each cluster */ std::vector sum() { - std::vector sums(m_size); - const size_t stride = item_size(); - const size_t n_pixels = m_cluster_size_x * m_cluster_size_y; - std::byte *ptr = m_data + 2 * sizeof(CoordType); // skip x and y + std::vector sums(m_data.size()); + + std::transform( + m_data.begin(), m_data.end(), sums.begin(), + [](const ClusterType &cluster) { return cluster.sum(); }); - for (size_t i = 0; i < m_size; i++) { - sums[i] = - std::accumulate(reinterpret_cast(ptr), - reinterpret_cast(ptr) + n_pixels, T{}); - ptr += stride; - } return sums; } /** - * @brief Return the maximum sum of the 2x2 subclusters in each cluster + * @brief Sum the pixels in the 2x2 subcluster with the biggest pixel sum in + * each cluster * @return std::vector vector of sums for each cluster - * @throws std::runtime_error if the cluster size is not 3x3 - * @warning Only 3x3 clusters are supported for the 2x2 sum. */ std::vector sum_2x2() { - std::vector sums(m_size); - const size_t stride = item_size(); + std::vector sums_2x2(m_data.size()); - if (m_cluster_size_x != 3 || m_cluster_size_y != 3) { - throw std::runtime_error( - "Only 3x3 clusters are supported for the 2x2 sum."); - } - std::byte *ptr = m_data + 2 * sizeof(CoordType); // skip x and y + std::transform(m_data.begin(), m_data.end(), sums_2x2.begin(), + [](const ClusterType &cluster) { + return cluster.max_sum_2x2().first; + }); - for (size_t i = 0; i < m_size; i++) { - std::array total; - auto T_ptr = reinterpret_cast(ptr); - total[0] = T_ptr[0] + T_ptr[1] + T_ptr[3] + T_ptr[4]; - total[1] = T_ptr[1] + T_ptr[2] + T_ptr[4] + T_ptr[5]; - total[2] = T_ptr[3] + T_ptr[4] + T_ptr[6] + T_ptr[7]; - total[3] = T_ptr[4] + T_ptr[5] + T_ptr[7] + T_ptr[8]; + return sums_2x2; + } - sums[i] = *std::max_element(total.begin(), total.end()); - ptr += stride; - } + /** + * @brief Reserve space for at least capacity clusters + * @param capacity number of clusters to reserve space for + * @note If capacity is less than the current capacity, the function does + * nothing. 
+ */ + void reserve(size_t capacity) { m_data.reserve(capacity); } - return sums; + void resize(size_t size) { m_data.resize(size); } + + void push_back(const ClusterType &cluster) { m_data.push_back(cluster); } + + ClusterVector &operator+=(const ClusterVector &other) { + m_data.insert(m_data.end(), other.begin(), other.end()); + + return *this; } /** * @brief Return the number of clusters in the vector */ - size_t size() const { return m_size; } + size_t size() const { return m_data.size(); } + + uint8_t cluster_size_x() const { return ClusterSizeX; } + + uint8_t cluster_size_y() const { return ClusterSizeY; } /** * @brief Return the capacity of the buffer in number of clusters. This is * the number of clusters that can be stored in the current buffer without * reallocation. */ - size_t capacity() const { return m_capacity; } + size_t capacity() const { return m_data.capacity(); } + + auto begin() const { return m_data.begin(); } + + auto end() const { return m_data.end(); } /** * @brief Return the size in bytes of a single cluster */ size_t item_size() const { - return 2 * sizeof(CoordType) + - m_cluster_size_x * m_cluster_size_y * sizeof(T); + return sizeof(ClusterType); // 2 * sizeof(CoordType) + ClusterSizeX * + // ClusterSizeY * sizeof(T); } - /** - * @brief Return the offset in bytes for the i-th cluster - */ - size_t element_offset(size_t i) const { return item_size() * i; } - - /** - * @brief Return a pointer to the i-th cluster - */ - std::byte *element_ptr(size_t i) { return m_data + element_offset(i); } - - /** - * @brief Return a pointer to the i-th cluster - */ - const std::byte *element_ptr(size_t i) const { - return m_data + element_offset(i); - } - - size_t cluster_size_x() const { return m_cluster_size_x; } - size_t cluster_size_y() const { return m_cluster_size_y; } - - std::byte *data() { return m_data; } - std::byte const *data() const { return m_data; } + ClusterType *data() { return m_data.data(); } + ClusterType const *data() const { return m_data.data(); } /** * @brief Return a reference to the i-th cluster casted to type V * @tparam V type of the cluster */ - template V &at(size_t i) { - return *reinterpret_cast(element_ptr(i)); - } + ClusterType &operator[](size_t i) { return m_data[i]; } - template const V &at(size_t i) const { - return *reinterpret_cast(element_ptr(i)); - } - - const std::string_view fmt_base() const { - // TODO! how do we match on coord_t? - return m_fmt_base; - } + const ClusterType &operator[](size_t i) const { return m_data[i]; } /** * @brief Return the frame number of the clusters. 0 is used to indicate * that the clusters come from many frames */ - uint64_t frame_number() const { return m_frame_number; } + int32_t frame_number() const { return m_frame_number; } - void set_frame_number(uint64_t frame_number) { + void set_frame_number(int32_t frame_number) { m_frame_number = frame_number; } - - /** - * @brief Resize the vector to contain new_size clusters. If new_size is - * greater than the current capacity, a new buffer is allocated. If the size - * is smaller no memory is freed, size is just updated. - * @param new_size new size of the vector - * @warning The additional clusters are not initialized - */ - void resize(size_t new_size) { - // TODO! Should we initialize the new clusters? - if (new_size > m_capacity) { - allocate_buffer(new_size); - } - m_size = new_size; - } - - void apply_gain_map(const NDView gain_map){ - //in principle we need to know the size of the image for this lookup - //TODO! 
check orientations - std::array xcorr = {-1, 0, 1, -1, 0, 1, -1, 0, 1}; - std::array ycorr = {-1, -1, -1, 0, 0, 0, 1, 1, 1}; - for (size_t i=0; i(i); - - if (cl.x > 0 && cl.y > 0 && cl.x < gain_map.shape(1)-1 && cl.y < gain_map.shape(0)-1){ - for (size_t j=0; j<9; j++){ - size_t x = cl.x + xcorr[j]; - size_t y = cl.y + ycorr[j]; - cl.data[j] = static_cast(cl.data[j] * gain_map(y, x)); - } - }else{ - memset(cl.data, 0, 9*sizeof(T)); //clear edge clusters - } - - - } - } - - private: - void allocate_buffer(size_t new_capacity) { - size_t num_bytes = item_size() * new_capacity; - std::byte *new_data = new std::byte[num_bytes]{}; - std::copy(m_data, m_data + item_size() * m_size, new_data); - delete[] m_data; - m_data = new_data; - m_capacity = new_capacity; - } }; } // namespace aare \ No newline at end of file diff --git a/include/aare/FilePtr.hpp b/include/aare/FilePtr.hpp index 4c88ecb..4ddc76e 100644 --- a/include/aare/FilePtr.hpp +++ b/include/aare/FilePtr.hpp @@ -18,8 +18,8 @@ class FilePtr { FilePtr(FilePtr &&other); FilePtr &operator=(FilePtr &&other); FILE *get(); - int64_t tell(); - void seek(int64_t offset, int whence = SEEK_SET) { + ssize_t tell(); + void seek(ssize_t offset, int whence = SEEK_SET) { if (fseek(fp_, offset, whence) != 0) throw std::runtime_error("Error seeking in file"); } diff --git a/include/aare/Fit.hpp b/include/aare/Fit.hpp index 6fd10aa..eb9ac22 100644 --- a/include/aare/Fit.hpp +++ b/include/aare/Fit.hpp @@ -15,6 +15,12 @@ NDArray gaus(NDView x, NDView par); double pol1(const double x, const double *par); NDArray pol1(NDView x, NDView par); +double scurve(const double x, const double *par); +NDArray scurve(NDView x, NDView par); + +double scurve2(const double x, const double *par); +NDArray scurve2(NDView x, NDView par); + } // namespace func @@ -25,6 +31,9 @@ std::array gaus_init_par(const NDView x, const NDView pol1_init_par(const NDView x, const NDView y); +std::array scurve_init_par(const NDView x, const NDView y); +std::array scurve2_init_par(const NDView x, const NDView y); + static constexpr int DEFAULT_NUM_THREADS = 4; /** @@ -38,7 +47,7 @@ NDArray fit_gaus(NDView x, NDView y); /** * @brief Fit a 1D Gaussian to each pixel. Data layout [row, col, values] * @param x x values - * @param y y vales, layout [row, col, values] + * @param y y values, layout [row, col, values] * @param n_threads number of threads to use */ @@ -51,7 +60,7 @@ NDArray fit_gaus(NDView x, NDView y, /** * @brief Fit a 1D Gaussian with error estimates * @param x x values - * @param y y vales, layout [row, col, values] + * @param y y values, layout [row, col, values] * @param y_err error in y, layout [row, col, values] * @param par_out output parameters * @param par_err_out output error parameters @@ -64,7 +73,7 @@ void fit_gaus(NDView x, NDView y, NDView y_err, * @brief Fit a 1D Gaussian to each pixel with error estimates. 
Data layout * [row, col, values] * @param x x values - * @param y y vales, layout [row, col, values] + * @param y y values, layout [row, col, values] * @param y_err error in y, layout [row, col, values] * @param par_out output parameters, layout [row, col, values] * @param par_err_out output parameter errors, layout [row, col, values] @@ -88,5 +97,19 @@ void fit_pol1(NDView x, NDView y, NDView y_err, NDView par_out, NDView par_err_out,NDView chi2_out, int n_threads = DEFAULT_NUM_THREADS); +NDArray fit_scurve(NDView x, NDView y); +NDArray fit_scurve(NDView x, NDView y, int n_threads); +void fit_scurve(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, double& chi2); +void fit_scurve(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, NDView chi2_out, + int n_threads); +NDArray fit_scurve2(NDView x, NDView y); +NDArray fit_scurve2(NDView x, NDView y, int n_threads); +void fit_scurve2(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, double& chi2); +void fit_scurve2(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, NDView chi2_out, + int n_threads); } // namespace aare \ No newline at end of file diff --git a/include/aare/Frame.hpp b/include/aare/Frame.hpp index 5ce63ac..02ea82f 100644 --- a/include/aare/Frame.hpp +++ b/include/aare/Frame.hpp @@ -107,8 +107,8 @@ class Frame { * @return NDView */ template NDView view() { - std::array shape = {static_cast(m_rows), - static_cast(m_cols)}; + std::array shape = {static_cast(m_rows), + static_cast(m_cols)}; T *data = reinterpret_cast(m_data); return NDView(data, shape); } diff --git a/include/aare/GainMap.hpp b/include/aare/GainMap.hpp new file mode 100644 index 0000000..ac558d0 --- /dev/null +++ b/include/aare/GainMap.hpp @@ -0,0 +1,68 @@ +/************************************************ + * @file GainMap.hpp + * @short function to apply gain map of image size to a vector of clusters - + *note stored gainmap is inverted for efficient aaplication to images + ***********************************************/ + +#pragma once +#include "aare/Cluster.hpp" +#include "aare/ClusterVector.hpp" +#include "aare/NDArray.hpp" +#include "aare/NDView.hpp" +#include + +namespace aare { + +class InvertedGainMap { + + public: + explicit InvertedGainMap(const NDArray &gain_map) + : m_gain_map(gain_map) { + for (auto &item : m_gain_map) { + item = 1.0 / item; + } + }; + + explicit InvertedGainMap(const NDView gain_map) { + m_gain_map = NDArray(gain_map); + for (auto &item : m_gain_map) { + item = 1.0 / item; + } + } + + template >> + void apply_gain_map(ClusterVector &clustervec) { + // in principle we need to know the size of the image for this lookup + size_t ClusterSizeX = clustervec.cluster_size_x(); + size_t ClusterSizeY = clustervec.cluster_size_y(); + + using T = typename ClusterVector::value_type; + + int64_t index_cluster_center_x = ClusterSizeX / 2; + int64_t index_cluster_center_y = ClusterSizeY / 2; + for (size_t i = 0; i < clustervec.size(); i++) { + auto &cl = clustervec[i]; + + if (cl.x > 0 && cl.y > 0 && cl.x < m_gain_map.shape(1) - 1 && + cl.y < m_gain_map.shape(0) - 1) { + for (size_t j = 0; j < ClusterSizeX * ClusterSizeY; j++) { + size_t x = cl.x + j % ClusterSizeX - index_cluster_center_x; + size_t y = cl.y + j / ClusterSizeX - index_cluster_center_y; + cl.data[j] = static_cast( + static_cast(cl.data[j]) * + m_gain_map( + y, x)); // cast after conversion to keep precision + } + } else { + // clear edge clusters + cl.data.fill(0); + } + } + } + + private: + 
NDArray m_gain_map{}; +}; + +} // end of namespace aare \ No newline at end of file diff --git a/include/aare/Interpolator.hpp b/include/aare/Interpolator.hpp index 4905bce..8e65f38 100644 --- a/include/aare/Interpolator.hpp +++ b/include/aare/Interpolator.hpp @@ -1,29 +1,130 @@ #pragma once + +#include "aare/CalculateEta.hpp" +#include "aare/Cluster.hpp" +#include "aare/ClusterFile.hpp" //Cluster_3x3 +#include "aare/ClusterVector.hpp" #include "aare/NDArray.hpp" #include "aare/NDView.hpp" -#include "aare/ClusterVector.hpp" -#include "aare/ClusterFile.hpp" //Cluster_3x3 -namespace aare{ +#include "aare/algorithm.hpp" -struct Photon{ +namespace aare { + +struct Photon { double x; double y; double energy; }; -class Interpolator{ +class Interpolator { NDArray m_ietax; NDArray m_ietay; NDArray m_etabinsx; NDArray m_etabinsy; NDArray m_energy_bins; - public: - Interpolator(NDView etacube, NDView xbins, NDView ybins, NDView ebins); - NDArray get_ietax(){return m_ietax;} - NDArray get_ietay(){return m_ietay;} - std::vector interpolate(const ClusterVector& clusters); + public: + Interpolator(NDView etacube, NDView xbins, + NDView ybins, NDView ebins); + NDArray get_ietax() { return m_ietax; } + NDArray get_ietay() { return m_ietay; } + + template >> + std::vector interpolate(const ClusterVector &clusters); }; +// TODO: generalize to support any clustertype!!! otherwise add std::enable_if_t +// to only take Cluster2x2 and Cluster3x3 +template +std::vector +Interpolator::interpolate(const ClusterVector &clusters) { + std::vector photons; + photons.reserve(clusters.size()); + + if (clusters.cluster_size_x() == 3 || clusters.cluster_size_y() == 3) { + for (const ClusterType &cluster : clusters) { + + auto eta = calculate_eta2(cluster); + + Photon photon; + photon.x = cluster.x; + photon.y = cluster.y; + photon.energy = static_cast(eta.sum); + + // auto ie = nearest_index(m_energy_bins, photon.energy)-1; + // auto ix = nearest_index(m_etabinsx, eta.x)-1; + // auto iy = nearest_index(m_etabinsy, eta.y)-1; + // Finding the index of the last element that is smaller + // should work fine as long as we have many bins + auto ie = last_smaller(m_energy_bins, photon.energy); + auto ix = last_smaller(m_etabinsx, eta.x); + auto iy = last_smaller(m_etabinsy, eta.y); + + // fmt::print("ex: {}, ix: {}, iy: {}\n", ie, ix, iy); + + double dX, dY; + // cBottomLeft = 0, + // cBottomRight = 1, + // cTopLeft = 2, + // cTopRight = 3 + switch (static_cast(eta.c)) { + case corner::cTopLeft: + dX = -1.; + dY = 0; + break; + case corner::cTopRight:; + dX = 0; + dY = 0; + break; + case corner::cBottomLeft: + dX = -1.; + dY = -1.; + break; + case corner::cBottomRight: + dX = 0.; + dY = -1.; + break; + } + photon.x += m_ietax(ix, iy, ie) * 2 + dX; + photon.y += m_ietay(ix, iy, ie) * 2 + dY; + photons.push_back(photon); + } + } else if (clusters.cluster_size_x() == 2 || + clusters.cluster_size_y() == 2) { + for (const ClusterType &cluster : clusters) { + auto eta = calculate_eta2(cluster); + + Photon photon; + photon.x = cluster.x; + photon.y = cluster.y; + photon.energy = static_cast(eta.sum); + + // Now do some actual interpolation. 
+ // Find which energy bin the cluster is in + // auto ie = nearest_index(m_energy_bins, photon.energy)-1; + // auto ix = nearest_index(m_etabinsx, eta.x)-1; + // auto iy = nearest_index(m_etabinsy, eta.y)-1; + // Finding the index of the last element that is smaller + // should work fine as long as we have many bins + auto ie = last_smaller(m_energy_bins, photon.energy); + auto ix = last_smaller(m_etabinsx, eta.x); + auto iy = last_smaller(m_etabinsy, eta.y); + + photon.x += m_ietax(ix, iy, ie) * + 2; // eta goes between 0 and 1 but we could move the hit + // anywhere in the 2x2 + photon.y += m_ietay(ix, iy, ie) * 2; + photons.push_back(photon); + } + + } else { + throw std::runtime_error( + "Only 3x3 and 2x2 clusters are supported for interpolation"); + } + + return photons; +} + } // namespace aare \ No newline at end of file diff --git a/include/aare/NDArray.hpp b/include/aare/NDArray.hpp index ceb1e0b..3c08a3c 100644 --- a/include/aare/NDArray.hpp +++ b/include/aare/NDArray.hpp @@ -22,10 +22,10 @@ TODO! Add expression templates for operators namespace aare { -template +template class NDArray : public ArrayExpr, Ndim> { - std::array shape_; - std::array strides_; + std::array shape_; + std::array strides_; size_t size_{}; T *data_; @@ -42,7 +42,7 @@ class NDArray : public ArrayExpr, Ndim> { * * @param shape shape of the new NDArray */ - explicit NDArray(std::array shape) + explicit NDArray(std::array shape) : shape_(shape), strides_(c_strides(shape_)), size_(std::accumulate(shape_.begin(), shape_.end(), 1, std::multiplies<>())), @@ -55,7 +55,7 @@ class NDArray : public ArrayExpr, Ndim> { * @param shape shape of the new array * @param value value to initialize the array with */ - NDArray(std::array shape, T value) : NDArray(shape) { + NDArray(std::array shape, T value) : NDArray(shape) { this->operator=(value); } @@ -186,22 +186,22 @@ class NDArray : public ArrayExpr, Ndim> { } // TODO! is int the right type for index? 
- T &operator()(int64_t i) { return data_[i]; } - const T &operator()(int64_t i) const { return data_[i]; } + T &operator()(ssize_t i) { return data_[i]; } + const T &operator()(ssize_t i) const { return data_[i]; } - T &operator[](int64_t i) { return data_[i]; } - const T &operator[](int64_t i) const { return data_[i]; } + T &operator[](ssize_t i) { return data_[i]; } + const T &operator[](ssize_t i) const { return data_[i]; } T *data() { return data_; } std::byte *buffer() { return reinterpret_cast(data_); } ssize_t size() const { return static_cast(size_); } size_t total_bytes() const { return size_ * sizeof(T); } - std::array shape() const noexcept { return shape_; } - int64_t shape(int64_t i) const noexcept { return shape_[i]; } - std::array strides() const noexcept { return strides_; } + std::array shape() const noexcept { return shape_; } + ssize_t shape(ssize_t i) const noexcept { return shape_[i]; } + std::array strides() const noexcept { return strides_; } size_t bitdepth() const noexcept { return sizeof(T) * 8; } - std::array byte_strides() const noexcept { + std::array byte_strides() const noexcept { auto byte_strides = strides_; for (auto &val : byte_strides) val *= sizeof(T); @@ -228,7 +228,7 @@ class NDArray : public ArrayExpr, Ndim> { }; // Move assign -template +template NDArray & NDArray::operator=(NDArray &&other) noexcept { if (this != &other) { @@ -242,7 +242,7 @@ NDArray::operator=(NDArray &&other) noexcept { return *this; } -template +template NDArray &NDArray::operator+=(const NDArray &other) { // check shape if (shape_ == other.shape_) { @@ -254,7 +254,7 @@ NDArray &NDArray::operator+=(const NDArray &other) { throw(std::runtime_error("Shape of ImageDatas must match")); } -template +template NDArray &NDArray::operator-=(const NDArray &other) { // check shape if (shape_ == other.shape_) { @@ -266,7 +266,7 @@ NDArray &NDArray::operator-=(const NDArray &other) { throw(std::runtime_error("Shape of ImageDatas must match")); } -template +template NDArray &NDArray::operator*=(const NDArray &other) { // check shape if (shape_ == other.shape_) { @@ -278,14 +278,14 @@ NDArray &NDArray::operator*=(const NDArray &other) { throw(std::runtime_error("Shape of ImageDatas must match")); } -template +template NDArray &NDArray::operator&=(const T &mask) { for (auto it = begin(); it != end(); ++it) *it &= mask; return *this; } -template +template NDArray NDArray::operator>(const NDArray &other) { if (shape_ == other.shape_) { NDArray result{shape_}; @@ -297,7 +297,7 @@ NDArray NDArray::operator>(const NDArray &other) { throw(std::runtime_error("Shape of ImageDatas must match")); } -template +template NDArray &NDArray::operator=(const NDArray &other) { if (this != &other) { delete[] data_; @@ -310,7 +310,7 @@ NDArray &NDArray::operator=(const NDArray &other) { return *this; } -template +template bool NDArray::operator==(const NDArray &other) const { if (shape_ != other.shape_) return false; @@ -322,23 +322,23 @@ bool NDArray::operator==(const NDArray &other) const { return true; } -template +template bool NDArray::operator!=(const NDArray &other) const { return !((*this) == other); } -template +template NDArray &NDArray::operator++() { for (uint32_t i = 0; i < size_; ++i) data_[i] += 1; return *this; } -template +template NDArray &NDArray::operator=(const T &value) { std::fill_n(data_, size_, value); return *this; } -template +template NDArray &NDArray::operator+=(const T &value) { for (uint32_t i = 0; i < size_; ++i) data_[i] += value; @@ -348,57 +348,57 @@ NDArray 
&NDArray::operator+=(const T &value) { -template +template NDArray NDArray::operator+(const T &value) { NDArray result = *this; result += value; return result; } -template +template NDArray &NDArray::operator-=(const T &value) { for (uint32_t i = 0; i < size_; ++i) data_[i] -= value; return *this; } -template +template NDArray NDArray::operator-(const T &value) { NDArray result = *this; result -= value; return result; } -template +template NDArray &NDArray::operator/=(const T &value) { for (uint32_t i = 0; i < size_; ++i) data_[i] /= value; return *this; } -template +template NDArray NDArray::operator/(const T &value) { NDArray result = *this; result /= value; return result; } -template +template NDArray &NDArray::operator*=(const T &value) { for (uint32_t i = 0; i < size_; ++i) data_[i] *= value; return *this; } -template +template NDArray NDArray::operator*(const T &value) { NDArray result = *this; result *= value; return result; } -// template void NDArray::Print() { +// template void NDArray::Print() { // if (shape_[0] < 20 && shape_[1] < 20) // Print_all(); // else // Print_some(); // } -template +template std::ostream &operator<<(std::ostream &os, const NDArray &arr) { for (auto row = 0; row < arr.shape(0); ++row) { for (auto col = 0; col < arr.shape(1); ++col) { @@ -410,7 +410,7 @@ std::ostream &operator<<(std::ostream &os, const NDArray &arr) { return os; } -template void NDArray::Print_all() { +template void NDArray::Print_all() { for (auto row = 0; row < shape_[0]; ++row) { for (auto col = 0; col < shape_[1]; ++col) { std::cout << std::setw(3); @@ -419,7 +419,7 @@ template void NDArray::Print_all() { std::cout << "\n"; } } -template void NDArray::Print_some() { +template void NDArray::Print_some() { for (auto row = 0; row < 5; ++row) { for (auto col = 0; col < 5; ++col) { std::cout << std::setw(7); @@ -429,7 +429,7 @@ template void NDArray::Print_some() { } } -template +template void save(NDArray &img, std::string &pathname) { std::ofstream f; f.open(pathname, std::ios::binary); @@ -437,9 +437,9 @@ void save(NDArray &img, std::string &pathname) { f.close(); } -template +template NDArray load(const std::string &pathname, - std::array shape) { + std::array shape) { NDArray img{shape}; std::ifstream f; f.open(pathname, std::ios::binary); diff --git a/include/aare/NDView.hpp b/include/aare/NDView.hpp index ddb5d1c..56054e2 100644 --- a/include/aare/NDView.hpp +++ b/include/aare/NDView.hpp @@ -14,10 +14,10 @@ #include namespace aare { -template using Shape = std::array; +template using Shape = std::array; // TODO! fix mismatch between signed and unsigned -template Shape make_shape(const std::vector &shape) { +template Shape make_shape(const std::vector &shape) { if (shape.size() != Ndim) throw std::runtime_error("Shape size mismatch"); Shape arr; @@ -25,41 +25,41 @@ template Shape make_shape(const std::vector &shape) return arr; } -template int64_t element_offset(const Strides & /*unused*/) { return 0; } +template ssize_t element_offset(const Strides & /*unused*/) { return 0; } -template -int64_t element_offset(const Strides &strides, int64_t i, Ix... index) { +template +ssize_t element_offset(const Strides &strides, ssize_t i, Ix... 
index) { return i * strides[Dim] + element_offset(strides, index...); } -template std::array c_strides(const std::array &shape) { - std::array strides{}; +template std::array c_strides(const std::array &shape) { + std::array strides{}; std::fill(strides.begin(), strides.end(), 1); - for (int64_t i = Ndim - 1; i > 0; --i) { + for (ssize_t i = Ndim - 1; i > 0; --i) { strides[i - 1] = strides[i] * shape[i]; } return strides; } -template std::array make_array(const std::vector &vec) { +template std::array make_array(const std::vector &vec) { assert(vec.size() == Ndim); - std::array arr{}; + std::array arr{}; std::copy_n(vec.begin(), Ndim, arr.begin()); return arr; } -template class NDView : public ArrayExpr, Ndim> { +template class NDView : public ArrayExpr, Ndim> { public: NDView() = default; ~NDView() = default; NDView(const NDView &) = default; NDView(NDView &&) = default; - NDView(T *buffer, std::array shape) + NDView(T *buffer, std::array shape) : buffer_(buffer), strides_(c_strides(shape)), shape_(shape), size_(std::accumulate(std::begin(shape), std::end(shape), 1, std::multiplies<>())) {} - // NDView(T *buffer, const std::vector &shape) + // NDView(T *buffer, const std::vector &shape) // : buffer_(buffer), strides_(c_strides(make_array(shape))), shape_(make_array(shape)), // size_(std::accumulate(std::begin(shape), std::end(shape), 1, std::multiplies<>())) {} @@ -73,14 +73,14 @@ template class NDView : public ArrayExpr(size_); } size_t total_bytes() const { return size_ * sizeof(T); } - std::array strides() const noexcept { return strides_; } + std::array strides() const noexcept { return strides_; } T *begin() { return buffer_; } T *end() { return buffer_ + size_; } T const *begin() const { return buffer_; } T const *end() const { return buffer_ + size_; } - T &operator()(int64_t i) const { return buffer_[i]; } - T &operator[](int64_t i) const { return buffer_[i]; } + T &operator()(ssize_t i) const { return buffer_[i]; } + T &operator[](ssize_t i) const { return buffer_[i]; } bool operator==(const NDView &other) const { if (size_ != other.size_) @@ -136,15 +136,15 @@ template class NDView : public ArrayExpr strides_{}; - std::array shape_{}; + std::array strides_{}; + std::array shape_{}; uint64_t size_{}; template NDView &elemenwise(T val, BinaryOperation op) { @@ -160,7 +160,7 @@ template class NDView : public ArrayExpr void NDView::print_all() const { +template void NDView::print_all() const { for (auto row = 0; row < shape_[0]; ++row) { for (auto col = 0; col < shape_[1]; ++col) { std::cout << std::setw(3); @@ -171,7 +171,7 @@ template void NDView::print_all() const { } -template +template std::ostream& operator <<(std::ostream& os, const NDView& arr){ for (auto row = 0; row < arr.shape(0); ++row) { for (auto col = 0; col < arr.shape(1); ++col) { @@ -186,7 +186,7 @@ std::ostream& operator <<(std::ostream& os, const NDView& arr){ template NDView make_view(std::vector& vec){ - return NDView(vec.data(), {static_cast(vec.size())}); + return NDView(vec.data(), {static_cast(vec.size())}); } } // namespace aare \ No newline at end of file diff --git a/include/aare/NumpyFile.hpp b/include/aare/NumpyFile.hpp index 9cd2d61..7381a76 100644 --- a/include/aare/NumpyFile.hpp +++ b/include/aare/NumpyFile.hpp @@ -69,7 +69,7 @@ class NumpyFile : public FileInterface { */ template NDArray load() { NDArray arr(make_shape(m_header.shape)); - if (fseek(fp, static_cast(header_size), SEEK_SET)) { + if (fseek(fp, static_cast(header_size), SEEK_SET)) { throw std::runtime_error(LOCATION + "Error seeking to 
the start of the data"); } size_t rc = fread(arr.data(), sizeof(T), arr.size(), fp); diff --git a/include/aare/Pedestal.hpp b/include/aare/Pedestal.hpp index 102d730..d6223c1 100644 --- a/include/aare/Pedestal.hpp +++ b/include/aare/Pedestal.hpp @@ -107,7 +107,7 @@ template class Pedestal { assert(frame.size() == m_rows * m_cols); // TODO! move away from m_rows, m_cols - if (frame.shape() != std::array{m_rows, m_cols}) { + if (frame.shape() != std::array{m_rows, m_cols}) { throw std::runtime_error( "Frame shape does not match pedestal shape"); } @@ -128,7 +128,7 @@ template class Pedestal { assert(frame.size() == m_rows * m_cols); // TODO! move away from m_rows, m_cols - if (frame.shape() != std::array{m_rows, m_cols}) { + if (frame.shape() != std::array{m_rows, m_cols}) { throw std::runtime_error( "Frame shape does not match pedestal shape"); } diff --git a/include/aare/RawFile.hpp b/include/aare/RawFile.hpp index f744ac2..1cca1fd 100644 --- a/include/aare/RawFile.hpp +++ b/include/aare/RawFile.hpp @@ -30,22 +30,11 @@ struct ModuleConfig { * Consider using that unless you need raw file specific functionality. */ class RawFile : public FileInterface { - size_t n_subfiles{}; //f0,f1...fn - size_t n_subfile_parts{}; // d0,d1...dn - //TODO! move to vector of SubFile instead of pointers - std::vector> subfiles; //subfiles[f0,f1...fn][d0,d1...dn] - // std::vector positions; - + std::vector> m_subfiles; ModuleConfig cfg{0, 0}; - RawMasterFile m_master; - size_t m_current_frame{}; - - // std::vector m_module_pixel_0; - // size_t m_rows{}; - // size_t m_cols{}; - + size_t m_current_subfile{}; DetectorGeometry m_geometry; public: @@ -56,7 +45,7 @@ class RawFile : public FileInterface { */ RawFile(const std::filesystem::path &fname, const std::string &mode = "r"); - virtual ~RawFile() override; + virtual ~RawFile() override = default; Frame read_frame() override; Frame read_frame(size_t frame_number) override; @@ -80,7 +69,7 @@ class RawFile : public FileInterface { size_t cols() const override; size_t bitdepth() const override; xy geometry(); - size_t n_mod() const; + size_t n_modules() const; RawMasterFile master() const; @@ -115,9 +104,6 @@ class RawFile : public FileInterface { */ static DetectorHeader read_header(const std::filesystem::path &fname); - // void update_geometry_with_roi(); - int find_number_of_subfiles(); - void open_subfiles(); void find_geometry(); }; diff --git a/include/aare/RawMasterFile.hpp b/include/aare/RawMasterFile.hpp index beaeb29..4d143a6 100644 --- a/include/aare/RawMasterFile.hpp +++ b/include/aare/RawMasterFile.hpp @@ -121,6 +121,7 @@ class RawMasterFile { size_t total_frames_expected() const; xy geometry() const; + size_t n_modules() const; std::optional analog_samples() const; std::optional digital_samples() const; diff --git a/include/aare/RawSubFile.hpp b/include/aare/RawSubFile.hpp index 350a475..1059843 100644 --- a/include/aare/RawSubFile.hpp +++ b/include/aare/RawSubFile.hpp @@ -18,11 +18,20 @@ class RawSubFile { std::ifstream m_file; DetectorType m_detector_type; size_t m_bitdepth; - std::filesystem::path m_fname; + std::filesystem::path m_path; //!< path to the subfile + std::string m_base_name; //!< base name used for formatting file names + size_t m_offset{}; //!< file index of the first file, allow starting at non zero file + size_t m_total_frames{}; //!< total number of frames in the series of files size_t m_rows{}; size_t m_cols{}; size_t m_bytes_per_frame{}; - size_t m_num_frames{}; + + + int m_module_index{}; + size_t m_current_file_index{}; //!< 
The index of the open file + size_t m_current_frame_index{}; //!< The index of the current frame (with reference to all files) + std::vector m_last_frame_in_file{}; //!< Used for seeking to the correct file + uint32_t m_pos_row{}; uint32_t m_pos_col{}; @@ -67,12 +76,17 @@ class RawSubFile { size_t pixels_per_frame() const { return m_rows * m_cols; } size_t bytes_per_pixel() const { return m_bitdepth / bits_per_byte; } - size_t frames_in_file() const { return m_num_frames; } + size_t frames_in_file() const { return m_total_frames; } private: template void read_with_map(std::byte *image_buf); + void parse_fname(const std::filesystem::path &fname); + void scan_files(); + void open_file(size_t file_index); + std::filesystem::path fpath(size_t file_index) const; + }; } // namespace aare \ No newline at end of file diff --git a/include/aare/VarClusterFinder.hpp b/include/aare/VarClusterFinder.hpp index 161941a..596bf06 100644 --- a/include/aare/VarClusterFinder.hpp +++ b/include/aare/VarClusterFinder.hpp @@ -28,7 +28,7 @@ template class VarClusterFinder { }; private: - const std::array shape_; + const std::array shape_; NDView original_; NDArray labeled_; NDArray peripheral_labeled_; diff --git a/include/aare/algorithm.hpp b/include/aare/algorithm.hpp index fc7d51f..be2018f 100644 --- a/include/aare/algorithm.hpp +++ b/include/aare/algorithm.hpp @@ -107,5 +107,16 @@ std::vector cumsum(const std::vector& vec) { } +template bool all_equal(const Container &c) { + if (!c.empty() && + std::all_of(begin(c), end(c), + [c](const typename Container::value_type &element) { + return element == c.front(); + })) + return true; + return false; +} + + } // namespace aare \ No newline at end of file diff --git a/include/aare/defs.hpp b/include/aare/defs.hpp index 4d22bd4..ccf07a5 100644 --- a/include/aare/defs.hpp +++ b/include/aare/defs.hpp @@ -204,23 +204,25 @@ struct DetectorGeometry{ int module_gap_row{}; int module_gap_col{}; std::vector module_pixel_0; + + auto size() const { return module_pixel_0.size(); } }; struct ROI{ - int64_t xmin{}; - int64_t xmax{}; - int64_t ymin{}; - int64_t ymax{}; + ssize_t xmin{}; + ssize_t xmax{}; + ssize_t ymin{}; + ssize_t ymax{}; - int64_t height() const { return ymax - ymin; } - int64_t width() const { return xmax - xmin; } - bool contains(int64_t x, int64_t y) const { + ssize_t height() const { return ymax - ymin; } + ssize_t width() const { return xmax - xmin; } + bool contains(ssize_t x, ssize_t y) const { return x >= xmin && x < xmax && y >= ymin && y < ymax; } }; -using dynamic_shape = std::vector; +using dynamic_shape = std::vector; //TODO! Can we uniform enums between the libraries? 
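Editor's note: the `all_equal` helper added to `include/aare/algorithm.hpp` and the `ssize_t`-based `ROI` in `include/aare/defs.hpp` above can be exercised with a short standalone sketch. The `main()` harness and the sample values below are hypothetical illustrations, not part of the patch; they only assume the `aare` headers are on the include path.

    #include <iostream>
    #include <vector>

    #include "aare/algorithm.hpp" // aare::all_equal
    #include "aare/defs.hpp"      // aare::ROI

    int main() {
        // all_equal returns true only for a non-empty container whose
        // elements all compare equal to the first one
        std::vector<int> frame_numbers{7, 7, 7};
        std::cout << std::boolalpha
                  << aare::all_equal(frame_numbers) << '\n'; // true

        // ROI::contains treats xmax and ymax as exclusive upper bounds
        aare::ROI roi;
        roi.xmin = 0; roi.xmax = 100;
        roi.ymin = 0; roi.ymax = 50;
        std::cout << roi.contains(99, 49) << '\n';  // true
        std::cout << roi.contains(100, 10) << '\n'; // false

        return 0;
    }
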
diff --git a/include/aare/logger.hpp b/include/aare/logger.hpp new file mode 100644 index 0000000..06e6feb --- /dev/null +++ b/include/aare/logger.hpp @@ -0,0 +1,139 @@ +#pragma once +/*Utility to log to console*/ + + +#include +#include +#include + +namespace aare { + +#define RED "\x1b[31m" +#define GREEN "\x1b[32m" +#define YELLOW "\x1b[33m" +#define BLUE "\x1b[34m" +#define MAGENTA "\x1b[35m" +#define CYAN "\x1b[36m" +#define GRAY "\x1b[37m" +#define DARKGRAY "\x1b[30m" + +#define BG_BLACK "\x1b[48;5;232m" +#define BG_RED "\x1b[41m" +#define BG_GREEN "\x1b[42m" +#define BG_YELLOW "\x1b[43m" +#define BG_BLUE "\x1b[44m" +#define BG_MAGENTA "\x1b[45m" +#define BG_CYAN "\x1b[46m" +#define RESET "\x1b[0m" +#define BOLD "\x1b[1m" + + +enum TLogLevel { + logERROR, + logWARNING, + logINFOBLUE, + logINFOGREEN, + logINFORED, + logINFOCYAN, + logINFOMAGENTA, + logINFO, + logDEBUG, + logDEBUG1, + logDEBUG2, + logDEBUG3, + logDEBUG4, + logDEBUG5 +}; + +// Compiler should optimize away anything below this value +#ifndef AARE_LOG_LEVEL +#define AARE_LOG_LEVEL "LOG LEVEL NOT SET IN CMAKE" //This is configured in the main CMakeLists.txt +#endif + +#define __AT__ \ + std::string(__FILE__) + std::string("::") + std::string(__func__) + \ + std::string("(): ") +#define __SHORT_FORM_OF_FILE__ \ + (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__) +#define __SHORT_AT__ \ + std::string(__SHORT_FORM_OF_FILE__) + std::string("::") + \ + std::string(__func__) + std::string("(): ") + +class Logger { + std::ostringstream os; + TLogLevel m_level = AARE_LOG_LEVEL; + + public: + Logger() = default; + explicit Logger(TLogLevel level) : m_level(level){}; + ~Logger() { + // output in the destructor to allow for << syntax + os << RESET << '\n'; + std::clog << os.str() << std::flush; // Single write + } + + static TLogLevel &ReportingLevel() { // singelton eeh TODO! Do we need a runtime option? + static TLogLevel reportingLevel = logDEBUG5; + return reportingLevel; + } + + // Danger this buffer need as many elements as TLogLevel + static const char *Color(TLogLevel level) noexcept { + static const char *const colors[] = { + RED BOLD, YELLOW BOLD, BLUE, GREEN, RED, CYAN, MAGENTA, + RESET, RESET, RESET, RESET, RESET, RESET, RESET}; + // out of bounds + if (level < 0 || level >= sizeof(colors) / sizeof(colors[0])) { + return RESET; + } + return colors[level]; + } + + // Danger this buffer need as many elements as TLogLevel + static std::string ToString(TLogLevel level) { + static const char *const buffer[] = { + "ERROR", "WARNING", "INFO", "INFO", "INFO", + "INFO", "INFO", "INFO", "DEBUG", "DEBUG1", + "DEBUG2", "DEBUG3", "DEBUG4", "DEBUG5"}; + // out of bounds + if (level < 0 || level >= sizeof(buffer) / sizeof(buffer[0])) { + return "UNKNOWN"; + } + return buffer[level]; + } + + std::ostringstream &Get() { + os << Color(m_level) << "- " << Timestamp() << " " << ToString(m_level) + << ": "; + return os; + } + + static std::string Timestamp() { + constexpr size_t buffer_len = 12; + char buffer[buffer_len]; + time_t t; + ::time(&t); + tm r; + strftime(buffer, buffer_len, "%X", localtime_r(&t, &r)); + buffer[buffer_len - 1] = '\0'; + struct timeval tv; + gettimeofday(&tv, nullptr); + constexpr size_t result_len = 100; + char result[result_len]; + snprintf(result, result_len, "%s.%03ld", buffer, + static_cast(tv.tv_usec) / 1000); + result[result_len - 1] = '\0'; + return result; + } +}; + +// TODO! Do we need to keep the runtime option? 
+#define LOG(level) \ + if (level > AARE_LOG_LEVEL) \ + ; \ + else if (level > aare::Logger::ReportingLevel()) \ + ; \ + else \ + aare::Logger(level).Get() + +} // namespace aare diff --git a/pyproject.toml b/pyproject.toml index 7415062..db3cb3c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,10 +1,16 @@ +[tool.scikit-build.metadata.version] +provider = "scikit_build_core.metadata.regex" +input = "VERSION" +regex = '^(?P\d+(?:\.\d+)*(?:[\.\+\w]+)?)$' +result = "{version}" + [build-system] requires = ["scikit-build-core>=0.10", "pybind11", "numpy"] build-backend = "scikit_build_core.build" [project] name = "aare" -version = "2025.4.22" +dynamic = ["version"] requires-python = ">=3.11" dependencies = [ "numpy", diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index 549205a..ae84baa 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -29,6 +29,9 @@ target_link_libraries(_aare PRIVATE aare_core aare_compiler_flags) set( PYTHON_FILES aare/__init__.py aare/CtbRawFile.py + aare/ClusterFinder.py + aare/ClusterVector.py + aare/func.py aare/RawFile.py aare/transform.py @@ -36,6 +39,7 @@ set( PYTHON_FILES aare/utils.py ) + # Copy the python files to the build directory foreach(FILE ${PYTHON_FILES}) configure_file(${FILE} ${CMAKE_BINARY_DIR}/${FILE} ) diff --git a/python/aare/ClusterFinder.py b/python/aare/ClusterFinder.py new file mode 100644 index 0000000..6e7c352 --- /dev/null +++ b/python/aare/ClusterFinder.py @@ -0,0 +1,67 @@ + +from ._aare import ClusterFinder_Cluster3x3i, ClusterFinder_Cluster2x2i, ClusterFinderMT_Cluster3x3i, ClusterFinderMT_Cluster2x2i, ClusterCollector_Cluster3x3i, ClusterCollector_Cluster2x2i + + +from ._aare import ClusterFileSink_Cluster3x3i, ClusterFileSink_Cluster2x2i +import numpy as np + +def ClusterFinder(image_size, cluster_size, n_sigma=5, dtype = np.int32, capacity = 1024): + """ + Factory function to create a ClusterFinder object. Provides a cleaner syntax for + the templated ClusterFinder in C++. + """ + if dtype == np.int32 and cluster_size == (3,3): + return ClusterFinder_Cluster3x3i(image_size, n_sigma = n_sigma, capacity=capacity) + elif dtype == np.int32 and cluster_size == (2,2): + return ClusterFinder_Cluster2x2i(image_size, n_sigma = n_sigma, capacity=capacity) + else: + #TODO! add the other formats + raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32 is supported.") + + +def ClusterFinderMT(image_size, cluster_size = (3,3), dtype=np.int32, n_sigma=5, capacity = 1024, n_threads = 3): + """ + Factory function to create a ClusterFinderMT object. Provides a cleaner syntax for + the templated ClusterFinderMT in C++. + """ + + if dtype == np.int32 and cluster_size == (3,3): + return ClusterFinderMT_Cluster3x3i(image_size, n_sigma = n_sigma, + capacity = capacity, n_threads = n_threads) + elif dtype == np.int32 and cluster_size == (2,2): + return ClusterFinderMT_Cluster2x2i(image_size, n_sigma = n_sigma, + capacity = capacity, n_threads = n_threads) + else: + #TODO! add the other formats + raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32 is supported.") + + +def ClusterCollector(clusterfindermt, cluster_size = (3,3), dtype=np.int32): + """ + Factory function to create a ClusterCollector object. Provides a cleaner syntax for + the templated ClusterCollector in C++. + """ + + if dtype == np.int32 and cluster_size == (3,3): + return ClusterCollector_Cluster3x3i(clusterfindermt) + elif dtype == np.int32 and cluster_size == (2,2): + return ClusterCollector_Cluster2x2i(clusterfindermt) + + else: + #TODO! 
add the other formats + raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32 is supported.") + +def ClusterFileSink(clusterfindermt, cluster_file, dtype=np.int32): + """ + Factory function to create a ClusterCollector object. Provides a cleaner syntax for + the templated ClusterCollector in C++. + """ + + if dtype == np.int32 and clusterfindermt.cluster_size == (3,3): + return ClusterFileSink_Cluster3x3i(clusterfindermt, cluster_file) + elif dtype == np.int32 and clusterfindermt.cluster_size == (2,2): + return ClusterFileSink_Cluster2x2i(clusterfindermt, cluster_file) + + else: + #TODO! add the other formats + raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32 is supported.") \ No newline at end of file diff --git a/python/aare/ClusterVector.py b/python/aare/ClusterVector.py new file mode 100644 index 0000000..b0dd453 --- /dev/null +++ b/python/aare/ClusterVector.py @@ -0,0 +1,11 @@ + + +from ._aare import ClusterVector_Cluster3x3i +import numpy as np + +def ClusterVector(cluster_size, dtype = np.int32): + + if dtype == np.int32 and cluster_size == (3,3): + return ClusterVector_Cluster3x3i() + else: + raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32 is supported.") diff --git a/python/aare/__init__.py b/python/aare/__init__.py index db9672f..d2bbe0a 100644 --- a/python/aare/__init__.py +++ b/python/aare/__init__.py @@ -3,16 +3,21 @@ from . import _aare from ._aare import File, RawMasterFile, RawSubFile, JungfrauDataFile -from ._aare import Pedestal_d, Pedestal_f, ClusterFinder, VarClusterFinder +from ._aare import Pedestal_d, Pedestal_f, ClusterFinder_Cluster3x3i, VarClusterFinder from ._aare import DetectorType -from ._aare import ClusterFile +from ._aare import ClusterFile_Cluster3x3i as ClusterFile from ._aare import hitmap from ._aare import ROI -from ._aare import ClusterFinderMT, ClusterCollector, ClusterFileSink, ClusterVector_i +# from ._aare import ClusterFinderMT, ClusterCollector, ClusterFileSink, ClusterVector_i -from ._aare import fit_gaus, fit_pol1 +from .ClusterFinder import ClusterFinder, ClusterCollector, ClusterFinderMT, ClusterFileSink +from .ClusterVector import ClusterVector + + +from ._aare import fit_gaus, fit_pol1, fit_scurve, fit_scurve2 from ._aare import Interpolator +from ._aare import calculate_eta2 from ._aare import apply_custom_weights diff --git a/python/aare/func.py b/python/aare/func.py index ca60cf2..e8a7b46 100644 --- a/python/aare/func.py +++ b/python/aare/func.py @@ -1 +1 @@ -from ._aare import gaus, pol1 \ No newline at end of file +from ._aare import gaus, pol1, scurve, scurve2 \ No newline at end of file diff --git a/python/examples/play.py b/python/examples/play.py index da469dc..0f4feca 100644 --- a/python/examples/play.py +++ b/python/examples/play.py @@ -1,79 +1,89 @@ import sys sys.path.append('/home/l_msdetect/erik/aare/build') -from aare._aare import ClusterVector_i, Interpolator -import pickle -import numpy as np -import matplotlib.pyplot as plt -import boost_histogram as bh -import torch -import math -import time +from aare import RawSubFile, DetectorType, RawFile + +from pathlib import Path +path = Path("/home/l_msdetect/erik/data/aare-test-data/raw/jungfrau/") +f = RawSubFile(path/"jungfrau_single_d0_f0_0.raw", DetectorType.Jungfrau, 512, 1024, 16) + +# f = RawFile(path/"jungfrau_single_master_0.json") + + +# from aare._aare import ClusterVector_i, Interpolator + +# import pickle +# import numpy as np +# import matplotlib.pyplot as plt +# import boost_histogram as bh +# import torch +# import math +# import 
time -def gaussian_2d(mx, my, sigma = 1, res=100, grid_size = 2): - """ - Generate a 2D gaussian as position mx, my, with sigma=sigma. - The gaussian is placed on a 2x2 pixel matrix with resolution - res in one dimesion. - """ - x = torch.linspace(0, pixel_size*grid_size, res) - x,y = torch.meshgrid(x,x, indexing="ij") - return 1 / (2*math.pi*sigma**2) * \ - torch.exp(-((x - my)**2 / (2*sigma**2) + (y - mx)**2 / (2*sigma**2))) +# def gaussian_2d(mx, my, sigma = 1, res=100, grid_size = 2): +# """ +# Generate a 2D gaussian as position mx, my, with sigma=sigma. +# The gaussian is placed on a 2x2 pixel matrix with resolution +# res in one dimesion. +# """ +# x = torch.linspace(0, pixel_size*grid_size, res) +# x,y = torch.meshgrid(x,x, indexing="ij") +# return 1 / (2*math.pi*sigma**2) * \ +# torch.exp(-((x - my)**2 / (2*sigma**2) + (y - mx)**2 / (2*sigma**2))) -scale = 1000 #Scale factor when converting to integer -pixel_size = 25 #um -grid = 2 -resolution = 100 -sigma_um = 10 -xa = np.linspace(0,grid*pixel_size,resolution) -ticks = [0, 25, 50] +# scale = 1000 #Scale factor when converting to integer +# pixel_size = 25 #um +# grid = 2 +# resolution = 100 +# sigma_um = 10 +# xa = np.linspace(0,grid*pixel_size,resolution) +# ticks = [0, 25, 50] -hit = np.array((20,20)) -etahist_fname = "/home/l_msdetect/erik/tmp/test_hist.pkl" +# hit = np.array((20,20)) +# etahist_fname = "/home/l_msdetect/erik/tmp/test_hist.pkl" -local_resolution = 99 -grid_size = 3 -xaxis = np.linspace(0,grid_size*pixel_size, local_resolution) -t = gaussian_2d(hit[0],hit[1], grid_size = grid_size, sigma = 10, res = local_resolution) -pixels = t.reshape(grid_size, t.shape[0] // grid_size, grid_size, t.shape[1] // grid_size).sum(axis = 3).sum(axis = 1) -pixels = pixels.numpy() -pixels = (pixels*scale).astype(np.int32) -v = ClusterVector_i(3,3) -v.push_back(1,1, pixels) +# local_resolution = 99 +# grid_size = 3 +# xaxis = np.linspace(0,grid_size*pixel_size, local_resolution) +# t = gaussian_2d(hit[0],hit[1], grid_size = grid_size, sigma = 10, res = local_resolution) +# pixels = t.reshape(grid_size, t.shape[0] // grid_size, grid_size, t.shape[1] // grid_size).sum(axis = 3).sum(axis = 1) +# pixels = pixels.numpy() +# pixels = (pixels*scale).astype(np.int32) +# v = ClusterVector_i(3,3) +# v.push_back(1,1, pixels) -with open(etahist_fname, "rb") as f: - hist = pickle.load(f) -eta = hist.view().copy() -etabinsx = np.array(hist.axes.edges.T[0].flat) -etabinsy = np.array(hist.axes.edges.T[1].flat) -ebins = np.array(hist.axes.edges.T[2].flat) -p = Interpolator(eta, etabinsx[0:-1], etabinsy[0:-1], ebins[0:-1]) +# with open(etahist_fname, "rb") as f: +# hist = pickle.load(f) +# eta = hist.view().copy() +# etabinsx = np.array(hist.axes.edges.T[0].flat) +# etabinsy = np.array(hist.axes.edges.T[1].flat) +# ebins = np.array(hist.axes.edges.T[2].flat) +# p = Interpolator(eta, etabinsx[0:-1], etabinsy[0:-1], ebins[0:-1]) -#Generate the hit +# #Generate the hit -tmp = p.interpolate(v) -print(f'tmp:{tmp}') -pos = np.array((tmp['x'], tmp['y']))*25 +# tmp = p.interpolate(v) +# print(f'tmp:{tmp}') +# pos = np.array((tmp['x'], tmp['y']))*25 -print(pixels) -fig, ax = plt.subplots(figsize = (7,7)) -ax.pcolormesh(xaxis, xaxis, t) -ax.plot(*pos, 'o') -ax.set_xticks([0,25,50,75]) -ax.set_yticks([0,25,50,75]) -ax.set_xlim(0,75) -ax.set_ylim(0,75) -ax.grid() -print(f'{hit=}') -print(f'{pos=}') \ No newline at end of file +# print(pixels) +# fig, ax = plt.subplots(figsize = (7,7)) +# ax.pcolormesh(xaxis, xaxis, t) +# ax.plot(*pos, 'o') +# 
ax.set_xticks([0,25,50,75]) +# ax.set_yticks([0,25,50,75]) +# ax.set_xlim(0,75) +# ax.set_ylim(0,75) +# ax.grid() +# print(f'{hit=}') +# print(f'{pos=}') \ No newline at end of file diff --git a/python/src/bind_ClusterVector.hpp b/python/src/bind_ClusterVector.hpp new file mode 100644 index 0000000..db8c8a3 --- /dev/null +++ b/python/src/bind_ClusterVector.hpp @@ -0,0 +1,104 @@ +#include "aare/ClusterCollector.hpp" +#include "aare/ClusterFileSink.hpp" +#include "aare/ClusterFinder.hpp" +#include "aare/ClusterFinderMT.hpp" +#include "aare/ClusterVector.hpp" +#include "aare/NDView.hpp" +#include "aare/Pedestal.hpp" +#include "np_helper.hpp" + +#include +#include +#include +#include +#include + +namespace py = pybind11; +using pd_type = double; + +using namespace aare; + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" + +template +void define_ClusterVector(py::module &m, const std::string &typestr) { + using ClusterType = Cluster; + auto class_name = fmt::format("ClusterVector_{}", typestr); + + py::class_, void>>( + m, class_name.c_str(), + py::buffer_protocol()) + + .def(py::init()) // TODO change!!! + + .def("push_back", + [](ClusterVector &self, const ClusterType &cluster) { + self.push_back(cluster); + }) + + .def("sum", + [](ClusterVector &self) { + auto *vec = new std::vector(self.sum()); + return return_vector(vec); + }) + .def("sum_2x2", [](ClusterVector &self){ + auto *vec = new std::vector(self.sum_2x2()); + return return_vector(vec); + }) + .def_property_readonly("size", &ClusterVector::size) + .def("item_size", &ClusterVector::item_size) + .def_property_readonly("fmt", + [typestr](ClusterVector &self) { + return fmt_format; + }) + + .def_property_readonly("cluster_size_x", + &ClusterVector::cluster_size_x) + .def_property_readonly("cluster_size_y", + &ClusterVector::cluster_size_y) + .def_property_readonly("capacity", + &ClusterVector::capacity) + .def_property("frame_number", &ClusterVector::frame_number, + &ClusterVector::set_frame_number) + .def_buffer( + [typestr](ClusterVector &self) -> py::buffer_info { + return py::buffer_info( + self.data(), /* Pointer to buffer */ + self.item_size(), /* Size of one scalar */ + fmt_format, /* Format descriptor */ + 1, /* Number of dimensions */ + {self.size()}, /* Buffer dimensions */ + {self.item_size()} /* Strides (in bytes) for each index */ + ); + }); + + // Free functions using ClusterVector + m.def("hitmap", + [](std::array image_size, ClusterVector &cv) { + // Create a numpy array to hold the hitmap + // The shape of the array is (image_size[0], image_size[1]) + // note that the python array is passed as [row, col] which + // is the opposite of the clusters [x,y] + py::array_t hitmap(image_size); + auto r = hitmap.mutable_unchecked<2>(); + + // Initialize hitmap to 0 + for (py::ssize_t i = 0; i < r.shape(0); i++) + for (py::ssize_t j = 0; j < r.shape(1); j++) + r(i, j) = 0; + + // Loop over the clusters and increment the hitmap + // Skip out of bound clusters + for (const auto &cluster : cv) { + auto x = cluster.x; + auto y = cluster.y; + if (x < image_size[1] && y < image_size[0]) + r(cluster.y, cluster.x) += 1; + } + + return hitmap; + }); +} \ No newline at end of file diff --git a/python/src/cluster.hpp b/python/src/cluster.hpp index 3db816a..58f137c 100644 --- a/python/src/cluster.hpp +++ b/python/src/cluster.hpp @@ -16,194 +16,196 @@ namespace py = pybind11; using pd_type = double; -template -void define_cluster_vector(py::module &m, const std::string &typestr) { - auto class_name = 
fmt::format("ClusterVector_{}", typestr); - py::class_>(m, class_name.c_str(), py::buffer_protocol()) - .def(py::init(), - py::arg("cluster_size_x") = 3, py::arg("cluster_size_y") = 3) - .def("push_back", - [](ClusterVector &self, int x, int y, py::array_t data) { - // auto view = make_view_2d(data); - self.push_back(x, y, reinterpret_cast(data.data())); - }) - .def_property_readonly("size", &ClusterVector::size) - .def("item_size", &ClusterVector::item_size) - .def_property_readonly("fmt", - [typestr](ClusterVector &self) { - return fmt::format( - self.fmt_base(), self.cluster_size_x(), - self.cluster_size_y(), typestr); - }) - .def("sum", - [](ClusterVector &self) { - auto *vec = new std::vector(self.sum()); - return return_vector(vec); - }) - .def("sum_2x2", [](ClusterVector &self) { - auto *vec = new std::vector(self.sum_2x2()); - return return_vector(vec); - }) - .def_property_readonly("cluster_size_x", &ClusterVector::cluster_size_x) - .def_property_readonly("cluster_size_y", &ClusterVector::cluster_size_y) - .def_property_readonly("capacity", &ClusterVector::capacity) - .def_property("frame_number", &ClusterVector::frame_number, - &ClusterVector::set_frame_number) - .def_buffer([typestr](ClusterVector &self) -> py::buffer_info { - return py::buffer_info( - self.data(), /* Pointer to buffer */ - self.item_size(), /* Size of one scalar */ - fmt::format(self.fmt_base(), self.cluster_size_x(), - self.cluster_size_y(), - typestr), /* Format descriptor */ - 1, /* Number of dimensions */ - {self.size()}, /* Buffer dimensions */ - {self.item_size()} /* Strides (in bytes) for each index */ - ); +using namespace aare; + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" + +template +void define_cluster(py::module &m, const std::string &typestr) { + auto class_name = fmt::format("Cluster{}", typestr); + + py::class_>( + m, class_name.c_str(), py::buffer_protocol()) + + .def(py::init([](uint8_t x, uint8_t y, py::array_t data) { + py::buffer_info buf_info = data.request(); + Cluster cluster; + cluster.x = x; + cluster.y = y; + auto r = data.template unchecked<1>(); // no bounds checks + for (py::ssize_t i = 0; i < data.size(); ++i) { + cluster.data[i] = r(i); + } + return cluster; + })); + + /* + .def_property( + "data", + [](ClusterType &c) -> py::array { + return py::array(py::buffer_info( + c.data, sizeof(Type), + py::format_descriptor::format(), // Type + // format + 1, // Number of dimensions + {static_cast(ClusterSizeX * + ClusterSizeY)}, // Shape (flattened) + {sizeof(Type)} // Stride (step size between elements) + )); + }, + [](ClusterType &c, py::array_t arr) { + py::buffer_info buf_info = arr.request(); + Type *ptr = static_cast(buf_info.ptr); + std::copy(ptr, ptr + ClusterSizeX * ClusterSizeY, + c.data); // TODO dont iterate over centers!!! 
+ }); + */ } -void define_cluster_finder_mt_bindings(py::module &m) { - py::class_>(m, "ClusterFinderMT") - .def(py::init, Shape<2>, pd_type, size_t, size_t>(), - py::arg("image_size"), py::arg("cluster_size"), - py::arg("n_sigma") = 5.0, py::arg("capacity") = 2048, - py::arg("n_threads") = 3) +template +void define_cluster_finder_mt_bindings(py::module &m, + const std::string &typestr) { + auto class_name = fmt::format("ClusterFinderMT_{}", typestr); + + using ClusterType = Cluster; + + py::class_>( + m, class_name.c_str()) + .def(py::init, pd_type, size_t, size_t>(), + py::arg("image_size"), py::arg("n_sigma") = 5.0, + py::arg("capacity") = 2048, py::arg("n_threads") = 3) .def("push_pedestal_frame", - [](ClusterFinderMT &self, + [](ClusterFinderMT &self, py::array_t frame) { auto view = make_view_2d(frame); self.push_pedestal_frame(view); }) .def( "find_clusters", - [](ClusterFinderMT &self, + [](ClusterFinderMT &self, py::array_t frame, uint64_t frame_number) { auto view = make_view_2d(frame); self.find_clusters(view, frame_number); return; }, py::arg(), py::arg("frame_number") = 0) - .def("clear_pedestal", &ClusterFinderMT::clear_pedestal) - .def("sync", &ClusterFinderMT::sync) - .def("stop", &ClusterFinderMT::stop) - .def("start", &ClusterFinderMT::start) - .def("pedestal", - [](ClusterFinderMT &self, size_t thread_index) { - auto pd = new NDArray{}; - *pd = self.pedestal(thread_index); - return return_image_data(pd); - },py::arg("thread_index") = 0) - .def("noise", - [](ClusterFinderMT &self, size_t thread_index) { - auto arr = new NDArray{}; - *arr = self.noise(thread_index); - return return_image_data(arr); - },py::arg("thread_index") = 0); + .def_property_readonly("cluster_size", [](ClusterFinderMT &self){ + return py::make_tuple(ClusterSizeX, ClusterSizeY); + }) + .def("clear_pedestal", + &ClusterFinderMT::clear_pedestal) + .def("sync", &ClusterFinderMT::sync) + .def("stop", &ClusterFinderMT::stop) + .def("start", &ClusterFinderMT::start) + .def( + "pedestal", + [](ClusterFinderMT &self, + size_t thread_index) { + auto pd = new NDArray{}; + *pd = self.pedestal(thread_index); + return return_image_data(pd); + }, + py::arg("thread_index") = 0) + .def( + "noise", + [](ClusterFinderMT &self, + size_t thread_index) { + auto arr = new NDArray{}; + *arr = self.noise(thread_index); + return return_image_data(arr); + }, + py::arg("thread_index") = 0); } -void define_cluster_collector_bindings(py::module &m) { - py::class_(m, "ClusterCollector") - .def(py::init *>()) - .def("stop", &ClusterCollector::stop) +template +void define_cluster_collector_bindings(py::module &m, + const std::string &typestr) { + auto class_name = fmt::format("ClusterCollector_{}", typestr); + + using ClusterType = Cluster; + + py::class_>(m, class_name.c_str()) + .def(py::init *>()) + .def("stop", &ClusterCollector::stop) .def( "steal_clusters", - [](ClusterCollector &self) { - auto v = - new std::vector>(self.steal_clusters()); - return v; + [](ClusterCollector &self) { + auto v = new std::vector>( + self.steal_clusters()); + return v; // TODO change!!! 
}, py::return_value_policy::take_ownership); } -void define_cluster_file_sink_bindings(py::module &m) { - py::class_(m, "ClusterFileSink") - .def(py::init *, +template +void define_cluster_file_sink_bindings(py::module &m, + const std::string &typestr) { + auto class_name = fmt::format("ClusterFileSink_{}", typestr); + + using ClusterType = Cluster; + + py::class_>(m, class_name.c_str()) + .def(py::init *, const std::filesystem::path &>()) - .def("stop", &ClusterFileSink::stop); + .def("stop", &ClusterFileSink::stop); } -void define_cluster_finder_bindings(py::module &m) { - py::class_>(m, "ClusterFinder") - .def(py::init, Shape<2>, pd_type, size_t>(), - py::arg("image_size"), py::arg("cluster_size"), +template +void define_cluster_finder_bindings(py::module &m, const std::string &typestr) { + auto class_name = fmt::format("ClusterFinder_{}", typestr); + + using ClusterType = Cluster; + + py::class_>( + m, class_name.c_str()) + .def(py::init, pd_type, size_t>(), py::arg("image_size"), py::arg("n_sigma") = 5.0, py::arg("capacity") = 1'000'000) .def("push_pedestal_frame", - [](ClusterFinder &self, + [](ClusterFinder &self, py::array_t frame) { auto view = make_view_2d(frame); self.push_pedestal_frame(view); }) - .def("clear_pedestal", &ClusterFinder::clear_pedestal) - .def_property_readonly("pedestal", - [](ClusterFinder &self) { - auto pd = new NDArray{}; - *pd = self.pedestal(); - return return_image_data(pd); - }) - .def_property_readonly("noise", - [](ClusterFinder &self) { - auto arr = new NDArray{}; - *arr = self.noise(); - return return_image_data(arr); - }) + .def("clear_pedestal", + &ClusterFinder::clear_pedestal) + .def_property_readonly( + "pedestal", + [](ClusterFinder &self) { + auto pd = new NDArray{}; + *pd = self.pedestal(); + return return_image_data(pd); + }) + .def_property_readonly( + "noise", + [](ClusterFinder &self) { + auto arr = new NDArray{}; + *arr = self.noise(); + return return_image_data(arr); + }) .def( "steal_clusters", - [](ClusterFinder &self, + [](ClusterFinder &self, bool realloc_same_capacity) { - auto v = new ClusterVector( - self.steal_clusters(realloc_same_capacity)); - return v; + ClusterVector clusters = + self.steal_clusters(realloc_same_capacity); + return clusters; }, py::arg("realloc_same_capacity") = false) .def( "find_clusters", - [](ClusterFinder &self, + [](ClusterFinder &self, py::array_t frame, uint64_t frame_number) { auto view = make_view_2d(frame); self.find_clusters(view, frame_number); return; }, py::arg(), py::arg("frame_number") = 0); - - m.def("hitmap", - [](std::array image_size, ClusterVector &cv) { - py::array_t hitmap(image_size); - auto r = hitmap.mutable_unchecked<2>(); - - // Initialize hitmap to 0 - for (py::ssize_t i = 0; i < r.shape(0); i++) - for (py::ssize_t j = 0; j < r.shape(1); j++) - r(i, j) = 0; - - size_t stride = cv.item_size(); - auto ptr = cv.data(); - for (size_t i = 0; i < cv.size(); i++) { - auto x = *reinterpret_cast(ptr); - auto y = *reinterpret_cast(ptr + sizeof(int16_t)); - r(y, x) += 1; - ptr += stride; - } - return hitmap; - }); - define_cluster_vector(m, "i"); - define_cluster_vector(m, "d"); - define_cluster_vector(m, "f"); - - py::class_(m, "DynamicCluster", py::buffer_protocol()) - .def(py::init()) - .def("size", &DynamicCluster::size) - .def("begin", &DynamicCluster::begin) - .def("end", &DynamicCluster::end) - .def_readwrite("x", &DynamicCluster::x) - .def_readwrite("y", &DynamicCluster::y) - .def_buffer([](DynamicCluster &c) -> py::buffer_info { - return py::buffer_info(c.data(), c.dt.bytes(), 
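A sketch of how the multi-threaded pieces bound above are intended to be chained from Python: a ClusterFinderMT_* pushes clusters into its sink and a ClusterCollector_* drains them. The ordering mirrors the C++ ClusterFinderMT test added later in this patch; the Python-side chaining itself (passing the finder object into the collector constructor) is not exercised by the Python tests and is an assumption:

import numpy as np
from aare import _aare

cf = _aare.ClusterFinderMT_Cluster3x3i([400, 400], n_sigma=5.0,
                                       capacity=2048, n_threads=3)

for _ in range(10):                              # accumulate a pedestal first
    cf.push_pedestal_frame(np.zeros((400, 400)))

cf.find_clusters(np.zeros((400, 400)), frame_number=1)
cf.stop()                                        # flush queues and join the workers

collector = _aare.ClusterCollector_Cluster3x3i(cf)
collector.stop()
clusters = collector.steal_clusters()            # clusters gathered from the sink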
c.dt.format_descr(), - 1, {c.size()}, {c.dt.bytes()}); - }) - - .def("__repr__", [](const DynamicCluster &a) { - return ""; - }); -} \ No newline at end of file +} +#pragma GCC diagnostic pop diff --git a/python/src/cluster_file.hpp b/python/src/cluster_file.hpp index ff46043..ac384b2 100644 --- a/python/src/cluster_file.hpp +++ b/python/src/cluster_file.hpp @@ -1,3 +1,4 @@ +#include "aare/CalculateEta.hpp" #include "aare/ClusterFile.hpp" #include "aare/defs.hpp" @@ -10,64 +11,84 @@ #include #include -//Disable warnings for unused parameters, as we ignore some -//in the __exit__ method +// Disable warnings for unused parameters, as we ignore some +// in the __exit__ method #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" - namespace py = pybind11; using namespace ::aare; -void define_cluster_file_io_bindings(py::module &m) { - PYBIND11_NUMPY_DTYPE(Cluster3x3, x, y, data); +template +void define_cluster_file_io_bindings(py::module &m, + const std::string &typestr) { - py::class_(m, "ClusterFile") + using ClusterType = Cluster; + + auto class_name = fmt::format("ClusterFile_{}", typestr); + + py::class_>(m, class_name.c_str()) .def(py::init(), py::arg(), py::arg("chunk_size") = 1000, py::arg("mode") = "r") - .def("read_clusters", - [](ClusterFile &self, size_t n_clusters) { - auto v = new ClusterVector(self.read_clusters(n_clusters)); + .def( + "read_clusters", + [](ClusterFile &self, size_t n_clusters) { + auto v = new ClusterVector( + self.read_clusters(n_clusters)); return v; - },py::return_value_policy::take_ownership) + }, + py::return_value_policy::take_ownership) .def("read_frame", - [](ClusterFile &self) { - auto v = new ClusterVector(self.read_frame()); - return v; + [](ClusterFile &self) { + auto v = new ClusterVector(self.read_frame()); + return v; }) - .def("set_roi", &ClusterFile::set_roi) - .def("set_noise_map", [](ClusterFile &self, py::array_t noise_map) { - auto view = make_view_2d(noise_map); - self.set_noise_map(view); - }) - .def("set_gain_map", [](ClusterFile &self, py::array_t gain_map) { - auto view = make_view_2d(gain_map); - self.set_gain_map(view); - }) - .def("close", &ClusterFile::close) - .def("write_frame", &ClusterFile::write_frame) - .def("__enter__", [](ClusterFile &self) { return &self; }) + .def("set_roi", &ClusterFile::set_roi) + .def( + "set_noise_map", + [](ClusterFile &self, py::array_t noise_map) { + auto view = make_view_2d(noise_map); + self.set_noise_map(view); + }) + + .def("set_gain_map", + [](ClusterFile &self, py::array_t gain_map) { + auto view = make_view_2d(gain_map); + self.set_gain_map(view); + }) + + .def("close", &ClusterFile::close) + .def("write_frame", &ClusterFile::write_frame) + .def("__enter__", [](ClusterFile &self) { return &self; }) .def("__exit__", - [](ClusterFile &self, + [](ClusterFile &self, const std::optional &exc_type, const std::optional &exc_value, const std::optional &traceback) { self.close(); }) - .def("__iter__", [](ClusterFile &self) { return &self; }) - .def("__next__", [](ClusterFile &self) { - auto v = new ClusterVector(self.read_clusters(self.chunk_size())); + .def("__iter__", [](ClusterFile &self) { return &self; }) + .def("__next__", [](ClusterFile &self) { + auto v = new ClusterVector( + self.read_clusters(self.chunk_size())); if (v->size() == 0) { throw py::stop_iteration(); } return v; }); +} - m.def("calculate_eta2", []( aare::ClusterVector &clusters) { - auto eta2 = new NDArray(calculate_eta2(clusters)); - return return_image_data(eta2); - }); +template +void 
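For comparison, the single-threaded ClusterFinder_* bound above runs the same pedestal/find/steal flow without queues; this mirrors the test_cluster_finder Python test added in this patch (n_sigma and capacity keep the defaults declared in the binding):

import numpy as np
from aare import _aare

cf = _aare.ClusterFinder_Cluster3x3i([100, 100])
cf.push_pedestal_frame(np.zeros((100, 100)))
cf.find_clusters(np.zeros((100, 100)))

clusters = cf.steal_clusters(realloc_same_capacity=False)
assert clusters.size == 0        # an all-zero frame yields no clusters

pedestal = cf.pedestal           # (100, 100) numpy arrays
noise = cf.noise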
register_calculate_eta(py::module &m) { + using ClusterType = Cluster; + m.def("calculate_eta2", + [](const aare::ClusterVector &clusters) { + auto eta2 = new NDArray(calculate_eta2(clusters)); + return return_image_data(eta2); + }); } #pragma GCC diagnostic pop \ No newline at end of file diff --git a/python/src/ctb_raw_file.hpp b/python/src/ctb_raw_file.hpp index a88a9d1..c9b5310 100644 --- a/python/src/ctb_raw_file.hpp +++ b/python/src/ctb_raw_file.hpp @@ -34,7 +34,7 @@ m.def("adc_sar_05_decode64to16", [](py::array_t input) { } //Create a 2D output array with the same shape as the input - std::vector shape{input.shape(0), input.shape(1)/static_cast(bits_per_byte)}; + std::vector shape{input.shape(0), input.shape(1)/static_cast(bits_per_byte)}; py::array_t output(shape); //Create a view of the input and output arrays @@ -55,7 +55,7 @@ m.def("adc_sar_04_decode64to16", [](py::array_t input) { } //Create a 2D output array with the same shape as the input - std::vector shape{input.shape(0), input.shape(1)/static_cast(bits_per_byte)}; + std::vector shape{input.shape(0), input.shape(1)/static_cast(bits_per_byte)}; py::array_t output(shape); //Create a view of the input and output arrays diff --git a/python/src/file.hpp b/python/src/file.hpp index 2d0f53e..f97db96 100644 --- a/python/src/file.hpp +++ b/python/src/file.hpp @@ -198,7 +198,7 @@ void define_file_io_bindings(py::module &m) { py::class_(m, "ROI") .def(py::init<>()) - .def(py::init(), py::arg("xmin"), + .def(py::init(), py::arg("xmin"), py::arg("xmax"), py::arg("ymin"), py::arg("ymax")) .def_readwrite("xmin", &ROI::xmin) .def_readwrite("xmax", &ROI::xmax) diff --git a/python/src/fit.hpp b/python/src/fit.hpp index 8e6cfef..97dafb5 100644 --- a/python/src/fit.hpp +++ b/python/src/fit.hpp @@ -55,6 +55,47 @@ void define_fit_bindings(py::module &m) { )", py::arg("x"), py::arg("par")); + m.def( + "scurve", + [](py::array_t x, + py::array_t par) { + auto x_view = make_view_1d(x); + auto par_view = make_view_1d(par); + auto y = new NDArray{aare::func::scurve(x_view, par_view)}; + return return_image_data(y); + }, + R"( + Evaluate a 1D scurve function for all points in x using parameters par. + + Parameters + ---------- + x : array_like + The points at which to evaluate the scurve function. + par : array_like + The parameters of the scurve function. The first element is the background slope, the second element is the background intercept, the third element is the mean, the fourth element is the standard deviation, the fifth element is inflexion point count number, and the sixth element is C. + )", + py::arg("x"), py::arg("par")); + + m.def( + "scurve2", + [](py::array_t x, + py::array_t par) { + auto x_view = make_view_1d(x); + auto par_view = make_view_1d(par); + auto y = new NDArray{aare::func::scurve2(x_view, par_view)}; + return return_image_data(y); + }, + R"( + Evaluate a 1D scurve2 function for all points in x using parameters par. + + Parameters + ---------- + x : array_like + The points at which to evaluate the scurve function. + par : array_like + The parameters of the scurve2 function. The first element is the background slope, the second element is the background intercept, the third element is the mean, the fourth element is the standard deviation, the fifth element is inflexion point count number, and the sixth element is C. + )", + py::arg("x"), py::arg("par")); m.def( "fit_gaus", @@ -235,6 +276,180 @@ n_threads : int, optional R"( Fit a 1D polynomial to data with error estimates. 
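A sketch of the templated ClusterFile_* bindings and register_calculate_eta above. The file name is a placeholder, the gain-map shape is an arbitrary example, and the concrete class name assumes the Cluster3x3i instantiation registered in module.cpp later in this patch:

import numpy as np
from aare import _aare

f = _aare.ClusterFile_Cluster3x3i("clusters.clust", chunk_size=1000, mode="r")

roi = _aare.ROI(xmin=0, xmax=50, ymin=200, ymax=249)
f.set_roi(roi)                          # only clusters inside the ROI are returned
f.set_gain_map(np.ones((512, 1024)))    # optional per-pixel gain correction

cv = f.read_clusters(100)               # up to 100 clusters as a ClusterVector_Cluster3x3i
eta2 = _aare.calculate_eta2(cv)         # shape (n_clusters, 2): eta_x, eta_y per cluster
f.close()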
+Parameters +---------- +x : array_like + The x values. +y : array_like + The y values. +y_err : array_like + The error in the y values. +n_threads : int, optional + The number of threads to use. Default is 4. +)", + py::arg("x"), py::arg("y"), py::arg("y_err"), py::arg("n_threads") = 4); + +//========= + m.def( + "fit_scurve", + [](py::array_t x, + py::array_t y, + int n_threads) { + if (y.ndim() == 3) { + auto par = new NDArray{}; + + auto x_view = make_view_1d(x); + auto y_view = make_view_3d(y); + *par = aare::fit_scurve(x_view, y_view, n_threads); + return return_image_data(par); + } else if (y.ndim() == 1) { + auto par = new NDArray{}; + auto x_view = make_view_1d(x); + auto y_view = make_view_1d(y); + *par = aare::fit_scurve(x_view, y_view); + return return_image_data(par); + } else { + throw std::runtime_error("Data must be 1D or 3D"); + } + }, + py::arg("x"), py::arg("y"), py::arg("n_threads") = 4); + + m.def( + "fit_scurve", + [](py::array_t x, + py::array_t y, + py::array_t y_err, + int n_threads) { + if (y.ndim() == 3) { + auto par = new NDArray({y.shape(0), y.shape(1), 6}); + + auto par_err = + new NDArray({y.shape(0), y.shape(1), 6}); + + auto y_view = make_view_3d(y); + auto y_view_err = make_view_3d(y_err); + auto x_view = make_view_1d(x); + + auto chi2 = new NDArray({y.shape(0), y.shape(1)}); + + aare::fit_scurve(x_view, y_view, y_view_err, par->view(), + par_err->view(), chi2->view(), n_threads); + return py::dict("par"_a = return_image_data(par), + "par_err"_a = return_image_data(par_err), + "chi2"_a = return_image_data(chi2), + "Ndf"_a = y.shape(2) - 2); + + + } else if (y.ndim() == 1) { + auto par = new NDArray({2}); + auto par_err = new NDArray({2}); + + auto y_view = make_view_1d(y); + auto y_view_err = make_view_1d(y_err); + auto x_view = make_view_1d(x); + + double chi2 = 0; + + aare::fit_scurve(x_view, y_view, y_view_err, par->view(), + par_err->view(), chi2); + return py::dict("par"_a = return_image_data(par), + "par_err"_a = return_image_data(par_err), + "chi2"_a = chi2, "Ndf"_a = y.size() - 2); + + } else { + throw std::runtime_error("Data must be 1D or 3D"); + } + }, + R"( +Fit a 1D polynomial to data with error estimates. + +Parameters +---------- +x : array_like + The x values. +y : array_like + The y values. +y_err : array_like + The error in the y values. +n_threads : int, optional + The number of threads to use. Default is 4. 
+)", + py::arg("x"), py::arg("y"), py::arg("y_err"), py::arg("n_threads") = 4); + + + m.def( + "fit_scurve2", + [](py::array_t x, + py::array_t y, + int n_threads) { + if (y.ndim() == 3) { + auto par = new NDArray{}; + + auto x_view = make_view_1d(x); + auto y_view = make_view_3d(y); + *par = aare::fit_scurve2(x_view, y_view, n_threads); + return return_image_data(par); + } else if (y.ndim() == 1) { + auto par = new NDArray{}; + auto x_view = make_view_1d(x); + auto y_view = make_view_1d(y); + *par = aare::fit_scurve2(x_view, y_view); + return return_image_data(par); + } else { + throw std::runtime_error("Data must be 1D or 3D"); + } + }, + py::arg("x"), py::arg("y"), py::arg("n_threads") = 4); + + m.def( + "fit_scurve2", + [](py::array_t x, + py::array_t y, + py::array_t y_err, + int n_threads) { + if (y.ndim() == 3) { + auto par = new NDArray({y.shape(0), y.shape(1), 6}); + + auto par_err = + new NDArray({y.shape(0), y.shape(1), 6}); + + auto y_view = make_view_3d(y); + auto y_view_err = make_view_3d(y_err); + auto x_view = make_view_1d(x); + + auto chi2 = new NDArray({y.shape(0), y.shape(1)}); + + aare::fit_scurve2(x_view, y_view, y_view_err, par->view(), + par_err->view(), chi2->view(), n_threads); + return py::dict("par"_a = return_image_data(par), + "par_err"_a = return_image_data(par_err), + "chi2"_a = return_image_data(chi2), + "Ndf"_a = y.shape(2) - 2); + + + } else if (y.ndim() == 1) { + auto par = new NDArray({6}); + auto par_err = new NDArray({6}); + + auto y_view = make_view_1d(y); + auto y_view_err = make_view_1d(y_err); + auto x_view = make_view_1d(x); + + double chi2 = 0; + + aare::fit_scurve2(x_view, y_view, y_view_err, par->view(), + par_err->view(), chi2); + return py::dict("par"_a = return_image_data(par), + "par_err"_a = return_image_data(par_err), + "chi2"_a = chi2, "Ndf"_a = y.size() - 2); + + } else { + throw std::runtime_error("Data must be 1D or 3D"); + } + }, + R"( +Fit a 1D polynomial to data with error estimates. 
+ Parameters ---------- x : array_like diff --git a/python/src/interpolation.hpp b/python/src/interpolation.hpp index 02742e1..e667015 100644 --- a/python/src/interpolation.hpp +++ b/python/src/interpolation.hpp @@ -8,31 +8,55 @@ #include namespace py = pybind11; + +template +void register_interpolate(py::class_ &interpolator) { + + using ClusterType = Cluster; + + interpolator.def("interpolate", + [](aare::Interpolator &self, + const ClusterVector &clusters) { + auto photons = self.interpolate(clusters); + auto *ptr = new std::vector{photons}; + return return_vector(ptr); + }); +} + void define_interpolation_bindings(py::module &m) { - PYBIND11_NUMPY_DTYPE(aare::Photon, x,y,energy); + PYBIND11_NUMPY_DTYPE(aare::Photon, x, y, energy); - py::class_(m, "Interpolator") - .def(py::init([](py::array_t etacube, py::array_t xbins, - py::array_t ybins, py::array_t ebins) { - return Interpolator(make_view_3d(etacube), make_view_1d(xbins), - make_view_1d(ybins), make_view_1d(ebins)); - })) - .def("get_ietax", [](Interpolator& self){ - auto*ptr = new NDArray{}; - *ptr = self.get_ietax(); - return return_image_data(ptr); - }) - .def("get_ietay", [](Interpolator& self){ - auto*ptr = new NDArray{}; - *ptr = self.get_ietay(); - return return_image_data(ptr); - }) - .def("interpolate", [](Interpolator& self, const ClusterVector& clusters){ - auto photons = self.interpolate(clusters); - auto* ptr = new std::vector{photons}; - return return_vector(ptr); - }); + auto interpolator = + py::class_(m, "Interpolator") + .def(py::init([](py::array_t + etacube, + py::array_t xbins, + py::array_t ybins, + py::array_t ebins) { + return Interpolator(make_view_3d(etacube), make_view_1d(xbins), + make_view_1d(ybins), make_view_1d(ebins)); + })) + .def("get_ietax", + [](Interpolator &self) { + auto *ptr = new NDArray{}; + *ptr = self.get_ietax(); + return return_image_data(ptr); + }) + .def("get_ietay", [](Interpolator &self) { + auto *ptr = new NDArray{}; + *ptr = self.get_ietay(); + return return_image_data(ptr); + }); + + register_interpolate(interpolator); + register_interpolate(interpolator); + register_interpolate(interpolator); + register_interpolate(interpolator); + register_interpolate(interpolator); + register_interpolate(interpolator); // TODO! Evaluate without converting to double m.def( diff --git a/python/src/module.cpp b/python/src/module.cpp index 75fe237..946a41b 100644 --- a/python/src/module.cpp +++ b/python/src/module.cpp @@ -1,20 +1,24 @@ -//Files with bindings to the different classes -#include "file.hpp" -#include "raw_file.hpp" -#include "ctb_raw_file.hpp" -#include "raw_master_file.hpp" -#include "var_cluster.hpp" -#include "pixel_map.hpp" -#include "pedestal.hpp" +// Files with bindings to the different classes + +//New style file naming +#include "bind_ClusterVector.hpp" + +//TODO! 
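The fit_scurve / fit_scurve2 overloads bound above return a plain parameter array when called with (x, y) and a dict with "par", "par_err", "chi2" and "Ndf" when y errors are supplied. A hedged sketch with placeholder data (the shapes follow the binding code; the scurve parameterisation itself is only loosely described in the docstrings):

import numpy as np
from aare import _aare

x = np.linspace(0, 100, 50)
y = np.full((10, 10, 50), 100.0)        # (rows, cols, n_points) placeholder data
y_err = np.ones_like(y)

par = _aare.fit_scurve(x, y, n_threads=4)           # parameters only
res = _aare.fit_scurve(x, y, y_err, n_threads=4)    # with error estimates
print(res["par"].shape, res["par_err"].shape)       # (10, 10, 6) each
print(res["chi2"].shape, res["Ndf"])                # (10, 10) and n_points - 2

# Evaluate the model for one pixel at its fitted parameters
curve = _aare.scurve(x, res["par"][0, 0])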
migrate the other names #include "cluster.hpp" #include "cluster_file.hpp" +#include "ctb_raw_file.hpp" +#include "file.hpp" #include "fit.hpp" #include "interpolation.hpp" #include "raw_sub_file.hpp" - +#include "raw_master_file.hpp" +#include "raw_file.hpp" +#include "pixel_map.hpp" +#include "var_cluster.hpp" +#include "pedestal.hpp" #include "jungfrau_data_file.hpp" -//Pybind stuff +// Pybind stuff #include #include @@ -30,13 +34,63 @@ PYBIND11_MODULE(_aare, m) { define_pixel_map_bindings(m); define_pedestal_bindings(m, "Pedestal_d"); define_pedestal_bindings(m, "Pedestal_f"); - define_cluster_finder_bindings(m); - define_cluster_finder_mt_bindings(m); - define_cluster_file_io_bindings(m); - define_cluster_collector_bindings(m); - define_cluster_file_sink_bindings(m); define_fit_bindings(m); define_interpolation_bindings(m); define_jungfrau_data_file_io_bindings(m); -} \ No newline at end of file + define_cluster_file_io_bindings(m, "Cluster3x3i"); + define_cluster_file_io_bindings(m, "Cluster3x3d"); + define_cluster_file_io_bindings(m, "Cluster3x3f"); + define_cluster_file_io_bindings(m, "Cluster2x2i"); + define_cluster_file_io_bindings(m, "Cluster2x2f"); + define_cluster_file_io_bindings(m, "Cluster2x2d"); + + define_ClusterVector(m, "Cluster3x3i"); + define_ClusterVector(m, "Cluster3x3d"); + define_ClusterVector(m, "Cluster3x3f"); + define_ClusterVector(m, "Cluster2x2i"); + define_ClusterVector(m, "Cluster2x2d"); + define_ClusterVector(m, "Cluster2x2f"); + + define_cluster_finder_bindings(m, "Cluster3x3i"); + define_cluster_finder_bindings(m, "Cluster3x3d"); + define_cluster_finder_bindings(m, "Cluster3x3f"); + define_cluster_finder_bindings(m, "Cluster2x2i"); + define_cluster_finder_bindings(m, "Cluster2x2d"); + define_cluster_finder_bindings(m, "Cluster2x2f"); + + define_cluster_finder_mt_bindings(m, "Cluster3x3i"); + define_cluster_finder_mt_bindings(m, "Cluster3x3d"); + define_cluster_finder_mt_bindings(m, "Cluster3x3f"); + define_cluster_finder_mt_bindings(m, "Cluster2x2i"); + define_cluster_finder_mt_bindings(m, "Cluster2x2d"); + define_cluster_finder_mt_bindings(m, "Cluster2x2f"); + + define_cluster_file_sink_bindings(m, "Cluster3x3i"); + define_cluster_file_sink_bindings(m, "Cluster3x3d"); + define_cluster_file_sink_bindings(m, "Cluster3x3f"); + define_cluster_file_sink_bindings(m, "Cluster2x2i"); + define_cluster_file_sink_bindings(m, "Cluster2x2d"); + define_cluster_file_sink_bindings(m, "Cluster2x2f"); + + define_cluster_collector_bindings(m, "Cluster3x3i"); + define_cluster_collector_bindings(m, "Cluster3x3f"); + define_cluster_collector_bindings(m, "Cluster3x3d"); + define_cluster_collector_bindings(m, "Cluster2x2i"); + define_cluster_collector_bindings(m, "Cluster2x2f"); + define_cluster_collector_bindings(m, "Cluster2x2d"); + + define_cluster(m, "3x3i"); + define_cluster(m, "3x3f"); + define_cluster(m, "3x3d"); + define_cluster(m, "2x2i"); + define_cluster(m, "2x2f"); + define_cluster(m, "2x2d"); + + register_calculate_eta(m); + register_calculate_eta(m); + register_calculate_eta(m); + register_calculate_eta(m); + register_calculate_eta(m); + register_calculate_eta(m); +} diff --git a/python/src/np_helper.hpp b/python/src/np_helper.hpp index 1845196..78166aa 100644 --- a/python/src/np_helper.hpp +++ b/python/src/np_helper.hpp @@ -10,9 +10,10 @@ #include "aare/NDView.hpp" namespace py = pybind11; +using namespace aare; // Pass image data back to python as a numpy array -template +template py::array return_image_data(aare::NDArray *image) { py::capsule 
free_when_done(image, [](void *f) { @@ -40,25 +41,46 @@ template py::array return_vector(std::vector *vec) { } // todo rewrite generic -template auto get_shape_3d(const py::array_t& arr) { +template +auto get_shape_3d(const py::array_t &arr) { return aare::Shape<3>{arr.shape(0), arr.shape(1), arr.shape(2)}; } -template auto make_view_3d(py::array_t& arr) { +template auto make_view_3d(py::array_t &arr) { return aare::NDView(arr.mutable_data(), get_shape_3d(arr)); } -template auto get_shape_2d(const py::array_t& arr) { +template +auto get_shape_2d(const py::array_t &arr) { return aare::Shape<2>{arr.shape(0), arr.shape(1)}; } -template auto get_shape_1d(const py::array_t& arr) { +template +auto get_shape_1d(const py::array_t &arr) { return aare::Shape<1>{arr.shape(0)}; } -template auto make_view_2d(py::array_t& arr) { +template auto make_view_2d(py::array_t &arr) { return aare::NDView(arr.mutable_data(), get_shape_2d(arr)); } -template auto make_view_1d(py::array_t& arr) { +template auto make_view_1d(py::array_t &arr) { return aare::NDView(arr.mutable_data(), get_shape_1d(arr)); -} \ No newline at end of file +} + +template struct fmt_format_trait; // forward declaration + +template +struct fmt_format_trait> { + + static std::string value() { + return fmt::format("T{{{}:x:{}:y:{}:data:}}", + py::format_descriptor::format(), + py::format_descriptor::format(), + fmt::format("({},{}){}", ClusterSizeX, ClusterSizeY, + py::format_descriptor::format())); + } +}; + +template +auto fmt_format = fmt_format_trait::value(); \ No newline at end of file diff --git a/python/src/raw_file.hpp b/python/src/raw_file.hpp index 38b4896..8d72220 100644 --- a/python/src/raw_file.hpp +++ b/python/src/raw_file.hpp @@ -32,7 +32,7 @@ void define_raw_file_io_bindings(py::module &m) { shape.push_back(self.cols()); // return headers from all subfiles - py::array_t header(self.n_mod()); + py::array_t header(self.n_modules()); const uint8_t item_size = self.bytes_per_pixel(); if (item_size == 1) { @@ -61,10 +61,10 @@ void define_raw_file_io_bindings(py::module &m) { // return headers from all subfiles py::array_t header; - if (self.n_mod() == 1) { + if (self.n_modules() == 1) { header = py::array_t(n_frames); } else { - header = py::array_t({self.n_mod(), n_frames}); + header = py::array_t({self.n_modules(), n_frames}); } // py::array_t header({self.n_mod(), n_frames}); @@ -100,7 +100,7 @@ void define_raw_file_io_bindings(py::module &m) { .def_property_readonly("cols", &RawFile::cols) .def_property_readonly("bitdepth", &RawFile::bitdepth) .def_property_readonly("geometry", &RawFile::geometry) - .def_property_readonly("n_mod", &RawFile::n_mod) + .def_property_readonly("n_modules", &RawFile::n_modules) .def_property_readonly("detector_type", &RawFile::detector_type) .def_property_readonly("master", &RawFile::master); } \ No newline at end of file diff --git a/python/tests/conftest.py b/python/tests/conftest.py index 5badf13..fbcfeb3 100644 --- a/python/tests/conftest.py +++ b/python/tests/conftest.py @@ -25,5 +25,10 @@ def pytest_collection_modifyitems(config, items): @pytest.fixture def test_data_path(): - return Path(os.environ["AARE_TEST_DATA"]) + env_value = os.environ.get("AARE_TEST_DATA") + if not env_value: + raise RuntimeError("Environment variable AARE_TEST_DATA is not set or is empty") + + return Path(env_value) + diff --git a/python/tests/test_Cluster.py b/python/tests/test_Cluster.py new file mode 100644 index 0000000..ddaa6f3 --- /dev/null +++ b/python/tests/test_Cluster.py @@ -0,0 +1,110 @@ +import pytest 
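The fmt_format_trait added in np_helper.hpp above is what gives a ClusterVector_* its numpy structured dtype when it is viewed through the buffer protocol: an x field, a y field and a (SizeX, SizeY) data field. A short sketch of the resulting view (field names match the tests in this patch):

import numpy as np
from aare import _aare

cv = _aare.ClusterVector_Cluster3x3i()
cv.push_back(_aare.Cluster3x3i(19, 22, np.ones(9, dtype=np.int32)))

arr = np.array(cv, copy=False)       # zero-copy view of the cluster buffer
print(arr.dtype.names)               # ('x', 'y', 'data')
print(arr[0]['x'], arr[0]['y'])      # 19 22
print(arr[0]['data'].shape)          # (3, 3) block of int32 pixel values
print(arr['data'].sum(axis=(1, 2)))  # per-cluster sums, equivalent to cv.sum()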
+import numpy as np + +from aare import _aare #import the C++ module +from conftest import test_data_path + + +def test_cluster_vector_can_be_converted_to_numpy(): + cv = _aare.ClusterVector_Cluster3x3i() + arr = np.array(cv, copy=False) + assert arr.shape == (0,) # 4 for x, y, size, energy and 9 for the cluster data + + +def test_ClusterVector(): + """Test ClusterVector""" + + clustervector = _aare.ClusterVector_Cluster3x3i() + assert clustervector.cluster_size_x == 3 + assert clustervector.cluster_size_y == 3 + assert clustervector.item_size() == 4+9*4 + assert clustervector.frame_number == 0 + assert clustervector.size == 0 + + cluster = _aare.Cluster3x3i(0,0,np.ones(9, dtype=np.int32)) + + clustervector.push_back(cluster) + assert clustervector.size == 1 + + with pytest.raises(TypeError): # Or use the appropriate exception type + clustervector.push_back(_aare.Cluster2x2i(0,0,np.ones(4, dtype=np.int32))) + + with pytest.raises(TypeError): + clustervector.push_back(_aare.Cluster3x3f(0,0,np.ones(9, dtype=np.float32))) + +def test_Interpolator(): + """Test Interpolator""" + + ebins = np.linspace(0,10, 20, dtype=np.float64) + xbins = np.linspace(0, 5, 30, dtype=np.float64) + ybins = np.linspace(0, 5, 30, dtype=np.float64) + + etacube = np.zeros(shape=[30, 30, 20], dtype=np.float64) + interpolator = _aare.Interpolator(etacube, xbins, ybins, ebins) + + assert interpolator.get_ietax().shape == (30,30,20) + assert interpolator.get_ietay().shape == (30,30,20) + clustervector = _aare.ClusterVector_Cluster3x3i() + + cluster = _aare.Cluster3x3i(0,0, np.ones(9, dtype=np.int32)) + clustervector.push_back(cluster) + + interpolated_photons = interpolator.interpolate(clustervector) + + assert interpolated_photons.size == 1 + + assert interpolated_photons[0]["x"] == -1 + assert interpolated_photons[0]["y"] == -1 + assert interpolated_photons[0]["energy"] == 4 #eta_sum = 4, dx, dy = -1,-1 m_ietax = 0, m_ietay = 0 + + clustervector = _aare.ClusterVector_Cluster2x2i() + + cluster = _aare.Cluster2x2i(0,0, np.ones(4, dtype=np.int32)) + clustervector.push_back(cluster) + + interpolated_photons = interpolator.interpolate(clustervector) + + assert interpolated_photons.size == 1 + + assert interpolated_photons[0]["x"] == 0 + assert interpolated_photons[0]["y"] == 0 + assert interpolated_photons[0]["energy"] == 4 + + + +def test_calculate_eta(): + """Calculate Eta""" + clusters = _aare.ClusterVector_Cluster3x3i() + clusters.push_back(_aare.Cluster3x3i(0,0, np.ones(9, dtype=np.int32))) + clusters.push_back(_aare.Cluster3x3i(0,0, np.array([1,1,1,2,2,2,3,3,3]))) + + eta2 = _aare.calculate_eta2(clusters) + + assert eta2.shape == (2,2) + assert eta2[0,0] == 0.5 + assert eta2[0,1] == 0.5 + assert eta2[1,0] == 0.5 + assert eta2[1,1] == 0.6 #1/5 + +def test_cluster_finder(): + """Test ClusterFinder""" + + clusterfinder = _aare.ClusterFinder_Cluster3x3i([100,100]) + + #frame = np.random.rand(100,100) + frame = np.zeros(shape=[100,100]) + + clusterfinder.find_clusters(frame) + + clusters = clusterfinder.steal_clusters(False) #conversion does not work + + assert clusters.size == 0 + + + + + + + + + diff --git a/python/tests/test_ClusterFile.py b/python/tests/test_ClusterFile.py new file mode 100644 index 0000000..4126a6c --- /dev/null +++ b/python/tests/test_ClusterFile.py @@ -0,0 +1,64 @@ + +import pytest +import numpy as np +import boost_histogram as bh +import time +from pathlib import Path +import pickle + +from aare import ClusterFile +from conftest import test_data_path + +@pytest.mark.files +def 
test_cluster_file(test_data_path): + """Test ClusterFile""" + f = ClusterFile(test_data_path / "clust/single_frame_97_clustrers.clust") + cv = f.read_clusters(10) #conversion does not work + + + assert cv.frame_number == 135 + assert cv.size == 10 + + #Known data + #frame_number, num_clusters [135] 97 + #[ 1 200] [0 1 2 3 4 5 6 7 8] + #[ 2 201] [ 9 10 11 12 13 14 15 16 17] + #[ 3 202] [18 19 20 21 22 23 24 25 26] + #[ 4 203] [27 28 29 30 31 32 33 34 35] + #[ 5 204] [36 37 38 39 40 41 42 43 44] + #[ 6 205] [45 46 47 48 49 50 51 52 53] + #[ 7 206] [54 55 56 57 58 59 60 61 62] + #[ 8 207] [63 64 65 66 67 68 69 70 71] + #[ 9 208] [72 73 74 75 76 77 78 79 80] + #[ 10 209] [81 82 83 84 85 86 87 88 89] + + #conversion to numpy array + arr = np.array(cv, copy = False) + + assert arr.size == 10 + for i in range(10): + assert arr[i]['x'] == i+1 + +@pytest.mark.files +def test_read_clusters_and_fill_histogram(test_data_path): + # Create the histogram + n_bins = 100 + xmin = -100 + xmax = 1e4 + hist_aare = bh.Histogram(bh.axis.Regular(n_bins, xmin, xmax)) + + fname = test_data_path / "clust/beam_En700eV_-40deg_300V_10us_d0_f0_100.clust" + + #Read clusters and fill the histogram with pixel values + with ClusterFile(fname, chunk_size = 10000) as f: + for clusters in f: + arr = np.array(clusters, copy = False) + hist_aare.fill(arr['data'].flat) + + + #Load the histogram from the pickle file + with open(fname.with_suffix('.pkl'), 'rb') as f: + hist_py = pickle.load(f) + + #Compare the two histograms + assert hist_aare == hist_py \ No newline at end of file diff --git a/python/tests/test_ClusterVector.py b/python/tests/test_ClusterVector.py new file mode 100644 index 0000000..b64aeef --- /dev/null +++ b/python/tests/test_ClusterVector.py @@ -0,0 +1,54 @@ +import pytest +import numpy as np +import boost_histogram as bh +import time +from pathlib import Path +import pickle + +from aare import ClusterFile +from aare import _aare +from conftest import test_data_path + + +def test_create_cluster_vector(): + cv = _aare.ClusterVector_Cluster3x3i() + assert cv.cluster_size_x == 3 + assert cv.cluster_size_y == 3 + assert cv.size == 0 + + +def test_push_back_on_cluster_vector(): + cv = _aare.ClusterVector_Cluster2x2i() + assert cv.cluster_size_x == 2 + assert cv.cluster_size_y == 2 + assert cv.size == 0 + + cluster = _aare.Cluster2x2i(19, 22, np.ones(4, dtype=np.int32)) + cv.push_back(cluster) + assert cv.size == 1 + + arr = np.array(cv, copy=False) + assert arr[0]['x'] == 19 + assert arr[0]['y'] == 22 + + +def test_make_a_hitmap_from_cluster_vector(): + cv = _aare.ClusterVector_Cluster3x3i() + + # Push back 4 clusters with different positions + cv.push_back(_aare.Cluster3x3i(0, 0, np.ones(9, dtype=np.int32))) + cv.push_back(_aare.Cluster3x3i(1, 1, np.ones(9, dtype=np.int32))) + cv.push_back(_aare.Cluster3x3i(1, 1, np.ones(9, dtype=np.int32))) + cv.push_back(_aare.Cluster3x3i(2, 2, np.ones(9, dtype=np.int32))) + + ref = np.zeros((5, 5), dtype=np.int32) + ref[0,0] = 1 + ref[1,1] = 2 + ref[2,2] = 1 + + + img = _aare.hitmap((5,5), cv) + # print(img) + # print(ref) + assert (img == ref).all() + \ No newline at end of file diff --git a/python/tests/test_RawSubFile.py b/python/tests/test_RawSubFile.py index a5eea91..aa4721a 100644 --- a/python/tests/test_RawSubFile.py +++ b/python/tests/test_RawSubFile.py @@ -5,32 +5,35 @@ from aare import RawSubFile, DetectorType @pytest.mark.files def test_read_a_jungfrau_RawSubFile(test_data_path): + + # Starting with f1 there is now 7 frames left in the series of files with 
RawSubFile(test_data_path / "raw/jungfrau/jungfrau_single_d0_f1_0.raw", DetectorType.Jungfrau, 512, 1024, 16) as f: - assert f.frames_in_file == 3 + assert f.frames_in_file == 7 headers, frames = f.read() - assert headers.size == 3 - assert frames.shape == (3, 512, 1024) + assert headers.size == 7 + assert frames.shape == (7, 512, 1024) - # Frame numbers in this file should be 4, 5, 6 - for i,h in zip(range(4,7,1), headers): + + for i,h in zip(range(4,11,1), headers): assert h["frameNumber"] == i # Compare to canned data using numpy data = np.load(test_data_path / "raw/jungfrau/jungfrau_single_0.npy") - assert np.all(data[3:6] == frames) + assert np.all(data[3:] == frames) @pytest.mark.files def test_iterate_over_a_jungfrau_RawSubFile(test_data_path): data = np.load(test_data_path / "raw/jungfrau/jungfrau_single_0.npy") + # Given the first subfile in a series we can read all frames from f0, f1, f2...fN with RawSubFile(test_data_path / "raw/jungfrau/jungfrau_single_d0_f0_0.raw", DetectorType.Jungfrau, 512, 1024, 16) as f: i = 0 for header, frame in f: assert header["frameNumber"] == i+1 assert np.all(frame == data[i]) i += 1 - assert i == 3 - assert header["frameNumber"] == 3 \ No newline at end of file + assert i == 10 + assert header["frameNumber"] == 10 diff --git a/src/CalculateEta.test.cpp b/src/CalculateEta.test.cpp new file mode 100644 index 0000000..820ab44 --- /dev/null +++ b/src/CalculateEta.test.cpp @@ -0,0 +1,127 @@ +/************************************************ + * @file CalculateEta.test.cpp + * @short test case to calculate_eta2 + ***********************************************/ + +#include "aare/CalculateEta.hpp" +#include "aare/Cluster.hpp" +#include "aare/ClusterFile.hpp" + +// #include "catch.hpp" +#include +#include +#include + +using namespace aare; + +using ClusterTypes = + std::variant, Cluster, Cluster, + Cluster, Cluster>; + +auto get_test_parameters() { + return GENERATE( + std::make_tuple(ClusterTypes{Cluster{0, 0, {1, 2, 3, 1}}}, + Eta2{2. / 3, 3. / 4, + static_cast(corner::cBottomLeft), 7}), + std::make_tuple( + ClusterTypes{Cluster{0, 0, {1, 2, 3, 4, 5, 6, 1, 2, 7}}}, + Eta2{6. / 11, 2. / 7, static_cast(corner::cTopRight), + 20}), + std::make_tuple(ClusterTypes{Cluster{ + 0, 0, {1, 6, 7, 6, 5, 4, 3, 2, 1, 2, 8, 9, 8, + 1, 4, 5, 6, 7, 8, 4, 1, 1, 1, 1, 1}}}, + Eta2{8. / 17, 7. / 15, 9, 30}), + std::make_tuple( + ClusterTypes{Cluster{0, 0, {1, 4, 7, 2, 5, 6, 4, 3}}}, + Eta2{4. / 10, 4. / 11, 1, 21}), + std::make_tuple( + ClusterTypes{Cluster{0, 0, {1, 3, 2, 3, 4, 2}}}, + Eta2{3. / 5, 2. 
/ 5, 1, 11})); +} + +TEST_CASE("compute_largest_2x2_subcluster", "[eta_calculation]") { + auto [cluster, expected_eta] = get_test_parameters(); + + auto [sum, index] = std::visit( + [](const auto &clustertype) { return clustertype.max_sum_2x2(); }, + cluster); + CHECK(expected_eta.c == index); + CHECK(expected_eta.sum == sum); +} + +TEST_CASE("calculate_eta2", "[eta_calculation]") { + + auto [cluster, expected_eta] = get_test_parameters(); + + auto eta = std::visit( + [](const auto &clustertype) { return calculate_eta2(clustertype); }, + cluster); + + CHECK(eta.x == expected_eta.x); + CHECK(eta.y == expected_eta.y); + CHECK(eta.c == expected_eta.c); + CHECK(eta.sum == expected_eta.sum); +} + +// 3x3 cluster layout (rotated to match the cBottomLeft enum): +// 6, 7, 8 +// 3, 4, 5 +// 0, 1, 2 + +TEST_CASE("Calculate eta2 for a 3x3 int32 cluster with the largest 2x2 sum in " + "the bottom left", + "[eta_calculation]") { + + // Create a 3x3 cluster + Cluster cl; + cl.x = 0; + cl.y = 0; + cl.data[0] = 30; + cl.data[1] = 23; + cl.data[2] = 5; + cl.data[3] = 20; + cl.data[4] = 50; + cl.data[5] = 3; + cl.data[6] = 8; + cl.data[7] = 2; + cl.data[8] = 3; + + // 8, 2, 3 + // 20, 50, 3 + // 30, 23, 5 + + auto eta = calculate_eta2(cl); + CHECK(eta.c == static_cast(corner::cBottomLeft)); + CHECK(eta.x == 50.0 / (20 + 50)); // 4/(3+4) + CHECK(eta.y == 50.0 / (23 + 50)); // 4/(1+4) + CHECK(eta.sum == 30 + 23 + 20 + 50); +} + +TEST_CASE("Calculate eta2 for a 3x3 int32 cluster with the largest 2x2 sum in " + "the top left", + "[eta_calculation]") { + + // Create a 3x3 cluster + Cluster cl; + cl.x = 0; + cl.y = 0; + cl.data[0] = 8; + cl.data[1] = 12; + cl.data[2] = 5; + cl.data[3] = 77; + cl.data[4] = 80; + cl.data[5] = 3; + cl.data[6] = 82; + cl.data[7] = 91; + cl.data[8] = 3; + + // 82, 91, 3 + // 77, 80, 3 + // 8, 12, 5 + + auto eta = calculate_eta2(cl); + CHECK(eta.c == static_cast(corner::cTopLeft)); + CHECK(eta.x == 80. 
/ (77 + 80)); // 4/(3+4) + CHECK(eta.y == 91.0 / (91 + 80)); // 7/(7+4) + CHECK(eta.sum == 77 + 80 + 82 + 91); +} diff --git a/src/Cluster.test.cpp b/src/Cluster.test.cpp new file mode 100644 index 0000000..ba9cda1 --- /dev/null +++ b/src/Cluster.test.cpp @@ -0,0 +1,21 @@ +/************************************************ + * @file test-Cluster.cpp + * @short test case for generic Cluster, ClusterVector, and calculate_eta2 + ***********************************************/ + +#include "aare/Cluster.hpp" +#include "aare/CalculateEta.hpp" +#include "aare/ClusterFile.hpp" + +// #include "catch.hpp" +#include +#include +#include + +using namespace aare; + +TEST_CASE("Test sum of Cluster", "[.cluster]") { + Cluster cluster{0, 0, {1, 2, 3, 4}}; + + CHECK(cluster.sum() == 10); +} \ No newline at end of file diff --git a/src/ClusterFile.test.cpp b/src/ClusterFile.test.cpp index 4152ce0..6254b5d 100644 --- a/src/ClusterFile.test.cpp +++ b/src/ClusterFile.test.cpp @@ -1,35 +1,39 @@ #include "aare/ClusterFile.hpp" #include "test_config.hpp" - #include "aare/defs.hpp" +#include #include #include - - - +using aare::Cluster; using aare::ClusterFile; +using aare::ClusterVector; -TEST_CASE("Read one frame from a a cluster file", "[.files]") { +TEST_CASE("Read one frame from a cluster file", "[.files]") { //We know that the frame has 97 clusters auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust"; REQUIRE(std::filesystem::exists(fpath)); - ClusterFile f(fpath); + ClusterFile> f(fpath); auto clusters = f.read_frame(); - REQUIRE(clusters.size() == 97); - REQUIRE(clusters.frame_number() == 135); + CHECK(clusters.size() == 97); + CHECK(clusters.frame_number() == 135); + CHECK(clusters[0].x == 1); + CHECK(clusters[0].y == 200); + int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + CHECK(std::equal(std::begin(clusters[0].data), std::end(clusters[0].data), + std::begin(expected_cluster_data))); } TEST_CASE("Read one frame using ROI", "[.files]") { - //We know that the frame has 97 clusters + // We know that the frame has 97 clusters auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust"; REQUIRE(std::filesystem::exists(fpath)); - ClusterFile f(fpath); + ClusterFile> f(fpath); aare::ROI roi; roi.xmin = 0; roi.xmax = 50; @@ -40,45 +44,308 @@ TEST_CASE("Read one frame using ROI", "[.files]") { REQUIRE(clusters.size() == 49); REQUIRE(clusters.frame_number() == 135); - //Check that all clusters are within the ROI + // Check that all clusters are within the ROI for (size_t i = 0; i < clusters.size(); i++) { - auto c = clusters.at(i); + auto c = clusters[i]; REQUIRE(c.x >= roi.xmin); REQUIRE(c.x <= roi.xmax); REQUIRE(c.y >= roi.ymin); REQUIRE(c.y <= roi.ymax); } + CHECK(clusters[0].x == 1); + CHECK(clusters[0].y == 200); + int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + CHECK(std::equal(std::begin(clusters[0].data), std::end(clusters[0].data), + std::begin(expected_cluster_data))); } TEST_CASE("Read clusters from single frame file", "[.files]") { + // frame_number, num_clusters [135] 97 + // [ 1 200] [0 1 2 3 4 5 6 7 8] + // [ 2 201] [ 9 10 11 12 13 14 15 16 17] + // [ 3 202] [18 19 20 21 22 23 24 25 26] + // [ 4 203] [27 28 29 30 31 32 33 34 35] + // [ 5 204] [36 37 38 39 40 41 42 43 44] + // [ 6 205] [45 46 47 48 49 50 51 52 53] + // [ 7 206] [54 55 56 57 58 59 60 61 62] + // [ 8 207] [63 64 65 66 67 68 69 70 71] + // [ 9 208] [72 73 74 75 76 77 78 79 80] + // [ 10 209] [81 82 83 84 85 86 87 88 89] + // [ 11 210] [90 91 92 93 94 95 96 97 98] + 
// [ 12 211] [ 99 100 101 102 103 104 105 106 107] + // [ 13 212] [108 109 110 111 112 113 114 115 116] + // [ 14 213] [117 118 119 120 121 122 123 124 125] + // [ 15 214] [126 127 128 129 130 131 132 133 134] + // [ 16 215] [135 136 137 138 139 140 141 142 143] + // [ 17 216] [144 145 146 147 148 149 150 151 152] + // [ 18 217] [153 154 155 156 157 158 159 160 161] + // [ 19 218] [162 163 164 165 166 167 168 169 170] + // [ 20 219] [171 172 173 174 175 176 177 178 179] + // [ 21 220] [180 181 182 183 184 185 186 187 188] + // [ 22 221] [189 190 191 192 193 194 195 196 197] + // [ 23 222] [198 199 200 201 202 203 204 205 206] + // [ 24 223] [207 208 209 210 211 212 213 214 215] + // [ 25 224] [216 217 218 219 220 221 222 223 224] + // [ 26 225] [225 226 227 228 229 230 231 232 233] + // [ 27 226] [234 235 236 237 238 239 240 241 242] + // [ 28 227] [243 244 245 246 247 248 249 250 251] + // [ 29 228] [252 253 254 255 256 257 258 259 260] + // [ 30 229] [261 262 263 264 265 266 267 268 269] + // [ 31 230] [270 271 272 273 274 275 276 277 278] + // [ 32 231] [279 280 281 282 283 284 285 286 287] + // [ 33 232] [288 289 290 291 292 293 294 295 296] + // [ 34 233] [297 298 299 300 301 302 303 304 305] + // [ 35 234] [306 307 308 309 310 311 312 313 314] + // [ 36 235] [315 316 317 318 319 320 321 322 323] + // [ 37 236] [324 325 326 327 328 329 330 331 332] + // [ 38 237] [333 334 335 336 337 338 339 340 341] + // [ 39 238] [342 343 344 345 346 347 348 349 350] + // [ 40 239] [351 352 353 354 355 356 357 358 359] + // [ 41 240] [360 361 362 363 364 365 366 367 368] + // [ 42 241] [369 370 371 372 373 374 375 376 377] + // [ 43 242] [378 379 380 381 382 383 384 385 386] + // [ 44 243] [387 388 389 390 391 392 393 394 395] + // [ 45 244] [396 397 398 399 400 401 402 403 404] + // [ 46 245] [405 406 407 408 409 410 411 412 413] + // [ 47 246] [414 415 416 417 418 419 420 421 422] + // [ 48 247] [423 424 425 426 427 428 429 430 431] + // [ 49 248] [432 433 434 435 436 437 438 439 440] + // [ 50 249] [441 442 443 444 445 446 447 448 449] + // [ 51 250] [450 451 452 453 454 455 456 457 458] + // [ 52 251] [459 460 461 462 463 464 465 466 467] + // [ 53 252] [468 469 470 471 472 473 474 475 476] + // [ 54 253] [477 478 479 480 481 482 483 484 485] + // [ 55 254] [486 487 488 489 490 491 492 493 494] + // [ 56 255] [495 496 497 498 499 500 501 502 503] + // [ 57 256] [504 505 506 507 508 509 510 511 512] + // [ 58 257] [513 514 515 516 517 518 519 520 521] + // [ 59 258] [522 523 524 525 526 527 528 529 530] + // [ 60 259] [531 532 533 534 535 536 537 538 539] + // [ 61 260] [540 541 542 543 544 545 546 547 548] + // [ 62 261] [549 550 551 552 553 554 555 556 557] + // [ 63 262] [558 559 560 561 562 563 564 565 566] + // [ 64 263] [567 568 569 570 571 572 573 574 575] + // [ 65 264] [576 577 578 579 580 581 582 583 584] + // [ 66 265] [585 586 587 588 589 590 591 592 593] + // [ 67 266] [594 595 596 597 598 599 600 601 602] + // [ 68 267] [603 604 605 606 607 608 609 610 611] + // [ 69 268] [612 613 614 615 616 617 618 619 620] + // [ 70 269] [621 622 623 624 625 626 627 628 629] + // [ 71 270] [630 631 632 633 634 635 636 637 638] + // [ 72 271] [639 640 641 642 643 644 645 646 647] + // [ 73 272] [648 649 650 651 652 653 654 655 656] + // [ 74 273] [657 658 659 660 661 662 663 664 665] + // [ 75 274] [666 667 668 669 670 671 672 673 674] + // [ 76 275] [675 676 677 678 679 680 681 682 683] + // [ 77 276] [684 685 686 687 688 689 690 691 692] + // [ 78 277] [693 694 695 696 697 698 699 700 701] + // 
[ 79 278] [702 703 704 705 706 707 708 709 710] + // [ 80 279] [711 712 713 714 715 716 717 718 719] + // [ 81 280] [720 721 722 723 724 725 726 727 728] + // [ 82 281] [729 730 731 732 733 734 735 736 737] + // [ 83 282] [738 739 740 741 742 743 744 745 746] + // [ 84 283] [747 748 749 750 751 752 753 754 755] + // [ 85 284] [756 757 758 759 760 761 762 763 764] + // [ 86 285] [765 766 767 768 769 770 771 772 773] + // [ 87 286] [774 775 776 777 778 779 780 781 782] + // [ 88 287] [783 784 785 786 787 788 789 790 791] + // [ 89 288] [792 793 794 795 796 797 798 799 800] + // [ 90 289] [801 802 803 804 805 806 807 808 809] + // [ 91 290] [810 811 812 813 814 815 816 817 818] + // [ 92 291] [819 820 821 822 823 824 825 826 827] + // [ 93 292] [828 829 830 831 832 833 834 835 836] + // [ 94 293] [837 838 839 840 841 842 843 844 845] + // [ 95 294] [846 847 848 849 850 851 852 853 854] + // [ 96 295] [855 856 857 858 859 860 861 862 863] + // [ 97 296] [864 865 866 867 868 869 870 871 872] + auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust"; REQUIRE(std::filesystem::exists(fpath)); SECTION("Read fewer clusters than available") { - ClusterFile f(fpath); + ClusterFile> f(fpath); auto clusters = f.read_clusters(50); REQUIRE(clusters.size() == 50); - REQUIRE(clusters.frame_number() == 135); + REQUIRE(clusters.frame_number() == 135); + int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + REQUIRE(clusters[0].x == 1); + REQUIRE(clusters[0].y == 200); + CHECK(std::equal(std::begin(clusters[0].data), + std::end(clusters[0].data), + std::begin(expected_cluster_data))); } SECTION("Read more clusters than available") { - ClusterFile f(fpath); + ClusterFile> f(fpath); // 100 is the maximum number of clusters read auto clusters = f.read_clusters(100); REQUIRE(clusters.size() == 97); REQUIRE(clusters.frame_number() == 135); + int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + REQUIRE(clusters[0].x == 1); + REQUIRE(clusters[0].y == 200); + CHECK(std::equal(std::begin(clusters[0].data), + std::end(clusters[0].data), + std::begin(expected_cluster_data))); } SECTION("Read all clusters") { - ClusterFile f(fpath); + ClusterFile> f(fpath); auto clusters = f.read_clusters(97); REQUIRE(clusters.size() == 97); REQUIRE(clusters.frame_number() == 135); + REQUIRE(clusters[0].x == 1); + REQUIRE(clusters[0].y == 200); + int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + CHECK(std::equal(std::begin(clusters[0].data), + std::end(clusters[0].data), + std::begin(expected_cluster_data))); + } +} + +TEST_CASE("Read clusters from single frame file with ROI", "[.files]") { + auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust"; + REQUIRE(std::filesystem::exists(fpath)); + + ClusterFile> f(fpath); + + aare::ROI roi; + roi.xmin = 0; + roi.xmax = 50; + roi.ymin = 200; + roi.ymax = 249; + f.set_roi(roi); + + auto clusters = f.read_clusters(10); + + CHECK(clusters.size() == 10); + CHECK(clusters.frame_number() == 135); + CHECK(clusters[0].x == 1); + CHECK(clusters[0].y == 200); + int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + CHECK(std::equal(std::begin(clusters[0].data), std::end(clusters[0].data), + std::begin(expected_cluster_data))); +} + +TEST_CASE("Read cluster from multiple frame file", "[.files]") { + + using ClusterType = Cluster; + + auto fpath = + test_data_path() / "clust" / "Two_frames_2x2double_test_clusters.clust"; + + REQUIRE(std::filesystem::exists(fpath)); + + // Two_frames_2x2double_test_clusters.clust + // 
frame number, num_clusters 0, 4 + //[10, 20], {0. ,0., 0., 0.} + //[11, 30], {1., 1., 1., 1.} + //[12, 40], {2., 2., 2., 2.} + //[13, 50], {3., 3., 3., 3.} + // 1,4 + //[10, 20], {4., 4., 4., 4.} + //[11, 30], {5., 5., 5., 5.} + //[12, 40], {6., 6., 6., 6.} + //[13, 50], {7., 7., 7., 7.} + + SECTION("Read clusters from both frames") { + ClusterFile f(fpath); + auto clusters = f.read_clusters(2); + REQUIRE(clusters.size() == 2); + REQUIRE(clusters.frame_number() == 0); + + auto clusters1 = f.read_clusters(3); + + REQUIRE(clusters1.size() == 3); + REQUIRE(clusters1.frame_number() == 1); } + SECTION("Read all clusters") { + ClusterFile f(fpath); + auto clusters = f.read_clusters(8); + REQUIRE(clusters.size() == 8); + REQUIRE(clusters.frame_number() == 1); + } - + SECTION("Read clusters from one frame") { + ClusterFile f(fpath); + auto clusters = f.read_clusters(2); + REQUIRE(clusters.size() == 2); + REQUIRE(clusters.frame_number() == 0); + + auto clusters1 = f.read_clusters(1); + + REQUIRE(clusters1.size() == 1); + REQUIRE(clusters1.frame_number() == 0); + } +} + +TEST_CASE("Write cluster with potential padding", "[.files][.ClusterFile]") { + + using ClusterType = Cluster; + + REQUIRE(std::filesystem::exists(test_data_path() / "clust")); + + auto fpath = test_data_path() / "clust" / "single_frame_2_clusters.clust"; + + ClusterFile file(fpath, 1000, "w"); + + ClusterVector clustervec(2); + int16_t coordinate = 5; + clustervec.push_back(ClusterType{ + coordinate, coordinate, {0., 0., 0., 0., 0., 0., 0., 0., 0.}}); + clustervec.push_back(ClusterType{ + coordinate, coordinate, {0., 0., 0., 0., 0., 0., 0., 0., 0.}}); + + file.write_frame(clustervec); + + file.close(); + + file.open("r"); + + auto read_cluster_vector = file.read_frame(); + + CHECK(read_cluster_vector.size() == 2); + CHECK(read_cluster_vector.frame_number() == 0); + + CHECK(read_cluster_vector[0].x == clustervec[0].x); + CHECK(read_cluster_vector[0].y == clustervec[0].y); + CHECK(std::equal( + clustervec[0].data.begin(), clustervec[0].data.end(), + read_cluster_vector[0].data.begin(), [](double a, double b) { + return std::abs(a - b) < std::numeric_limits::epsilon(); + })); + + CHECK(read_cluster_vector[1].x == clustervec[1].x); + CHECK(read_cluster_vector[1].y == clustervec[1].y); + CHECK(std::equal( + clustervec[1].data.begin(), clustervec[1].data.end(), + read_cluster_vector[1].data.begin(), [](double a, double b) { + return std::abs(a - b) < std::numeric_limits::epsilon(); + })); +} + +TEST_CASE("Read frame and modify cluster data", "[.files][.ClusterFile]") { + auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust"; + REQUIRE(std::filesystem::exists(fpath)); + + ClusterFile> f(fpath); + + auto clusters = f.read_frame(); + CHECK(clusters.size() == 97); + CHECK(clusters.frame_number() == 135); + + int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + clusters.push_back( + Cluster{0, 0, {0, 1, 2, 3, 4, 5, 6, 7, 8}}); + + CHECK(clusters.size() == 98); + CHECK(clusters[0].x == 1); + CHECK(clusters[0].y == 200); + + CHECK(std::equal(std::begin(clusters[0].data), std::end(clusters[0].data), + std::begin(expected_cluster_data))); } diff --git a/src/ClusterFinder.test.cpp b/src/ClusterFinder.test.cpp index 768e632..8989581 100644 --- a/src/ClusterFinder.test.cpp +++ b/src/ClusterFinder.test.cpp @@ -1,19 +1,18 @@ #include "aare/ClusterFinder.hpp" #include "aare/Pedestal.hpp" -#include #include +#include #include #include using namespace aare; -//TODO! Find a way to test the cluster finder - - +// TODO! 
Find a way to test the cluster finder // class ClusterFinderUnitTest : public ClusterFinder { // public: -// ClusterFinderUnitTest(int cluster_sizeX, int cluster_sizeY, double nSigma = 5.0, double threshold = 0.0) +// ClusterFinderUnitTest(int cluster_sizeX, int cluster_sizeY, double nSigma +// = 5.0, double threshold = 0.0) // : ClusterFinder(cluster_sizeX, cluster_sizeY, nSigma, threshold) {} // double get_c2() { return c2; } // double get_c3() { return c3; } @@ -37,8 +36,8 @@ using namespace aare; // REQUIRE_THAT(cf.get_c3(), Catch::Matchers::WithinRel(c3, 1e-9)); // } -TEST_CASE("Construct a cluster finder"){ - ClusterFinder clusterFinder({400,400}, {3,3}); +TEST_CASE("Construct a cluster finder") { + ClusterFinder clusterFinder({400, 400}); // REQUIRE(clusterFinder.get_cluster_sizeX() == 3); // REQUIRE(clusterFinder.get_cluster_sizeY() == 3); // REQUIRE(clusterFinder.get_threshold() == 1); @@ -49,16 +48,17 @@ TEST_CASE("Construct a cluster finder"){ // aare::Pedestal pedestal(10, 10, 5); // NDArray frame({10, 10}); // frame = 0; -// ClusterFinder clusterFinder(3, 3, 1, 1); // 3x3 cluster, 1 nSigma, 1 threshold +// ClusterFinder clusterFinder(3, 3, 1, 1); // 3x3 cluster, 1 nSigma, 1 +// threshold -// auto clusters = clusterFinder.find_clusters_without_threshold(frame.span(), pedestal); +// auto clusters = +// clusterFinder.find_clusters_without_threshold(frame.span(), pedestal); // REQUIRE(clusters.size() == 0); // frame(5, 5) = 10; -// clusters = clusterFinder.find_clusters_without_threshold(frame.span(), pedestal); -// REQUIRE(clusters.size() == 1); -// REQUIRE(clusters[0].x == 5); +// clusters = clusterFinder.find_clusters_without_threshold(frame.span(), +// pedestal); REQUIRE(clusters.size() == 1); REQUIRE(clusters[0].x == 5); // REQUIRE(clusters[0].y == 5); // for (int i = 0; i < 3; i++) { // for (int j = 0; j < 3; j++) { diff --git a/src/ClusterFinderMT.test.cpp b/src/ClusterFinderMT.test.cpp new file mode 100644 index 0000000..9289592 --- /dev/null +++ b/src/ClusterFinderMT.test.cpp @@ -0,0 +1,99 @@ + +#include "aare/ClusterFinderMT.hpp" +#include "aare/Cluster.hpp" +#include "aare/ClusterCollector.hpp" +#include "aare/File.hpp" + +#include "test_config.hpp" + +#include +#include +#include + +using namespace aare; + +// wrapper function to access private member variables for testing +template +class ClusterFinderMTWrapper + : public ClusterFinderMT { + + public: + ClusterFinderMTWrapper(Shape<2> image_size, PEDESTAL_TYPE nSigma = 5.0, + size_t capacity = 2000, size_t n_threads = 3) + : ClusterFinderMT( + image_size, nSigma, capacity, n_threads) {} + + size_t get_m_input_queues_size() const { + return this->m_input_queues.size(); + } + + size_t get_m_output_queues_size() const { + return this->m_output_queues.size(); + } + + size_t get_m_cluster_finders_size() const { + return this->m_cluster_finders.size(); + } + + bool m_output_queues_are_empty() const { + for (auto &queue : this->m_output_queues) { + if (!queue->isEmpty()) + return false; + } + return true; + } + + bool m_input_queues_are_empty() const { + for (auto &queue : this->m_input_queues) { + if (!queue->isEmpty()) + return false; + } + return true; + } + + bool m_sink_is_empty() const { return this->m_sink.isEmpty(); } + + size_t m_sink_size() const { return this->m_sink.sizeGuess(); } +}; + +TEST_CASE("multithreaded cluster finder", "[.files][.ClusterFinder]") { + auto fpath = "/mnt/sls_det_storage/matterhorn_data/aare_test_data/" + "Moench03new/cu_half_speed_master_4.json"; + + File file(fpath); + + size_t n_threads 
= 2; + size_t n_frames_pd = 10; + + using ClusterType = Cluster; + + ClusterFinderMTWrapper cf( + {static_cast(file.rows()), static_cast(file.cols())}, + 5, 2000, n_threads); // no idea what frame type is!!! default uint16_t + + CHECK(cf.get_m_input_queues_size() == n_threads); + CHECK(cf.get_m_output_queues_size() == n_threads); + CHECK(cf.get_m_cluster_finders_size() == n_threads); + CHECK(cf.m_output_queues_are_empty() == true); + CHECK(cf.m_input_queues_are_empty() == true); + + for (size_t i = 0; i < n_frames_pd; ++i) { + cf.find_clusters(file.read_frame().view()); + } + + cf.stop(); + + CHECK(cf.m_output_queues_are_empty() == true); + CHECK(cf.m_input_queues_are_empty() == true); + + CHECK(cf.m_sink_size() == n_frames_pd); + ClusterCollector clustercollector(&cf); + + clustercollector.stop(); + + CHECK(cf.m_sink_size() == 0); + + auto clustervec = clustercollector.steal_clusters(); + // CHECK(clustervec.size() == ) //dont know how many clusters to expect +} diff --git a/src/ClusterVector.test.cpp b/src/ClusterVector.test.cpp index 8ca3b1e..1214b6b 100644 --- a/src/ClusterVector.test.cpp +++ b/src/ClusterVector.test.cpp @@ -1,21 +1,52 @@ -#include #include "aare/ClusterVector.hpp" +#include -#include +#include #include +#include +using aare::Cluster; using aare::ClusterVector; -struct Cluster_i2x2 { - int16_t x; - int16_t y; - int32_t data[4]; -}; +TEST_CASE("item_size return the size of the cluster stored") { + using C1 = Cluster; + ClusterVector cv(4); + CHECK(cv.item_size() == sizeof(C1)); -TEST_CASE("ClusterVector 2x2 int32_t capacity 4, push back then read") { - + // Sanity check + // 2*2*4 = 16 bytes of data for the cluster + // 2*2 = 4 bytes for the x and y coordinates + REQUIRE(cv.item_size() == 20); - ClusterVector cv(2, 2, 4); + using C2 = Cluster; + ClusterVector cv2(4); + CHECK(cv2.item_size() == sizeof(C2)); + + using C3 = Cluster; + ClusterVector cv3(4); + CHECK(cv3.item_size() == sizeof(C3)); + + using C4 = Cluster; + ClusterVector cv4(4); + CHECK(cv4.item_size() == sizeof(C4)); + + using C5 = Cluster; + ClusterVector cv5(4); + CHECK(cv5.item_size() == sizeof(C5)); + + using C6 = Cluster; + ClusterVector cv6(4); + CHECK(cv6.item_size() == sizeof(C6)); + + using C7 = Cluster; + ClusterVector cv7(4); + CHECK(cv7.item_size() == sizeof(C7)); +} + +TEST_CASE("ClusterVector 2x2 int32_t capacity 4, push back then read", + "[.ClusterVector]") { + + ClusterVector> cv(4); REQUIRE(cv.capacity() == 4); REQUIRE(cv.size() == 0); REQUIRE(cv.cluster_size_x() == 2); @@ -23,112 +54,102 @@ TEST_CASE("ClusterVector 2x2 int32_t capacity 4, push back then read") { // int16_t, int16_t, 2x2 int32_t = 20 bytes REQUIRE(cv.item_size() == 20); - //Create a cluster and push back into the vector - Cluster_i2x2 c1 = {1, 2, {3, 4, 5, 6}}; - cv.push_back(c1.x, c1.y, reinterpret_cast(&c1.data[0])); + // Create a cluster and push back into the vector + Cluster c1 = {1, 2, {3, 4, 5, 6}}; + cv.push_back(c1); REQUIRE(cv.size() == 1); REQUIRE(cv.capacity() == 4); - //Read the cluster back out using copy. TODO! Can we improve the API? 
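The tests in this hunk show the shape of the new cluster vector API: clusters are pushed and read back as typed Cluster<T, SizeX, SizeY, CoordType> values instead of going through push_back(x, y, byte pointer) and element_ptr(). A minimal sketch of that usage, assuming only the templates the tests above already instantiate:

#include "aare/Cluster.hpp"
#include "aare/ClusterVector.hpp"
#include <cassert>

int main() {
    using C = aare::Cluster<int32_t, 2, 2, int16_t>; // 2x2 int32_t data, int16_t coordinates
    aare::ClusterVector<C> cv(4);                    // capacity for 4 clusters

    C c1 = {1, 2, {3, 4, 5, 6}};
    cv.push_back(c1); // push a whole cluster, no reinterpret_cast of a byte buffer

    auto c2 = cv[0];  // typed read-back replaces element_ptr() + std::copy
    assert(c2.x == 1 && c2.y == 2 && c2.data[3] == 6);
    assert(cv.item_size() == sizeof(C)); // 2*2*4 data bytes + 2*2 coordinate bytes = 20
    return 0;
}

The item_size() check mirrors the sanity check in the test above: 16 bytes of pixel data plus 4 bytes of coordinates.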
- Cluster_i2x2 c2; - std::byte *ptr = cv.element_ptr(0); - std::copy(ptr, ptr + cv.item_size(), reinterpret_cast(&c2)); + auto c2 = cv[0]; - //Check that the data is the same + // Check that the data is the same REQUIRE(c1.x == c2.x); REQUIRE(c1.y == c2.y); - for(size_t i = 0; i < 4; i++) { + for (size_t i = 0; i < 4; i++) { REQUIRE(c1.data[i] == c2.data[i]); } } -TEST_CASE("Summing 3x1 clusters of int64"){ - struct Cluster_l3x1{ - int16_t x; - int16_t y; - int32_t data[3]; - }; - - ClusterVector cv(3, 1, 2); +TEST_CASE("Summing 3x1 clusters of int64", "[.ClusterVector]") { + ClusterVector> cv(2); REQUIRE(cv.capacity() == 2); REQUIRE(cv.size() == 0); REQUIRE(cv.cluster_size_x() == 3); REQUIRE(cv.cluster_size_y() == 1); - //Create a cluster and push back into the vector - Cluster_l3x1 c1 = {1, 2, {3, 4, 5}}; - cv.push_back(c1.x, c1.y, reinterpret_cast(&c1.data[0])); + // Create a cluster and push back into the vector + Cluster c1 = {1, 2, {3, 4, 5}}; + cv.push_back(c1); REQUIRE(cv.capacity() == 2); REQUIRE(cv.size() == 1); - Cluster_l3x1 c2 = {6, 7, {8, 9, 10}}; - cv.push_back(c2.x, c2.y, reinterpret_cast(&c2.data[0])); + Cluster c2 = {6, 7, {8, 9, 10}}; + cv.push_back(c2); REQUIRE(cv.capacity() == 2); REQUIRE(cv.size() == 2); - Cluster_l3x1 c3 = {11, 12, {13, 14, 15}}; - cv.push_back(c3.x, c3.y, reinterpret_cast(&c3.data[0])); + Cluster c3 = {11, 12, {13, 14, 15}}; + cv.push_back(c3); REQUIRE(cv.capacity() == 4); REQUIRE(cv.size() == 3); + /* auto sums = cv.sum(); REQUIRE(sums.size() == 3); REQUIRE(sums[0] == 12); REQUIRE(sums[1] == 27); REQUIRE(sums[2] == 42); + */ } -TEST_CASE("Storing floats"){ - struct Cluster_f4x2{ - int16_t x; - int16_t y; - float data[8]; - }; - - ClusterVector cv(2, 4, 10); +TEST_CASE("Storing floats", "[.ClusterVector]") { + ClusterVector> cv(10); REQUIRE(cv.capacity() == 10); REQUIRE(cv.size() == 0); REQUIRE(cv.cluster_size_x() == 2); REQUIRE(cv.cluster_size_y() == 4); - //Create a cluster and push back into the vector - Cluster_f4x2 c1 = {1, 2, {3.0, 4.0, 5.0, 6.0,3.0, 4.0, 5.0, 6.0}}; - cv.push_back(c1.x, c1.y, reinterpret_cast(&c1.data[0])); + // Create a cluster and push back into the vector + Cluster c1 = {1, 2, {3.0, 4.0, 5.0, 6.0, 3.0, 4.0, 5.0, 6.0}}; + cv.push_back(c1); REQUIRE(cv.capacity() == 10); REQUIRE(cv.size() == 1); - - Cluster_f4x2 c2 = {6, 7, {8.0, 9.0, 10.0, 11.0,8.0, 9.0, 10.0, 11.0}}; - cv.push_back(c2.x, c2.y, reinterpret_cast(&c2.data[0])); + Cluster c2 = { + 6, 7, {8.0, 9.0, 10.0, 11.0, 8.0, 9.0, 10.0, 11.0}}; + cv.push_back(c2); REQUIRE(cv.capacity() == 10); REQUIRE(cv.size() == 2); + /* auto sums = cv.sum(); REQUIRE(sums.size() == 2); REQUIRE_THAT(sums[0], Catch::Matchers::WithinAbs(36.0, 1e-6)); REQUIRE_THAT(sums[1], Catch::Matchers::WithinAbs(76.0, 1e-6)); + */ } -TEST_CASE("Push back more than initial capacity"){ - - ClusterVector cv(2, 2, 2); +TEST_CASE("Push back more than initial capacity", "[.ClusterVector]") { + + ClusterVector> cv(2); auto initial_data = cv.data(); - Cluster_i2x2 c1 = {1, 2, {3, 4, 5, 6}}; - cv.push_back(c1.x, c1.y, reinterpret_cast(&c1.data[0])); + Cluster c1 = {1, 2, {3, 4, 5, 6}}; + cv.push_back(c1); REQUIRE(cv.size() == 1); REQUIRE(cv.capacity() == 2); - Cluster_i2x2 c2 = {6, 7, {8, 9, 10, 11}}; - cv.push_back(c2.x, c2.y, reinterpret_cast(&c2.data[0])); + Cluster c2 = {6, 7, {8, 9, 10, 11}}; + cv.push_back(c2); REQUIRE(cv.size() == 2); REQUIRE(cv.capacity() == 2); - Cluster_i2x2 c3 = {11, 12, {13, 14, 15, 16}}; - cv.push_back(c3.x, c3.y, reinterpret_cast(&c3.data[0])); - REQUIRE(cv.size() == 3); + Cluster 
c3 = {11, 12, {13, 14, 15, 16}}; + cv.push_back(c3); + REQUIRE(cv.size() == 3); REQUIRE(cv.capacity() == 4); - Cluster_i2x2* ptr = reinterpret_cast(cv.data()); + Cluster *ptr = + reinterpret_cast *>(cv.data()); REQUIRE(ptr[0].x == 1); REQUIRE(ptr[0].y == 2); REQUIRE(ptr[1].x == 6); @@ -136,29 +157,31 @@ TEST_CASE("Push back more than initial capacity"){ REQUIRE(ptr[2].x == 11); REQUIRE(ptr[2].y == 12); - //We should have allocated a new buffer, since we outgrew the initial capacity + // We should have allocated a new buffer, since we outgrew the initial + // capacity REQUIRE(initial_data != cv.data()); - } -TEST_CASE("Concatenate two cluster vectors where the first has enough capacity"){ - ClusterVector cv1(2, 2, 12); - Cluster_i2x2 c1 = {1, 2, {3, 4, 5, 6}}; - cv1.push_back(c1.x, c1.y, reinterpret_cast(&c1.data[0])); - Cluster_i2x2 c2 = {6, 7, {8, 9, 10, 11}}; - cv1.push_back(c2.x, c2.y, reinterpret_cast(&c2.data[0])); +TEST_CASE("Concatenate two cluster vectors where the first has enough capacity", + "[.ClusterVector]") { + ClusterVector> cv1(12); + Cluster c1 = {1, 2, {3, 4, 5, 6}}; + cv1.push_back(c1); + Cluster c2 = {6, 7, {8, 9, 10, 11}}; + cv1.push_back(c2); - ClusterVector cv2(2, 2, 2); - Cluster_i2x2 c3 = {11, 12, {13, 14, 15, 16}}; - cv2.push_back(c3.x, c3.y, reinterpret_cast(&c3.data[0])); - Cluster_i2x2 c4 = {16, 17, {18, 19, 20, 21}}; - cv2.push_back(c4.x, c4.y, reinterpret_cast(&c4.data[0])); + ClusterVector> cv2(2); + Cluster c3 = {11, 12, {13, 14, 15, 16}}; + cv2.push_back(c3); + Cluster c4 = {16, 17, {18, 19, 20, 21}}; + cv2.push_back(c4); cv1 += cv2; REQUIRE(cv1.size() == 4); REQUIRE(cv1.capacity() == 12); - Cluster_i2x2* ptr = reinterpret_cast(cv1.data()); + Cluster *ptr = + reinterpret_cast *>(cv1.data()); REQUIRE(ptr[0].x == 1); REQUIRE(ptr[0].y == 2); REQUIRE(ptr[1].x == 6); @@ -169,24 +192,26 @@ TEST_CASE("Concatenate two cluster vectors where the first has enough capacity") REQUIRE(ptr[3].y == 17); } -TEST_CASE("Concatenate two cluster vectors where we need to allocate"){ - ClusterVector cv1(2, 2, 2); - Cluster_i2x2 c1 = {1, 2, {3, 4, 5, 6}}; - cv1.push_back(c1.x, c1.y, reinterpret_cast(&c1.data[0])); - Cluster_i2x2 c2 = {6, 7, {8, 9, 10, 11}}; - cv1.push_back(c2.x, c2.y, reinterpret_cast(&c2.data[0])); +TEST_CASE("Concatenate two cluster vectors where we need to allocate", + "[.ClusterVector]") { + ClusterVector> cv1(2); + Cluster c1 = {1, 2, {3, 4, 5, 6}}; + cv1.push_back(c1); + Cluster c2 = {6, 7, {8, 9, 10, 11}}; + cv1.push_back(c2); - ClusterVector cv2(2, 2, 2); - Cluster_i2x2 c3 = {11, 12, {13, 14, 15, 16}}; - cv2.push_back(c3.x, c3.y, reinterpret_cast(&c3.data[0])); - Cluster_i2x2 c4 = {16, 17, {18, 19, 20, 21}}; - cv2.push_back(c4.x, c4.y, reinterpret_cast(&c4.data[0])); + ClusterVector> cv2(2); + Cluster c3 = {11, 12, {13, 14, 15, 16}}; + cv2.push_back(c3); + Cluster c4 = {16, 17, {18, 19, 20, 21}}; + cv2.push_back(c4); cv1 += cv2; REQUIRE(cv1.size() == 4); REQUIRE(cv1.capacity() == 4); - Cluster_i2x2* ptr = reinterpret_cast(cv1.data()); + Cluster *ptr = + reinterpret_cast *>(cv1.data()); REQUIRE(ptr[0].x == 1); REQUIRE(ptr[0].y == 2); REQUIRE(ptr[1].x == 6); @@ -195,4 +220,49 @@ TEST_CASE("Concatenate two cluster vectors where we need to allocate"){ REQUIRE(ptr[2].y == 12); REQUIRE(ptr[3].x == 16); REQUIRE(ptr[3].y == 17); +} + +struct ClusterTestData { + uint8_t ClusterSizeX; + uint8_t ClusterSizeY; + std::vector index_map_x; + std::vector index_map_y; +}; + +TEST_CASE("Gain Map Calculation Index Map", "[.ClusterVector][.gain_map]") { + + auto 
clustertestdata = GENERATE( + ClusterTestData{3, + 3, + {-1, 0, 1, -1, 0, 1, -1, 0, 1}, + {-1, -1, -1, 0, 0, 0, 1, 1, 1}}, + ClusterTestData{ + 4, + 4, + {-2, -1, 0, 1, -2, -1, 0, 1, -2, -1, 0, 1, -2, -1, 0, 1}, + {-2, -2, -2, -2, -1, -1, -1, -1, 0, 0, 0, 0, 1, 1, 1, 1}}, + ClusterTestData{2, 2, {-1, 0, -1, 0}, {-1, -1, 0, 0}}, + ClusterTestData{5, + 5, + {-2, -1, 0, 1, 2, -2, -1, 0, 1, 2, -2, -1, 0, + 1, 2, -2, -1, 0, 1, 2, -2, -1, 0, 1, 2}, + {-2, -2, -2, -2, -2, -1, -1, -1, -1, -1, 0, 0, 0, + 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2}}); + + uint8_t ClusterSizeX = clustertestdata.ClusterSizeX; + uint8_t ClusterSizeY = clustertestdata.ClusterSizeY; + + std::vector index_map_x(ClusterSizeX * ClusterSizeY); + std::vector index_map_y(ClusterSizeX * ClusterSizeY); + + int64_t index_cluster_center_x = ClusterSizeX / 2; + int64_t index_cluster_center_y = ClusterSizeY / 2; + + for (size_t j = 0; j < ClusterSizeX * ClusterSizeY; j++) { + index_map_x[j] = j % ClusterSizeX - index_cluster_center_x; + index_map_y[j] = j / ClusterSizeX - index_cluster_center_y; + } + + CHECK(index_map_x == clustertestdata.index_map_x); + CHECK(index_map_y == clustertestdata.index_map_y); } \ No newline at end of file diff --git a/src/FilePtr.cpp b/src/FilePtr.cpp index 4fed3d7..e3cdb4b 100644 --- a/src/FilePtr.cpp +++ b/src/FilePtr.cpp @@ -21,7 +21,7 @@ FilePtr &FilePtr::operator=(FilePtr &&other) { FILE *FilePtr::get() { return fp_; } -int64_t FilePtr::tell() { +ssize_t FilePtr::tell() { auto pos = ftell(fp_); if (pos == -1) throw std::runtime_error(fmt::format("Error getting file position: {}", error_msg())); diff --git a/src/Fit.cpp b/src/Fit.cpp index 9126109..25000de 100644 --- a/src/Fit.cpp +++ b/src/Fit.cpp @@ -34,6 +34,30 @@ NDArray pol1(NDView x, NDView par) { return y; } +double scurve(const double x, const double * par) { + return (par[0] + par[1] * x) + 0.5 * (1 + erf((x - par[2]) / (sqrt(2) * par[3]))) * (par[4] + par[5] * (x - par[2])); +} + +NDArray scurve(NDView x, NDView par) { + NDArray y({x.shape()}, 0); + for (ssize_t i = 0; i < x.size(); i++) { + y(i) = scurve(x(i), par.data()); + } + return y; +} + +double scurve2(const double x, const double * par) { + return (par[0] + par[1] * x) + 0.5 * (1 - erf((x - par[2]) / (sqrt(2) * par[3]))) * (par[4] + par[5] * (x - par[2])); +} + +NDArray scurve2(NDView x, NDView par) { + NDArray y({x.shape()}, 0); + for (ssize_t i = 0; i < x.size(); i++) { + y(i) = scurve2(x(i), par.data()); + } + return y; +} + } // namespace func NDArray fit_gaus(NDView x, NDView y) { @@ -81,7 +105,7 @@ std::array gaus_init_par(const NDView x, const NDView *e / 2; }) * + [e](double val) { return val > *e / 2; }) * delta / 2.35; return start_par; @@ -273,4 +297,229 @@ NDArray fit_pol1(NDView x, NDView y, return result; } +// ~~ S-CURVES ~~ + +// SCURVE -- +std::array scurve_init_par(const NDView x, const NDView y){ + // Estimate the initial parameters for the fit + std::array start_par{0, 0, 0, 0, 0, 0}; + + auto ymax = std::max_element(y.begin(), y.end()); + auto ymin = std::min_element(y.begin(), y.end()); + start_par[4] = *ymin + (*ymax - *ymin) / 2; + + // Find the first x where the corresponding y value is above the threshold (start_par[4]) + for (ssize_t i = 0; i < y.size(); ++i) { + if (y[i] >= start_par[4]) { + start_par[2] = x[i]; + break; // Exit the loop after finding the first valid x + } + } + + start_par[3] = 2 * sqrt(start_par[2]); + start_par[0] = 100; + start_par[1] = 0.25; + start_par[5] = 1; + return start_par; +} + +// - No error +NDArray fit_scurve(NDView x, NDView y) 
{ + NDArray result = scurve_init_par(x, y); + lm_status_struct status; + + lmcurve(result.size(), result.data(), x.size(), x.data(), y.data(), + aare::func::scurve, &lm_control_double, &status); + + return result; +} + +NDArray fit_scurve(NDView x, NDView y, int n_threads) { + NDArray result({y.shape(0), y.shape(1), 6}, 0); + + auto process = [&x, &y, &result](ssize_t first_row, ssize_t last_row) { + for (ssize_t row = first_row; row < last_row; row++) { + for (ssize_t col = 0; col < y.shape(1); col++) { + NDView values(&y(row, col, 0), {y.shape(2)}); + auto res = fit_scurve(x, values); + result(row, col, 0) = res(0); + result(row, col, 1) = res(1); + result(row, col, 2) = res(2); + result(row, col, 3) = res(3); + result(row, col, 4) = res(4); + result(row, col, 5) = res(5); + } + } + }; + + auto tasks = split_task(0, y.shape(0), n_threads); + RunInParallel(process, tasks); + return result; +} + +// - Error +void fit_scurve(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, double& chi2) { + + // Check that we have the correct sizes + if (y.size() != x.size() || y.size() != y_err.size() || + par_out.size() != 6 || par_err_out.size() != 6) { + throw std::runtime_error("Data, x, data_err must have the same size " + "and par_out, par_err_out must have size 6"); + } + + lm_status_struct status; + par_out = scurve_init_par(x, y); + std::array cov = {0}; // size 6x6 + // std::array cov{0, 0, 0, 0}; + + lmcurve2(par_out.size(), par_out.data(), par_err_out.data(), cov.data(), + x.size(), x.data(), y.data(), y_err.data(), aare::func::scurve, + &lm_control_double, &status); + + // Calculate chi2 + chi2 = 0; + for (ssize_t i = 0; i < y.size(); i++) { + chi2 += std::pow((y(i) - func::pol1(x(i), par_out.data())) / y_err(i), 2); + } +} + +void fit_scurve(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, NDView chi2_out, + int n_threads) { + + auto process = [&](ssize_t first_row, ssize_t last_row) { + for (ssize_t row = first_row; row < last_row; row++) { + for (ssize_t col = 0; col < y.shape(1); col++) { + NDView y_view(&y(row, col, 0), {y.shape(2)}); + NDView y_err_view(&y_err(row, col, 0), + {y_err.shape(2)}); + NDView par_out_view(&par_out(row, col, 0), + {par_out.shape(2)}); + NDView par_err_out_view(&par_err_out(row, col, 0), + {par_err_out.shape(2)}); + + fit_scurve(x, y_view, y_err_view, par_out_view, par_err_out_view, chi2_out(row, col)); + + } + } + }; + + auto tasks = split_task(0, y.shape(0), n_threads); + RunInParallel(process, tasks); + +} + +// SCURVE2 --- + +std::array scurve2_init_par(const NDView x, const NDView y){ + // Estimate the initial parameters for the fit + std::array start_par{0, 0, 0, 0, 0, 0}; + + auto ymax = std::max_element(y.begin(), y.end()); + auto ymin = std::min_element(y.begin(), y.end()); + start_par[4] = *ymin + (*ymax - *ymin) / 2; + + // Find the first x where the corresponding y value is above the threshold (start_par[4]) + for (ssize_t i = 0; i < y.size(); ++i) { + if (y[i] <= start_par[4]) { + start_par[2] = x[i]; + break; // Exit the loop after finding the first valid x + } + } + + start_par[3] = 2 * sqrt(start_par[2]); + start_par[0] = 100; + start_par[1] = 0.25; + start_par[5] = -1; + return start_par; +} + +// - No error +NDArray fit_scurve2(NDView x, NDView y) { + NDArray result = scurve2_init_par(x, y); + lm_status_struct status; + + lmcurve(result.size(), result.data(), x.size(), x.data(), y.data(), + aare::func::scurve2, &lm_control_double, &status); + + return result; +} + +NDArray fit_scurve2(NDView x, 
NDView y, int n_threads) { + NDArray result({y.shape(0), y.shape(1), 6}, 0); + + auto process = [&x, &y, &result](ssize_t first_row, ssize_t last_row) { + for (ssize_t row = first_row; row < last_row; row++) { + for (ssize_t col = 0; col < y.shape(1); col++) { + NDView values(&y(row, col, 0), {y.shape(2)}); + auto res = fit_scurve2(x, values); + result(row, col, 0) = res(0); + result(row, col, 1) = res(1); + result(row, col, 2) = res(2); + result(row, col, 3) = res(3); + result(row, col, 4) = res(4); + result(row, col, 5) = res(5); + } + } + }; + + auto tasks = split_task(0, y.shape(0), n_threads); + RunInParallel(process, tasks); + return result; +} + +// - Error +void fit_scurve2(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, double& chi2) { + + // Check that we have the correct sizes + if (y.size() != x.size() || y.size() != y_err.size() || + par_out.size() != 6 || par_err_out.size() != 6) { + throw std::runtime_error("Data, x, data_err must have the same size " + "and par_out, par_err_out must have size 6"); + } + + lm_status_struct status; + par_out = scurve2_init_par(x, y); + std::array cov = {0}; // size 6x6 + // std::array cov{0, 0, 0, 0}; + + lmcurve2(par_out.size(), par_out.data(), par_err_out.data(), cov.data(), + x.size(), x.data(), y.data(), y_err.data(), aare::func::scurve2, + &lm_control_double, &status); + + // Calculate chi2 + chi2 = 0; + for (ssize_t i = 0; i < y.size(); i++) { + chi2 += std::pow((y(i) - func::pol1(x(i), par_out.data())) / y_err(i), 2); + } +} + +void fit_scurve2(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, NDView chi2_out, + int n_threads) { + + auto process = [&](ssize_t first_row, ssize_t last_row) { + for (ssize_t row = first_row; row < last_row; row++) { + for (ssize_t col = 0; col < y.shape(1); col++) { + NDView y_view(&y(row, col, 0), {y.shape(2)}); + NDView y_err_view(&y_err(row, col, 0), + {y_err.shape(2)}); + NDView par_out_view(&par_out(row, col, 0), + {par_out.shape(2)}); + NDView par_err_out_view(&par_err_out(row, col, 0), + {par_err_out.shape(2)}); + + fit_scurve2(x, y_view, y_err_view, par_out_view, par_err_out_view, chi2_out(row, col)); + + } + } + }; + + auto tasks = split_task(0, y.shape(0), n_threads); + RunInParallel(process, tasks); + +} + } // namespace aare \ No newline at end of file diff --git a/src/Interpolator.cpp b/src/Interpolator.cpp index 7034a83..4bc2b34 100644 --- a/src/Interpolator.cpp +++ b/src/Interpolator.cpp @@ -1,11 +1,11 @@ #include "aare/Interpolator.hpp" -#include "aare/algorithm.hpp" namespace aare { Interpolator::Interpolator(NDView etacube, NDView xbins, NDView ybins, NDView ebins) - : m_ietax(etacube), m_ietay(etacube), m_etabinsx(xbins), m_etabinsy(ybins), m_energy_bins(ebins) { + : m_ietax(etacube), m_ietay(etacube), m_etabinsx(xbins), m_etabinsy(ybins), + m_energy_bins(ebins) { if (etacube.shape(0) != xbins.size() || etacube.shape(1) != ybins.size() || etacube.shape(2) != ebins.size()) { throw std::invalid_argument( @@ -53,87 +53,4 @@ Interpolator::Interpolator(NDView etacube, NDView xbins, } } -std::vector Interpolator::interpolate(const ClusterVector& clusters) { - std::vector photons; - photons.reserve(clusters.size()); - - if (clusters.cluster_size_x() == 3 || clusters.cluster_size_y() == 3) { - for (size_t i = 0; i(i); - Eta2 eta= calculate_eta2(cluster); - - Photon photon; - photon.x = cluster.x; - photon.y = cluster.y; - photon.energy = eta.sum; - - - //Finding the index of the last element that is smaller - //should work fine as long as we have 
many bins - auto ie = last_smaller(m_energy_bins, photon.energy); - auto ix = last_smaller(m_etabinsx, eta.x); - auto iy = last_smaller(m_etabinsy, eta.y); - - double dX{}, dY{}; - // cBottomLeft = 0, - // cBottomRight = 1, - // cTopLeft = 2, - // cTopRight = 3 - switch (eta.c) { - case cTopLeft: - dX = -1.; - dY = 0.; - break; - case cTopRight:; - dX = 0.; - dY = 0.; - break; - case cBottomLeft: - dX = -1.; - dY = -1.; - break; - case cBottomRight: - dX = 0.; - dY = -1.; - break; - } - photon.x += m_ietax(ix, iy, ie)*2 + dX; - photon.y += m_ietay(ix, iy, ie)*2 + dY; - photons.push_back(photon); - } - }else if(clusters.cluster_size_x() == 2 || clusters.cluster_size_y() == 2){ - for (size_t i = 0; i(i); - Eta2 eta= calculate_eta2(cluster); - - Photon photon; - photon.x = cluster.x; - photon.y = cluster.y; - photon.energy = eta.sum; - - //Now do some actual interpolation. - //Find which energy bin the cluster is in - // auto ie = nearest_index(m_energy_bins, photon.energy)-1; - // auto ix = nearest_index(m_etabinsx, eta.x)-1; - // auto iy = nearest_index(m_etabinsy, eta.y)-1; - //Finding the index of the last element that is smaller - //should work fine as long as we have many bins - auto ie = last_smaller(m_energy_bins, photon.energy); - auto ix = last_smaller(m_etabinsx, eta.x); - auto iy = last_smaller(m_etabinsy, eta.y); - - photon.x += m_ietax(ix, iy, ie)*2; //eta goes between 0 and 1 but we could move the hit anywhere in the 2x2 - photon.y += m_ietay(ix, iy, ie)*2; - photons.push_back(photon); - } - - }else{ - throw std::runtime_error("Only 3x3 and 2x2 clusters are supported for interpolation"); - } - - - return photons; -} - } // namespace aare \ No newline at end of file diff --git a/src/JungfrauDataFile.cpp b/src/JungfrauDataFile.cpp index 8f1f904..59a1a0a 100644 --- a/src/JungfrauDataFile.cpp +++ b/src/JungfrauDataFile.cpp @@ -89,7 +89,7 @@ void JungfrauDataFile::seek(size_t frame_index) { : frame_index; auto byte_offset = frame_offset * (m_bytes_per_frame + header_size); m_fp.seek(byte_offset); -}; +} size_t JungfrauDataFile::tell() { return m_current_frame_index; } size_t JungfrauDataFile::total_frames() const { return m_total_frames; } @@ -235,4 +235,4 @@ std::filesystem::path JungfrauDataFile::fpath(size_t file_index) const { return m_path / fname; } -} // namespace aare \ No newline at end of file +} // namespace aare diff --git a/src/NDArray.test.cpp b/src/NDArray.test.cpp index c37a285..819a1a9 100644 --- a/src/NDArray.test.cpp +++ b/src/NDArray.test.cpp @@ -44,9 +44,9 @@ TEST_CASE("3D NDArray from NDView"){ REQUIRE(image.size() == view.size()); REQUIRE(image.data() != view.data()); - for(int64_t i=0; i shape{{20}}; + std::array shape{{20}}; NDArray img(shape, 3); REQUIRE(img.size() == 20); REQUIRE(img(5) == 3); @@ -71,7 +71,7 @@ TEST_CASE("Accessing a const object") { } TEST_CASE("Indexing of a 2D image") { - std::array shape{{3, 7}}; + std::array shape{{3, 7}}; NDArray img(shape, 5); for (uint32_t i = 0; i != img.size(); ++i) { REQUIRE(img(i) == 5); @@ -114,7 +114,7 @@ TEST_CASE("Divide double by int") { } TEST_CASE("Elementwise multiplication of 3D image") { - std::array shape{3, 4, 2}; + std::array shape{3, 4, 2}; NDArray a{shape}; NDArray b{shape}; for (uint32_t i = 0; i != a.size(); ++i) { @@ -179,9 +179,9 @@ TEST_CASE("Compare two images") { } TEST_CASE("Size and shape matches") { - int64_t w = 15; - int64_t h = 75; - std::array shape{w, h}; + ssize_t w = 15; + ssize_t h = 75; + std::array shape{w, h}; NDArray a{shape}; REQUIRE(a.size() == w * h); REQUIRE(a.shape() == 
shape); @@ -224,7 +224,7 @@ TEST_CASE("Bitwise and on data") { TEST_CASE("Elementwise operations on images") { - std::array shape{5, 5}; + std::array shape{5, 5}; double a_val = 3.0; double b_val = 8.0; diff --git a/src/NDView.test.cpp b/src/NDView.test.cpp index 8750f3a..89e76e9 100644 --- a/src/NDView.test.cpp +++ b/src/NDView.test.cpp @@ -142,7 +142,7 @@ TEST_CASE("iterators") { // for (int i = 0; i != 12; ++i) { // vec.push_back(i); // } -// std::vector shape{3, 4}; +// std::vector shape{3, 4}; // NDView data(vec.data(), shape); // } @@ -151,8 +151,8 @@ TEST_CASE("divide with another span") { std::vector vec1{3, 2, 1}; std::vector result{3, 6, 3}; - NDView data0(vec0.data(), Shape<1>{static_cast(vec0.size())}); - NDView data1(vec1.data(), Shape<1>{static_cast(vec1.size())}); + NDView data0(vec0.data(), Shape<1>{static_cast(vec0.size())}); + NDView data1(vec1.data(), Shape<1>{static_cast(vec1.size())}); data0 /= data1; diff --git a/src/NumpyFile.cpp b/src/NumpyFile.cpp index 109439a..e375ce3 100644 --- a/src/NumpyFile.cpp +++ b/src/NumpyFile.cpp @@ -72,8 +72,8 @@ void NumpyFile::get_frame_into(size_t frame_number, std::byte *image_buf) { } } -size_t NumpyFile::pixels_per_frame() { return m_pixels_per_frame; }; -size_t NumpyFile::bytes_per_frame() { return m_bytes_per_frame; }; +size_t NumpyFile::pixels_per_frame() { return m_pixels_per_frame; } +size_t NumpyFile::bytes_per_frame() { return m_bytes_per_frame; } std::vector NumpyFile::read_n(size_t n_frames) { // TODO: implement this in a more efficient way @@ -197,4 +197,4 @@ void NumpyFile::load_metadata() { m_header = {dtype, fortran_order, shape}; } -} // namespace aare \ No newline at end of file +} // namespace aare diff --git a/src/RawFile.cpp b/src/RawFile.cpp index 78cb6c5..122cf96 100644 --- a/src/RawFile.cpp +++ b/src/RawFile.cpp @@ -1,6 +1,8 @@ #include "aare/RawFile.hpp" +#include "aare/algorithm.hpp" #include "aare/PixelMap.hpp" #include "aare/defs.hpp" +#include "aare/logger.hpp" #include "aare/geo_helpers.hpp" #include @@ -14,27 +16,18 @@ RawFile::RawFile(const std::filesystem::path &fname, const std::string &mode) : m_master(fname) { m_mode = mode; if (mode == "r") { - - n_subfiles = find_number_of_subfiles(); // f0,f1...fn - n_subfile_parts = - m_master.geometry().col * m_master.geometry().row; // d0,d1...dn - - - find_geometry(); - if (m_master.roi()){ m_geometry = update_geometry_with_roi(m_geometry, m_master.roi().value()); } - open_subfiles(); } else { throw std::runtime_error(LOCATION + - "Unsupported mode. Can only read RawFiles."); + " Unsupported mode. 
Can only read RawFiles."); } } -Frame RawFile::read_frame() { return get_frame(m_current_frame++); }; +Frame RawFile::read_frame() { return get_frame(m_current_frame++); } Frame RawFile::read_frame(size_t frame_number) { seek(frame_number); @@ -52,13 +45,13 @@ void RawFile::read_into(std::byte *image_buf, size_t n_frames) { void RawFile::read_into(std::byte *image_buf) { return get_frame_into(m_current_frame++, image_buf); -}; +} void RawFile::read_into(std::byte *image_buf, DetectorHeader *header) { return get_frame_into(m_current_frame++, image_buf, header); -}; +} void RawFile::read_into(std::byte *image_buf, size_t n_frames, DetectorHeader *header) { // return get_frame_into(m_current_frame++, image_buf, header); @@ -67,12 +60,12 @@ void RawFile::read_into(std::byte *image_buf, size_t n_frames, DetectorHeader *h this->get_frame_into(m_current_frame++, image_buf, header); image_buf += bytes_per_frame(); if(header) - header+=n_mod(); + header+=n_modules(); } -}; +} -size_t RawFile::n_mod() const { return n_subfile_parts; } +size_t RawFile::n_modules() const { return m_master.n_modules(); } size_t RawFile::bytes_per_frame() { @@ -94,9 +87,9 @@ void RawFile::seek(size_t frame_index) { frame_index, total_frames())); } m_current_frame = frame_index; -}; +} -size_t RawFile::tell() { return m_current_frame; }; +size_t RawFile::tell() { return m_current_frame; } size_t RawFile::total_frames() const { return m_master.frames_in_file(); } size_t RawFile::rows() const { return m_geometry.pixels_y; } @@ -106,17 +99,11 @@ xy RawFile::geometry() { return m_master.geometry(); } void RawFile::open_subfiles() { if (m_mode == "r") - for (size_t i = 0; i != n_subfiles; ++i) { - auto v = std::vector(n_subfile_parts); - for (size_t j = 0; j != n_subfile_parts; ++j) { - auto pos = m_geometry.module_pixel_0[j]; - v[j] = new RawSubFile(m_master.data_fname(j, i), - m_master.detector_type(), pos.height, - pos.width, m_master.bitdepth(), - pos.row_index, pos.col_index); - - } - subfiles.push_back(v); + for (size_t i = 0; i != n_modules(); ++i) { + auto pos = m_geometry.module_pixel_0[i]; + m_subfiles.emplace_back(std::make_unique( + m_master.data_fname(i, 0), m_master.detector_type(), pos.height, + pos.width, m_master.bitdepth(), pos.row_index, pos.col_index)); } else { throw std::runtime_error(LOCATION + @@ -141,18 +128,6 @@ DetectorHeader RawFile::read_header(const std::filesystem::path &fname) { return h; } -int RawFile::find_number_of_subfiles() { - int n_files = 0; - // f0,f1...fn How many files is the data split into? 
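The subfile counting removed here moves into RawSubFile (see the RawSubFile.cpp hunk later in this patch): each subfile scans its own f0,f1,...,fn series, stores the cumulative frame count per file, and resolves a global frame index to a (file, offset) pair with first_larger on that cumsum. A stand-alone sketch of that lookup, assuming last_frame_in_file holds the cumulative counts produced by scan_files():

#include <algorithm>
#include <cstddef>
#include <utility>
#include <vector>

// Given the cumulative frame counts per data file (e.g. {3, 6, 9, 10} for files
// holding 3, 3, 3 and 1 frames), find the file containing a global frame index
// and the offset inside that file. std::upper_bound behaves like
// aare::first_larger for any in-range index.
std::pair<size_t, size_t> locate_frame(const std::vector<size_t> &last_frame_in_file,
                                       size_t frame_index) {
    auto it = std::upper_bound(last_frame_in_file.begin(),
                               last_frame_in_file.end(), frame_index);
    auto file_index = static_cast<size_t>(std::distance(last_frame_in_file.begin(), it));
    auto frame_offset = file_index ? frame_index - last_frame_in_file[file_index - 1]
                                   : frame_index;
    return {file_index, frame_offset};
}

int main() {
    std::vector<size_t> cumsum_frames{3, 6, 9, 10}; // as in the jungfrau test data below
    auto [file_index, offset] = locate_frame(cumsum_frames, 4);
    return (file_index == 1 && offset == 1) ? 0 : 1;
}

With the jungfrau test data split 3+3+3+1 used further down in this patch, frame index 4 resolves to file 1 at offset 1.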
- while (std::filesystem::exists(m_master.data_fname(0, n_files))) - n_files++; // increment after test - -#ifdef AARE_VERBOSE - fmt::print("Found: {} subfiles\n", n_files); -#endif - return n_files; - -} RawMasterFile RawFile::master() const { return m_master; } @@ -168,7 +143,7 @@ void RawFile::find_geometry() { uint16_t c{}; - for (size_t i = 0; i < n_subfile_parts; i++) { + for (size_t i = 0; i < n_modules(); i++) { auto h = read_header(m_master.data_fname(i, 0)); r = std::max(r, h.row); c = std::max(c, h.column); @@ -210,70 +185,58 @@ size_t RawFile::bytes_per_pixel() const { } void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, DetectorHeader *header) { + LOG(logDEBUG) << "RawFile::get_frame_into(" << frame_index << ")"; if (frame_index >= total_frames()) { throw std::runtime_error(LOCATION + "Frame number out of range"); } - std::vector frame_numbers(n_subfile_parts); - std::vector frame_indices(n_subfile_parts, frame_index); + std::vector frame_numbers(n_modules()); + std::vector frame_indices(n_modules(), frame_index); // sync the frame numbers - if (n_subfile_parts != 1) { - for (size_t part_idx = 0; part_idx != n_subfile_parts; ++part_idx) { - auto subfile_id = frame_index / m_master.max_frames_per_file(); - if (subfile_id >= subfiles.size()) { - throw std::runtime_error(LOCATION + - " Subfile out of range. Possible missing data."); - } - frame_numbers[part_idx] = - subfiles[subfile_id][part_idx]->frame_number( - frame_index % m_master.max_frames_per_file()); + if (n_modules() != 1) { //if we have more than one module + for (size_t part_idx = 0; part_idx != n_modules(); ++part_idx) { + frame_numbers[part_idx] = m_subfiles[part_idx]->frame_number(frame_index); } + // 1. if frame number vector is the same break - while (std::adjacent_find(frame_numbers.begin(), frame_numbers.end(), - std::not_equal_to<>()) != - frame_numbers.end()) { + while (!all_equal(frame_numbers)) { + // 2. find the index of the minimum frame number, auto min_frame_idx = std::distance( frame_numbers.begin(), std::min_element(frame_numbers.begin(), frame_numbers.end())); + // 3. increase its index and update its respective frame number frame_indices[min_frame_idx]++; + // 4. if we can't increase its index => throw error if (frame_indices[min_frame_idx] >= total_frames()) { throw std::runtime_error(LOCATION + "Frame number out of range"); } - auto subfile_id = - frame_indices[min_frame_idx] / m_master.max_frames_per_file(); + frame_numbers[min_frame_idx] = - subfiles[subfile_id][min_frame_idx]->frame_number( - frame_indices[min_frame_idx] % - m_master.max_frames_per_file()); + m_subfiles[min_frame_idx]->frame_number(frame_indices[min_frame_idx]); } } if (m_master.geometry().col == 1) { // get the part from each subfile and copy it to the frame - for (size_t part_idx = 0; part_idx != n_subfile_parts; ++part_idx) { + for (size_t part_idx = 0; part_idx != n_modules(); ++part_idx) { auto corrected_idx = frame_indices[part_idx]; - auto subfile_id = corrected_idx / m_master.max_frames_per_file(); - if (subfile_id >= subfiles.size()) { - throw std::runtime_error(LOCATION + - " Subfile out of range. Possible missing data."); - } - + // This is where we start writing auto offset = (m_geometry.module_pixel_0[part_idx].origin_y * m_geometry.pixels_x + m_geometry.module_pixel_0[part_idx].origin_x)*m_master.bitdepth()/8; if (m_geometry.module_pixel_0[part_idx].origin_x!=0) - throw std::runtime_error(LOCATION + "Implementation error. 
x pos not 0."); + throw std::runtime_error(LOCATION + " Implementation error. x pos not 0."); - //TODO! Risk for out of range access - subfiles[subfile_id][part_idx]->seek(corrected_idx % m_master.max_frames_per_file()); - subfiles[subfile_id][part_idx]->read_into(frame_buffer + offset, header); + //TODO! What if the files don't match? + m_subfiles[part_idx]->seek(corrected_idx); + m_subfiles[part_idx]->read_into(frame_buffer + offset, header); if (header) ++header; } @@ -282,26 +245,21 @@ void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, Detect //TODO! should we read row by row? // create a buffer large enough to hold a full module - auto bytes_per_part = m_master.pixels_y() * m_master.pixels_x() * m_master.bitdepth() / 8; // TODO! replace with image_size_in_bytes + auto *part_buffer = new std::byte[bytes_per_part]; // TODO! if we have many submodules we should reorder them on the module // level - for (size_t part_idx = 0; part_idx != n_subfile_parts; ++part_idx) { + for (size_t part_idx = 0; part_idx != n_modules(); ++part_idx) { auto pos = m_geometry.module_pixel_0[part_idx]; auto corrected_idx = frame_indices[part_idx]; - auto subfile_id = corrected_idx / m_master.max_frames_per_file(); - if (subfile_id >= subfiles.size()) { - throw std::runtime_error(LOCATION + - " Subfile out of range. Possible missing data."); - } - subfiles[subfile_id][part_idx]->seek(corrected_idx % m_master.max_frames_per_file()); - subfiles[subfile_id][part_idx]->read_into(part_buffer, header); + m_subfiles[part_idx]->seek(corrected_idx); + m_subfiles[part_idx]->read_into(part_buffer, header); if(header) ++header; @@ -321,6 +279,7 @@ void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, Detect } delete[] part_buffer; } + } std::vector RawFile::read_n(size_t n_frames) { @@ -337,27 +296,8 @@ size_t RawFile::frame_number(size_t frame_index) { if (frame_index >= m_master.frames_in_file()) { throw std::runtime_error(LOCATION + " Frame number out of range"); } - size_t subfile_id = frame_index / m_master.max_frames_per_file(); - if (subfile_id >= subfiles.size()) { - throw std::runtime_error( - LOCATION + " Subfile out of range. Possible missing data."); - } - return subfiles[subfile_id][0]->frame_number( - frame_index % m_master.max_frames_per_file()); -} - -RawFile::~RawFile() { - - // TODO! 
Fix this, for file closing - for (auto &vec : subfiles) { - for (auto *subfile : vec) { - delete subfile; - } - } + return m_subfiles[0]->frame_number(frame_index); } - - - -} // namespace aare \ No newline at end of file +} // namespace aare diff --git a/src/RawFile.test.cpp b/src/RawFile.test.cpp index 5f9b2e1..9109985 100644 --- a/src/RawFile.test.cpp +++ b/src/RawFile.test.cpp @@ -99,11 +99,11 @@ TEST_CASE("Read frame numbers from a raw file", "[.integration]") { } } -TEST_CASE("Compare reading from a numpy file with a raw file", "[.integration]") { - auto fpath_raw = test_data_path() / "jungfrau" / "jungfrau_single_master_0.json"; +TEST_CASE("Compare reading from a numpy file with a raw file", "[.files]") { + auto fpath_raw = test_data_path() / "raw/jungfrau" / "jungfrau_single_master_0.json"; REQUIRE(std::filesystem::exists(fpath_raw)); - auto fpath_npy = test_data_path() / "jungfrau" / "jungfrau_single_0.npy"; + auto fpath_npy = test_data_path() / "raw/jungfrau" / "jungfrau_single_0.npy"; REQUIRE(std::filesystem::exists(fpath_npy)); File raw(fpath_raw, "r"); @@ -113,6 +113,7 @@ TEST_CASE("Compare reading from a numpy file with a raw file", "[.integration]") CHECK(npy.total_frames() == 10); for (size_t i = 0; i < 10; ++i) { + CHECK(raw.tell() == i); auto raw_frame = raw.read_frame(); auto npy_frame = npy.read_frame(); CHECK((raw_frame.view() == npy_frame.view())); diff --git a/src/RawMasterFile.cpp b/src/RawMasterFile.cpp index 052bb00..8a2db87 100644 --- a/src/RawMasterFile.cpp +++ b/src/RawMasterFile.cpp @@ -87,7 +87,7 @@ int ScanParameters::start() const { return m_start; } int ScanParameters::stop() const { return m_stop; } void ScanParameters::increment_stop(){ m_stop += 1; -}; +} int ScanParameters::step() const { return m_step; } const std::string &ScanParameters::dac() const { return m_dac; } bool ScanParameters::enabled() const { return m_enabled; } @@ -140,6 +140,10 @@ std::optional RawMasterFile::number_of_rows() const { xy RawMasterFile::geometry() const { return m_geometry; } +size_t RawMasterFile::n_modules() const { + return m_geometry.row * m_geometry.col; +} + std::optional RawMasterFile::quad() const { return m_quad; } // optional values, these may or may not be present in the master file @@ -417,4 +421,4 @@ void RawMasterFile::parse_raw(const std::filesystem::path &fpath) { if(m_frames_in_file==0) m_frames_in_file = m_total_frames_expected; } -} // namespace aare \ No newline at end of file +} // namespace aare diff --git a/src/RawSubFile.cpp b/src/RawSubFile.cpp index 9e7a421..a8d29ce 100644 --- a/src/RawSubFile.cpp +++ b/src/RawSubFile.cpp @@ -1,9 +1,15 @@ #include "aare/RawSubFile.hpp" #include "aare/PixelMap.hpp" +#include "aare/algorithm.hpp" #include "aare/utils/ifstream_helpers.hpp" +#include "aare/logger.hpp" + + #include // memcpy #include #include +#include + @@ -12,51 +18,51 @@ namespace aare { RawSubFile::RawSubFile(const std::filesystem::path &fname, DetectorType detector, size_t rows, size_t cols, size_t bitdepth, uint32_t pos_row, uint32_t pos_col) - : m_detector_type(detector), m_bitdepth(bitdepth), m_fname(fname), + : m_detector_type(detector), m_bitdepth(bitdepth), m_rows(rows), m_cols(cols), m_bytes_per_frame((m_bitdepth / 8) * m_rows * m_cols), m_pos_row(pos_row), m_pos_col(pos_col) { + + LOG(logDEBUG) << "RawSubFile::RawSubFile()"; if (m_detector_type == DetectorType::Moench03_old) { m_pixel_map = GenerateMoench03PixelMap(); } else if (m_detector_type == DetectorType::Eiger && m_pos_row % 2 == 0) { m_pixel_map = GenerateEigerFlipRowsPixelMap(); 
} - if (std::filesystem::exists(fname)) { - m_num_frames = std::filesystem::file_size(fname) / - (sizeof(DetectorHeader) + rows * cols * bitdepth / 8); - } else { - throw std::runtime_error( - LOCATION + fmt::format("File {} does not exist", m_fname.string())); - } - // fp = fopen(m_fname.string().c_str(), "rb"); - m_file.open(m_fname, std::ios::binary); - if (!m_file.is_open()) { - throw std::runtime_error( - LOCATION + fmt::format("Could not open file {}", m_fname.string())); - } - -#ifdef AARE_VERBOSE - fmt::print("Opened file: {} with {} frames\n", m_fname.string(), m_num_frames); - fmt::print("m_rows: {}, m_cols: {}, m_bitdepth: {}\n", m_rows, m_cols, - m_bitdepth); - fmt::print("file size: {}\n", std::filesystem::file_size(fname)); -#endif + parse_fname(fname); + scan_files(); + open_file(m_current_file_index); // open the first file } void RawSubFile::seek(size_t frame_index) { - if (frame_index >= m_num_frames) { - throw std::runtime_error(LOCATION + fmt::format("Frame index {} out of range in a file with {} frames", frame_index, m_num_frames)); + LOG(logDEBUG) << "RawSubFile::seek(" << frame_index << ")"; + if (frame_index >= m_total_frames) { + throw std::runtime_error(LOCATION + " Frame index out of range: " + + std::to_string(frame_index)); } - m_file.seekg((sizeof(DetectorHeader) + bytes_per_frame()) * frame_index); + m_current_frame_index = frame_index; + auto file_index = first_larger(m_last_frame_in_file, frame_index); + + if (file_index != m_current_file_index) + open_file(file_index); + + auto frame_offset = (file_index) + ? frame_index - m_last_frame_in_file[file_index - 1] + : frame_index; + auto byte_offset = frame_offset * (m_bytes_per_frame + sizeof(DetectorHeader)); + m_file.seekg(byte_offset); } size_t RawSubFile::tell() { - return m_file.tellg() / (sizeof(DetectorHeader) + bytes_per_frame()); + LOG(logDEBUG) << "RawSubFile::tell():" << m_current_frame_index; + return m_current_frame_index; } void RawSubFile::read_into(std::byte *image_buf, DetectorHeader *header) { + LOG(logDEBUG) << "RawSubFile::read_into()"; + if (header) { m_file.read(reinterpret_cast(header), sizeof(DetectorHeader)); } else { @@ -90,6 +96,13 @@ void RawSubFile::read_into(std::byte *image_buf, DetectorHeader *header) { if (m_file.fail()){ throw std::runtime_error(LOCATION + ifstream_error_msg(m_file)); } + + ++ m_current_frame_index; + if (m_current_frame_index >= m_last_frame_in_file[m_current_file_index] && + (m_current_frame_index < m_total_frames)) { + ++m_current_file_index; + open_file(m_current_file_index); + } } void RawSubFile::read_into(std::byte *image_buf, size_t n_frames, DetectorHeader *header) { @@ -130,4 +143,69 @@ size_t RawSubFile::frame_number(size_t frame_index) { return h.frameNumber; } +void RawSubFile::parse_fname(const std::filesystem::path &fname) { + LOG(logDEBUG) << "RawSubFile::parse_fname()"; + // data has the format: /path/too/data/jungfrau_single_d0_f1_0.raw + // d0 is the module index, will not change for this file + // f1 is the file index - thi is the one we need + // 0 is the measurement index, will not change + m_path = fname.parent_path(); + m_base_name = fname.filename(); + + // Regex to extract numbers after 'd' and 'f' + std::regex pattern(R"(^(.*_d)(\d+)(_f)(\d+)(_\d+\.raw)$)"); + std::smatch match; + + if (std::regex_match(m_base_name, match, pattern)) { + m_offset = std::stoi(match[4].str()); // find the first file index in case of a truncated series + m_base_name = match[1].str() + match[2].str() + match[3].str() + "{}" + match[5].str(); + 
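        // Worked example for the path shown above, "jungfrau_single_d0_f1_0.raw":
        //   match[2] == "0"  (module index, kept verbatim in the base name)
        //   match[4] == "1"  -> m_offset, the first file index of a possibly
        //                       truncated series
        //   m_base_name becomes "jungfrau_single_d0_f{}_0.raw", and fpath() later
        //   substitutes the running file index via fmt::format(m_base_name, file_index).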
LOG(logDEBUG) << "Base name: " << m_base_name; + LOG(logDEBUG) << "Offset: " << m_offset; + LOG(logDEBUG) << "Path: " << m_path.string(); + } else { + throw std::runtime_error( + LOCATION + fmt::format("Could not parse file name {}", fname.string())); + } +} + +std::filesystem::path RawSubFile::fpath(size_t file_index) const { + auto fname = fmt::format(m_base_name, file_index); + return m_path / fname; +} + +void RawSubFile::open_file(size_t file_index) { + m_file.close(); + auto fname = fpath(file_index+m_offset); + LOG(logDEBUG) << "RawSubFile::open_file(): " << fname.string(); + m_file.open(fname, std::ios::binary); + if (!m_file.is_open()) { + throw std::runtime_error( + LOCATION + fmt::format("Could not open file {}", fpath(file_index).string())); + } + m_current_file_index = file_index; +} + +void RawSubFile::scan_files() { + LOG(logDEBUG) << "RawSubFile::scan_files()"; + // find how many files we have and the number of frames in each file + m_last_frame_in_file.clear(); + size_t file_index = m_offset; + + while (std::filesystem::exists(fpath(file_index))) { + auto n_frames = std::filesystem::file_size(fpath(file_index)) / + (m_bytes_per_frame + sizeof(DetectorHeader)); + m_last_frame_in_file.push_back(n_frames); + LOG(logDEBUG) << "Found: " << n_frames << " frames in file: " << fpath(file_index).string(); + ++file_index; + } + + // find where we need to open the next file and total number of frames + m_last_frame_in_file = cumsum(m_last_frame_in_file); + if(m_last_frame_in_file.empty()){ + m_total_frames = 0; + }else{ + m_total_frames = m_last_frame_in_file.back(); + } +} + } // namespace aare \ No newline at end of file diff --git a/src/RawSubFile.test.cpp b/src/RawSubFile.test.cpp new file mode 100644 index 0000000..89cf858 --- /dev/null +++ b/src/RawSubFile.test.cpp @@ -0,0 +1,76 @@ +#include "aare/RawSubFile.hpp" +#include "aare/File.hpp" +#include "aare/NDArray.hpp" +#include +#include "test_config.hpp" + +using namespace aare; + +TEST_CASE("Read frames directly from a RawSubFile", "[.files]"){ + auto fpath_raw = test_data_path() / "raw/jungfrau" / "jungfrau_single_d0_f0_0.raw"; + REQUIRE(std::filesystem::exists(fpath_raw)); + + RawSubFile f(fpath_raw, DetectorType::Jungfrau, 512, 1024, 16); + REQUIRE(f.rows() == 512); + REQUIRE(f.cols() == 1024); + REQUIRE(f.pixels_per_frame() == 512 * 1024); + REQUIRE(f.bytes_per_frame() == 512 * 1024 * 2); + REQUIRE(f.bytes_per_pixel() == 2); + + + auto fpath_npy = test_data_path() / "raw/jungfrau" / "jungfrau_single_0.npy"; + REQUIRE(std::filesystem::exists(fpath_npy)); + + //Numpy file with the same data to use as reference + File npy(fpath_npy, "r"); + + CHECK(f.frames_in_file() == 10); + CHECK(npy.total_frames() == 10); + + + DetectorHeader header{}; + NDArray image({static_cast(f.rows()), static_cast(f.cols())}); + for (size_t i = 0; i < 10; ++i) { + CHECK(f.tell() == i); + f.read_into(image.buffer(), &header); + auto npy_frame = npy.read_frame(); + CHECK((image.view() == npy_frame.view())); + } +} + +TEST_CASE("Read frames directly from a RawSubFile starting at the second file", "[.files]"){ + // we know this file has 10 frames with frame numbers 1 to 10 + // f0 1,2,3 + // f1 4,5,6 <-- starting here + // f2 7,8,9 + // f3 10 + + auto fpath_raw = test_data_path() / "raw/jungfrau" / "jungfrau_single_d0_f1_0.raw"; + REQUIRE(std::filesystem::exists(fpath_raw)); + + RawSubFile f(fpath_raw, DetectorType::Jungfrau, 512, 1024, 16); + + + auto fpath_npy = test_data_path() / "raw/jungfrau" / "jungfrau_single_0.npy"; + 
REQUIRE(std::filesystem::exists(fpath_npy)); + + //Numpy file with the same data to use as reference + File npy(fpath_npy, "r"); + npy.seek(3); + + CHECK(f.frames_in_file() == 7); + CHECK(npy.total_frames() == 10); + + + DetectorHeader header{}; + NDArray image({static_cast(f.rows()), static_cast(f.cols())}); + for (size_t i = 0; i < 7; ++i) { + CHECK(f.tell() == i); + f.read_into(image.buffer(), &header); + // frame numbers start at 1 frame index at 0 + // adding 3 + 1 to verify the frame number + CHECK(header.frameNumber == i + 4); + auto npy_frame = npy.read_frame(); + CHECK((image.view() == npy_frame.view())); + } +} \ No newline at end of file diff --git a/src/algorithm.test.cpp b/src/algorithm.test.cpp index 79541a1..bf49c52 100644 --- a/src/algorithm.test.cpp +++ b/src/algorithm.test.cpp @@ -1,8 +1,7 @@ -#include #include - +#include TEST_CASE("Find the closed index in a 1D array", "[algorithm]") { aare::NDArray arr({5}); @@ -17,7 +16,7 @@ TEST_CASE("Find the closed index in a 1D array", "[algorithm]") { REQUIRE(aare::nearest_index(arr, -1.0) == 0); } -TEST_CASE("Passing integers to nearest_index works", "[algorithm]"){ +TEST_CASE("Passing integers to nearest_index works", "[algorithm]") { aare::NDArray arr({5}); for (ssize_t i = 0; i < arr.size(); i++) { arr[i] = i; @@ -30,8 +29,7 @@ TEST_CASE("Passing integers to nearest_index works", "[algorithm]"){ REQUIRE(aare::nearest_index(arr, -1) == 0); } - -TEST_CASE("nearest_index works with std::vector", "[algorithm]"){ +TEST_CASE("nearest_index works with std::vector", "[algorithm]") { std::vector vec = {0, 1, 2, 3, 4}; REQUIRE(aare::nearest_index(vec, 2.123) == 2); REQUIRE(aare::nearest_index(vec, 2.66) == 3); @@ -40,7 +38,7 @@ TEST_CASE("nearest_index works with std::vector", "[algorithm]"){ REQUIRE(aare::nearest_index(vec, -10.0) == 0); } -TEST_CASE("nearest index works with std::array", "[algorithm]"){ +TEST_CASE("nearest index works with std::array", "[algorithm]") { std::array arr = {0, 1, 2, 3, 4}; REQUIRE(aare::nearest_index(arr, 2.123) == 2); REQUIRE(aare::nearest_index(arr, 2.501) == 3); @@ -49,18 +47,20 @@ TEST_CASE("nearest index works with std::array", "[algorithm]"){ REQUIRE(aare::nearest_index(arr, -10.0) == 0); } -TEST_CASE("nearest index when there is no different uses the first element", "[algorithm]"){ +TEST_CASE("nearest index when there is no different uses the first element", + "[algorithm]") { std::vector vec = {5, 5, 5, 5, 5}; REQUIRE(aare::nearest_index(vec, 5) == 0); } -TEST_CASE("nearest index when there is no different uses the first element also when all smaller", "[algorithm]"){ +TEST_CASE("nearest index when there is no different uses the first element " + "also when all smaller", + "[algorithm]") { std::vector vec = {5, 5, 5, 5, 5}; REQUIRE(aare::nearest_index(vec, 10) == 0); } - -TEST_CASE("last smaller", "[algorithm]"){ +TEST_CASE("last smaller", "[algorithm]") { aare::NDArray arr({5}); for (ssize_t i = 0; i < arr.size(); i++) { arr[i] = i; @@ -72,17 +72,17 @@ TEST_CASE("last smaller", "[algorithm]"){ REQUIRE(aare::last_smaller(arr, 253.) 
== 4); } -TEST_CASE("returns last bin strictly smaller", "[algorithm]"){ +TEST_CASE("returns last bin strictly smaller", "[algorithm]") { aare::NDArray arr({5}); for (ssize_t i = 0; i < arr.size(); i++) { arr[i] = i; } // arr 0, 1, 2, 3, 4 REQUIRE(aare::last_smaller(arr, 2.0) == 1); - } -TEST_CASE("last_smaller with all elements smaller returns last element", "[algorithm]"){ +TEST_CASE("last_smaller with all elements smaller returns last element", + "[algorithm]") { aare::NDArray arr({5}); for (ssize_t i = 0; i < arr.size(); i++) { arr[i] = i; @@ -91,7 +91,8 @@ TEST_CASE("last_smaller with all elements smaller returns last element", "[algor REQUIRE(aare::last_smaller(arr, 50.) == 4); } -TEST_CASE("last_smaller with all elements bigger returns first element", "[algorithm]"){ +TEST_CASE("last_smaller with all elements bigger returns first element", + "[algorithm]") { aare::NDArray arr({5}); for (ssize_t i = 0; i < arr.size(); i++) { arr[i] = i; @@ -100,38 +101,41 @@ TEST_CASE("last_smaller with all elements bigger returns first element", "[algor REQUIRE(aare::last_smaller(arr, -50.) == 0); } -TEST_CASE("last smaller with all elements equal returns the first element", "[algorithm]"){ - std::vector vec = {5,5,5,5,5,5,5}; +TEST_CASE("last smaller with all elements equal returns the first element", + "[algorithm]") { + std::vector vec = {5, 5, 5, 5, 5, 5, 5}; REQUIRE(aare::last_smaller(vec, 5) == 0); } - -TEST_CASE("first_lager with vector", "[algorithm]"){ +TEST_CASE("first_lager with vector", "[algorithm]") { std::vector vec = {0, 1, 2, 3, 4}; REQUIRE(aare::first_larger(vec, 2.5) == 3); } -TEST_CASE("first_lager with all elements smaller returns last element", "[algorithm]"){ +TEST_CASE("first_lager with all elements smaller returns last element", + "[algorithm]") { std::vector vec = {0, 1, 2, 3, 4}; REQUIRE(aare::first_larger(vec, 50.) == 4); } -TEST_CASE("first_lager with all elements bigger returns first element", "[algorithm]"){ +TEST_CASE("first_lager with all elements bigger returns first element", + "[algorithm]") { std::vector vec = {0, 1, 2, 3, 4}; REQUIRE(aare::first_larger(vec, -50.) 
== 0); } -TEST_CASE("first_lager with all elements the same as the check returns last", "[algorithm]"){ +TEST_CASE("first_lager with all elements the same as the check returns last", + "[algorithm]") { std::vector vec = {14, 14, 14, 14, 14}; REQUIRE(aare::first_larger(vec, 14) == 4); } -TEST_CASE("first larger with the same element", "[algorithm]"){ - std::vector vec = {7,8,9,10,11}; +TEST_CASE("first larger with the same element", "[algorithm]") { + std::vector vec = {7, 8, 9, 10, 11}; REQUIRE(aare::first_larger(vec, 9) == 3); } -TEST_CASE("cumsum works", "[algorithm]"){ +TEST_CASE("cumsum works", "[algorithm]") { std::vector vec = {0, 1, 2, 3, 4}; auto result = aare::cumsum(vec); REQUIRE(result.size() == vec.size()); @@ -141,12 +145,12 @@ TEST_CASE("cumsum works", "[algorithm]"){ REQUIRE(result[3] == 6); REQUIRE(result[4] == 10); } -TEST_CASE("cumsum works with empty vector", "[algorithm]"){ +TEST_CASE("cumsum works with empty vector", "[algorithm]") { std::vector vec = {}; auto result = aare::cumsum(vec); REQUIRE(result.size() == 0); } -TEST_CASE("cumsum works with negative numbers", "[algorithm]"){ +TEST_CASE("cumsum works with negative numbers", "[algorithm]") { std::vector vec = {0, -1, -2, -3, -4}; auto result = aare::cumsum(vec); REQUIRE(result.size() == vec.size()); @@ -157,3 +161,35 @@ TEST_CASE("cumsum works with negative numbers", "[algorithm]"){ REQUIRE(result[4] == -10); } + +TEST_CASE("cumsum on an empty vector", "[algorithm]") { + std::vector vec = {}; + auto result = aare::cumsum(vec); + REQUIRE(result.size() == 0); + +} + +TEST_CASE("All equal on an empty vector is false", "[algorithm]") { + std::vector vec = {}; + REQUIRE(aare::all_equal(vec) == false); +} + +TEST_CASE("All equal on a vector with 1 element is true", "[algorithm]") { + std::vector vec = {1}; + REQUIRE(aare::all_equal(vec) == true); +} + +TEST_CASE("All equal on a vector with 2 elements is true", "[algorithm]") { + std::vector vec = {1, 1}; + REQUIRE(aare::all_equal(vec) == true); +} + +TEST_CASE("All equal on a vector with two different elements is false", "[algorithm]") { + std::vector vec = {1, 2}; + REQUIRE(aare::all_equal(vec) == false); +} + +TEST_CASE("Last element is different", "[algorithm]") { + std::vector vec = {1, 1, 1, 1, 2}; + REQUIRE(aare::all_equal(vec) == false); +} diff --git a/src/decode.cpp b/src/decode.cpp index 8ac7bc0..436ad7b 100644 --- a/src/decode.cpp +++ b/src/decode.cpp @@ -26,8 +26,8 @@ void adc_sar_05_decode64to16(NDView input, NDView outpu throw std::invalid_argument(LOCATION + " input and output shapes must match"); } - for(int64_t i = 0; i < input.shape(0); i++){ - for(int64_t j = 0; j < input.shape(1); j++){ + for(ssize_t i = 0; i < input.shape(0); i++){ + for(ssize_t j = 0; j < input.shape(1); j++){ output(i,j) = adc_sar_05_decode64to16(input(i,j)); } } @@ -56,8 +56,8 @@ void adc_sar_04_decode64to16(NDView input, NDView outpu if(input.shape() != output.shape()){ throw std::invalid_argument(LOCATION + " input and output shapes must match"); } - for(int64_t i = 0; i < input.shape(0); i++){ - for(int64_t j = 0; j < input.shape(1); j++){ + for(ssize_t i = 0; i < input.shape(0); i++){ + for(ssize_t j = 0; j < input.shape(1); j++){ output(i,j) = adc_sar_04_decode64to16(input(i,j)); } } diff --git a/update_version.py b/update_version.py new file mode 100644 index 0000000..476895a --- /dev/null +++ b/update_version.py @@ -0,0 +1,57 @@ +# SPDX-License-Identifier: LGPL-3.0-or-other +# Copyright (C) 2021 Contributors to the Aare Package +""" +Script to update VERSION file with 
semantic versioning if provided as an argument, or with 0.0.0 if no argument is provided. +""" + +import sys +import os +import re + +from packaging.version import Version, InvalidVersion + + +SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) + +def is_integer(value): + try: + int(value) + except ValueError: + return False + else: + return True + + +def get_version(): + + # Check at least one argument is passed + if len(sys.argv) < 2: + return "0.0.0" + + version = sys.argv[1] + + try: + v = Version(version) # normalize check if version follows PEP 440 specification + + version_normalized = version.replace("-", ".") + + version_normalized = re.sub(r'0*(\d+)', lambda m : str(int(m.group(0))), version_normalized) #remove leading zeros + + return version_normalized + + except InvalidVersion as e: + print(f"Invalid version {version}. Version format must follow semantic versioning format of python PEP 440 version identification specification.") + sys.exit(1) + + +def write_version_to_file(version): + version_file_path = os.path.join(SCRIPT_DIR, "VERSION") + with open(version_file_path, "w") as version_file: + version_file.write(version) + print(f"Version {version} written to VERSION file.") + +# Main script +if __name__ == "__main__": + + version = get_version() + write_version_to_file(version) \ No newline at end of file From 69964e08d59b48cf82239bf637215f17989ea224 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Tue, 3 Jun 2025 08:43:40 +0200 Subject: [PATCH 06/13] Refactor cluster bindings (#185) - Split up the file for cluster bindings - new file names according to bind_ClassName.hpp --- python/src/bind_Cluster.hpp | 64 ++++++ python/src/bind_ClusterCollector.hpp | 46 ++++ ...{cluster_file.hpp => bind_ClusterFile.hpp} | 2 +- python/src/bind_ClusterFileSink.hpp | 44 ++++ python/src/bind_ClusterFinder.hpp | 77 +++++++ python/src/bind_ClusterFinderMT.hpp | 81 +++++++ python/src/bind_ClusterVector.hpp | 4 +- python/src/cluster.hpp | 211 ------------------ python/src/module.cpp | 80 +++---- 9 files changed, 358 insertions(+), 251 deletions(-) create mode 100644 python/src/bind_Cluster.hpp create mode 100644 python/src/bind_ClusterCollector.hpp rename python/src/{cluster_file.hpp => bind_ClusterFile.hpp} (98%) create mode 100644 python/src/bind_ClusterFileSink.hpp create mode 100644 python/src/bind_ClusterFinder.hpp create mode 100644 python/src/bind_ClusterFinderMT.hpp delete mode 100644 python/src/cluster.hpp diff --git a/python/src/bind_Cluster.hpp b/python/src/bind_Cluster.hpp new file mode 100644 index 0000000..daf0946 --- /dev/null +++ b/python/src/bind_Cluster.hpp @@ -0,0 +1,64 @@ +#include "aare/Cluster.hpp" + +#include +#include +#include +#include +#include +#include +#include + +namespace py = pybind11; +using pd_type = double; + +using namespace aare; + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" + +template +void define_Cluster(py::module &m, const std::string &typestr) { + auto class_name = fmt::format("Cluster{}", typestr); + + py::class_>( + m, class_name.c_str(), py::buffer_protocol()) + + .def(py::init([](uint8_t x, uint8_t y, py::array_t data) { + py::buffer_info buf_info = data.request(); + Cluster cluster; + cluster.x = x; + cluster.y = y; + auto r = data.template unchecked<1>(); // no bounds checks + for (py::ssize_t i = 0; i < data.size(); ++i) { + cluster.data[i] = r(i); + } + return cluster; + })); + + /* + //TODO! 
Review if to keep or not + .def_property( + "data", + [](ClusterType &c) -> py::array { + return py::array(py::buffer_info( + c.data, sizeof(Type), + py::format_descriptor::format(), // Type + // format + 1, // Number of dimensions + {static_cast(ClusterSizeX * + ClusterSizeY)}, // Shape (flattened) + {sizeof(Type)} // Stride (step size between elements) + )); + }, + [](ClusterType &c, py::array_t arr) { + py::buffer_info buf_info = arr.request(); + Type *ptr = static_cast(buf_info.ptr); + std::copy(ptr, ptr + ClusterSizeX * ClusterSizeY, + c.data); // TODO dont iterate over centers!!! + + }); + */ +} + +#pragma GCC diagnostic pop \ No newline at end of file diff --git a/python/src/bind_ClusterCollector.hpp b/python/src/bind_ClusterCollector.hpp new file mode 100644 index 0000000..4836e6e --- /dev/null +++ b/python/src/bind_ClusterCollector.hpp @@ -0,0 +1,46 @@ +#include "aare/ClusterCollector.hpp" +#include "aare/ClusterFileSink.hpp" +#include "aare/ClusterFinder.hpp" +#include "aare/ClusterFinderMT.hpp" +#include "aare/ClusterVector.hpp" +#include "aare/NDView.hpp" +#include "aare/Pedestal.hpp" +#include "np_helper.hpp" + +#include +#include +#include +#include +#include + +namespace py = pybind11; +using pd_type = double; + +using namespace aare; + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" + + +template +void define_ClusterCollector(py::module &m, + const std::string &typestr) { + auto class_name = fmt::format("ClusterCollector_{}", typestr); + + using ClusterType = Cluster; + + py::class_>(m, class_name.c_str()) + .def(py::init *>()) + .def("stop", &ClusterCollector::stop) + .def( + "steal_clusters", + [](ClusterCollector &self) { + auto v = new std::vector>( + self.steal_clusters()); + return v; // TODO change!!! 
+ }, + py::return_value_policy::take_ownership); +} + +#pragma GCC diagnostic pop \ No newline at end of file diff --git a/python/src/cluster_file.hpp b/python/src/bind_ClusterFile.hpp similarity index 98% rename from python/src/cluster_file.hpp rename to python/src/bind_ClusterFile.hpp index ac384b2..8ce5360 100644 --- a/python/src/cluster_file.hpp +++ b/python/src/bind_ClusterFile.hpp @@ -21,7 +21,7 @@ using namespace ::aare; template -void define_cluster_file_io_bindings(py::module &m, +void define_ClusterFile(py::module &m, const std::string &typestr) { using ClusterType = Cluster; diff --git a/python/src/bind_ClusterFileSink.hpp b/python/src/bind_ClusterFileSink.hpp new file mode 100644 index 0000000..9b3a74d --- /dev/null +++ b/python/src/bind_ClusterFileSink.hpp @@ -0,0 +1,44 @@ +#include "aare/ClusterCollector.hpp" +#include "aare/ClusterFileSink.hpp" +#include "aare/ClusterFinder.hpp" +#include "aare/ClusterFinderMT.hpp" +#include "aare/ClusterVector.hpp" +#include "aare/NDView.hpp" +#include "aare/Pedestal.hpp" +#include "np_helper.hpp" + +#include +#include +#include +#include +#include + +namespace py = pybind11; +using pd_type = double; + +using namespace aare; + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" + + + + + + +template +void define_ClusterFileSink(py::module &m, + const std::string &typestr) { + auto class_name = fmt::format("ClusterFileSink_{}", typestr); + + using ClusterType = Cluster; + + py::class_>(m, class_name.c_str()) + .def(py::init *, + const std::filesystem::path &>()) + .def("stop", &ClusterFileSink::stop); +} + + +#pragma GCC diagnostic pop diff --git a/python/src/bind_ClusterFinder.hpp b/python/src/bind_ClusterFinder.hpp new file mode 100644 index 0000000..5f0fe8d --- /dev/null +++ b/python/src/bind_ClusterFinder.hpp @@ -0,0 +1,77 @@ +#include "aare/ClusterCollector.hpp" +#include "aare/ClusterFileSink.hpp" +#include "aare/ClusterFinder.hpp" +#include "aare/ClusterFinderMT.hpp" +#include "aare/ClusterVector.hpp" +#include "aare/NDView.hpp" +#include "aare/Pedestal.hpp" +#include "np_helper.hpp" + +#include +#include +#include +#include +#include + +namespace py = pybind11; +using pd_type = double; + +using namespace aare; + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" + +template +void define_ClusterFinder(py::module &m, const std::string &typestr) { + auto class_name = fmt::format("ClusterFinder_{}", typestr); + + using ClusterType = Cluster; + + py::class_>( + m, class_name.c_str()) + .def(py::init, pd_type, size_t>(), py::arg("image_size"), + py::arg("n_sigma") = 5.0, py::arg("capacity") = 1'000'000) + .def("push_pedestal_frame", + [](ClusterFinder &self, + py::array_t frame) { + auto view = make_view_2d(frame); + self.push_pedestal_frame(view); + }) + .def("clear_pedestal", + &ClusterFinder::clear_pedestal) + .def_property_readonly( + "pedestal", + [](ClusterFinder &self) { + auto pd = new NDArray{}; + *pd = self.pedestal(); + return return_image_data(pd); + }) + .def_property_readonly( + "noise", + [](ClusterFinder &self) { + auto arr = new NDArray{}; + *arr = self.noise(); + return return_image_data(arr); + }) + .def( + "steal_clusters", + [](ClusterFinder &self, + bool realloc_same_capacity) { + ClusterVector clusters = + self.steal_clusters(realloc_same_capacity); + return clusters; + }, + py::arg("realloc_same_capacity") = false) + .def( + "find_clusters", + [](ClusterFinder &self, + py::array_t frame, uint64_t frame_number) { + auto view = make_view_2d(frame); + 
self.find_clusters(view, frame_number); + return; + }, + py::arg(), py::arg("frame_number") = 0); +} + +#pragma GCC diagnostic pop \ No newline at end of file diff --git a/python/src/bind_ClusterFinderMT.hpp b/python/src/bind_ClusterFinderMT.hpp new file mode 100644 index 0000000..d1769db --- /dev/null +++ b/python/src/bind_ClusterFinderMT.hpp @@ -0,0 +1,81 @@ +#include "aare/ClusterCollector.hpp" +#include "aare/ClusterFileSink.hpp" +#include "aare/ClusterFinder.hpp" +#include "aare/ClusterFinderMT.hpp" +#include "aare/ClusterVector.hpp" +#include "aare/NDView.hpp" +#include "aare/Pedestal.hpp" +#include "np_helper.hpp" + +#include +#include +#include +#include +#include + +namespace py = pybind11; +using pd_type = double; + +using namespace aare; + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" + +template +void define_ClusterFinderMT(py::module &m, + const std::string &typestr) { + auto class_name = fmt::format("ClusterFinderMT_{}", typestr); + + using ClusterType = Cluster; + + py::class_>( + m, class_name.c_str()) + .def(py::init, pd_type, size_t, size_t>(), + py::arg("image_size"), py::arg("n_sigma") = 5.0, + py::arg("capacity") = 2048, py::arg("n_threads") = 3) + .def("push_pedestal_frame", + [](ClusterFinderMT &self, + py::array_t frame) { + auto view = make_view_2d(frame); + self.push_pedestal_frame(view); + }) + .def( + "find_clusters", + [](ClusterFinderMT &self, + py::array_t frame, uint64_t frame_number) { + auto view = make_view_2d(frame); + self.find_clusters(view, frame_number); + return; + }, + py::arg(), py::arg("frame_number") = 0) + .def_property_readonly("cluster_size", [](ClusterFinderMT &self){ + return py::make_tuple(ClusterSizeX, ClusterSizeY); + }) + .def("clear_pedestal", + &ClusterFinderMT::clear_pedestal) + .def("sync", &ClusterFinderMT::sync) + .def("stop", &ClusterFinderMT::stop) + .def("start", &ClusterFinderMT::start) + .def( + "pedestal", + [](ClusterFinderMT &self, + size_t thread_index) { + auto pd = new NDArray{}; + *pd = self.pedestal(thread_index); + return return_image_data(pd); + }, + py::arg("thread_index") = 0) + .def( + "noise", + [](ClusterFinderMT &self, + size_t thread_index) { + auto arr = new NDArray{}; + *arr = self.noise(thread_index); + return return_image_data(arr); + }, + py::arg("thread_index") = 0); +} + + +#pragma GCC diagnostic pop \ No newline at end of file diff --git a/python/src/bind_ClusterVector.hpp b/python/src/bind_ClusterVector.hpp index db8c8a3..550db9a 100644 --- a/python/src/bind_ClusterVector.hpp +++ b/python/src/bind_ClusterVector.hpp @@ -101,4 +101,6 @@ void define_ClusterVector(py::module &m, const std::string &typestr) { return hitmap; }); -} \ No newline at end of file +} + +#pragma GCC diagnostic pop \ No newline at end of file diff --git a/python/src/cluster.hpp b/python/src/cluster.hpp deleted file mode 100644 index 58f137c..0000000 --- a/python/src/cluster.hpp +++ /dev/null @@ -1,211 +0,0 @@ -#include "aare/ClusterCollector.hpp" -#include "aare/ClusterFileSink.hpp" -#include "aare/ClusterFinder.hpp" -#include "aare/ClusterFinderMT.hpp" -#include "aare/ClusterVector.hpp" -#include "aare/NDView.hpp" -#include "aare/Pedestal.hpp" -#include "np_helper.hpp" - -#include -#include -#include -#include -#include - -namespace py = pybind11; -using pd_type = double; - -using namespace aare; - -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-parameter" - -template -void define_cluster(py::module &m, const std::string &typestr) { - auto class_name = 
fmt::format("Cluster{}", typestr); - - py::class_>( - m, class_name.c_str(), py::buffer_protocol()) - - .def(py::init([](uint8_t x, uint8_t y, py::array_t data) { - py::buffer_info buf_info = data.request(); - Cluster cluster; - cluster.x = x; - cluster.y = y; - auto r = data.template unchecked<1>(); // no bounds checks - for (py::ssize_t i = 0; i < data.size(); ++i) { - cluster.data[i] = r(i); - } - return cluster; - })); - - /* - .def_property( - "data", - [](ClusterType &c) -> py::array { - return py::array(py::buffer_info( - c.data, sizeof(Type), - py::format_descriptor::format(), // Type - // format - 1, // Number of dimensions - {static_cast(ClusterSizeX * - ClusterSizeY)}, // Shape (flattened) - {sizeof(Type)} // Stride (step size between elements) - )); - }, - [](ClusterType &c, py::array_t arr) { - py::buffer_info buf_info = arr.request(); - Type *ptr = static_cast(buf_info.ptr); - std::copy(ptr, ptr + ClusterSizeX * ClusterSizeY, - c.data); // TODO dont iterate over centers!!! - - }); - */ -} - -template -void define_cluster_finder_mt_bindings(py::module &m, - const std::string &typestr) { - auto class_name = fmt::format("ClusterFinderMT_{}", typestr); - - using ClusterType = Cluster; - - py::class_>( - m, class_name.c_str()) - .def(py::init, pd_type, size_t, size_t>(), - py::arg("image_size"), py::arg("n_sigma") = 5.0, - py::arg("capacity") = 2048, py::arg("n_threads") = 3) - .def("push_pedestal_frame", - [](ClusterFinderMT &self, - py::array_t frame) { - auto view = make_view_2d(frame); - self.push_pedestal_frame(view); - }) - .def( - "find_clusters", - [](ClusterFinderMT &self, - py::array_t frame, uint64_t frame_number) { - auto view = make_view_2d(frame); - self.find_clusters(view, frame_number); - return; - }, - py::arg(), py::arg("frame_number") = 0) - .def_property_readonly("cluster_size", [](ClusterFinderMT &self){ - return py::make_tuple(ClusterSizeX, ClusterSizeY); - }) - .def("clear_pedestal", - &ClusterFinderMT::clear_pedestal) - .def("sync", &ClusterFinderMT::sync) - .def("stop", &ClusterFinderMT::stop) - .def("start", &ClusterFinderMT::start) - .def( - "pedestal", - [](ClusterFinderMT &self, - size_t thread_index) { - auto pd = new NDArray{}; - *pd = self.pedestal(thread_index); - return return_image_data(pd); - }, - py::arg("thread_index") = 0) - .def( - "noise", - [](ClusterFinderMT &self, - size_t thread_index) { - auto arr = new NDArray{}; - *arr = self.noise(thread_index); - return return_image_data(arr); - }, - py::arg("thread_index") = 0); -} - -template -void define_cluster_collector_bindings(py::module &m, - const std::string &typestr) { - auto class_name = fmt::format("ClusterCollector_{}", typestr); - - using ClusterType = Cluster; - - py::class_>(m, class_name.c_str()) - .def(py::init *>()) - .def("stop", &ClusterCollector::stop) - .def( - "steal_clusters", - [](ClusterCollector &self) { - auto v = new std::vector>( - self.steal_clusters()); - return v; // TODO change!!! 
- }, - py::return_value_policy::take_ownership); -} - -template -void define_cluster_file_sink_bindings(py::module &m, - const std::string &typestr) { - auto class_name = fmt::format("ClusterFileSink_{}", typestr); - - using ClusterType = Cluster; - - py::class_>(m, class_name.c_str()) - .def(py::init *, - const std::filesystem::path &>()) - .def("stop", &ClusterFileSink::stop); -} - -template -void define_cluster_finder_bindings(py::module &m, const std::string &typestr) { - auto class_name = fmt::format("ClusterFinder_{}", typestr); - - using ClusterType = Cluster; - - py::class_>( - m, class_name.c_str()) - .def(py::init, pd_type, size_t>(), py::arg("image_size"), - py::arg("n_sigma") = 5.0, py::arg("capacity") = 1'000'000) - .def("push_pedestal_frame", - [](ClusterFinder &self, - py::array_t frame) { - auto view = make_view_2d(frame); - self.push_pedestal_frame(view); - }) - .def("clear_pedestal", - &ClusterFinder::clear_pedestal) - .def_property_readonly( - "pedestal", - [](ClusterFinder &self) { - auto pd = new NDArray{}; - *pd = self.pedestal(); - return return_image_data(pd); - }) - .def_property_readonly( - "noise", - [](ClusterFinder &self) { - auto arr = new NDArray{}; - *arr = self.noise(); - return return_image_data(arr); - }) - .def( - "steal_clusters", - [](ClusterFinder &self, - bool realloc_same_capacity) { - ClusterVector clusters = - self.steal_clusters(realloc_same_capacity); - return clusters; - }, - py::arg("realloc_same_capacity") = false) - .def( - "find_clusters", - [](ClusterFinder &self, - py::array_t frame, uint64_t frame_number) { - auto view = make_view_2d(frame); - self.find_clusters(view, frame_number); - return; - }, - py::arg(), py::arg("frame_number") = 0); -} -#pragma GCC diagnostic pop diff --git a/python/src/module.cpp b/python/src/module.cpp index 946a41b..5945afb 100644 --- a/python/src/module.cpp +++ b/python/src/module.cpp @@ -1,11 +1,15 @@ // Files with bindings to the different classes //New style file naming +#include "bind_Cluster.hpp" +#include "bind_ClusterCollector.hpp" +#include "bind_ClusterFinder.hpp" +#include "bind_ClusterFinderMT.hpp" +#include "bind_ClusterFile.hpp" +#include "bind_ClusterFileSink.hpp" #include "bind_ClusterVector.hpp" //TODO! 
migrate the other names -#include "cluster.hpp" -#include "cluster_file.hpp" #include "ctb_raw_file.hpp" #include "file.hpp" #include "fit.hpp" @@ -38,12 +42,12 @@ PYBIND11_MODULE(_aare, m) { define_interpolation_bindings(m); define_jungfrau_data_file_io_bindings(m); - define_cluster_file_io_bindings(m, "Cluster3x3i"); - define_cluster_file_io_bindings(m, "Cluster3x3d"); - define_cluster_file_io_bindings(m, "Cluster3x3f"); - define_cluster_file_io_bindings(m, "Cluster2x2i"); - define_cluster_file_io_bindings(m, "Cluster2x2f"); - define_cluster_file_io_bindings(m, "Cluster2x2d"); + define_ClusterFile(m, "Cluster3x3i"); + define_ClusterFile(m, "Cluster3x3d"); + define_ClusterFile(m, "Cluster3x3f"); + define_ClusterFile(m, "Cluster2x2i"); + define_ClusterFile(m, "Cluster2x2f"); + define_ClusterFile(m, "Cluster2x2d"); define_ClusterVector(m, "Cluster3x3i"); define_ClusterVector(m, "Cluster3x3d"); @@ -52,40 +56,40 @@ PYBIND11_MODULE(_aare, m) { define_ClusterVector(m, "Cluster2x2d"); define_ClusterVector(m, "Cluster2x2f"); - define_cluster_finder_bindings(m, "Cluster3x3i"); - define_cluster_finder_bindings(m, "Cluster3x3d"); - define_cluster_finder_bindings(m, "Cluster3x3f"); - define_cluster_finder_bindings(m, "Cluster2x2i"); - define_cluster_finder_bindings(m, "Cluster2x2d"); - define_cluster_finder_bindings(m, "Cluster2x2f"); + define_ClusterFinder(m, "Cluster3x3i"); + define_ClusterFinder(m, "Cluster3x3d"); + define_ClusterFinder(m, "Cluster3x3f"); + define_ClusterFinder(m, "Cluster2x2i"); + define_ClusterFinder(m, "Cluster2x2d"); + define_ClusterFinder(m, "Cluster2x2f"); - define_cluster_finder_mt_bindings(m, "Cluster3x3i"); - define_cluster_finder_mt_bindings(m, "Cluster3x3d"); - define_cluster_finder_mt_bindings(m, "Cluster3x3f"); - define_cluster_finder_mt_bindings(m, "Cluster2x2i"); - define_cluster_finder_mt_bindings(m, "Cluster2x2d"); - define_cluster_finder_mt_bindings(m, "Cluster2x2f"); + define_ClusterFinderMT(m, "Cluster3x3i"); + define_ClusterFinderMT(m, "Cluster3x3d"); + define_ClusterFinderMT(m, "Cluster3x3f"); + define_ClusterFinderMT(m, "Cluster2x2i"); + define_ClusterFinderMT(m, "Cluster2x2d"); + define_ClusterFinderMT(m, "Cluster2x2f"); - define_cluster_file_sink_bindings(m, "Cluster3x3i"); - define_cluster_file_sink_bindings(m, "Cluster3x3d"); - define_cluster_file_sink_bindings(m, "Cluster3x3f"); - define_cluster_file_sink_bindings(m, "Cluster2x2i"); - define_cluster_file_sink_bindings(m, "Cluster2x2d"); - define_cluster_file_sink_bindings(m, "Cluster2x2f"); + define_ClusterFileSink(m, "Cluster3x3i"); + define_ClusterFileSink(m, "Cluster3x3d"); + define_ClusterFileSink(m, "Cluster3x3f"); + define_ClusterFileSink(m, "Cluster2x2i"); + define_ClusterFileSink(m, "Cluster2x2d"); + define_ClusterFileSink(m, "Cluster2x2f"); - define_cluster_collector_bindings(m, "Cluster3x3i"); - define_cluster_collector_bindings(m, "Cluster3x3f"); - define_cluster_collector_bindings(m, "Cluster3x3d"); - define_cluster_collector_bindings(m, "Cluster2x2i"); - define_cluster_collector_bindings(m, "Cluster2x2f"); - define_cluster_collector_bindings(m, "Cluster2x2d"); + define_ClusterCollector(m, "Cluster3x3i"); + define_ClusterCollector(m, "Cluster3x3d"); + define_ClusterCollector(m, "Cluster3x3f"); + define_ClusterCollector(m, "Cluster2x2i"); + define_ClusterCollector(m, "Cluster2x2d"); + define_ClusterCollector(m, "Cluster2x2f"); - define_cluster(m, "3x3i"); - define_cluster(m, "3x3f"); - define_cluster(m, "3x3d"); - define_cluster(m, "2x2i"); - define_cluster(m, "2x2f"); - define_cluster(m, 
"2x2d"); + define_Cluster(m, "3x3i"); + define_Cluster(m, "3x3f"); + define_Cluster(m, "3x3d"); + define_Cluster(m, "2x2i"); + define_Cluster(m, "2x2f"); + define_Cluster(m, "2x2d"); register_calculate_eta(m); register_calculate_eta(m); From 1bc2fd770af08bd8d5aeedadec9145ff74c4b968 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Thu, 5 Jun 2025 08:57:59 +0200 Subject: [PATCH 07/13] Binding 5x5, 7x7 and 9x9 clusters in python (#188) - New binding code with macros to bind all cluster templates - Simplified factory function on the python side - 5x5, 7x7 and 9x9 bindings in python --- include/aare/ClusterFile.hpp | 8 ++- include/aare/ClusterFileSink.hpp | 7 ++- include/aare/ClusterFinder.hpp | 7 ++- include/aare/ClusterFinderMT.hpp | 7 +++ include/aare/logger.hpp | 2 +- python/aare/ClusterFinder.py | 85 +++++++++++++++++++------------- python/aare/__init__.py | 3 +- python/src/module.cpp | 85 +++++++++++++------------------- 8 files changed, 112 insertions(+), 92 deletions(-) diff --git a/include/aare/ClusterFile.hpp b/include/aare/ClusterFile.hpp index ef78874..e26e765 100644 --- a/include/aare/ClusterFile.hpp +++ b/include/aare/ClusterFile.hpp @@ -5,6 +5,8 @@ #include "aare/GainMap.hpp" #include "aare/NDArray.hpp" #include "aare/defs.hpp" +#include "aare/logger.hpp" + #include #include #include @@ -369,11 +371,15 @@ ClusterFile::read_frame_without_cut() { "Could not read number of clusters"); } + LOG(logDEBUG1) << "Reading " << n_clusters + << " clusters from frame " << frame_number; + ClusterVector clusters(n_clusters); clusters.set_frame_number(frame_number); - clusters.resize(n_clusters); + LOG(logDEBUG1) << "clusters.item_size(): " << clusters.item_size(); + if (fread(clusters.data(), clusters.item_size(), n_clusters, fp) != static_cast(n_clusters)) { throw std::runtime_error(LOCATION + "Could not read clusters"); diff --git a/include/aare/ClusterFileSink.hpp b/include/aare/ClusterFileSink.hpp index 810e63c..09190fe 100644 --- a/include/aare/ClusterFileSink.hpp +++ b/include/aare/ClusterFileSink.hpp @@ -21,7 +21,7 @@ class ClusterFileSink { void process() { m_stopped = false; - fmt::print("ClusterFileSink started\n"); + LOG(logDEBUG) << "ClusterFileSink started"; while (!m_stop_requested || !m_source->isEmpty()) { if (ClusterVector *clusters = m_source->frontPtr(); clusters != nullptr) { @@ -41,13 +41,16 @@ class ClusterFileSink { std::this_thread::sleep_for(m_default_wait); } } - fmt::print("ClusterFileSink stopped\n"); + LOG(logDEBUG) << "ClusterFileSink stopped"; m_stopped = true; } public: ClusterFileSink(ClusterFinderMT *source, const std::filesystem::path &fname) { + LOG(logDEBUG) << "ClusterFileSink: " + << "source: " << source->sink() + << ", file: " << fname.string(); m_source = source->sink(); m_thread = std::thread(&ClusterFileSink::process, this); m_file.open(fname, std::ios::binary); diff --git a/include/aare/ClusterFinder.hpp b/include/aare/ClusterFinder.hpp index ea11162..7a34722 100644 --- a/include/aare/ClusterFinder.hpp +++ b/include/aare/ClusterFinder.hpp @@ -38,7 +38,12 @@ class ClusterFinder { : m_image_size(image_size), m_nSigma(nSigma), c2(sqrt((ClusterSizeY + 1) / 2 * (ClusterSizeX + 1) / 2)), c3(sqrt(ClusterSizeX * ClusterSizeY)), - m_pedestal(image_size[0], image_size[1]), m_clusters(capacity) {}; + m_pedestal(image_size[0], image_size[1]), m_clusters(capacity) { + LOG(logDEBUG ) << "ClusterFinder: " + << "image_size: " << image_size[0] << "x" << image_size[1] + << ", nSigma: " << nSigma + << ", capacity: " << capacity; + } void 
push_pedestal_frame(NDView frame) { m_pedestal.push(frame); diff --git a/include/aare/ClusterFinderMT.hpp b/include/aare/ClusterFinderMT.hpp index 2dfb279..efc22d4 100644 --- a/include/aare/ClusterFinderMT.hpp +++ b/include/aare/ClusterFinderMT.hpp @@ -7,6 +7,7 @@ #include "aare/ClusterFinder.hpp" #include "aare/NDArray.hpp" +#include "aare/logger.hpp" #include "aare/ProducerConsumerQueue.hpp" namespace aare { @@ -123,6 +124,12 @@ class ClusterFinderMT { size_t capacity = 2000, size_t n_threads = 3) : m_n_threads(n_threads) { + LOG(logDEBUG1) << "ClusterFinderMT: " + << "image_size: " << image_size[0] << "x" << image_size[1] + << ", nSigma: " << nSigma + << ", capacity: " << capacity + << ", n_threads: " << n_threads; + for (size_t i = 0; i < n_threads; i++) { m_cluster_finders.push_back( std::make_unique< diff --git a/include/aare/logger.hpp b/include/aare/logger.hpp index 06e6feb..b93c091 100644 --- a/include/aare/logger.hpp +++ b/include/aare/logger.hpp @@ -37,7 +37,7 @@ enum TLogLevel { logINFOCYAN, logINFOMAGENTA, logINFO, - logDEBUG, + logDEBUG, // constructors, destructors etc. should still give too much output logDEBUG1, logDEBUG2, logDEBUG3, diff --git a/python/aare/ClusterFinder.py b/python/aare/ClusterFinder.py index 6e7c352..99bcc5f 100644 --- a/python/aare/ClusterFinder.py +++ b/python/aare/ClusterFinder.py @@ -1,22 +1,47 @@ -from ._aare import ClusterFinder_Cluster3x3i, ClusterFinder_Cluster2x2i, ClusterFinderMT_Cluster3x3i, ClusterFinderMT_Cluster2x2i, ClusterCollector_Cluster3x3i, ClusterCollector_Cluster2x2i +# from ._aare import ClusterFinder_Cluster3x3i, ClusterFinder_Cluster2x2i, ClusterFinderMT_Cluster3x3i, ClusterFinderMT_Cluster2x2i, ClusterCollector_Cluster3x3i, ClusterCollector_Cluster2x2i -from ._aare import ClusterFileSink_Cluster3x3i, ClusterFileSink_Cluster2x2i +# from ._aare import ClusterFileSink_Cluster3x3i, ClusterFileSink_Cluster2x2i + +from . import _aare import numpy as np +_supported_cluster_sizes = [(2,2), (3,3), (5,5), (7,7), (9,9),] + +# def _get_class() + +def _type_to_char(dtype): + if dtype == np.int32: + return 'i' + elif dtype == np.float32: + return 'f' + elif dtype == np.float64: + return 'd' + else: + raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32, np.float32, and np.float64 are supported.") + +def _get_class(name, cluster_size, dtype): + """ + Helper function to get the class based on the name, cluster size, and dtype. + """ + try: + class_name = f"{name}_Cluster{cluster_size[0]}x{cluster_size[1]}{_type_to_char(dtype)}" + cls = getattr(_aare, class_name) + except AttributeError: + raise ValueError(f"Unsupported combination of type and cluster size: {dtype}/{cluster_size} when requesting {class_name}") + return cls + + + def ClusterFinder(image_size, cluster_size, n_sigma=5, dtype = np.int32, capacity = 1024): """ Factory function to create a ClusterFinder object. Provides a cleaner syntax for the templated ClusterFinder in C++. """ - if dtype == np.int32 and cluster_size == (3,3): - return ClusterFinder_Cluster3x3i(image_size, n_sigma = n_sigma, capacity=capacity) - elif dtype == np.int32 and cluster_size == (2,2): - return ClusterFinder_Cluster2x2i(image_size, n_sigma = n_sigma, capacity=capacity) - else: - #TODO! add the other formats - raise ValueError(f"Unsupported dtype: {dtype}. 
Only np.int32 is supported.") + cls = _get_class("ClusterFinder", cluster_size, dtype) + return cls(image_size, n_sigma=n_sigma, capacity=capacity) + def ClusterFinderMT(image_size, cluster_size = (3,3), dtype=np.int32, n_sigma=5, capacity = 1024, n_threads = 3): @@ -25,15 +50,9 @@ def ClusterFinderMT(image_size, cluster_size = (3,3), dtype=np.int32, n_sigma=5, the templated ClusterFinderMT in C++. """ - if dtype == np.int32 and cluster_size == (3,3): - return ClusterFinderMT_Cluster3x3i(image_size, n_sigma = n_sigma, - capacity = capacity, n_threads = n_threads) - elif dtype == np.int32 and cluster_size == (2,2): - return ClusterFinderMT_Cluster2x2i(image_size, n_sigma = n_sigma, - capacity = capacity, n_threads = n_threads) - else: - #TODO! add the other formats - raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32 is supported.") + cls = _get_class("ClusterFinderMT", cluster_size, dtype) + return cls(image_size, n_sigma=n_sigma, capacity=capacity, n_threads=n_threads) + def ClusterCollector(clusterfindermt, cluster_size = (3,3), dtype=np.int32): @@ -42,14 +61,8 @@ def ClusterCollector(clusterfindermt, cluster_size = (3,3), dtype=np.int32): the templated ClusterCollector in C++. """ - if dtype == np.int32 and cluster_size == (3,3): - return ClusterCollector_Cluster3x3i(clusterfindermt) - elif dtype == np.int32 and cluster_size == (2,2): - return ClusterCollector_Cluster2x2i(clusterfindermt) - - else: - #TODO! add the other formats - raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32 is supported.") + cls = _get_class("ClusterCollector", cluster_size, dtype) + return cls(clusterfindermt) def ClusterFileSink(clusterfindermt, cluster_file, dtype=np.int32): """ @@ -57,11 +70,15 @@ def ClusterFileSink(clusterfindermt, cluster_file, dtype=np.int32): the templated ClusterCollector in C++. """ - if dtype == np.int32 and clusterfindermt.cluster_size == (3,3): - return ClusterFileSink_Cluster3x3i(clusterfindermt, cluster_file) - elif dtype == np.int32 and clusterfindermt.cluster_size == (2,2): - return ClusterFileSink_Cluster2x2i(clusterfindermt, cluster_file) - - else: - #TODO! add the other formats - raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32 is supported.") \ No newline at end of file + cls = _get_class("ClusterFileSink", clusterfindermt.cluster_size, dtype) + return cls(clusterfindermt, cluster_file) + + +def ClusterFile(fname, cluster_size=(3,3), dtype=np.int32): + """ + Factory function to create a ClusterFile object. Provides a cleaner syntax for + the templated ClusterFile in C++. + """ + + cls = _get_class("ClusterFile", cluster_size, dtype) + return cls(fname) diff --git a/python/aare/__init__.py b/python/aare/__init__.py index d2bbe0a..0b95702 100644 --- a/python/aare/__init__.py +++ b/python/aare/__init__.py @@ -5,13 +5,12 @@ from . 
import _aare from ._aare import File, RawMasterFile, RawSubFile, JungfrauDataFile from ._aare import Pedestal_d, Pedestal_f, ClusterFinder_Cluster3x3i, VarClusterFinder from ._aare import DetectorType -from ._aare import ClusterFile_Cluster3x3i as ClusterFile from ._aare import hitmap from ._aare import ROI # from ._aare import ClusterFinderMT, ClusterCollector, ClusterFileSink, ClusterVector_i -from .ClusterFinder import ClusterFinder, ClusterCollector, ClusterFinderMT, ClusterFileSink +from .ClusterFinder import ClusterFinder, ClusterCollector, ClusterFinderMT, ClusterFileSink, ClusterFile from .ClusterVector import ClusterVector diff --git a/python/src/module.cpp b/python/src/module.cpp index 5945afb..681dd4b 100644 --- a/python/src/module.cpp +++ b/python/src/module.cpp @@ -28,6 +28,25 @@ namespace py = pybind11; +/* MACRO that defines Cluster bindings for a specific size and type + +T - Storage type of the cluster data (int, float, double) +N - Number of rows in the cluster +M - Number of columns in the cluster +U - Type of the pixel data (e.g., uint16_t) +TYPE_CODE - A character representing the type code (e.g., 'i' for int, 'd' for double, 'f' for float) + +*/ +#define DEFINE_CLUSTER_BINDINGS(T, N, M, U, TYPE_CODE) \ + define_ClusterFile(m, "Cluster" #N "x" #M #TYPE_CODE); \ + define_ClusterVector(m, "Cluster" #N "x" #M #TYPE_CODE); \ + define_ClusterFinder(m, "Cluster" #N "x" #M #TYPE_CODE); \ + define_ClusterFinderMT(m, "Cluster" #N "x" #M #TYPE_CODE); \ + define_ClusterFileSink(m, "Cluster" #N "x" #M #TYPE_CODE); \ + define_ClusterCollector(m, "Cluster" #N "x" #M #TYPE_CODE); \ + define_Cluster(m, #N "x" #M #TYPE_CODE); \ + register_calculate_eta(m); + PYBIND11_MODULE(_aare, m) { define_file_io_bindings(m); define_raw_file_io_bindings(m); @@ -42,59 +61,23 @@ PYBIND11_MODULE(_aare, m) { define_interpolation_bindings(m); define_jungfrau_data_file_io_bindings(m); - define_ClusterFile(m, "Cluster3x3i"); - define_ClusterFile(m, "Cluster3x3d"); - define_ClusterFile(m, "Cluster3x3f"); - define_ClusterFile(m, "Cluster2x2i"); - define_ClusterFile(m, "Cluster2x2f"); - define_ClusterFile(m, "Cluster2x2d"); + DEFINE_CLUSTER_BINDINGS(int, 3, 3, uint16_t, i); + DEFINE_CLUSTER_BINDINGS(double, 3, 3, uint16_t, d); + DEFINE_CLUSTER_BINDINGS(float, 3, 3, uint16_t, f); - define_ClusterVector(m, "Cluster3x3i"); - define_ClusterVector(m, "Cluster3x3d"); - define_ClusterVector(m, "Cluster3x3f"); - define_ClusterVector(m, "Cluster2x2i"); - define_ClusterVector(m, "Cluster2x2d"); - define_ClusterVector(m, "Cluster2x2f"); + DEFINE_CLUSTER_BINDINGS(int, 2, 2, uint16_t, i); + DEFINE_CLUSTER_BINDINGS(double, 2, 2, uint16_t, d); + DEFINE_CLUSTER_BINDINGS(float, 2, 2, uint16_t, f); - define_ClusterFinder(m, "Cluster3x3i"); - define_ClusterFinder(m, "Cluster3x3d"); - define_ClusterFinder(m, "Cluster3x3f"); - define_ClusterFinder(m, "Cluster2x2i"); - define_ClusterFinder(m, "Cluster2x2d"); - define_ClusterFinder(m, "Cluster2x2f"); + DEFINE_CLUSTER_BINDINGS(int, 5, 5, uint16_t, i); + DEFINE_CLUSTER_BINDINGS(double, 5, 5, uint16_t, d); + DEFINE_CLUSTER_BINDINGS(float, 5, 5, uint16_t, f); - define_ClusterFinderMT(m, "Cluster3x3i"); - define_ClusterFinderMT(m, "Cluster3x3d"); - define_ClusterFinderMT(m, "Cluster3x3f"); - define_ClusterFinderMT(m, "Cluster2x2i"); - define_ClusterFinderMT(m, "Cluster2x2d"); - define_ClusterFinderMT(m, "Cluster2x2f"); + DEFINE_CLUSTER_BINDINGS(int, 7, 7, uint16_t, i); + DEFINE_CLUSTER_BINDINGS(double, 7, 7, uint16_t, d); + DEFINE_CLUSTER_BINDINGS(float, 7, 7, uint16_t, f); - 
define_ClusterFileSink(m, "Cluster3x3i"); - define_ClusterFileSink(m, "Cluster3x3d"); - define_ClusterFileSink(m, "Cluster3x3f"); - define_ClusterFileSink(m, "Cluster2x2i"); - define_ClusterFileSink(m, "Cluster2x2d"); - define_ClusterFileSink(m, "Cluster2x2f"); - - define_ClusterCollector(m, "Cluster3x3i"); - define_ClusterCollector(m, "Cluster3x3d"); - define_ClusterCollector(m, "Cluster3x3f"); - define_ClusterCollector(m, "Cluster2x2i"); - define_ClusterCollector(m, "Cluster2x2d"); - define_ClusterCollector(m, "Cluster2x2f"); - - define_Cluster(m, "3x3i"); - define_Cluster(m, "3x3f"); - define_Cluster(m, "3x3d"); - define_Cluster(m, "2x2i"); - define_Cluster(m, "2x2f"); - define_Cluster(m, "2x2d"); - - register_calculate_eta(m); - register_calculate_eta(m); - register_calculate_eta(m); - register_calculate_eta(m); - register_calculate_eta(m); - register_calculate_eta(m); + DEFINE_CLUSTER_BINDINGS(int, 9, 9, uint16_t, i); + DEFINE_CLUSTER_BINDINGS(double, 9, 9, uint16_t, d); + DEFINE_CLUSTER_BINDINGS(float, 9, 9, uint16_t, f); } From efd2338f549af11d1fa65de47b57a3f949a3f4b1 Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Thu, 5 Jun 2025 14:55:00 +0200 Subject: [PATCH 08/13] deploy docs on release only --- .github/workflows/build_docs.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_docs.yml b/.github/workflows/build_docs.yml index 24050a3..153c210 100644 --- a/.github/workflows/build_docs.yml +++ b/.github/workflows/build_docs.yml @@ -55,7 +55,7 @@ jobs: url: ${{ steps.deployment.outputs.page_url }} runs-on: ubuntu-latest needs: build - if: github.ref == 'refs/heads/main' + if: github.event_name == 'release' && github.event.action == 'published' steps: - name: Deploy to GitHub Pages id: deployment From f9751902a294c22df8a6a0212f2801be69e4f2d8 Mon Sep 17 00:00:00 2001 From: Alice Date: Tue, 10 Jun 2025 16:09:06 +0200 Subject: [PATCH 09/13] formatted main branch --- benchmarks/calculateeta_benchmark.cpp | 8 +- benchmarks/ndarray_benchmark.cpp | 210 ++++++++++----------- include/aare/ArrayExpr.hpp | 21 +-- include/aare/CircularFifo.hpp | 6 +- include/aare/ClusterFile.hpp | 4 +- include/aare/ClusterFileSink.hpp | 4 +- include/aare/ClusterFinder.hpp | 7 +- include/aare/ClusterFinderMT.hpp | 10 +- include/aare/CtbRawFile.hpp | 22 +-- include/aare/Dtype.hpp | 51 +++-- include/aare/File.hpp | 56 +++--- include/aare/FileInterface.hpp | 26 ++- include/aare/FilePtr.hpp | 2 +- include/aare/Fit.hpp | 65 ++++--- include/aare/Frame.hpp | 13 +- include/aare/JungfrauDataFile.hpp | 103 +++++----- include/aare/NDArray.hpp | 27 +-- include/aare/NDView.hpp | 71 ++++--- include/aare/NumpyFile.hpp | 42 +++-- include/aare/NumpyHelpers.hpp | 9 +- include/aare/Pedestal.hpp | 48 ++--- include/aare/PixelMap.hpp | 10 +- include/aare/ProducerConsumerQueue.hpp | 28 +-- include/aare/RawFile.hpp | 21 +-- include/aare/RawMasterFile.hpp | 6 +- include/aare/RawSubFile.hpp | 55 +++--- include/aare/VarClusterFinder.hpp | 40 ++-- include/aare/algorithm.hpp | 58 +++--- include/aare/decode.hpp | 17 +- include/aare/defs.hpp | 57 +++--- include/aare/geo_helpers.hpp | 13 +- include/aare/logger.hpp | 12 +- include/aare/utils/ifstream_helpers.hpp | 4 +- include/aare/utils/par.hpp | 22 +-- python/src/bind_Cluster.hpp | 4 +- python/src/bind_ClusterCollector.hpp | 4 +- python/src/bind_ClusterFile.hpp | 3 +- python/src/bind_ClusterFileSink.hpp | 9 +- python/src/bind_ClusterFinderMT.hpp | 12 +- python/src/bind_ClusterVector.hpp | 9 +- python/src/ctb_raw_file.hpp | 164 ++++++++-------- 
python/src/file.hpp | 120 ++++++------ python/src/fit.hpp | 29 ++- python/src/jungfrau_data_file.hpp | 9 +- python/src/module.cpp | 39 ++-- python/src/pedestal.hpp | 48 +++-- python/src/pixel_map.hpp | 63 ++++--- python/src/raw_file.hpp | 5 +- python/src/raw_master_file.hpp | 5 +- python/src/raw_sub_file.hpp | 43 ++--- python/src/var_cluster.hpp | 17 +- src/ClusterFile.cpp | 161 ++++++++-------- src/ClusterFile.test.cpp | 6 +- src/CtbRawFile.cpp | 30 +-- src/Dtype.cpp | 34 +++- src/Dtype.test.cpp | 4 +- src/File.cpp | 21 +-- src/FilePtr.cpp | 13 +- src/Fit.cpp | 238 +++++++++++++----------- src/Frame.cpp | 5 +- src/Frame.test.cpp | 4 +- src/JungfrauDataFile.cpp | 28 +-- src/JungfrauDataFile.test.cpp | 49 +++-- src/NDArray.test.cpp | 21 ++- src/NDView.test.cpp | 11 +- src/NumpyFile.cpp | 33 ++-- src/NumpyFile.test.cpp | 4 +- src/NumpyHelpers.cpp | 44 +++-- src/NumpyHelpers.test.cpp | 7 +- src/Pedestal.test.cpp | 15 +- src/PixelMap.cpp | 49 +++-- src/RawFile.cpp | 93 +++++---- src/RawFile.test.cpp | 61 +++--- src/RawMasterFile.cpp | 121 ++++++------ src/RawMasterFile.test.cpp | 215 +++++++++++---------- src/RawSubFile.cpp | 65 ++++--- src/RawSubFile.test.cpp | 39 ++-- src/algorithm.test.cpp | 5 +- src/decode.cpp | 57 +++--- src/decode.test.cpp | 19 +- src/defs.cpp | 22 +-- src/defs.test.cpp | 55 +++--- src/geo_helpers.cpp | 23 +-- src/geo_helpers.test.cpp | 50 +++-- src/utils/ifstream_helpers.cpp | 2 +- src/utils/task.test.cpp | 12 +- tests/test.cpp | 2 +- 87 files changed, 1710 insertions(+), 1639 deletions(-) diff --git a/benchmarks/calculateeta_benchmark.cpp b/benchmarks/calculateeta_benchmark.cpp index a320188..6c40c5c 100644 --- a/benchmarks/calculateeta_benchmark.cpp +++ b/benchmarks/calculateeta_benchmark.cpp @@ -41,8 +41,8 @@ BENCHMARK_F(ClusterFixture, Calculate2x2Eta)(benchmark::State &st) { } // almost takes double the time -BENCHMARK_F(ClusterFixture, - CalculateGeneralEtaFor2x2Cluster)(benchmark::State &st) { +BENCHMARK_F(ClusterFixture, CalculateGeneralEtaFor2x2Cluster) +(benchmark::State &st) { for (auto _ : st) { // This code gets timed Eta2 eta = calculate_eta2(cluster_2x2); @@ -59,8 +59,8 @@ BENCHMARK_F(ClusterFixture, Calculate3x3Eta)(benchmark::State &st) { } // almost takes double the time -BENCHMARK_F(ClusterFixture, - CalculateGeneralEtaFor3x3Cluster)(benchmark::State &st) { +BENCHMARK_F(ClusterFixture, CalculateGeneralEtaFor3x3Cluster) +(benchmark::State &st) { for (auto _ : st) { // This code gets timed Eta2 eta = calculate_eta2(cluster_3x3); diff --git a/benchmarks/ndarray_benchmark.cpp b/benchmarks/ndarray_benchmark.cpp index 55fa263..91a2d9b 100644 --- a/benchmarks/ndarray_benchmark.cpp +++ b/benchmarks/ndarray_benchmark.cpp @@ -1,136 +1,132 @@ -#include #include "aare/NDArray.hpp" - +#include using aare::NDArray; constexpr ssize_t size = 1024; class TwoArrays : public benchmark::Fixture { -public: - NDArray a{{size,size},0}; - NDArray b{{size,size},0}; - void SetUp(::benchmark::State& state) { - for(uint32_t i = 0; i < size; i++){ - for(uint32_t j = 0; j < size; j++){ - a(i, j)= i*j+1; - b(i, j)= i*j+1; - } + public: + NDArray a{{size, size}, 0}; + NDArray b{{size, size}, 0}; + void SetUp(::benchmark::State &state) { + for (uint32_t i = 0; i < size; i++) { + for (uint32_t j = 0; j < size; j++) { + a(i, j) = i * j + 1; + b(i, j) = i * j + 1; + } + } } - } - // void TearDown(::benchmark::State& state) { - // } + // void TearDown(::benchmark::State& state) { + // } }; - - - -BENCHMARK_F(TwoArrays, AddWithOperator)(benchmark::State& st) { - for (auto _ : st) { - // 
This code gets timed - NDArray res = a+b; - benchmark::DoNotOptimize(res); - } -} -BENCHMARK_F(TwoArrays, AddWithIndex)(benchmark::State& st) { - for (auto _ : st) { - // This code gets timed - NDArray res(a.shape()); - for (uint32_t i = 0; i < a.size(); i++) { - res(i) = a(i) + b(i); +BENCHMARK_F(TwoArrays, AddWithOperator)(benchmark::State &st) { + for (auto _ : st) { + // This code gets timed + NDArray res = a + b; + benchmark::DoNotOptimize(res); + } +} +BENCHMARK_F(TwoArrays, AddWithIndex)(benchmark::State &st) { + for (auto _ : st) { + // This code gets timed + NDArray res(a.shape()); + for (uint32_t i = 0; i < a.size(); i++) { + res(i) = a(i) + b(i); + } + benchmark::DoNotOptimize(res); } - benchmark::DoNotOptimize(res); - } } -BENCHMARK_F(TwoArrays, SubtractWithOperator)(benchmark::State& st) { - for (auto _ : st) { - // This code gets timed - NDArray res = a-b; - benchmark::DoNotOptimize(res); - } -} -BENCHMARK_F(TwoArrays, SubtractWithIndex)(benchmark::State& st) { - for (auto _ : st) { - // This code gets timed - NDArray res(a.shape()); - for (uint32_t i = 0; i < a.size(); i++) { - res(i) = a(i) - b(i); +BENCHMARK_F(TwoArrays, SubtractWithOperator)(benchmark::State &st) { + for (auto _ : st) { + // This code gets timed + NDArray res = a - b; + benchmark::DoNotOptimize(res); + } +} +BENCHMARK_F(TwoArrays, SubtractWithIndex)(benchmark::State &st) { + for (auto _ : st) { + // This code gets timed + NDArray res(a.shape()); + for (uint32_t i = 0; i < a.size(); i++) { + res(i) = a(i) - b(i); + } + benchmark::DoNotOptimize(res); } - benchmark::DoNotOptimize(res); - } } -BENCHMARK_F(TwoArrays, MultiplyWithOperator)(benchmark::State& st) { - for (auto _ : st) { - // This code gets timed - NDArray res = a*b; - benchmark::DoNotOptimize(res); - } -} -BENCHMARK_F(TwoArrays, MultiplyWithIndex)(benchmark::State& st) { - for (auto _ : st) { - // This code gets timed - NDArray res(a.shape()); - for (uint32_t i = 0; i < a.size(); i++) { - res(i) = a(i) * b(i); +BENCHMARK_F(TwoArrays, MultiplyWithOperator)(benchmark::State &st) { + for (auto _ : st) { + // This code gets timed + NDArray res = a * b; + benchmark::DoNotOptimize(res); + } +} +BENCHMARK_F(TwoArrays, MultiplyWithIndex)(benchmark::State &st) { + for (auto _ : st) { + // This code gets timed + NDArray res(a.shape()); + for (uint32_t i = 0; i < a.size(); i++) { + res(i) = a(i) * b(i); + } + benchmark::DoNotOptimize(res); } - benchmark::DoNotOptimize(res); - } } -BENCHMARK_F(TwoArrays, DivideWithOperator)(benchmark::State& st) { - for (auto _ : st) { - // This code gets timed - NDArray res = a/b; - benchmark::DoNotOptimize(res); - } -} -BENCHMARK_F(TwoArrays, DivideWithIndex)(benchmark::State& st) { - for (auto _ : st) { - // This code gets timed - NDArray res(a.shape()); - for (uint32_t i = 0; i < a.size(); i++) { - res(i) = a(i) / b(i); +BENCHMARK_F(TwoArrays, DivideWithOperator)(benchmark::State &st) { + for (auto _ : st) { + // This code gets timed + NDArray res = a / b; + benchmark::DoNotOptimize(res); + } +} +BENCHMARK_F(TwoArrays, DivideWithIndex)(benchmark::State &st) { + for (auto _ : st) { + // This code gets timed + NDArray res(a.shape()); + for (uint32_t i = 0; i < a.size(); i++) { + res(i) = a(i) / b(i); + } + benchmark::DoNotOptimize(res); } - benchmark::DoNotOptimize(res); - } } -BENCHMARK_F(TwoArrays, FourAddWithOperator)(benchmark::State& st) { - for (auto _ : st) { - // This code gets timed - NDArray res = a+b+a+b; - benchmark::DoNotOptimize(res); - } -} -BENCHMARK_F(TwoArrays, FourAddWithIndex)(benchmark::State& st) { - 
for (auto _ : st) { - // This code gets timed - NDArray res(a.shape()); - for (uint32_t i = 0; i < a.size(); i++) { - res(i) = a(i) + b(i) + a(i) + b(i); +BENCHMARK_F(TwoArrays, FourAddWithOperator)(benchmark::State &st) { + for (auto _ : st) { + // This code gets timed + NDArray res = a + b + a + b; + benchmark::DoNotOptimize(res); + } +} +BENCHMARK_F(TwoArrays, FourAddWithIndex)(benchmark::State &st) { + for (auto _ : st) { + // This code gets timed + NDArray res(a.shape()); + for (uint32_t i = 0; i < a.size(); i++) { + res(i) = a(i) + b(i) + a(i) + b(i); + } + benchmark::DoNotOptimize(res); } - benchmark::DoNotOptimize(res); - } } -BENCHMARK_F(TwoArrays, MultiplyAddDivideWithOperator)(benchmark::State& st) { - for (auto _ : st) { - // This code gets timed - NDArray res = a*a+b/a; - benchmark::DoNotOptimize(res); - } -} -BENCHMARK_F(TwoArrays, MultiplyAddDivideWithIndex)(benchmark::State& st) { - for (auto _ : st) { - // This code gets timed - NDArray res(a.shape()); - for (uint32_t i = 0; i < a.size(); i++) { - res(i) = a(i) * a(i) + b(i) / a(i); +BENCHMARK_F(TwoArrays, MultiplyAddDivideWithOperator)(benchmark::State &st) { + for (auto _ : st) { + // This code gets timed + NDArray res = a * a + b / a; + benchmark::DoNotOptimize(res); + } +} +BENCHMARK_F(TwoArrays, MultiplyAddDivideWithIndex)(benchmark::State &st) { + for (auto _ : st) { + // This code gets timed + NDArray res(a.shape()); + for (uint32_t i = 0; i < a.size(); i++) { + res(i) = a(i) * a(i) + b(i) / a(i); + } + benchmark::DoNotOptimize(res); } - benchmark::DoNotOptimize(res); - } } BENCHMARK_MAIN(); \ No newline at end of file diff --git a/include/aare/ArrayExpr.hpp b/include/aare/ArrayExpr.hpp index d326601..e5fb5d7 100644 --- a/include/aare/ArrayExpr.hpp +++ b/include/aare/ArrayExpr.hpp @@ -1,10 +1,9 @@ #pragma once -#include -#include -#include -#include #include "aare/defs.hpp" - +#include +#include +#include +#include namespace aare { @@ -15,7 +14,9 @@ template class ArrayExpr { auto operator[](size_t i) const { return static_cast(*this)[i]; } auto operator()(size_t i) const { return static_cast(*this)[i]; } auto size() const { return static_cast(*this).size(); } - std::array shape() const { return static_cast(*this).shape(); } + std::array shape() const { + return static_cast(*this).shape(); + } }; template @@ -47,7 +48,7 @@ class ArraySub : public ArrayExpr, Ndim> { }; template -class ArrayMul : public ArrayExpr,Ndim> { +class ArrayMul : public ArrayExpr, Ndim> { const A &arr1_; const B &arr2_; @@ -74,15 +75,13 @@ class ArrayDiv : public ArrayExpr, Ndim> { std::array shape() const { return arr1_.shape(); } }; - - template auto operator+(const ArrayExpr &arr1, const ArrayExpr &arr2) { return ArrayAdd, ArrayExpr, Ndim>(arr1, arr2); } template -auto operator-(const ArrayExpr &arr1, const ArrayExpr &arr2) { +auto operator-(const ArrayExpr &arr1, const ArrayExpr &arr2) { return ArraySub, ArrayExpr, Ndim>(arr1, arr2); } @@ -96,6 +95,4 @@ auto operator/(const ArrayExpr &arr1, const ArrayExpr &arr2) { return ArrayDiv, ArrayExpr, Ndim>(arr1, arr2); } - - } // namespace aare \ No newline at end of file diff --git a/include/aare/CircularFifo.hpp b/include/aare/CircularFifo.hpp index 8098082..853a89f 100644 --- a/include/aare/CircularFifo.hpp +++ b/include/aare/CircularFifo.hpp @@ -17,7 +17,8 @@ template class CircularFifo { public: CircularFifo() : CircularFifo(100){}; - CircularFifo(uint32_t size) : fifo_size(size), free_slots(size + 1), filled_slots(size + 1) { + CircularFifo(uint32_t size) + : fifo_size(size), 
free_slots(size + 1), filled_slots(size + 1) { // TODO! how do we deal with alignment for writing? alignas??? // Do we give the user a chance to provide memory locations? @@ -55,7 +56,8 @@ template class CircularFifo { bool try_pop_free(ItemType &v) { return free_slots.read(v); } - ItemType pop_value(std::chrono::nanoseconds wait, std::atomic &stopped) { + ItemType pop_value(std::chrono::nanoseconds wait, + std::atomic &stopped) { ItemType v; while (!filled_slots.read(v) && !stopped) { std::this_thread::sleep_for(wait); diff --git a/include/aare/ClusterFile.hpp b/include/aare/ClusterFile.hpp index e26e765..c0eca33 100644 --- a/include/aare/ClusterFile.hpp +++ b/include/aare/ClusterFile.hpp @@ -371,8 +371,8 @@ ClusterFile::read_frame_without_cut() { "Could not read number of clusters"); } - LOG(logDEBUG1) << "Reading " << n_clusters - << " clusters from frame " << frame_number; + LOG(logDEBUG1) << "Reading " << n_clusters << " clusters from frame " + << frame_number; ClusterVector clusters(n_clusters); clusters.set_frame_number(frame_number); diff --git a/include/aare/ClusterFileSink.hpp b/include/aare/ClusterFileSink.hpp index 09190fe..1900774 100644 --- a/include/aare/ClusterFileSink.hpp +++ b/include/aare/ClusterFileSink.hpp @@ -49,8 +49,8 @@ class ClusterFileSink { ClusterFileSink(ClusterFinderMT *source, const std::filesystem::path &fname) { LOG(logDEBUG) << "ClusterFileSink: " - << "source: " << source->sink() - << ", file: " << fname.string(); + << "source: " << source->sink() + << ", file: " << fname.string(); m_source = source->sink(); m_thread = std::thread(&ClusterFileSink::process, this); m_file.open(fname, std::ios::binary); diff --git a/include/aare/ClusterFinder.hpp b/include/aare/ClusterFinder.hpp index 7a34722..069d887 100644 --- a/include/aare/ClusterFinder.hpp +++ b/include/aare/ClusterFinder.hpp @@ -39,10 +39,9 @@ class ClusterFinder { c2(sqrt((ClusterSizeY + 1) / 2 * (ClusterSizeX + 1) / 2)), c3(sqrt(ClusterSizeX * ClusterSizeY)), m_pedestal(image_size[0], image_size[1]), m_clusters(capacity) { - LOG(logDEBUG ) << "ClusterFinder: " - << "image_size: " << image_size[0] << "x" << image_size[1] - << ", nSigma: " << nSigma - << ", capacity: " << capacity; + LOG(logDEBUG) << "ClusterFinder: " + << "image_size: " << image_size[0] << "x" << image_size[1] + << ", nSigma: " << nSigma << ", capacity: " << capacity; } void push_pedestal_frame(NDView frame) { diff --git a/include/aare/ClusterFinderMT.hpp b/include/aare/ClusterFinderMT.hpp index efc22d4..0340973 100644 --- a/include/aare/ClusterFinderMT.hpp +++ b/include/aare/ClusterFinderMT.hpp @@ -7,8 +7,8 @@ #include "aare/ClusterFinder.hpp" #include "aare/NDArray.hpp" -#include "aare/logger.hpp" #include "aare/ProducerConsumerQueue.hpp" +#include "aare/logger.hpp" namespace aare { @@ -125,10 +125,10 @@ class ClusterFinderMT { : m_n_threads(n_threads) { LOG(logDEBUG1) << "ClusterFinderMT: " - << "image_size: " << image_size[0] << "x" << image_size[1] - << ", nSigma: " << nSigma - << ", capacity: " << capacity - << ", n_threads: " << n_threads; + << "image_size: " << image_size[0] << "x" + << image_size[1] << ", nSigma: " << nSigma + << ", capacity: " << capacity + << ", n_threads: " << n_threads; for (size_t i = 0; i < n_threads; i++) { m_cluster_finders.push_back( diff --git a/include/aare/CtbRawFile.hpp b/include/aare/CtbRawFile.hpp index 68dab23..afae0a2 100644 --- a/include/aare/CtbRawFile.hpp +++ b/include/aare/CtbRawFile.hpp @@ -1,27 +1,27 @@ #pragma once #include "aare/FileInterface.hpp" -#include "aare/RawMasterFile.hpp" 
#include "aare/Frame.hpp" +#include "aare/RawMasterFile.hpp" #include #include -namespace aare{ +namespace aare { - -class CtbRawFile{ +class CtbRawFile { RawMasterFile m_master; std::ifstream m_file; size_t m_current_frame{0}; size_t m_current_subfile{0}; size_t m_num_subfiles{0}; -public: + + public: CtbRawFile(const std::filesystem::path &fname); - void read_into(std::byte *image_buf, DetectorHeader* header = nullptr); - void seek(size_t frame_index); //!< seek to the given frame index - size_t tell() const; //!< get the frame index of the file pointer + void read_into(std::byte *image_buf, DetectorHeader *header = nullptr); + void seek(size_t frame_index); //!< seek to the given frame index + size_t tell() const; //!< get the frame index of the file pointer // in the specific class we can expose more functionality @@ -29,13 +29,13 @@ public: size_t frames_in_file() const; RawMasterFile master() const; -private: + + private: void find_subfiles(); size_t sub_file_index(size_t frame_index) const { return frame_index / m_master.max_frames_per_file(); } void open_data_file(size_t subfile_index); - }; -} \ No newline at end of file +} // namespace aare \ No newline at end of file diff --git a/include/aare/Dtype.hpp b/include/aare/Dtype.hpp index 7e1e62a..7047264 100644 --- a/include/aare/Dtype.hpp +++ b/include/aare/Dtype.hpp @@ -6,31 +6,37 @@ namespace aare { -// The format descriptor is a single character that specifies the type of the data +// The format descriptor is a single character that specifies the type of the +// data // - python documentation: https://docs.python.org/3/c-api/arg.html#numbers -// - py::format_descriptor::format() (in pybind11) does not return the same format as +// - py::format_descriptor::format() (in pybind11) does not return the same +// format as // written in python.org documentation. -// - numpy also doesn't use the same format. and also numpy associates the format -// with variable bitdepth types. (e.g. long is int64 on linux64 and int32 on win64) -// https://numpy.org/doc/stable/reference/arrays.scalars.html +// - numpy also doesn't use the same format. and also numpy associates the +// format +// with variable bitdepth types. (e.g. long is int64 on linux64 and int32 on +// win64) https://numpy.org/doc/stable/reference/arrays.scalars.html // // github issue discussing this: // https://github.com/pybind/pybind11/issues/1908#issuecomment-658358767 // -// [IN LINUX] the difference is for int64 (long) and uint64 (unsigned long). The format -// descriptor is 'q' and 'Q' respectively and in the documentation it is 'l' and 'k'. +// [IN LINUX] the difference is for int64 (long) and uint64 (unsigned long). The +// format descriptor is 'q' and 'Q' respectively and in the documentation it is +// 'l' and 'k'. // in practice numpy doesn't seem to care when reading buffer info: the library // interprets 'q' or 'l' as int64 and 'Q' or 'L' as uint64. -// for this reason we decided to use the same format descriptor as pybind to avoid -// any further discrepancies. +// for this reason we decided to use the same format descriptor as pybind to +// avoid any further discrepancies. 
// in the following order: // int8, uint8, int16, uint16, int32, uint32, int64, uint64, float, double -const char DTYPE_FORMAT_DSC[] = {'b', 'B', 'h', 'H', 'i', 'I', 'q', 'Q', 'f', 'd'}; +const char DTYPE_FORMAT_DSC[] = {'b', 'B', 'h', 'H', 'i', + 'I', 'q', 'Q', 'f', 'd'}; // on linux64 & apple -const char NUMPY_FORMAT_DSC[] = {'b', 'B', 'h', 'H', 'i', 'I', 'l', 'L', 'f', 'd'}; +const char NUMPY_FORMAT_DSC[] = {'b', 'B', 'h', 'H', 'i', + 'I', 'l', 'L', 'f', 'd'}; /** * @brief enum class to define the endianess of the system */ @@ -52,12 +58,29 @@ enum class endian { */ class Dtype { public: - enum TypeIndex { INT8, UINT8, INT16, UINT16, INT32, UINT32, INT64, UINT64, FLOAT, DOUBLE, ERROR, NONE }; + enum TypeIndex { + INT8, + UINT8, + INT16, + UINT16, + INT32, + UINT32, + INT64, + UINT64, + FLOAT, + DOUBLE, + ERROR, + NONE + }; uint8_t bitdepth() const; size_t bytes() const; - std::string format_descr() const { return std::string(1, DTYPE_FORMAT_DSC[static_cast(m_type)]); } - std::string numpy_descr() const { return std::string(1, NUMPY_FORMAT_DSC[static_cast(m_type)]); } + std::string format_descr() const { + return std::string(1, DTYPE_FORMAT_DSC[static_cast(m_type)]); + } + std::string numpy_descr() const { + return std::string(1, NUMPY_FORMAT_DSC[static_cast(m_type)]); + } explicit Dtype(const std::type_info &t); explicit Dtype(std::string_view sv); diff --git a/include/aare/File.hpp b/include/aare/File.hpp index 1cef898..e8f1589 100644 --- a/include/aare/File.hpp +++ b/include/aare/File.hpp @@ -5,12 +5,12 @@ namespace aare { /** - * @brief RAII File class for reading, and in the future potentially writing - * image files in various formats. Minimal generic interface. For specail fuctions - * plase use the RawFile or NumpyFile classes directly. - * Wraps FileInterface to abstract the underlying file format - * @note **frame_number** refers the the frame number sent by the detector while **frame_index** - * is the position of the frame in the file + * @brief RAII File class for reading, and in the future potentially writing + * image files in various formats. Minimal generic interface. For specail + * fuctions plase use the RawFile or NumpyFile classes directly. 
Wraps + * FileInterface to abstract the underlying file format + * @note **frame_number** refers the the frame number sent by the detector while + * **frame_index** is the position of the frame in the file */ class File { std::unique_ptr file_impl; @@ -25,42 +25,46 @@ class File { * @throws std::invalid_argument if the file mode is not supported * */ - File(const std::filesystem::path &fname, const std::string &mode="r", const FileConfig &cfg = {}); - - /**Since the object is responsible for managing the file we disable copy construction */ - File(File const &other) = delete; + File(const std::filesystem::path &fname, const std::string &mode = "r", + const FileConfig &cfg = {}); + + /**Since the object is responsible for managing the file we disable copy + * construction */ + File(File const &other) = delete; /**The same goes for copy assignment */ - File& operator=(File const &other) = delete; + File &operator=(File const &other) = delete; File(File &&other) noexcept; - File& operator=(File &&other) noexcept; + File &operator=(File &&other) noexcept; ~File() = default; // void close(); //!< close the file - - Frame read_frame(); //!< read one frame from the file at the current position - Frame read_frame(size_t frame_index); //!< read one frame at the position given by frame number - std::vector read_n(size_t n_frames); //!< read n_frames from the file at the current position + + Frame + read_frame(); //!< read one frame from the file at the current position + Frame read_frame(size_t frame_index); //!< read one frame at the position + //!< given by frame number + std::vector read_n(size_t n_frames); //!< read n_frames from the file + //!< at the current position void read_into(std::byte *image_buf); void read_into(std::byte *image_buf, size_t n_frames); - - size_t frame_number(); //!< get the frame number at the current position - size_t frame_number(size_t frame_index); //!< get the frame number at the given frame index - size_t bytes_per_frame() const; - size_t pixels_per_frame() const; - size_t bytes_per_pixel() const; + + size_t frame_number(); //!< get the frame number at the current position + size_t frame_number( + size_t frame_index); //!< get the frame number at the given frame index + size_t bytes_per_frame() const; + size_t pixels_per_frame() const; + size_t bytes_per_pixel() const; size_t bitdepth() const; - void seek(size_t frame_index); //!< seek to the given frame index - size_t tell() const; //!< get the frame index of the file pointer + void seek(size_t frame_index); //!< seek to the given frame index + size_t tell() const; //!< get the frame index of the file pointer size_t total_frames() const; size_t rows() const; size_t cols() const; DetectorType detector_type() const; - - }; } // namespace aare \ No newline at end of file diff --git a/include/aare/FileInterface.hpp b/include/aare/FileInterface.hpp index 3736c46..6ca4755 100644 --- a/include/aare/FileInterface.hpp +++ b/include/aare/FileInterface.hpp @@ -20,8 +20,10 @@ struct FileConfig { uint64_t rows{}; uint64_t cols{}; bool operator==(const FileConfig &other) const { - return dtype == other.dtype && rows == other.rows && cols == other.cols && geometry == other.geometry && - detector_type == other.detector_type && max_frames_per_file == other.max_frames_per_file; + return dtype == other.dtype && rows == other.rows && + cols == other.cols && geometry == other.geometry && + detector_type == other.detector_type && + max_frames_per_file == other.max_frames_per_file; } bool operator!=(const FileConfig &other) const { 
return !(*this == other); } @@ -32,8 +34,11 @@ struct FileConfig { int max_frames_per_file{}; size_t total_frames{}; std::string to_string() const { - return "{ dtype: " + dtype.to_string() + ", rows: " + std::to_string(rows) + ", cols: " + std::to_string(cols) + - ", geometry: " + geometry.to_string() + ", detector_type: " + ToString(detector_type) + + return "{ dtype: " + dtype.to_string() + + ", rows: " + std::to_string(rows) + + ", cols: " + std::to_string(cols) + + ", geometry: " + geometry.to_string() + + ", detector_type: " + ToString(detector_type) + ", max_frames_per_file: " + std::to_string(max_frames_per_file) + ", total_frames: " + std::to_string(total_frames) + " }"; } @@ -42,7 +47,8 @@ struct FileConfig { /** * @brief FileInterface class to define the interface for file operations * @note parent class for NumpyFile and RawFile - * @note all functions are pure virtual and must be implemented by the derived classes + * @note all functions are pure virtual and must be implemented by the derived + * classes */ class FileInterface { public: @@ -64,17 +70,20 @@ class FileInterface { * @param n_frames number of frames to read * @return vector of frames */ - virtual std::vector read_n(size_t n_frames) = 0; // Is this the right interface? + virtual std::vector + read_n(size_t n_frames) = 0; // Is this the right interface? /** - * @brief read one frame from the file at the current position and store it in the provided buffer + * @brief read one frame from the file at the current position and store it + * in the provided buffer * @param image_buf buffer to store the frame * @return void */ virtual void read_into(std::byte *image_buf) = 0; /** - * @brief read n_frames from the file at the current position and store them in the provided buffer + * @brief read n_frames from the file at the current position and store them + * in the provided buffer * @param image_buf buffer to store the frames * @param n_frames number of frames to read * @return void @@ -134,7 +143,6 @@ class FileInterface { */ virtual size_t bitdepth() const = 0; - virtual DetectorType detector_type() const = 0; // function to query the data type of the file diff --git a/include/aare/FilePtr.hpp b/include/aare/FilePtr.hpp index 4ddc76e..2ffc293 100644 --- a/include/aare/FilePtr.hpp +++ b/include/aare/FilePtr.hpp @@ -12,7 +12,7 @@ class FilePtr { public: FilePtr() = default; - FilePtr(const std::filesystem::path& fname, const std::string& mode); + FilePtr(const std::filesystem::path &fname, const std::string &mode); FilePtr(const FilePtr &) = delete; // we don't want a copy FilePtr &operator=(const FilePtr &) = delete; // since we handle a resource FilePtr(FilePtr &&other); diff --git a/include/aare/Fit.hpp b/include/aare/Fit.hpp index eb9ac22..1beec0a 100644 --- a/include/aare/Fit.hpp +++ b/include/aare/Fit.hpp @@ -23,16 +23,19 @@ NDArray scurve2(NDView x, NDView par); } // namespace func - /** * @brief Estimate the initial parameters for a Gaussian fit */ -std::array gaus_init_par(const NDView x, const NDView y); +std::array gaus_init_par(const NDView x, + const NDView y); -std::array pol1_init_par(const NDView x, const NDView y); +std::array pol1_init_par(const NDView x, + const NDView y); -std::array scurve_init_par(const NDView x, const NDView y); -std::array scurve2_init_par(const NDView x, const NDView y); +std::array scurve_init_par(const NDView x, + const NDView y); +std::array scurve2_init_par(const NDView x, + const NDView y); static constexpr int DEFAULT_NUM_THREADS = 4; @@ -43,7 +46,6 @@ static constexpr int 
DEFAULT_NUM_THREADS = 4; */ NDArray fit_gaus(NDView x, NDView y); - /** * @brief Fit a 1D Gaussian to each pixel. Data layout [row, col, values] * @param x x values @@ -54,9 +56,6 @@ NDArray fit_gaus(NDView x, NDView y); NDArray fit_gaus(NDView x, NDView y, int n_threads = DEFAULT_NUM_THREADS); - - - /** * @brief Fit a 1D Gaussian with error estimates * @param x x values @@ -67,7 +66,7 @@ NDArray fit_gaus(NDView x, NDView y, */ void fit_gaus(NDView x, NDView y, NDView y_err, NDView par_out, NDView par_err_out, - double& chi2); + double &chi2); /** * @brief Fit a 1D Gaussian to each pixel with error estimates. Data layout @@ -80,9 +79,8 @@ void fit_gaus(NDView x, NDView y, NDView y_err, * @param n_threads number of threads to use */ void fit_gaus(NDView x, NDView y, NDView y_err, - NDView par_out, NDView par_err_out, NDView chi2_out, - int n_threads = DEFAULT_NUM_THREADS - ); + NDView par_out, NDView par_err_out, + NDView chi2_out, int n_threads = DEFAULT_NUM_THREADS); NDArray fit_pol1(NDView x, NDView y); @@ -90,26 +88,33 @@ NDArray fit_pol1(NDView x, NDView y, int n_threads = DEFAULT_NUM_THREADS); void fit_pol1(NDView x, NDView y, NDView y_err, - NDView par_out, NDView par_err_out, double& chi2); + NDView par_out, NDView par_err_out, + double &chi2); // TODO! not sure we need to offer the different version in C++ void fit_pol1(NDView x, NDView y, NDView y_err, - NDView par_out, NDView par_err_out,NDView chi2_out, - int n_threads = DEFAULT_NUM_THREADS); + NDView par_out, NDView par_err_out, + NDView chi2_out, int n_threads = DEFAULT_NUM_THREADS); -NDArray fit_scurve(NDView x, NDView y); -NDArray fit_scurve(NDView x, NDView y, int n_threads); -void fit_scurve(NDView x, NDView y, NDView y_err, - NDView par_out, NDView par_err_out, double& chi2); -void fit_scurve(NDView x, NDView y, NDView y_err, - NDView par_out, NDView par_err_out, NDView chi2_out, - int n_threads); +NDArray fit_scurve(NDView x, NDView y); +NDArray fit_scurve(NDView x, NDView y, + int n_threads); +void fit_scurve(NDView x, NDView y, + NDView y_err, NDView par_out, + NDView par_err_out, double &chi2); +void fit_scurve(NDView x, NDView y, + NDView y_err, NDView par_out, + NDView par_err_out, NDView chi2_out, + int n_threads); -NDArray fit_scurve2(NDView x, NDView y); -NDArray fit_scurve2(NDView x, NDView y, int n_threads); -void fit_scurve2(NDView x, NDView y, NDView y_err, - NDView par_out, NDView par_err_out, double& chi2); -void fit_scurve2(NDView x, NDView y, NDView y_err, - NDView par_out, NDView par_err_out, NDView chi2_out, - int n_threads); +NDArray fit_scurve2(NDView x, NDView y); +NDArray fit_scurve2(NDView x, NDView y, + int n_threads); +void fit_scurve2(NDView x, NDView y, + NDView y_err, NDView par_out, + NDView par_err_out, double &chi2); +void fit_scurve2(NDView x, NDView y, + NDView y_err, NDView par_out, + NDView par_err_out, NDView chi2_out, + int n_threads); } // namespace aare \ No newline at end of file diff --git a/include/aare/Frame.hpp b/include/aare/Frame.hpp index 02ea82f..27a2a4a 100644 --- a/include/aare/Frame.hpp +++ b/include/aare/Frame.hpp @@ -19,7 +19,7 @@ class Frame { uint32_t m_cols; Dtype m_dtype; std::byte *m_data; - //TODO! Add frame number? + // TODO! Add frame number? public: /** @@ -39,7 +39,7 @@ class Frame { * @param dtype data type of the pixels */ Frame(const std::byte *bytes, uint32_t rows, uint32_t cols, Dtype dtype); - ~Frame(){ delete[] m_data; }; + ~Frame() { delete[] m_data; }; /** @warning Copy is disabled to ensure performance when passing * frames around. 
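// Usage sketch for the single-spectrum fit_gaus() declared in Fit.hpp above.
// The NDView/NDArray template arguments (double, 1) are an assumption based on
// the surrounding API; the data values are placeholders.
#include <sys/types.h> // ssize_t
#include <vector>
#include "aare/Fit.hpp"
#include "aare/NDView.hpp"

void fit_one_spectrum() {
    std::vector<double> x{0, 1, 2, 3, 4, 5, 6, 7, 8};
    std::vector<double> y{1, 3, 8, 20, 34, 21, 9, 3, 1};
    aare::NDView<double, 1> xv(x.data(), {static_cast<ssize_t>(x.size())});
    aare::NDView<double, 1> yv(y.data(), {static_cast<ssize_t>(y.size())});
    auto par = aare::fit_gaus(xv, yv); // fitted parameters as a 1D NDArray
}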
Can discuss enabling it. @@ -52,7 +52,6 @@ class Frame { Frame &operator=(Frame &&other) noexcept; Frame(Frame &&other) noexcept; - Frame clone() const; //<- Explicit copy uint32_t rows() const; @@ -93,7 +92,7 @@ class Frame { if (row >= m_rows || col >= m_cols) { throw std::out_of_range("Invalid row or column index"); } - //TODO! add tests then reimplement using pixel_ptr + // TODO! add tests then reimplement using pixel_ptr T data; std::memcpy(&data, m_data + (row * m_cols + col) * m_dtype.bytes(), m_dtype.bytes()); @@ -102,9 +101,9 @@ class Frame { /** * @brief Return an NDView of the frame. This is the preferred way to access * data in the frame. - * + * * @tparam T type of the pixels - * @return NDView + * @return NDView */ template NDView view() { std::array shape = {static_cast(m_rows), @@ -113,7 +112,7 @@ class Frame { return NDView(data, shape); } - /** + /** * @brief Copy the frame data into a new NDArray. This is a deep copy. */ template NDArray image() { diff --git a/include/aare/JungfrauDataFile.hpp b/include/aare/JungfrauDataFile.hpp index 9b1bc48..f871b86 100644 --- a/include/aare/JungfrauDataFile.hpp +++ b/include/aare/JungfrauDataFile.hpp @@ -3,104 +3,113 @@ #include #include -#include "aare/FilePtr.hpp" -#include "aare/defs.hpp" -#include "aare/NDArray.hpp" #include "aare/FileInterface.hpp" +#include "aare/FilePtr.hpp" +#include "aare/NDArray.hpp" +#include "aare/defs.hpp" namespace aare { - -struct JungfrauDataHeader{ +struct JungfrauDataHeader { uint64_t framenum; uint64_t bunchid; }; class JungfrauDataFile : public FileInterface { - size_t m_rows{}; //!< number of rows in the image, from find_frame_size(); - size_t m_cols{}; //!< number of columns in the image, from find_frame_size(); + size_t m_rows{}; //!< number of rows in the image, from find_frame_size(); + size_t + m_cols{}; //!< number of columns in the image, from find_frame_size(); size_t m_bytes_per_frame{}; //!< number of bytes per frame excluding header - size_t m_total_frames{}; //!< total number of frames in the series of files - size_t m_offset{}; //!< file index of the first file, allow starting at non zero file - size_t m_current_file_index{}; //!< The index of the open file - size_t m_current_frame_index{}; //!< The index of the current frame (with reference to all files) + size_t m_total_frames{}; //!< total number of frames in the series of files + size_t m_offset{}; //!< file index of the first file, allow starting at non + //!< zero file + size_t m_current_file_index{}; //!< The index of the open file + size_t m_current_frame_index{}; //!< The index of the current frame (with + //!< reference to all files) - std::vector m_last_frame_in_file{}; //!< Used for seeking to the correct file + std::vector + m_last_frame_in_file{}; //!< Used for seeking to the correct file std::filesystem::path m_path; //!< path to the files std::string m_base_name; //!< base name used for formatting file names - FilePtr m_fp; //!< RAII wrapper for a FILE* + FilePtr m_fp; //!< RAII wrapper for a FILE* - using pixel_type = uint16_t; - static constexpr size_t header_size = sizeof(JungfrauDataHeader); - static constexpr size_t n_digits_in_file_index = 6; //!< to format file names + static constexpr size_t header_size = sizeof(JungfrauDataHeader); + static constexpr size_t n_digits_in_file_index = + 6; //!< to format file names public: JungfrauDataFile(const std::filesystem::path &fname); - std::string base_name() const; //!< get the base name of the file (without path and extension) - size_t bytes_per_frame() override; - 
size_t pixels_per_frame() override; - size_t bytes_per_pixel() const; + std::string base_name() + const; //!< get the base name of the file (without path and extension) + size_t bytes_per_frame() override; + size_t pixels_per_frame() override; + size_t bytes_per_pixel() const; size_t bitdepth() const override; - void seek(size_t frame_index) override; //!< seek to the given frame index (note not byte offset) - size_t tell() override; //!< get the frame index of the file pointer + void seek(size_t frame_index) + override; //!< seek to the given frame index (note not byte offset) + size_t tell() override; //!< get the frame index of the file pointer size_t total_frames() const override; size_t rows() const override; size_t cols() const override; - std::array shape() const; - size_t n_files() const; //!< get the number of files in the series. + std::array shape() const; + size_t n_files() const; //!< get the number of files in the series. // Extra functions needed for FileInterface Frame read_frame() override; Frame read_frame(size_t frame_number) override; - std::vector read_n(size_t n_frames=0) override; + std::vector read_n(size_t n_frames = 0) override; void read_into(std::byte *image_buf) override; void read_into(std::byte *image_buf, size_t n_frames) override; size_t frame_number(size_t frame_index) override; DetectorType detector_type() const override; /** - * @brief Read a single frame from the file into the given buffer. - * @param image_buf buffer to read the frame into. (Note the caller is responsible for allocating the buffer) + * @brief Read a single frame from the file into the given buffer. + * @param image_buf buffer to read the frame into. (Note the caller is + * responsible for allocating the buffer) * @param header pointer to a JungfrauDataHeader or nullptr to skip header) */ void read_into(std::byte *image_buf, JungfrauDataHeader *header = nullptr); /** - * @brief Read a multiple frames from the file into the given buffer. - * @param image_buf buffer to read the frame into. (Note the caller is responsible for allocating the buffer) + * @brief Read a multiple frames from the file into the given buffer. + * @param image_buf buffer to read the frame into. (Note the caller is + * responsible for allocating the buffer) * @param n_frames number of frames to read * @param header pointer to a JungfrauDataHeader or nullptr to skip header) */ - void read_into(std::byte *image_buf, size_t n_frames, JungfrauDataHeader *header = nullptr); - - /** + void read_into(std::byte *image_buf, size_t n_frames, + JungfrauDataHeader *header = nullptr); + + /** * @brief Read a single frame from the file into the given NDArray * @param image NDArray to read the frame into. */ - void read_into(NDArray* image, JungfrauDataHeader* header = nullptr); + void read_into(NDArray *image, + JungfrauDataHeader *header = nullptr); JungfrauDataHeader read_header(); - std::filesystem::path current_file() const { return fpath(m_current_file_index+m_offset); } + std::filesystem::path current_file() const { + return fpath(m_current_file_index + m_offset); + } - - private: + private: /** - * @brief Find the size of the frame in the file. (256x256, 256x1024, 512x1024) + * @brief Find the size of the frame in the file. 
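// Usage sketch for the JungfrauDataFile API declared above: loop over all
// frames with the raw-buffer overload of read_into() and inspect each header.
// The file name is a placeholder.
#include <cstddef>
#include <filesystem>
#include <vector>
#include "aare/JungfrauDataFile.hpp"

void scan_dat_series(const std::filesystem::path &fname) {
    aare::JungfrauDataFile f(fname);
    std::vector<std::byte> buf(f.bytes_per_frame());
    aare::JungfrauDataHeader hdr{};
    for (size_t i = 0; i < f.total_frames(); ++i) {
        f.read_into(buf.data(), &hdr); // one frame plus its header, advancing the file
        // hdr.framenum / hdr.bunchid describe the frame just read
    }
}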
(256x256, 256x1024, + * 512x1024) * @param fname path to the file - * @throws std::runtime_error if the file is empty or the size cannot be determined + * @throws std::runtime_error if the file is empty or the size cannot be + * determined */ - void find_frame_size(const std::filesystem::path &fname); + void find_frame_size(const std::filesystem::path &fname); - - void parse_fname(const std::filesystem::path &fname); - void scan_files(); - void open_file(size_t file_index); - std::filesystem::path fpath(size_t frame_index) const; - - - }; + void parse_fname(const std::filesystem::path &fname); + void scan_files(); + void open_file(size_t file_index); + std::filesystem::path fpath(size_t frame_index) const; +}; } // namespace aare \ No newline at end of file diff --git a/include/aare/NDArray.hpp b/include/aare/NDArray.hpp index 3c08a3c..1a501eb 100644 --- a/include/aare/NDArray.hpp +++ b/include/aare/NDArray.hpp @@ -21,7 +21,6 @@ TODO! Add expression templates for operators namespace aare { - template class NDArray : public ArrayExpr, Ndim> { std::array shape_; @@ -34,7 +33,7 @@ class NDArray : public ArrayExpr, Ndim> { * @brief Default constructor. Will construct an empty NDArray. * */ - NDArray() : shape_(), strides_(c_strides(shape_)), data_(nullptr) {}; + NDArray() : shape_(), strides_(c_strides(shape_)), data_(nullptr){}; /** * @brief Construct a new NDArray object with a given shape. @@ -48,7 +47,6 @@ class NDArray : public ArrayExpr, Ndim> { std::multiplies<>())), data_(new T[size_]) {} - /** * @brief Construct a new NDArray object with a shape and value. * @@ -69,8 +67,8 @@ class NDArray : public ArrayExpr, Ndim> { std::copy(v.begin(), v.end(), begin()); } - template - NDArray(const std::array& arr) : NDArray({Size}) { + template + NDArray(const std::array &arr) : NDArray({Size}) { std::copy(arr.begin(), arr.end(), begin()); } @@ -79,7 +77,6 @@ class NDArray : public ArrayExpr, Ndim> { : shape_(other.shape_), strides_(c_strides(shape_)), size_(other.size_), data_(other.data_) { other.reset(); // TODO! is this necessary? - } // Copy constructor @@ -113,10 +110,10 @@ class NDArray : public ArrayExpr, Ndim> { NDArray &operator-=(const NDArray &other); NDArray &operator*=(const NDArray &other); - //Write directly to the data array, or create a new one - template - NDArray& operator=(const std::array &other){ - if(Size != size_){ + // Write directly to the data array, or create a new one + template + NDArray &operator=(const std::array &other) { + if (Size != size_) { delete[] data_; size_ = Size; data_ = new T[size_]; @@ -157,11 +154,6 @@ class NDArray : public ArrayExpr, Ndim> { NDArray &operator&=(const T & /*mask*/); - - - - - void sqrt() { for (int i = 0; i < size_; ++i) { data_[i] = std::sqrt(data_[i]); @@ -345,9 +337,6 @@ NDArray &NDArray::operator+=(const T &value) { return *this; } - - - template NDArray NDArray::operator+(const T &value) { NDArray result = *this; @@ -448,6 +437,4 @@ NDArray load(const std::string &pathname, return img; } - - } // namespace aare \ No newline at end of file diff --git a/include/aare/NDView.hpp b/include/aare/NDView.hpp index 56054e2..e7ad002 100644 --- a/include/aare/NDView.hpp +++ b/include/aare/NDView.hpp @@ -1,6 +1,6 @@ #pragma once -#include "aare/defs.hpp" #include "aare/ArrayExpr.hpp" +#include "aare/defs.hpp" #include #include @@ -17,7 +17,8 @@ namespace aare { template using Shape = std::array; // TODO! 
fix mismatch between signed and unsigned -template Shape make_shape(const std::vector &shape) { +template +Shape make_shape(const std::vector &shape) { if (shape.size() != Ndim) throw std::runtime_error("Shape size mismatch"); Shape arr; @@ -25,14 +26,18 @@ template Shape make_shape(const std::vector &shape) return arr; } -template ssize_t element_offset(const Strides & /*unused*/) { return 0; } +template +ssize_t element_offset(const Strides & /*unused*/) { + return 0; +} template ssize_t element_offset(const Strides &strides, ssize_t i, Ix... index) { return i * strides[Dim] + element_offset(strides, index...); } -template std::array c_strides(const std::array &shape) { +template +std::array c_strides(const std::array &shape) { std::array strides{}; std::fill(strides.begin(), strides.end(), 1); for (ssize_t i = Ndim - 1; i > 0; --i) { @@ -41,14 +46,16 @@ template std::array c_strides(const std::array std::array make_array(const std::vector &vec) { +template +std::array make_array(const std::vector &vec) { assert(vec.size() == Ndim); std::array arr{}; std::copy_n(vec.begin(), Ndim, arr.begin()); return arr; } -template class NDView : public ArrayExpr, Ndim> { +template +class NDView : public ArrayExpr, Ndim> { public: NDView() = default; ~NDView() = default; @@ -57,17 +64,23 @@ template class NDView : public ArrayExpr shape) : buffer_(buffer), strides_(c_strides(shape)), shape_(shape), - size_(std::accumulate(std::begin(shape), std::end(shape), 1, std::multiplies<>())) {} + size_(std::accumulate(std::begin(shape), std::end(shape), 1, + std::multiplies<>())) {} // NDView(T *buffer, const std::vector &shape) - // : buffer_(buffer), strides_(c_strides(make_array(shape))), shape_(make_array(shape)), - // size_(std::accumulate(std::begin(shape), std::end(shape), 1, std::multiplies<>())) {} + // : buffer_(buffer), + // strides_(c_strides(make_array(shape))), + // shape_(make_array(shape)), + // size_(std::accumulate(std::begin(shape), std::end(shape), 1, + // std::multiplies<>())) {} - template std::enable_if_t operator()(Ix... index) { + template + std::enable_if_t operator()(Ix... index) { return buffer_[element_offset(strides_, index...)]; } - template std::enable_if_t operator()(Ix... index) const { + template + std::enable_if_t operator()(Ix... 
index) const { return buffer_[element_offset(strides_, index...)]; } @@ -94,16 +107,21 @@ template class NDView : public ArrayExpr()); } NDView &operator-=(const T val) { return elemenwise(val, std::minus()); } - NDView &operator*=(const T val) { return elemenwise(val, std::multiplies()); } - NDView &operator/=(const T val) { return elemenwise(val, std::divides()); } + NDView &operator*=(const T val) { + return elemenwise(val, std::multiplies()); + } + NDView &operator/=(const T val) { + return elemenwise(val, std::divides()); + } - NDView &operator/=(const NDView &other) { return elemenwise(other, std::divides()); } + NDView &operator/=(const NDView &other) { + return elemenwise(other, std::divides()); + } - - template - NDView& operator=(const std::array &arr) { - if(size() != static_cast(arr.size())) - throw std::runtime_error(LOCATION + "Array and NDView size mismatch"); + template NDView &operator=(const std::array &arr) { + if (size() != static_cast(arr.size())) + throw std::runtime_error(LOCATION + + "Array and NDView size mismatch"); std::copy(arr.begin(), arr.end(), begin()); return *this; } @@ -147,13 +165,15 @@ template class NDView : public ArrayExpr shape_{}; uint64_t size_{}; - template NDView &elemenwise(T val, BinaryOperation op) { + template + NDView &elemenwise(T val, BinaryOperation op) { for (uint64_t i = 0; i != size_; ++i) { buffer_[i] = op(buffer_[i], val); } return *this; } - template NDView &elemenwise(const NDView &other, BinaryOperation op) { + template + NDView &elemenwise(const NDView &other, BinaryOperation op) { for (uint64_t i = 0; i != size_; ++i) { buffer_[i] = op(buffer_[i], other.buffer_[i]); } @@ -170,9 +190,8 @@ template void NDView::print_all() const { } } - template -std::ostream& operator <<(std::ostream& os, const NDView& arr){ +std::ostream &operator<<(std::ostream &os, const NDView &arr) { for (auto row = 0; row < arr.shape(0); ++row) { for (auto col = 0; col < arr.shape(1); ++col) { os << std::setw(3); @@ -183,10 +202,8 @@ std::ostream& operator <<(std::ostream& os, const NDView& arr){ return os; } - -template -NDView make_view(std::vector& vec){ - return NDView(vec.data(), {static_cast(vec.size())}); +template NDView make_view(std::vector &vec) { + return NDView(vec.data(), {static_cast(vec.size())}); } } // namespace aare \ No newline at end of file diff --git a/include/aare/NumpyFile.hpp b/include/aare/NumpyFile.hpp index 7381a76..481a1a0 100644 --- a/include/aare/NumpyFile.hpp +++ b/include/aare/NumpyFile.hpp @@ -1,9 +1,8 @@ #pragma once #include "aare/Dtype.hpp" -#include "aare/defs.hpp" #include "aare/FileInterface.hpp" #include "aare/NumpyHelpers.hpp" - +#include "aare/defs.hpp" #include #include @@ -11,13 +10,12 @@ namespace aare { - - /** * @brief NumpyFile class to read and write numpy files * @note derived from FileInterface * @note implements all the pure virtual functions from FileInterface - * @note documentation for the functions can also be found in the FileInterface class + * @note documentation for the functions can also be found in the FileInterface + * class */ class NumpyFile : public FileInterface { @@ -28,26 +26,35 @@ class NumpyFile : public FileInterface { * @param mode file mode (r, w) * @param cfg file configuration */ - explicit NumpyFile(const std::filesystem::path &fname, const std::string &mode = "r", FileConfig cfg = {}); + explicit NumpyFile(const std::filesystem::path &fname, + const std::string &mode = "r", FileConfig cfg = {}); void write(Frame &frame); Frame read_frame() override { return 
get_frame(this->current_frame++); } - Frame read_frame(size_t frame_number) override { return get_frame(frame_number); } + Frame read_frame(size_t frame_number) override { + return get_frame(frame_number); + } std::vector read_n(size_t n_frames) override; - void read_into(std::byte *image_buf) override { return get_frame_into(this->current_frame++, image_buf); } + void read_into(std::byte *image_buf) override { + return get_frame_into(this->current_frame++, image_buf); + } void read_into(std::byte *image_buf, size_t n_frames) override; size_t frame_number(size_t frame_index) override { return frame_index; }; size_t bytes_per_frame() override; size_t pixels_per_frame() override; - void seek(size_t frame_number) override { this->current_frame = frame_number; } + void seek(size_t frame_number) override { + this->current_frame = frame_number; + } size_t tell() override { return this->current_frame; } size_t total_frames() const override { return m_header.shape[0]; } size_t rows() const override { return m_header.shape[1]; } size_t cols() const override { return m_header.shape[2]; } size_t bitdepth() const override { return m_header.dtype.bitdepth(); } - DetectorType detector_type() const override { return DetectorType::Unknown; } + DetectorType detector_type() const override { + return DetectorType::Unknown; + } /** * @brief get the data type of the numpy file @@ -70,7 +77,8 @@ class NumpyFile : public FileInterface { template NDArray load() { NDArray arr(make_shape(m_header.shape)); if (fseek(fp, static_cast(header_size), SEEK_SET)) { - throw std::runtime_error(LOCATION + "Error seeking to the start of the data"); + throw std::runtime_error(LOCATION + + "Error seeking to the start of the data"); } size_t rc = fread(arr.data(), sizeof(T), arr.size(), fp); if (rc != static_cast(arr.size())) { @@ -78,16 +86,20 @@ class NumpyFile : public FileInterface { } return arr; } - template void write(NDView &frame) { + template + void write(NDView &frame) { write_impl(frame.data(), frame.total_bytes()); } - template void write(NDArray &frame) { + template + void write(NDArray &frame) { write_impl(frame.data(), frame.total_bytes()); } - template void write(NDView &&frame) { + template + void write(NDView &&frame) { write_impl(frame.data(), frame.total_bytes()); } - template void write(NDArray &&frame) { + template + void write(NDArray &&frame) { write_impl(frame.data(), frame.total_bytes()); } diff --git a/include/aare/NumpyHelpers.hpp b/include/aare/NumpyHelpers.hpp index 8ed0ec7..2facc4c 100644 --- a/include/aare/NumpyHelpers.hpp +++ b/include/aare/NumpyHelpers.hpp @@ -40,15 +40,18 @@ bool parse_bool(const std::string &in); std::string get_value_from_map(const std::string &mapstr); -std::unordered_map parse_dict(std::string in, const std::vector &keys); +std::unordered_map +parse_dict(std::string in, const std::vector &keys); -template bool in_array(T val, const std::array &arr) { +template +bool in_array(T val, const std::array &arr) { return std::find(std::begin(arr), std::end(arr), val) != std::end(arr); } bool is_digits(const std::string &str); aare::Dtype parse_descr(std::string typestring); -size_t write_header(const std::filesystem::path &fname, const NumpyHeader &header); +size_t write_header(const std::filesystem::path &fname, + const NumpyHeader &header); size_t write_header(std::ostream &out, const NumpyHeader &header); } // namespace NumpyHelpers diff --git a/include/aare/Pedestal.hpp b/include/aare/Pedestal.hpp index d6223c1..0efc4b7 100644 --- a/include/aare/Pedestal.hpp +++ 
b/include/aare/Pedestal.hpp @@ -18,15 +18,15 @@ template class Pedestal { uint32_t m_samples; NDArray m_cur_samples; - - //TODO! in case of int needs to be changed to uint64_t + + // TODO! in case of int needs to be changed to uint64_t NDArray m_sum; NDArray m_sum2; - //Cache mean since it is used over and over in the ClusterFinder - //This optimization is related to the access pattern of the ClusterFinder - //Relies on having more reads than pushes to the pedestal - NDArray m_mean; + // Cache mean since it is used over and over in the ClusterFinder + // This optimization is related to the access pattern of the ClusterFinder + // Relies on having more reads than pushes to the pedestal + NDArray m_mean; public: Pedestal(uint32_t rows, uint32_t cols, uint32_t n_samples = 1000) @@ -42,9 +42,7 @@ template class Pedestal { } ~Pedestal() = default; - NDArray mean() { - return m_mean; - } + NDArray mean() { return m_mean; } SUM_TYPE mean(const uint32_t row, const uint32_t col) const { return m_mean(row, col); @@ -71,8 +69,6 @@ template class Pedestal { return variance_array; } - - NDArray std() { NDArray standard_deviation_array({m_rows, m_cols}); for (uint32_t i = 0; i < m_rows * m_cols; i++) { @@ -83,8 +79,6 @@ template class Pedestal { return standard_deviation_array; } - - void clear() { m_sum = 0; m_sum2 = 0; @@ -92,16 +86,12 @@ template class Pedestal { m_mean = 0; } - - void clear(const uint32_t row, const uint32_t col) { m_sum(row, col) = 0; m_sum2(row, col) = 0; m_cur_samples(row, col) = 0; m_mean(row, col) = 0; } - - template void push(NDView frame) { assert(frame.size() == m_rows * m_cols); @@ -122,7 +112,7 @@ template class Pedestal { /** * Push but don't update the cached mean. Speeds up the process * when initializing the pedestal. - * + * */ template void push_no_update(NDView frame) { assert(frame.size() == m_rows * m_cols); @@ -140,9 +130,6 @@ template class Pedestal { } } - - - template void push(Frame &frame) { assert(frame.rows() == static_cast(m_rows) && frame.cols() == static_cast(m_cols)); @@ -170,7 +157,8 @@ template class Pedestal { m_sum(row, col) += val - m_sum(row, col) / m_samples; m_sum2(row, col) += val * val - m_sum2(row, col) / m_samples; } - //Since we just did a push we know that m_cur_samples(row, col) is at least 1 + // Since we just did a push we know that m_cur_samples(row, col) is at + // least 1 m_mean(row, col) = m_sum(row, col) / m_cur_samples(row, col); } @@ -183,7 +171,8 @@ template class Pedestal { m_cur_samples(row, col)++; } else { m_sum(row, col) += val - m_sum(row, col) / m_cur_samples(row, col); - m_sum2(row, col) += val * val - m_sum2(row, col) / m_cur_samples(row, col); + m_sum2(row, col) += + val * val - m_sum2(row, col) / m_cur_samples(row, col); } } @@ -191,19 +180,16 @@ template class Pedestal { * @brief Update the mean of the pedestal. This is used after having done * push_no_update. It is not necessary to call this function after push. 
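// Usage sketch for the initialization pattern described in the comment above:
// fill the pedestal with push_no_update() and refresh the cached mean once at
// the end. The frame source is a placeholder; any 2D view of matching shape works.
#include <cstdint>
#include "aare/NDArray.hpp"
#include "aare/Pedestal.hpp"

void init_pedestal(aare::NDView<uint16_t, 2> frame) {
    aare::Pedestal<double> pd(static_cast<uint32_t>(frame.shape(0)),
                              static_cast<uint32_t>(frame.shape(1)), 1000);
    for (int i = 0; i < 1000; ++i)
        pd.push_no_update(frame); // skip the per-pixel mean update in the hot loop
    pd.update_mean();             // single refresh of the cached mean
    auto m = pd.mean();           // 2D array of pedestal values
}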
*/ - void update_mean(){ - m_mean = m_sum / m_cur_samples; - } + void update_mean() { m_mean = m_sum / m_cur_samples; } - template - void push_fast(const uint32_t row, const uint32_t col, const T val_){ - //Assume we reached the steady state where all pixels have - //m_samples samples + template + void push_fast(const uint32_t row, const uint32_t col, const T val_) { + // Assume we reached the steady state where all pixels have + // m_samples samples SUM_TYPE val = static_cast(val_); m_sum(row, col) += val - m_sum(row, col) / m_samples; m_sum2(row, col) += val * val - m_sum2(row, col) / m_samples; m_mean(row, col) = m_sum(row, col) / m_samples; } - }; } // namespace aare \ No newline at end of file diff --git a/include/aare/PixelMap.hpp b/include/aare/PixelMap.hpp index 1b7a890..9c30680 100644 --- a/include/aare/PixelMap.hpp +++ b/include/aare/PixelMap.hpp @@ -1,7 +1,7 @@ #pragma once -#include "aare/defs.hpp" #include "aare/NDArray.hpp" +#include "aare/defs.hpp" namespace aare { @@ -10,11 +10,11 @@ NDArray GenerateMoench05PixelMap(); NDArray GenerateMoench05PixelMap1g(); NDArray GenerateMoench05PixelMapOld(); -//Matterhorn02 -NDArrayGenerateMH02SingleCounterPixelMap(); +// Matterhorn02 +NDArray GenerateMH02SingleCounterPixelMap(); NDArray GenerateMH02FourCounterPixelMap(); -//Eiger -NDArrayGenerateEigerFlipRowsPixelMap(); +// Eiger +NDArray GenerateEigerFlipRowsPixelMap(); } // namespace aare \ No newline at end of file diff --git a/include/aare/ProducerConsumerQueue.hpp b/include/aare/ProducerConsumerQueue.hpp index 426b9e2..f189cec 100644 --- a/include/aare/ProducerConsumerQueue.hpp +++ b/include/aare/ProducerConsumerQueue.hpp @@ -18,9 +18,9 @@ // @author Jordan DeLong (delong.j@fb.com) // Changes made by PSD Detector Group: -// Copied: Line 34 constexpr std::size_t hardware_destructive_interference_size = 128; from folly/lang/Align.h -// Changed extension to .hpp -// Changed namespace to aare +// Copied: Line 34 constexpr std::size_t hardware_destructive_interference_size +// = 128; from folly/lang/Align.h Changed extension to .hpp Changed namespace to +// aare #pragma once @@ -45,15 +45,14 @@ template struct ProducerConsumerQueue { ProducerConsumerQueue(const ProducerConsumerQueue &) = delete; ProducerConsumerQueue &operator=(const ProducerConsumerQueue &) = delete; - - ProducerConsumerQueue(ProducerConsumerQueue &&other){ + ProducerConsumerQueue(ProducerConsumerQueue &&other) { size_ = other.size_; records_ = other.records_; other.records_ = nullptr; readIndex_ = other.readIndex_.load(std::memory_order_acquire); writeIndex_ = other.writeIndex_.load(std::memory_order_acquire); } - ProducerConsumerQueue &operator=(ProducerConsumerQueue &&other){ + ProducerConsumerQueue &operator=(ProducerConsumerQueue &&other) { size_ = other.size_; records_ = other.records_; other.records_ = nullptr; @@ -61,16 +60,17 @@ template struct ProducerConsumerQueue { writeIndex_ = other.writeIndex_.load(std::memory_order_acquire); return *this; } - - - ProducerConsumerQueue():ProducerConsumerQueue(2){}; + + ProducerConsumerQueue() : ProducerConsumerQueue(2){}; // size must be >= 2. // // Also, note that the number of usable slots in the queue at any // given time is actually (size-1), so if you start with an empty queue, // isFull() will return true after size-1 insertions. 
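// Usage sketch for the capacity rule stated above: a queue constructed with
// size 4 holds at most 3 elements. write()/read() follow the folly API this
// header was copied from.
#include <cassert>
#include "aare/ProducerConsumerQueue.hpp"

void capacity_demo() {
    aare::ProducerConsumerQueue<int> q(4);
    assert(q.isEmpty());
    assert(q.write(1) && q.write(2) && q.write(3));
    assert(q.isFull()); // full after size - 1 = 3 insertions
    int v = 0;
    assert(q.read(v) && v == 1); // FIFO: the oldest element comes out first
}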
explicit ProducerConsumerQueue(uint32_t size) - : size_(size), records_(static_cast(std::malloc(sizeof(T) * size))), readIndex_(0), writeIndex_(0) { + : size_(size), + records_(static_cast(std::malloc(sizeof(T) * size))), + readIndex_(0), writeIndex_(0) { assert(size >= 2); if (!records_) { throw std::bad_alloc(); @@ -154,7 +154,8 @@ template struct ProducerConsumerQueue { } bool isEmpty() const { - return readIndex_.load(std::memory_order_acquire) == writeIndex_.load(std::memory_order_acquire); + return readIndex_.load(std::memory_order_acquire) == + writeIndex_.load(std::memory_order_acquire); } bool isFull() const { @@ -175,7 +176,8 @@ template struct ProducerConsumerQueue { // be removing items concurrently). // * It is undefined to call this from any other thread. size_t sizeGuess() const { - int ret = writeIndex_.load(std::memory_order_acquire) - readIndex_.load(std::memory_order_acquire); + int ret = writeIndex_.load(std::memory_order_acquire) - + readIndex_.load(std::memory_order_acquire); if (ret < 0) { ret += size_; } @@ -192,7 +194,7 @@ template struct ProducerConsumerQueue { // const uint32_t size_; uint32_t size_; // T *const records_; - T* records_; + T *records_; alignas(hardware_destructive_interference_size) AtomicIndex readIndex_; alignas(hardware_destructive_interference_size) AtomicIndex writeIndex_; diff --git a/include/aare/RawFile.hpp b/include/aare/RawFile.hpp index 1cca1fd..9ffdb7c 100644 --- a/include/aare/RawFile.hpp +++ b/include/aare/RawFile.hpp @@ -1,11 +1,10 @@ #pragma once #include "aare/FileInterface.hpp" -#include "aare/RawMasterFile.hpp" #include "aare/Frame.hpp" #include "aare/NDArray.hpp" //for pixel map +#include "aare/RawMasterFile.hpp" #include "aare/RawSubFile.hpp" - #include namespace aare { @@ -53,10 +52,10 @@ class RawFile : public FileInterface { void read_into(std::byte *image_buf) override; void read_into(std::byte *image_buf, size_t n_frames) override; - //TODO! do we need to adapt the API? + // TODO! do we need to adapt the API? 
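// Usage sketch for the header-aware overload flagged by the TODO above: read
// one frame plus its DetectorHeader from a RawFile. Opening via the master
// file path is assumed from the rest of the library.
#include <cstddef>
#include <filesystem>
#include <vector>
#include "aare/RawFile.hpp"

void read_frame_and_header(const std::filesystem::path &master_fname) {
    aare::RawFile f(master_fname);
    std::vector<std::byte> buf(f.bytes_per_frame());
    aare::DetectorHeader hdr{};
    f.read_into(buf.data(), &hdr); // image data and packet header in one call
}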
void read_into(std::byte *image_buf, DetectorHeader *header); - void read_into(std::byte *image_buf, size_t n_frames, DetectorHeader *header); - + void read_into(std::byte *image_buf, size_t n_frames, + DetectorHeader *header); size_t frame_number(size_t frame_index) override; size_t bytes_per_frame() override; @@ -70,23 +69,20 @@ class RawFile : public FileInterface { size_t bitdepth() const override; xy geometry(); size_t n_modules() const; - + RawMasterFile master() const; - - DetectorType detector_type() const override; private: - /** * @brief read the frame at the given frame index into the image buffer * @param frame_number frame number to read * @param image_buf buffer to store the frame */ - void get_frame_into(size_t frame_index, std::byte *frame_buffer, DetectorHeader *header = nullptr); - + void get_frame_into(size_t frame_index, std::byte *frame_buffer, + DetectorHeader *header = nullptr); /** * @brief get the frame at the given frame index @@ -95,8 +91,6 @@ class RawFile : public FileInterface { */ Frame get_frame(size_t frame_index); - - /** * @brief read the header of the file * @param fname path to the data subfile @@ -108,5 +102,4 @@ class RawFile : public FileInterface { void find_geometry(); }; - } // namespace aare \ No newline at end of file diff --git a/include/aare/RawMasterFile.hpp b/include/aare/RawMasterFile.hpp index 4d143a6..2c64a90 100644 --- a/include/aare/RawMasterFile.hpp +++ b/include/aare/RawMasterFile.hpp @@ -45,7 +45,7 @@ class ScanParameters { int m_start = 0; int m_stop = 0; int m_step = 0; - //TODO! add settleTime, requires string to time conversion + // TODO! add settleTime, requires string to time conversion public: ScanParameters(const std::string &par); @@ -61,7 +61,6 @@ class ScanParameters { void increment_stop(); }; - /** * @brief Class for parsing a master file either in our .json format or the old * .raw format @@ -101,7 +100,6 @@ class RawMasterFile { std::optional m_roi; - public: RawMasterFile(const std::filesystem::path &fpath); @@ -129,10 +127,8 @@ class RawMasterFile { std::optional number_of_rows() const; std::optional quad() const; - std::optional roi() const; - ScanParameters scan_parameters() const; private: diff --git a/include/aare/RawSubFile.hpp b/include/aare/RawSubFile.hpp index 1059843..c38f540 100644 --- a/include/aare/RawSubFile.hpp +++ b/include/aare/RawSubFile.hpp @@ -10,32 +10,34 @@ namespace aare { /** - * @brief Class to read a singe subfile written in .raw format. Used from RawFile to read - * the entire detector. Can be used directly to read part of the image. + * @brief Class to read a singe subfile written in .raw format. Used from + * RawFile to read the entire detector. Can be used directly to read part of the + * image. 
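// Usage sketch matching the comment above: read a single module file directly,
// without going through RawFile. Geometry values are placeholders for one
// 512x1024 Jungfrau module written with 16-bit pixels.
#include <cstddef>
#include <filesystem>
#include <vector>
#include "aare/RawSubFile.hpp"

void read_one_module(const std::filesystem::path &fname) {
    aare::RawSubFile sub(fname, aare::DetectorType::Jungfrau, 512, 1024, 16);
    std::vector<std::byte> buf(sub.bytes_per_frame());
    for (size_t i = 0; i < sub.frames_in_file(); ++i)
        sub.read_into(buf.data()); // header pointer defaults to nullptr
}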
*/ class RawSubFile { protected: std::ifstream m_file; DetectorType m_detector_type; size_t m_bitdepth; - std::filesystem::path m_path; //!< path to the subfile - std::string m_base_name; //!< base name used for formatting file names - size_t m_offset{}; //!< file index of the first file, allow starting at non zero file - size_t m_total_frames{}; //!< total number of frames in the series of files + std::filesystem::path m_path; //!< path to the subfile + std::string m_base_name; //!< base name used for formatting file names + size_t m_offset{}; //!< file index of the first file, allow starting at non + //!< zero file + size_t m_total_frames{}; //!< total number of frames in the series of files size_t m_rows{}; size_t m_cols{}; size_t m_bytes_per_frame{}; - int m_module_index{}; - size_t m_current_file_index{}; //!< The index of the open file - size_t m_current_frame_index{}; //!< The index of the current frame (with reference to all files) - std::vector m_last_frame_in_file{}; //!< Used for seeking to the correct file + size_t m_current_file_index{}; //!< The index of the open file + size_t m_current_frame_index{}; //!< The index of the current frame (with + //!< reference to all files) + std::vector + m_last_frame_in_file{}; //!< Used for seeking to the correct file uint32_t m_pos_row{}; uint32_t m_pos_col{}; - - + std::optional> m_pixel_map; public: @@ -49,12 +51,14 @@ class RawSubFile { * @throws std::invalid_argument if the detector,type pair is not supported */ RawSubFile(const std::filesystem::path &fname, DetectorType detector, - size_t rows, size_t cols, size_t bitdepth, uint32_t pos_row = 0, uint32_t pos_col = 0); + size_t rows, size_t cols, size_t bitdepth, uint32_t pos_row = 0, + uint32_t pos_col = 0); ~RawSubFile() = default; /** * @brief Seek to the given frame number - * @note Puts the file pointer at the start of the header, not the start of the data + * @note Puts the file pointer at the start of the header, not the start of + * the data * @param frame_index frame position in file to seek to * @throws std::runtime_error if the frame number is out of range */ @@ -62,14 +66,15 @@ class RawSubFile { size_t tell(); void read_into(std::byte *image_buf, DetectorHeader *header = nullptr); - void read_into(std::byte *image_buf, size_t n_frames, DetectorHeader *header= nullptr); + void read_into(std::byte *image_buf, size_t n_frames, + DetectorHeader *header = nullptr); void get_part(std::byte *buffer, size_t frame_index); - + void read_header(DetectorHeader *header); - + size_t rows() const; size_t cols() const; - + size_t frame_number(size_t frame_index); size_t bytes_per_frame() const { return m_bytes_per_frame; } @@ -78,15 +83,13 @@ class RawSubFile { size_t frames_in_file() const { return m_total_frames; } -private: - template - void read_with_map(std::byte *image_buf); - - void parse_fname(const std::filesystem::path &fname); - void scan_files(); - void open_file(size_t file_index); - std::filesystem::path fpath(size_t file_index) const; + private: + template void read_with_map(std::byte *image_buf); + void parse_fname(const std::filesystem::path &fname); + void scan_files(); + void open_file(size_t file_index); + std::filesystem::path fpath(size_t file_index) const; }; } // namespace aare \ No newline at end of file diff --git a/include/aare/VarClusterFinder.hpp b/include/aare/VarClusterFinder.hpp index 596bf06..3679d39 100644 --- a/include/aare/VarClusterFinder.hpp +++ b/include/aare/VarClusterFinder.hpp @@ -38,11 +38,13 @@ template class VarClusterFinder { bool 
use_noise_map = false; int peripheralThresholdFactor_ = 5; int current_label; - const std::array di{{0, -1, -1, -1}}; // row ### 8-neighbour by scaning from left to right - const std::array dj{{-1, -1, 0, 1}}; // col ### 8-neighbour by scaning from top to bottom + const std::array di{ + {0, -1, -1, -1}}; // row ### 8-neighbour by scaning from left to right + const std::array dj{ + {-1, -1, 0, 1}}; // col ### 8-neighbour by scaning from top to bottom const std::array di_{{0, 0, -1, 1, -1, 1, -1, 1}}; // row const std::array dj_{{-1, 1, 0, 0, 1, -1, -1, 1}}; // col - std::map child; // heirachy: key: child; val: parent + std::map child; // heirachy: key: child; val: parent std::unordered_map h_size; std::vector hits; // std::vector> row @@ -50,7 +52,8 @@ template class VarClusterFinder { public: VarClusterFinder(Shape<2> shape, T threshold) - : shape_(shape), labeled_(shape, 0), peripheral_labeled_(shape, 0), binary_(shape), threshold_(threshold) { + : shape_(shape), labeled_(shape, 0), peripheral_labeled_(shape, 0), + binary_(shape), threshold_(threshold) { hits.reserve(2000); } @@ -60,7 +63,9 @@ template class VarClusterFinder { noiseMap = noise_map; use_noise_map = true; } - void set_peripheralThresholdFactor(int factor) { peripheralThresholdFactor_ = factor; } + void set_peripheralThresholdFactor(int factor) { + peripheralThresholdFactor_ = factor; + } void find_clusters(NDView img); void find_clusters_X(NDView img); void rec_FillHit(int clusterIndex, int i, int j); @@ -144,7 +149,8 @@ template int VarClusterFinder::check_neighbours(int i, int j) { } } -template void VarClusterFinder::find_clusters(NDView img) { +template +void VarClusterFinder::find_clusters(NDView img) { original_ = img; labeled_ = 0; peripheral_labeled_ = 0; @@ -156,7 +162,8 @@ template void VarClusterFinder::find_clusters(NDView img) store_clusters(); } -template void VarClusterFinder::find_clusters_X(NDView img) { +template +void VarClusterFinder::find_clusters_X(NDView img) { original_ = img; int clusterIndex = 0; for (int i = 0; i < shape_[0]; ++i) { @@ -175,7 +182,8 @@ template void VarClusterFinder::find_clusters_X(NDView img h_size.clear(); } -template void VarClusterFinder::rec_FillHit(int clusterIndex, int i, int j) { +template +void VarClusterFinder::rec_FillHit(int clusterIndex, int i, int j) { // printf("original_(%d, %d)=%f\n", i, j, original_(i,j)); // printf("h_size[%d].size=%d\n", clusterIndex, h_size[clusterIndex].size); if (h_size[clusterIndex].size < MAX_CLUSTER_SIZE) { @@ -203,11 +211,15 @@ template void VarClusterFinder::rec_FillHit(int clusterIndex, in } else { // if (h_size[clusterIndex].size < MAX_CLUSTER_SIZE){ // h_size[clusterIndex].size += 1; - // h_size[clusterIndex].rows[h_size[clusterIndex].size] = row; - // h_size[clusterIndex].cols[h_size[clusterIndex].size] = col; - // h_size[clusterIndex].enes[h_size[clusterIndex].size] = original_(row, col); + // h_size[clusterIndex].rows[h_size[clusterIndex].size] = + // row; h_size[clusterIndex].cols[h_size[clusterIndex].size] + // = col; + // h_size[clusterIndex].enes[h_size[clusterIndex].size] = + // original_(row, col); // }// ? 
weather to include peripheral pixels - original_(row, col) = 0; // remove peripheral pixels, to avoid potential influence for pedestal updating + original_(row, col) = + 0; // remove peripheral pixels, to avoid potential influence + // for pedestal updating } } } @@ -275,8 +287,8 @@ template void VarClusterFinder::store_clusters() { for (int i = 0; i < shape_[0]; ++i) { for (int j = 0; j < shape_[1]; ++j) { if (labeled_(i, j) != 0 || false - // (i-1 >= 0 and labeled_(i-1, j) != 0) or // another circle of peripheral pixels - // (j-1 >= 0 and labeled_(i, j-1) != 0) or + // (i-1 >= 0 and labeled_(i-1, j) != 0) or // another circle of + // peripheral pixels (j-1 >= 0 and labeled_(i, j-1) != 0) or // (i+1 < shape_[0] and labeled_(i+1, j) != 0) or // (j+1 < shape_[1] and labeled_(i, j+1) != 0) ) { diff --git a/include/aare/algorithm.hpp b/include/aare/algorithm.hpp index be2018f..c590e91 100644 --- a/include/aare/algorithm.hpp +++ b/include/aare/algorithm.hpp @@ -1,9 +1,9 @@ #pragma once +#include #include #include #include -#include namespace aare { /** @@ -15,26 +15,24 @@ namespace aare { * @param last iterator to the last element * @param val value to compare * @return index of the last element that is smaller than val - * + * */ template -size_t last_smaller(const T* first, const T* last, T val) { - for (auto iter = first+1; iter != last; ++iter) { +size_t last_smaller(const T *first, const T *last, T val) { + for (auto iter = first + 1; iter != last; ++iter) { if (*iter >= val) { - return std::distance(first, iter-1); + return std::distance(first, iter - 1); } } - return std::distance(first, last-1); + return std::distance(first, last - 1); } -template -size_t last_smaller(const NDArray& arr, T val) { +template size_t last_smaller(const NDArray &arr, T val) { return last_smaller(arr.begin(), arr.end(), val); } -template -size_t last_smaller(const std::vector& vec, T val) { - return last_smaller(vec.data(), vec.data()+vec.size(), val); +template size_t last_smaller(const std::vector &vec, T val) { + return last_smaller(vec.data(), vec.data() + vec.size(), val); } /** @@ -48,65 +46,59 @@ size_t last_smaller(const std::vector& vec, T val) { * @return index of the first element that is larger than val */ template -size_t first_larger(const T* first, const T* last, T val) { +size_t first_larger(const T *first, const T *last, T val) { for (auto iter = first; iter != last; ++iter) { if (*iter > val) { return std::distance(first, iter); } } - return std::distance(first, last-1); + return std::distance(first, last - 1); } -template -size_t first_larger(const NDArray& arr, T val) { +template size_t first_larger(const NDArray &arr, T val) { return first_larger(arr.begin(), arr.end(), val); } -template -size_t first_larger(const std::vector& vec, T val) { - return first_larger(vec.data(), vec.data()+vec.size(), val); +template size_t first_larger(const std::vector &vec, T val) { + return first_larger(vec.data(), vec.data() + vec.size(), val); } /** * @brief Index of the nearest element to val. - * Requires a sorted array. If there is no difference it takes the first element. + * Requires a sorted array. If there is no difference it takes the first + * element. 
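// Usage sketch for the search helpers in algorithm.hpp on a small sorted
// vector (values chosen only for illustration).
#include <cassert>
#include <vector>
#include "aare/algorithm.hpp"

void search_helpers_demo() {
    std::vector<double> t{0.0, 1.0, 2.0, 3.0, 4.0};
    assert(aare::last_smaller(t, 2.5) == 2);  // last element below 2.5 is t[2]
    assert(aare::first_larger(t, 2.5) == 3);  // first element above 2.5 is t[3]
    assert(aare::nearest_index(t, 2.4) == 2); // 2.0 is the closest value
    auto c = aare::cumsum(t);                 // {0, 1, 3, 6, 10}
    assert(aare::all_equal(std::vector<int>{7, 7, 7}));
}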
* @param first iterator to the first element * @param last iterator to the last element * @param val value to compare * @return index of the nearest element */ template -size_t nearest_index(const T* first, const T* last, T val) { - auto iter = std::min_element(first, last, - [val](T a, T b) { +size_t nearest_index(const T *first, const T *last, T val) { + auto iter = std::min_element(first, last, [val](T a, T b) { return std::abs(a - val) < std::abs(b - val); }); return std::distance(first, iter); } -template -size_t nearest_index(const NDArray& arr, T val) { +template size_t nearest_index(const NDArray &arr, T val) { return nearest_index(arr.begin(), arr.end(), val); } -template -size_t nearest_index(const std::vector& vec, T val) { - return nearest_index(vec.data(), vec.data()+vec.size(), val); +template size_t nearest_index(const std::vector &vec, T val) { + return nearest_index(vec.data(), vec.data() + vec.size(), val); } template -size_t nearest_index(const std::array& arr, T val) { - return nearest_index(arr.data(), arr.data()+arr.size(), val); +size_t nearest_index(const std::array &arr, T val) { + return nearest_index(arr.data(), arr.data() + arr.size(), val); } -template -std::vector cumsum(const std::vector& vec) { +template std::vector cumsum(const std::vector &vec) { std::vector result(vec.size()); std::partial_sum(vec.begin(), vec.end(), result.begin()); return result; } - template bool all_equal(const Container &c) { if (!c.empty() && std::all_of(begin(c), end(c), @@ -117,6 +109,4 @@ template bool all_equal(const Container &c) { return false; } - - } // namespace aare \ No newline at end of file diff --git a/include/aare/decode.hpp b/include/aare/decode.hpp index e784c4a..ec24447 100644 --- a/include/aare/decode.hpp +++ b/include/aare/decode.hpp @@ -1,26 +1,27 @@ #pragma once +#include #include #include -#include namespace aare { - uint16_t adc_sar_05_decode64to16(uint64_t input); uint16_t adc_sar_04_decode64to16(uint64_t input); -void adc_sar_05_decode64to16(NDView input, NDView output); -void adc_sar_04_decode64to16(NDView input, NDView output); - +void adc_sar_05_decode64to16(NDView input, + NDView output); +void adc_sar_04_decode64to16(NDView input, + NDView output); /** - * @brief Apply custom weights to a 16-bit input value. Will sum up weights[i]**i - * for each bit i that is set in the input value. + * @brief Apply custom weights to a 16-bit input value. Will sum up + * weights[i]**i for each bit i that is set in the input value. 
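// Usage sketch for the scalar apply_custom_weights() overload above. The
// declaration documents std::out_of_range when fewer than 16 weights are
// passed, so a full set of 16 is supplied; the weight values are placeholders.
#include <cstdint>
#include <sys/types.h> // ssize_t
#include <vector>
#include "aare/NDView.hpp"
#include "aare/decode.hpp"

double weighted_value(uint16_t raw) {
    std::vector<double> w(16, 2.0); // one weight per bit of the 16-bit input
    aare::NDView<double, 1> weights(w.data(), {static_cast<ssize_t>(w.size())});
    return aare::apply_custom_weights(raw, weights);
}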
* @throws std::out_of_range if weights.size() < 16 * @param input 16-bit input value * @param weights vector of weights, size must be less than or equal to 16 */ double apply_custom_weights(uint16_t input, const NDView weights); -void apply_custom_weights(NDView input, NDView output, const NDView weights); +void apply_custom_weights(NDView input, NDView output, + const NDView weights); } // namespace aare diff --git a/include/aare/defs.hpp b/include/aare/defs.hpp index ccf07a5..71d8c49 100644 --- a/include/aare/defs.hpp +++ b/include/aare/defs.hpp @@ -3,16 +3,15 @@ #include "aare/Dtype.hpp" #include -#include #include #include #include +#include #include #include #include #include - /** * @brief LOCATION macro to get the current location in the code */ @@ -20,28 +19,24 @@ std::string(__FILE__) + std::string(":") + std::to_string(__LINE__) + \ ":" + std::string(__func__) + ":" - - #ifdef AARE_CUSTOM_ASSERT -#define AARE_ASSERT(expr)\ - if (expr)\ - {}\ - else\ +#define AARE_ASSERT(expr) \ + if (expr) { \ + } else \ aare::assert_failed(LOCATION + " Assertion failed: " + #expr + "\n"); #else -#define AARE_ASSERT(cond)\ - do { (void)sizeof(cond); } while(0) +#define AARE_ASSERT(cond) \ + do { \ + (void)sizeof(cond); \ + } while (0) #endif - namespace aare { inline constexpr size_t bits_per_byte = 8; void assert_failed(const std::string &msg); - - class DynamicCluster { public: int cluster_sizeX; @@ -55,7 +50,7 @@ class DynamicCluster { public: DynamicCluster(int cluster_sizeX_, int cluster_sizeY_, - Dtype dt_ = Dtype(typeid(int32_t))) + Dtype dt_ = Dtype(typeid(int32_t))) : cluster_sizeX(cluster_sizeX_), cluster_sizeY(cluster_sizeY_), dt(dt_) { m_data = new std::byte[cluster_sizeX * cluster_sizeY * dt.bytes()]{}; @@ -179,24 +174,24 @@ template struct t_xy { }; using xy = t_xy; - /** - * @brief Class to hold the geometry of a module. Where pixel 0 is located and the size of the module + * @brief Class to hold the geometry of a module. Where pixel 0 is located and + * the size of the module */ -struct ModuleGeometry{ +struct ModuleGeometry { int origin_x{}; int origin_y{}; int height{}; int width{}; int row_index{}; - int col_index{}; + int col_index{}; }; /** - * @brief Class to hold the geometry of a detector. Number of modules, their size and where pixel 0 - * for each module is located + * @brief Class to hold the geometry of a detector. Number of modules, their + * size and where pixel 0 for each module is located */ -struct DetectorGeometry{ +struct DetectorGeometry { int modules_x{}; int modules_y{}; int pixels_x{}; @@ -204,35 +199,34 @@ struct DetectorGeometry{ int module_gap_row{}; int module_gap_col{}; std::vector module_pixel_0; - + auto size() const { return module_pixel_0.size(); } }; -struct ROI{ +struct ROI { ssize_t xmin{}; ssize_t xmax{}; ssize_t ymin{}; ssize_t ymax{}; - + ssize_t height() const { return ymax - ymin; } ssize_t width() const { return xmax - xmin; } bool contains(ssize_t x, ssize_t y) const { return x >= xmin && x < xmax && y >= ymin && y < ymax; } - }; - +}; using dynamic_shape = std::vector; -//TODO! Can we uniform enums between the libraries? +// TODO! Can we uniform enums between the libraries? /** - * @brief Enum class to identify different detectors. + * @brief Enum class to identify different detectors. 
* The values are the same as in slsDetectorPackage * Different spelling to avoid confusion with the slsDetectorPackage */ enum class DetectorType { - //Standard detectors match the enum values from slsDetectorPackage + // Standard detectors match the enum values from slsDetectorPackage Generic, Eiger, Gotthard, @@ -243,8 +237,9 @@ enum class DetectorType { Gotthard2, Xilinx_ChipTestBoard, - //Additional detectors used for defining processing. Variants of the standard ones. - Moench03=100, + // Additional detectors used for defining processing. Variants of the + // standard ones. + Moench03 = 100, Moench03_old, Unknown }; diff --git a/include/aare/geo_helpers.hpp b/include/aare/geo_helpers.hpp index d0d5d1a..c6454a5 100644 --- a/include/aare/geo_helpers.hpp +++ b/include/aare/geo_helpers.hpp @@ -1,16 +1,15 @@ #pragma once -#include "aare/defs.hpp" #include "aare/RawMasterFile.hpp" //ROI refactor away -namespace aare{ +#include "aare/defs.hpp" +namespace aare { /** * @brief Update the detector geometry given a region of interest - * - * @param geo - * @param roi - * @return DetectorGeometry + * + * @param geo + * @param roi + * @return DetectorGeometry */ DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, ROI roi); - } // namespace aare \ No newline at end of file diff --git a/include/aare/logger.hpp b/include/aare/logger.hpp index b93c091..0bedd7a 100644 --- a/include/aare/logger.hpp +++ b/include/aare/logger.hpp @@ -1,7 +1,6 @@ #pragma once /*Utility to log to console*/ - #include #include #include @@ -27,7 +26,6 @@ namespace aare { #define RESET "\x1b[0m" #define BOLD "\x1b[1m" - enum TLogLevel { logERROR, logWARNING, @@ -37,7 +35,8 @@ enum TLogLevel { logINFOCYAN, logINFOMAGENTA, logINFO, - logDEBUG, // constructors, destructors etc. should still give too much output + logDEBUG, // constructors, destructors etc. should still give too much + // output logDEBUG1, logDEBUG2, logDEBUG3, @@ -47,7 +46,9 @@ enum TLogLevel { // Compiler should optimize away anything below this value #ifndef AARE_LOG_LEVEL -#define AARE_LOG_LEVEL "LOG LEVEL NOT SET IN CMAKE" //This is configured in the main CMakeLists.txt +#define AARE_LOG_LEVEL \ + "LOG LEVEL NOT SET IN CMAKE" // This is configured in the main + // CMakeLists.txt #endif #define __AT__ \ @@ -72,7 +73,8 @@ class Logger { std::clog << os.str() << std::flush; // Single write } - static TLogLevel &ReportingLevel() { // singelton eeh TODO! Do we need a runtime option? + static TLogLevel & + ReportingLevel() { // singelton eeh TODO! Do we need a runtime option? 
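// Usage sketch for update_geometry_with_roi() from geo_helpers.hpp above:
// shrink a single-module DetectorGeometry to a region of interest. All field
// values are placeholders for one 512x1024 module.
#include "aare/geo_helpers.hpp"

aare::DetectorGeometry crop_to_roi() {
    aare::DetectorGeometry geo;
    geo.modules_x = 1;
    geo.modules_y = 1;
    geo.pixels_x = 1024;
    geo.pixels_y = 512;
    aare::ModuleGeometry mod;
    mod.height = 512;
    mod.width = 1024;
    geo.module_pixel_0.push_back(mod);

    aare::ROI roi;
    roi.xmin = 100;
    roi.xmax = 200;
    roi.ymin = 50;
    roi.ymax = 150;
    return aare::update_geometry_with_roi(geo, roi);
}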
static TLogLevel reportingLevel = logDEBUG5; return reportingLevel; } diff --git a/include/aare/utils/ifstream_helpers.hpp b/include/aare/utils/ifstream_helpers.hpp index 0a842ed..a8d0d21 100644 --- a/include/aare/utils/ifstream_helpers.hpp +++ b/include/aare/utils/ifstream_helpers.hpp @@ -4,9 +4,9 @@ #include namespace aare { -/** +/** * @brief Get the error message from an ifstream object -*/ + */ std::string ifstream_error_msg(std::ifstream &ifs); } // namespace aare \ No newline at end of file diff --git a/include/aare/utils/par.hpp b/include/aare/utils/par.hpp index efb1c77..e52c897 100644 --- a/include/aare/utils/par.hpp +++ b/include/aare/utils/par.hpp @@ -1,18 +1,18 @@ #include -#include #include +#include namespace aare { - template - void RunInParallel(F func, const std::vector>& tasks) { - // auto tasks = split_task(0, y.shape(0), n_threads); - std::vector threads; - for (auto &task : tasks) { - threads.push_back(std::thread(func, task.first, task.second)); - } - for (auto &thread : threads) { - thread.join(); - } +template +void RunInParallel(F func, const std::vector> &tasks) { + // auto tasks = split_task(0, y.shape(0), n_threads); + std::vector threads; + for (auto &task : tasks) { + threads.push_back(std::thread(func, task.first, task.second)); } + for (auto &thread : threads) { + thread.join(); + } +} } // namespace aare \ No newline at end of file diff --git a/python/src/bind_Cluster.hpp b/python/src/bind_Cluster.hpp index daf0946..690d0e8 100644 --- a/python/src/bind_Cluster.hpp +++ b/python/src/bind_Cluster.hpp @@ -1,10 +1,10 @@ #include "aare/Cluster.hpp" #include -#include #include -#include +#include #include +#include #include #include diff --git a/python/src/bind_ClusterCollector.hpp b/python/src/bind_ClusterCollector.hpp index 4836e6e..84172cb 100644 --- a/python/src/bind_ClusterCollector.hpp +++ b/python/src/bind_ClusterCollector.hpp @@ -21,11 +21,9 @@ using namespace aare; #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" - template -void define_ClusterCollector(py::module &m, - const std::string &typestr) { +void define_ClusterCollector(py::module &m, const std::string &typestr) { auto class_name = fmt::format("ClusterCollector_{}", typestr); using ClusterType = Cluster; diff --git a/python/src/bind_ClusterFile.hpp b/python/src/bind_ClusterFile.hpp index 8ce5360..c2c801d 100644 --- a/python/src/bind_ClusterFile.hpp +++ b/python/src/bind_ClusterFile.hpp @@ -21,8 +21,7 @@ using namespace ::aare; template -void define_ClusterFile(py::module &m, - const std::string &typestr) { +void define_ClusterFile(py::module &m, const std::string &typestr) { using ClusterType = Cluster; diff --git a/python/src/bind_ClusterFileSink.hpp b/python/src/bind_ClusterFileSink.hpp index 9b3a74d..f717de6 100644 --- a/python/src/bind_ClusterFileSink.hpp +++ b/python/src/bind_ClusterFileSink.hpp @@ -21,15 +21,9 @@ using namespace aare; #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" - - - - - template -void define_ClusterFileSink(py::module &m, - const std::string &typestr) { +void define_ClusterFileSink(py::module &m, const std::string &typestr) { auto class_name = fmt::format("ClusterFileSink_{}", typestr); using ClusterType = Cluster; @@ -40,5 +34,4 @@ void define_ClusterFileSink(py::module &m, .def("stop", &ClusterFileSink::stop); } - #pragma GCC diagnostic pop diff --git a/python/src/bind_ClusterFinderMT.hpp b/python/src/bind_ClusterFinderMT.hpp index d1769db..0ecbbd1 100644 --- a/python/src/bind_ClusterFinderMT.hpp +++ 
b/python/src/bind_ClusterFinderMT.hpp @@ -23,8 +23,7 @@ using namespace aare; template -void define_ClusterFinderMT(py::module &m, - const std::string &typestr) { +void define_ClusterFinderMT(py::module &m, const std::string &typestr) { auto class_name = fmt::format("ClusterFinderMT_{}", typestr); using ClusterType = Cluster; @@ -49,9 +48,11 @@ void define_ClusterFinderMT(py::module &m, return; }, py::arg(), py::arg("frame_number") = 0) - .def_property_readonly("cluster_size", [](ClusterFinderMT &self){ - return py::make_tuple(ClusterSizeX, ClusterSizeY); - }) + .def_property_readonly( + "cluster_size", + [](ClusterFinderMT &self) { + return py::make_tuple(ClusterSizeX, ClusterSizeY); + }) .def("clear_pedestal", &ClusterFinderMT::clear_pedestal) .def("sync", &ClusterFinderMT::sync) @@ -77,5 +78,4 @@ void define_ClusterFinderMT(py::module &m, py::arg("thread_index") = 0); } - #pragma GCC diagnostic pop \ No newline at end of file diff --git a/python/src/bind_ClusterVector.hpp b/python/src/bind_ClusterVector.hpp index 550db9a..9e9c4ab 100644 --- a/python/src/bind_ClusterVector.hpp +++ b/python/src/bind_ClusterVector.hpp @@ -44,10 +44,11 @@ void define_ClusterVector(py::module &m, const std::string &typestr) { auto *vec = new std::vector(self.sum()); return return_vector(vec); }) - .def("sum_2x2", [](ClusterVector &self){ - auto *vec = new std::vector(self.sum_2x2()); - return return_vector(vec); - }) + .def("sum_2x2", + [](ClusterVector &self) { + auto *vec = new std::vector(self.sum_2x2()); + return return_vector(vec); + }) .def_property_readonly("size", &ClusterVector::size) .def("item_size", &ClusterVector::item_size) .def_property_readonly("fmt", diff --git a/python/src/ctb_raw_file.hpp b/python/src/ctb_raw_file.hpp index c9b5310..5eb9652 100644 --- a/python/src/ctb_raw_file.hpp +++ b/python/src/ctb_raw_file.hpp @@ -6,8 +6,8 @@ #include "aare/RawMasterFile.hpp" #include "aare/RawSubFile.hpp" -#include "aare/defs.hpp" #include "aare/decode.hpp" +#include "aare/defs.hpp" // #include "aare/fClusterFileV2.hpp" #include "np_helper.hpp" @@ -26,95 +26,103 @@ using namespace ::aare; void define_ctb_raw_file_io_bindings(py::module &m) { -m.def("adc_sar_05_decode64to16", [](py::array_t input) { + m.def("adc_sar_05_decode64to16", [](py::array_t input) { + if (input.ndim() != 2) { + throw std::runtime_error( + "Only 2D arrays are supported at this moment"); + } - - if(input.ndim() != 2){ - throw std::runtime_error("Only 2D arrays are supported at this moment"); - } + // Create a 2D output array with the same shape as the input + std::vector shape{input.shape(0), + input.shape(1) / + static_cast(bits_per_byte)}; + py::array_t output(shape); - //Create a 2D output array with the same shape as the input - std::vector shape{input.shape(0), input.shape(1)/static_cast(bits_per_byte)}; - py::array_t output(shape); + // Create a view of the input and output arrays + NDView input_view( + reinterpret_cast(input.mutable_data()), + {output.shape(0), output.shape(1)}); + NDView output_view(output.mutable_data(), + {output.shape(0), output.shape(1)}); - //Create a view of the input and output arrays - NDView input_view(reinterpret_cast(input.mutable_data()), {output.shape(0), output.shape(1)}); - NDView output_view(output.mutable_data(), {output.shape(0), output.shape(1)}); + adc_sar_05_decode64to16(input_view, output_view); - adc_sar_05_decode64to16(input_view, output_view); - - return output; -}); - - -m.def("adc_sar_04_decode64to16", [](py::array_t input) { - - - if(input.ndim() != 2){ - throw 
std::runtime_error("Only 2D arrays are supported at this moment"); - } - - //Create a 2D output array with the same shape as the input - std::vector shape{input.shape(0), input.shape(1)/static_cast(bits_per_byte)}; - py::array_t output(shape); - - //Create a view of the input and output arrays - NDView input_view(reinterpret_cast(input.mutable_data()), {output.shape(0), output.shape(1)}); - NDView output_view(output.mutable_data(), {output.shape(0), output.shape(1)}); - - adc_sar_04_decode64to16(input_view, output_view); - - return output; -}); - -m.def( - "apply_custom_weights", - [](py::array_t &input, - py::array_t - &weights) { - - - // Create new array with same shape as the input array (uninitialized values) - py::buffer_info buf = input.request(); - py::array_t output(buf.shape); - - // Use NDViews to call into the C++ library - auto weights_view = make_view_1d(weights); - NDView input_view(input.mutable_data(), {input.size()}); - NDView output_view(output.mutable_data(), {output.size()}); - - apply_custom_weights(input_view, output_view, weights_view); return output; }); -py::class_(m, "CtbRawFile") - .def(py::init()) - .def("read_frame", - [](CtbRawFile &self) { - size_t image_size = self.image_size_in_bytes(); - py::array image; - std::vector shape; - shape.reserve(2); - shape.push_back(1); - shape.push_back(image_size); + m.def("adc_sar_04_decode64to16", [](py::array_t input) { + if (input.ndim() != 2) { + throw std::runtime_error( + "Only 2D arrays are supported at this moment"); + } - py::array_t header(1); + // Create a 2D output array with the same shape as the input + std::vector shape{input.shape(0), + input.shape(1) / + static_cast(bits_per_byte)}; + py::array_t output(shape); - // always read bytes - image = py::array_t(shape); + // Create a view of the input and output arrays + NDView input_view( + reinterpret_cast(input.mutable_data()), + {output.shape(0), output.shape(1)}); + NDView output_view(output.mutable_data(), + {output.shape(0), output.shape(1)}); - self.read_into(reinterpret_cast(image.mutable_data()), - header.mutable_data()); + adc_sar_04_decode64to16(input_view, output_view); - return py::make_tuple(header, image); - }) - .def("seek", &CtbRawFile::seek) - .def("tell", &CtbRawFile::tell) - .def("master", &CtbRawFile::master) + return output; + }); - .def_property_readonly("image_size_in_bytes", - &CtbRawFile::image_size_in_bytes) + m.def("apply_custom_weights", + [](py::array_t + &input, + py::array_t + &weights) { + // Create new array with same shape as the input array + // (uninitialized values) + py::buffer_info buf = input.request(); + py::array_t output(buf.shape); - .def_property_readonly("frames_in_file", &CtbRawFile::frames_in_file); + // Use NDViews to call into the C++ library + auto weights_view = make_view_1d(weights); + NDView input_view(input.mutable_data(), + {input.size()}); + NDView output_view(output.mutable_data(), + {output.size()}); + apply_custom_weights(input_view, output_view, weights_view); + return output; + }); + + py::class_(m, "CtbRawFile") + .def(py::init()) + .def("read_frame", + [](CtbRawFile &self) { + size_t image_size = self.image_size_in_bytes(); + py::array image; + std::vector shape; + shape.reserve(2); + shape.push_back(1); + shape.push_back(image_size); + + py::array_t header(1); + + // always read bytes + image = py::array_t(shape); + + self.read_into( + reinterpret_cast(image.mutable_data()), + header.mutable_data()); + + return py::make_tuple(header, image); + }) + .def("seek", &CtbRawFile::seek) + .def("tell", 
&CtbRawFile::tell) + .def("master", &CtbRawFile::master) + + .def_property_readonly("image_size_in_bytes", + &CtbRawFile::image_size_in_bytes) + + .def_property_readonly("frames_in_file", &CtbRawFile::frames_in_file); } diff --git a/python/src/file.hpp b/python/src/file.hpp index f97db96..262b4f8 100644 --- a/python/src/file.hpp +++ b/python/src/file.hpp @@ -20,17 +20,13 @@ namespace py = pybind11; using namespace ::aare; - - - -//Disable warnings for unused parameters, as we ignore some -//in the __exit__ method +// Disable warnings for unused parameters, as we ignore some +// in the __exit__ method #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" void define_file_io_bindings(py::module &m) { - py::enum_(m, "DetectorType") .value("Jungfrau", DetectorType::Jungfrau) .value("Eiger", DetectorType::Eiger) @@ -41,13 +37,10 @@ void define_file_io_bindings(py::module &m) { .value("ChipTestBoard", DetectorType::ChipTestBoard) .value("Unknown", DetectorType::Unknown); - PYBIND11_NUMPY_DTYPE(DetectorHeader, frameNumber, expLength, packetNumber, bunchId, timestamp, modId, row, column, reserved, debug, roundRNumber, detType, version, packetMask); - - py::class_(m, "File") .def(py::init([](const std::filesystem::path &fname) { return File(fname, "r", {}); @@ -112,45 +105,18 @@ void define_file_io_bindings(py::module &m) { reinterpret_cast(image.mutable_data())); return image; }) - .def("read_n", [](File &self, size_t n_frames) { - //adjust for actual frames left in the file - n_frames = std::min(n_frames, self.total_frames()-self.tell()); - if(n_frames == 0){ - throw std::runtime_error("No frames left in file"); - } - std::vector shape{n_frames, self.rows(), self.cols()}; - - py::array image; - const uint8_t item_size = self.bytes_per_pixel(); - if (item_size == 1) { - image = py::array_t(shape); - } else if (item_size == 2) { - image = py::array_t(shape); - } else if (item_size == 4) { - image = py::array_t(shape); - } - self.read_into(reinterpret_cast(image.mutable_data()), - n_frames); - return image; - }) - .def("__enter__", [](File &self) { return &self; }) - .def("__exit__", - [](File &self, - const std::optional &exc_type, - const std::optional &exc_value, - const std::optional &traceback) { - // self.close(); - }) - .def("__iter__", [](File &self) { return &self; }) - .def("__next__", [](File &self) { + .def("read_n", + [](File &self, size_t n_frames) { + // adjust for actual frames left in the file + n_frames = + std::min(n_frames, self.total_frames() - self.tell()); + if (n_frames == 0) { + throw std::runtime_error("No frames left in file"); + } + std::vector shape{n_frames, self.rows(), self.cols()}; - try{ - const uint8_t item_size = self.bytes_per_pixel(); py::array image; - std::vector shape; - shape.reserve(2); - shape.push_back(self.rows()); - shape.push_back(self.cols()); + const uint8_t item_size = self.bytes_per_pixel(); if (item_size == 1) { image = py::array_t(shape); } else if (item_size == 2) { @@ -159,14 +125,41 @@ void define_file_io_bindings(py::module &m) { image = py::array_t(shape); } self.read_into( - reinterpret_cast(image.mutable_data())); + reinterpret_cast(image.mutable_data()), + n_frames); return image; - }catch(std::runtime_error &e){ + }) + .def("__enter__", [](File &self) { return &self; }) + .def("__exit__", + [](File &self, const std::optional &exc_type, + const std::optional &exc_value, + const std::optional &traceback) { + // self.close(); + }) + .def("__iter__", [](File &self) { return &self; }) + .def("__next__", [](File 
&self) { + try { + const uint8_t item_size = self.bytes_per_pixel(); + py::array image; + std::vector shape; + shape.reserve(2); + shape.push_back(self.rows()); + shape.push_back(self.cols()); + if (item_size == 1) { + image = py::array_t(shape); + } else if (item_size == 2) { + image = py::array_t(shape); + } else if (item_size == 4) { + image = py::array_t(shape); + } + self.read_into( + reinterpret_cast(image.mutable_data())); + return image; + } catch (std::runtime_error &e) { throw py::stop_iteration(); } }); - py::class_(m, "FileConfig") .def(py::init<>()) .def_readwrite("rows", &FileConfig::rows) @@ -183,8 +176,6 @@ void define_file_io_bindings(py::module &m) { return ""; }); - - py::class_(m, "ScanParameters") .def(py::init()) .def(py::init()) @@ -195,7 +186,6 @@ void define_file_io_bindings(py::module &m) { .def_property_readonly("stop", &ScanParameters::stop) .def_property_readonly("step", &ScanParameters::step); - py::class_(m, "ROI") .def(py::init<>()) .def(py::init(), py::arg("xmin"), @@ -204,23 +194,21 @@ void define_file_io_bindings(py::module &m) { .def_readwrite("xmax", &ROI::xmax) .def_readwrite("ymin", &ROI::ymin) .def_readwrite("ymax", &ROI::ymax) - .def("__str__", [](const ROI& self){ - return fmt::format("ROI: xmin: {} xmax: {} ymin: {} ymax: {}", self.xmin, self.xmax, self.ymin, self.ymax); - }) - .def("__repr__", [](const ROI& self){ - return fmt::format("", self.xmin, self.xmax, self.ymin, self.ymax); - }) + .def("__str__", + [](const ROI &self) { + return fmt::format("ROI: xmin: {} xmax: {} ymin: {} ymax: {}", + self.xmin, self.xmax, self.ymin, self.ymax); + }) + .def("__repr__", + [](const ROI &self) { + return fmt::format( + "", self.xmin, + self.xmax, self.ymin, self.ymax); + }) .def("__iter__", [](const ROI &self) { - return py::make_iterator(&self.xmin, &self.ymax+1); //NOLINT + return py::make_iterator(&self.xmin, &self.ymax + 1); // NOLINT }); - - - - - - - #pragma GCC diagnostic pop // py::class_(m, "ClusterHeader") // .def(py::init<>()) diff --git a/python/src/fit.hpp b/python/src/fit.hpp index 97dafb5..47568d6 100644 --- a/python/src/fit.hpp +++ b/python/src/fit.hpp @@ -9,7 +9,6 @@ namespace py = pybind11; using namespace pybind11::literals; - void define_fit_bindings(py::module &m) { // TODO! 
Evaluate without converting to double @@ -61,7 +60,8 @@ void define_fit_bindings(py::module &m) { py::array_t par) { auto x_view = make_view_1d(x); auto par_view = make_view_1d(par); - auto y = new NDArray{aare::func::scurve(x_view, par_view)}; + auto y = + new NDArray{aare::func::scurve(x_view, par_view)}; return return_image_data(y); }, R"( @@ -82,7 +82,8 @@ void define_fit_bindings(py::module &m) { py::array_t par) { auto x_view = make_view_1d(x); auto par_view = make_view_1d(par); - auto y = new NDArray{aare::func::scurve2(x_view, par_view)}; + auto y = + new NDArray{aare::func::scurve2(x_view, par_view)}; return return_image_data(y); }, R"( @@ -139,7 +140,6 @@ n_threads : int, optional py::array_t y, py::array_t y_err, int n_threads) { - if (y.ndim() == 3) { // Allocate memory for the output // Need to have pointers to allow python to manage @@ -173,7 +173,6 @@ n_threads : int, optional auto y_view_err = make_view_1d(y_err); auto x_view = make_view_1d(x); - double chi2 = 0; aare::fit_gaus(x_view, y_view, y_view_err, par->view(), par_err->view(), chi2); @@ -248,11 +247,10 @@ n_threads : int, optional aare::fit_pol1(x_view, y_view, y_view_err, par->view(), par_err->view(), chi2->view(), n_threads); return py::dict("par"_a = return_image_data(par), - "par_err"_a = return_image_data(par_err), + "par_err"_a = return_image_data(par_err), "chi2"_a = return_image_data(chi2), "Ndf"_a = y.shape(2) - 2); - } else if (y.ndim() == 1) { auto par = new NDArray({2}); auto par_err = new NDArray({2}); @@ -289,7 +287,7 @@ n_threads : int, optional )", py::arg("x"), py::arg("y"), py::arg("y_err"), py::arg("n_threads") = 4); -//========= + //========= m.def( "fit_scurve", [](py::array_t x, @@ -333,13 +331,12 @@ n_threads : int, optional auto chi2 = new NDArray({y.shape(0), y.shape(1)}); aare::fit_scurve(x_view, y_view, y_view_err, par->view(), - par_err->view(), chi2->view(), n_threads); + par_err->view(), chi2->view(), n_threads); return py::dict("par"_a = return_image_data(par), - "par_err"_a = return_image_data(par_err), + "par_err"_a = return_image_data(par_err), "chi2"_a = return_image_data(chi2), "Ndf"_a = y.shape(2) - 2); - } else if (y.ndim() == 1) { auto par = new NDArray({2}); auto par_err = new NDArray({2}); @@ -351,7 +348,7 @@ n_threads : int, optional double chi2 = 0; aare::fit_scurve(x_view, y_view, y_view_err, par->view(), - par_err->view(), chi2); + par_err->view(), chi2); return py::dict("par"_a = return_image_data(par), "par_err"_a = return_image_data(par_err), "chi2"_a = chi2, "Ndf"_a = y.size() - 2); @@ -375,7 +372,6 @@ n_threads : int, optional The number of threads to use. Default is 4. 
)", py::arg("x"), py::arg("y"), py::arg("y_err"), py::arg("n_threads") = 4); - m.def( "fit_scurve2", @@ -420,13 +416,12 @@ n_threads : int, optional auto chi2 = new NDArray({y.shape(0), y.shape(1)}); aare::fit_scurve2(x_view, y_view, y_view_err, par->view(), - par_err->view(), chi2->view(), n_threads); + par_err->view(), chi2->view(), n_threads); return py::dict("par"_a = return_image_data(par), - "par_err"_a = return_image_data(par_err), + "par_err"_a = return_image_data(par_err), "chi2"_a = return_image_data(chi2), "Ndf"_a = y.shape(2) - 2); - } else if (y.ndim() == 1) { auto par = new NDArray({6}); auto par_err = new NDArray({6}); @@ -438,7 +433,7 @@ n_threads : int, optional double chi2 = 0; aare::fit_scurve2(x_view, y_view, y_view_err, par->view(), - par_err->view(), chi2); + par_err->view(), chi2); return py::dict("par"_a = return_image_data(par), "par_err"_a = return_image_data(par_err), "chi2"_a = chi2, "Ndf"_a = y.size() - 2); diff --git a/python/src/jungfrau_data_file.hpp b/python/src/jungfrau_data_file.hpp index 942f6a6..62a95c9 100644 --- a/python/src/jungfrau_data_file.hpp +++ b/python/src/jungfrau_data_file.hpp @@ -21,10 +21,7 @@ using namespace ::aare; auto read_dat_frame(JungfrauDataFile &self) { py::array_t header(1); - py::array_t image({ - self.rows(), - self.cols() - }); + py::array_t image({self.rows(), self.cols()}); self.read_into(reinterpret_cast(image.mutable_data()), header.mutable_data()); @@ -40,9 +37,7 @@ auto read_n_dat_frames(JungfrauDataFile &self, size_t n_frames) { } py::array_t header(n_frames); - py::array_t image({ - n_frames, self.rows(), - self.cols()}); + py::array_t image({n_frames, self.rows(), self.cols()}); self.read_into(reinterpret_cast(image.mutable_data()), n_frames, header.mutable_data()); diff --git a/python/src/module.cpp b/python/src/module.cpp index 681dd4b..fc04a9f 100644 --- a/python/src/module.cpp +++ b/python/src/module.cpp @@ -1,26 +1,26 @@ // Files with bindings to the different classes -//New style file naming +// New style file naming #include "bind_Cluster.hpp" #include "bind_ClusterCollector.hpp" -#include "bind_ClusterFinder.hpp" -#include "bind_ClusterFinderMT.hpp" #include "bind_ClusterFile.hpp" #include "bind_ClusterFileSink.hpp" +#include "bind_ClusterFinder.hpp" +#include "bind_ClusterFinderMT.hpp" #include "bind_ClusterVector.hpp" -//TODO! migrate the other names +// TODO! 
migrate the other names #include "ctb_raw_file.hpp" #include "file.hpp" #include "fit.hpp" #include "interpolation.hpp" -#include "raw_sub_file.hpp" -#include "raw_master_file.hpp" -#include "raw_file.hpp" -#include "pixel_map.hpp" -#include "var_cluster.hpp" -#include "pedestal.hpp" #include "jungfrau_data_file.hpp" +#include "pedestal.hpp" +#include "pixel_map.hpp" +#include "raw_file.hpp" +#include "raw_master_file.hpp" +#include "raw_sub_file.hpp" +#include "var_cluster.hpp" // Pybind stuff #include @@ -34,17 +34,18 @@ T - Storage type of the cluster data (int, float, double) N - Number of rows in the cluster M - Number of columns in the cluster U - Type of the pixel data (e.g., uint16_t) -TYPE_CODE - A character representing the type code (e.g., 'i' for int, 'd' for double, 'f' for float) +TYPE_CODE - A character representing the type code (e.g., 'i' for int, 'd' for +double, 'f' for float) */ -#define DEFINE_CLUSTER_BINDINGS(T, N, M, U, TYPE_CODE) \ - define_ClusterFile(m, "Cluster" #N "x" #M #TYPE_CODE); \ - define_ClusterVector(m, "Cluster" #N "x" #M #TYPE_CODE); \ - define_ClusterFinder(m, "Cluster" #N "x" #M #TYPE_CODE); \ - define_ClusterFinderMT(m, "Cluster" #N "x" #M #TYPE_CODE); \ - define_ClusterFileSink(m, "Cluster" #N "x" #M #TYPE_CODE); \ - define_ClusterCollector(m, "Cluster" #N "x" #M #TYPE_CODE); \ - define_Cluster(m, #N "x" #M #TYPE_CODE); \ +#define DEFINE_CLUSTER_BINDINGS(T, N, M, U, TYPE_CODE) \ + define_ClusterFile(m, "Cluster" #N "x" #M #TYPE_CODE); \ + define_ClusterVector(m, "Cluster" #N "x" #M #TYPE_CODE); \ + define_ClusterFinder(m, "Cluster" #N "x" #M #TYPE_CODE); \ + define_ClusterFinderMT(m, "Cluster" #N "x" #M #TYPE_CODE); \ + define_ClusterFileSink(m, "Cluster" #N "x" #M #TYPE_CODE); \ + define_ClusterCollector(m, "Cluster" #N "x" #M #TYPE_CODE); \ + define_Cluster(m, #N "x" #M #TYPE_CODE); \ register_calculate_eta(m); PYBIND11_MODULE(_aare, m) { diff --git a/python/src/pedestal.hpp b/python/src/pedestal.hpp index 77148dc..23d8247 100644 --- a/python/src/pedestal.hpp +++ b/python/src/pedestal.hpp @@ -9,7 +9,8 @@ namespace py = pybind11; -template void define_pedestal_bindings(py::module &m, const std::string &name) { +template +void define_pedestal_bindings(py::module &m, const std::string &name) { py::class_>(m, name.c_str()) .def(py::init()) .def(py::init()) @@ -19,16 +20,18 @@ template void define_pedestal_bindings(py::module &m, const *mea = self.mean(); return return_image_data(mea); }) - .def("variance", [](Pedestal &self) { - auto var = new NDArray{}; - *var = self.variance(); - return return_image_data(var); - }) - .def("std", [](Pedestal &self) { - auto std = new NDArray{}; - *std = self.std(); - return return_image_data(std); - }) + .def("variance", + [](Pedestal &self) { + auto var = new NDArray{}; + *var = self.variance(); + return return_image_data(var); + }) + .def("std", + [](Pedestal &self) { + auto std = new NDArray{}; + *std = self.std(); + return return_image_data(std); + }) .def("clear", py::overload_cast<>(&Pedestal::clear)) .def_property_readonly("rows", &Pedestal::rows) .def_property_readonly("cols", &Pedestal::cols) @@ -39,14 +42,19 @@ template void define_pedestal_bindings(py::module &m, const [&](Pedestal &pedestal) { return Pedestal(pedestal); }) - //TODO! 
add push for other data types - .def("push", [](Pedestal &pedestal, py::array_t &f) { - auto v = make_view_2d(f); - pedestal.push(v); - }) - .def("push_no_update", [](Pedestal &pedestal, py::array_t &f) { - auto v = make_view_2d(f); - pedestal.push_no_update(v); - }, py::arg().noconvert()) + // TODO! add push for other data types + .def("push", + [](Pedestal &pedestal, py::array_t &f) { + auto v = make_view_2d(f); + pedestal.push(v); + }) + .def( + "push_no_update", + [](Pedestal &pedestal, + py::array_t &f) { + auto v = make_view_2d(f); + pedestal.push_no_update(v); + }, + py::arg().noconvert()) .def("update_mean", &Pedestal::update_mean); } \ No newline at end of file diff --git a/python/src/pixel_map.hpp b/python/src/pixel_map.hpp index 46b1bc4..986728b 100644 --- a/python/src/pixel_map.hpp +++ b/python/src/pixel_map.hpp @@ -1,41 +1,46 @@ #include "aare/PixelMap.hpp" #include "np_helper.hpp" - #include #include #include #include - namespace py = pybind11; -using namespace::aare; - +using namespace ::aare; void define_pixel_map_bindings(py::module &m) { - m.def("GenerateMoench03PixelMap", []() { - auto ptr = new NDArray(GenerateMoench03PixelMap()); - return return_image_data(ptr); - }) - .def("GenerateMoench05PixelMap", []() { - auto ptr = new NDArray(GenerateMoench05PixelMap()); - return return_image_data(ptr); - }) - .def("GenerateMoench05PixelMap1g", []() { - auto ptr = new NDArray(GenerateMoench05PixelMap1g()); - return return_image_data(ptr); - }) - .def("GenerateMoench05PixelMapOld", []() { - auto ptr = new NDArray(GenerateMoench05PixelMapOld()); - return return_image_data(ptr); - }) - .def("GenerateMH02SingleCounterPixelMap", []() { - auto ptr = new NDArray(GenerateMH02SingleCounterPixelMap()); - return return_image_data(ptr); - }) - .def("GenerateMH02FourCounterPixelMap", []() { - auto ptr = new NDArray(GenerateMH02FourCounterPixelMap()); - return return_image_data(ptr); - }); - + m.def("GenerateMoench03PixelMap", + []() { + auto ptr = new NDArray(GenerateMoench03PixelMap()); + return return_image_data(ptr); + }) + .def("GenerateMoench05PixelMap", + []() { + auto ptr = new NDArray(GenerateMoench05PixelMap()); + return return_image_data(ptr); + }) + .def("GenerateMoench05PixelMap1g", + []() { + auto ptr = + new NDArray(GenerateMoench05PixelMap1g()); + return return_image_data(ptr); + }) + .def("GenerateMoench05PixelMapOld", + []() { + auto ptr = + new NDArray(GenerateMoench05PixelMapOld()); + return return_image_data(ptr); + }) + .def("GenerateMH02SingleCounterPixelMap", + []() { + auto ptr = new NDArray( + GenerateMH02SingleCounterPixelMap()); + return return_image_data(ptr); + }) + .def("GenerateMH02FourCounterPixelMap", []() { + auto ptr = + new NDArray(GenerateMH02FourCounterPixelMap()); + return return_image_data(ptr); + }); } \ No newline at end of file diff --git a/python/src/raw_file.hpp b/python/src/raw_file.hpp index 8d72220..689b84e 100644 --- a/python/src/raw_file.hpp +++ b/python/src/raw_file.hpp @@ -58,13 +58,14 @@ void define_raw_file_io_bindings(py::module &m) { throw std::runtime_error("No frames left in file"); } std::vector shape{n_frames, self.rows(), self.cols()}; - + // return headers from all subfiles py::array_t header; if (self.n_modules() == 1) { header = py::array_t(n_frames); } else { - header = py::array_t({self.n_modules(), n_frames}); + header = py::array_t( + {self.n_modules(), n_frames}); } // py::array_t header({self.n_mod(), n_frames}); diff --git a/python/src/raw_master_file.hpp b/python/src/raw_master_file.hpp index 943437f..9c2bd17 100644 --- 
a/python/src/raw_master_file.hpp +++ b/python/src/raw_master_file.hpp @@ -57,7 +57,8 @@ void define_raw_master_file_bindings(py::module &m) { .def_property_readonly("total_frames_expected", &RawMasterFile::total_frames_expected) .def_property_readonly("geometry", &RawMasterFile::geometry) - .def_property_readonly("analog_samples", &RawMasterFile::analog_samples, R"( + .def_property_readonly("analog_samples", &RawMasterFile::analog_samples, + R"( Number of analog samples Returns @@ -66,7 +67,7 @@ void define_raw_master_file_bindings(py::module &m) { The number of analog samples in the file (or None if not enabled) )") .def_property_readonly("digital_samples", - &RawMasterFile::digital_samples, R"( + &RawMasterFile::digital_samples, R"( Number of digital samples Returns diff --git a/python/src/raw_sub_file.hpp b/python/src/raw_sub_file.hpp index 2cb83fc..cff511b 100644 --- a/python/src/raw_sub_file.hpp +++ b/python/src/raw_sub_file.hpp @@ -24,8 +24,8 @@ auto read_frame_from_RawSubFile(RawSubFile &self) { py::array_t header(1); const uint8_t item_size = self.bytes_per_pixel(); std::vector shape{static_cast(self.rows()), - static_cast(self.cols())}; - + static_cast(self.cols())}; + py::array image; if (item_size == 1) { image = py::array_t(shape); @@ -43,12 +43,10 @@ auto read_frame_from_RawSubFile(RawSubFile &self) { auto read_n_frames_from_RawSubFile(RawSubFile &self, size_t n_frames) { py::array_t header(n_frames); const uint8_t item_size = self.bytes_per_pixel(); - std::vector shape{ - static_cast(n_frames), - static_cast(self.rows()), - static_cast(self.cols()) - }; - + std::vector shape{static_cast(n_frames), + static_cast(self.rows()), + static_cast(self.cols())}; + py::array image; if (item_size == 1) { image = py::array_t(shape); @@ -57,15 +55,14 @@ auto read_n_frames_from_RawSubFile(RawSubFile &self, size_t n_frames) { } else if (item_size == 4) { image = py::array_t(shape); } - self.read_into(reinterpret_cast(image.mutable_data()), n_frames, - header.mutable_data()); + self.read_into(reinterpret_cast(image.mutable_data()), + n_frames, header.mutable_data()); return py::make_tuple(header, image); } - -//Disable warnings for unused parameters, as we ignore some -//in the __exit__ method +// Disable warnings for unused parameters, as we ignore some +// in the __exit__ method #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" @@ -76,7 +73,7 @@ void define_raw_sub_file_io_bindings(py::module &m) { .def_property_readonly("bytes_per_frame", &RawSubFile::bytes_per_frame) .def_property_readonly("pixels_per_frame", &RawSubFile::pixels_per_frame) - .def_property_readonly("bytes_per_pixel", &RawSubFile::bytes_per_pixel) + .def_property_readonly("bytes_per_pixel", &RawSubFile::bytes_per_pixel) .def("seek", &RawSubFile::seek) .def("tell", &RawSubFile::tell) .def_property_readonly("rows", &RawSubFile::rows) @@ -84,18 +81,17 @@ void define_raw_sub_file_io_bindings(py::module &m) { .def_property_readonly("frames_in_file", &RawSubFile::frames_in_file) .def("read_frame", &read_frame_from_RawSubFile) .def("read_n", &read_n_frames_from_RawSubFile) - .def("read", [](RawSubFile &self){ - self.seek(0); - auto n_frames = self.frames_in_file(); - return read_n_frames_from_RawSubFile(self, n_frames); - }) + .def("read", + [](RawSubFile &self) { + self.seek(0); + auto n_frames = self.frames_in_file(); + return read_n_frames_from_RawSubFile(self, n_frames); + }) .def("__enter__", [](RawSubFile &self) { return &self; }) .def("__exit__", - [](RawSubFile &self, - const std::optional 
&exc_type, + [](RawSubFile &self, const std::optional &exc_type, const std::optional &exc_value, - const std::optional &traceback) { - }) + const std::optional &traceback) {}) .def("__iter__", [](RawSubFile &self) { return &self; }) .def("__next__", [](RawSubFile &self) { try { @@ -104,7 +100,6 @@ void define_raw_sub_file_io_bindings(py::module &m) { throw py::stop_iteration(); } }); - } #pragma GCC diagnostic pop \ No newline at end of file diff --git a/python/src/var_cluster.hpp b/python/src/var_cluster.hpp index f7b373f..4e7302d 100644 --- a/python/src/var_cluster.hpp +++ b/python/src/var_cluster.hpp @@ -12,10 +12,8 @@ // #include // #include - namespace py = pybind11; -using namespace::aare; - +using namespace ::aare; void define_var_cluster_finder_bindings(py::module &m) { PYBIND11_NUMPY_DTYPE(VarClusterFinder::Hit, size, row, col, @@ -29,12 +27,12 @@ void define_var_cluster_finder_bindings(py::module &m) { return return_image_data(ptr); }) .def("set_noiseMap", - [](VarClusterFinder &self, + [](VarClusterFinder &self, py::array_t noise_map) { - auto noise_map_span = make_view_2d(noise_map); - self.set_noiseMap(noise_map_span); - }) + auto noise_map_span = make_view_2d(noise_map); + self.set_noiseMap(noise_map_span); + }) .def("set_peripheralThresholdFactor", &VarClusterFinder::set_peripheralThresholdFactor) .def("find_clusters", @@ -65,9 +63,7 @@ void define_var_cluster_finder_bindings(py::module &m) { return return_vector(ptr); }) .def("clear_hits", - [](VarClusterFinder &self) { - self.clear_hits(); - }) + [](VarClusterFinder &self) { self.clear_hits(); }) .def("steal_hits", [](VarClusterFinder &self) { auto ptr = new std::vector::Hit>( @@ -75,5 +71,4 @@ void define_var_cluster_finder_bindings(py::module &m) { return return_vector(ptr); }) .def("total_clusters", &VarClusterFinder::total_clusters); - } \ No newline at end of file diff --git a/src/ClusterFile.cpp b/src/ClusterFile.cpp index d24e803..13f7364 100644 --- a/src/ClusterFile.cpp +++ b/src/ClusterFile.cpp @@ -31,17 +31,15 @@ ClusterFile::ClusterFile(const std::filesystem::path &fname, size_t chunk_size, } } -void ClusterFile::set_roi(ROI roi){ - m_roi = roi; -} +void ClusterFile::set_roi(ROI roi) { m_roi = roi; } -void ClusterFile::set_noise_map(const NDView noise_map){ +void ClusterFile::set_noise_map(const NDView noise_map) { m_noise_map = NDArray(noise_map); } -void ClusterFile::set_gain_map(const NDView gain_map){ +void ClusterFile::set_gain_map(const NDView gain_map) { m_gain_map = NDArray(gain_map); - + // Gain map is passed as ADU/keV to avoid dividing in when applying the gain // map we invert it here for (auto &item : m_gain_map->view()) { @@ -66,42 +64,44 @@ void ClusterFile::write_frame(const ClusterVector &clusters) { !(clusters.cluster_size_y() == 3)) { throw std::runtime_error("Only 3x3 clusters are supported"); } - //First write the frame number - 4 bytes + // First write the frame number - 4 bytes int32_t frame_number = clusters.frame_number(); - if(fwrite(&frame_number, sizeof(frame_number), 1, fp)!=1){ + if (fwrite(&frame_number, sizeof(frame_number), 1, fp) != 1) { throw std::runtime_error(LOCATION + "Could not write frame number"); } - //Then write the number of clusters - 4 bytes + // Then write the number of clusters - 4 bytes uint32_t n_clusters = clusters.size(); - if(fwrite(&n_clusters, sizeof(n_clusters), 1, fp)!=1){ - throw std::runtime_error(LOCATION + "Could not write number of clusters"); + if (fwrite(&n_clusters, sizeof(n_clusters), 1, fp) != 1) { + throw std::runtime_error(LOCATION + + "Could 
not write number of clusters"); } - //Now write the clusters in the frame - if(fwrite(clusters.data(), clusters.item_size(), clusters.size(), fp)!=clusters.size()){ + // Now write the clusters in the frame + if (fwrite(clusters.data(), clusters.item_size(), clusters.size(), fp) != + clusters.size()) { throw std::runtime_error(LOCATION + "Could not write clusters"); } } - -ClusterVector ClusterFile::read_clusters(size_t n_clusters){ +ClusterVector ClusterFile::read_clusters(size_t n_clusters) { if (m_mode != "r") { throw std::runtime_error("File not opened for reading"); } - if (m_noise_map || m_roi){ + if (m_noise_map || m_roi) { return read_clusters_with_cut(n_clusters); - }else{ + } else { return read_clusters_without_cut(n_clusters); } } -ClusterVector ClusterFile::read_clusters_without_cut(size_t n_clusters) { +ClusterVector +ClusterFile::read_clusters_without_cut(size_t n_clusters) { if (m_mode != "r") { throw std::runtime_error("File not opened for reading"); } - - ClusterVector clusters(3,3, n_clusters); + + ClusterVector clusters(3, 3, n_clusters); int32_t iframe = 0; // frame number needs to be 4 bytes! size_t nph_read = 0; @@ -119,7 +119,7 @@ ClusterVector ClusterFile::read_clusters_without_cut(size_t n_clusters) } else { nn = nph; } - nph_read += fread((buf + nph_read*clusters.item_size()), + nph_read += fread((buf + nph_read * clusters.item_size()), clusters.item_size(), nn, fp); m_num_left = nph - nn; // write back the number of photons left } @@ -135,7 +135,7 @@ ClusterVector ClusterFile::read_clusters_without_cut(size_t n_clusters) else nn = nph; - nph_read += fread((buf + nph_read*clusters.item_size()), + nph_read += fread((buf + nph_read * clusters.item_size()), clusters.item_size(), nn, fp); m_num_left = nph - nn; } @@ -147,22 +147,22 @@ ClusterVector ClusterFile::read_clusters_without_cut(size_t n_clusters) // Resize the vector to the number of clusters. // No new allocation, only change bounds. clusters.resize(nph_read); - if(m_gain_map) + if (m_gain_map) clusters.apply_gain_map(m_gain_map->view()); return clusters; } - ClusterVector ClusterFile::read_clusters_with_cut(size_t n_clusters) { - ClusterVector clusters(3,3); + ClusterVector clusters(3, 3); clusters.reserve(n_clusters); // if there are photons left from previous frame read them first if (m_num_left) { - while(m_num_left && clusters.size() < n_clusters){ + while (m_num_left && clusters.size() < n_clusters) { Cluster3x3 c = read_one_cluster(); - if(is_selected(c)){ - clusters.push_back(c.x, c.y, reinterpret_cast(c.data)); + if (is_selected(c)) { + clusters.push_back(c.x, c.y, + reinterpret_cast(c.data)); } } } @@ -172,17 +172,21 @@ ClusterVector ClusterFile::read_clusters_with_cut(size_t n_clusters) { if (clusters.size() < n_clusters) { // sanity check if (m_num_left) { - throw std::runtime_error(LOCATION + "Entered second loop with clusters left\n"); + throw std::runtime_error( + LOCATION + "Entered second loop with clusters left\n"); } - + int32_t frame_number = 0; // frame number needs to be 4 bytes! 
while (fread(&frame_number, sizeof(frame_number), 1, fp)) { if (fread(&m_num_left, sizeof(m_num_left), 1, fp)) { - clusters.set_frame_number(frame_number); //cluster vector will hold the last frame number - while(m_num_left && clusters.size() < n_clusters){ + clusters.set_frame_number( + frame_number); // cluster vector will hold the last frame + // number + while (m_num_left && clusters.size() < n_clusters) { Cluster3x3 c = read_one_cluster(); - if(is_selected(c)){ - clusters.push_back(c.x, c.y, reinterpret_cast(c.data)); + if (is_selected(c)) { + clusters.push_back( + c.x, c.y, reinterpret_cast(c.data)); } } } @@ -191,15 +195,14 @@ ClusterVector ClusterFile::read_clusters_with_cut(size_t n_clusters) { if (clusters.size() >= n_clusters) break; } - } - if(m_gain_map) + if (m_gain_map) clusters.apply_gain_map(m_gain_map->view()); return clusters; } -Cluster3x3 ClusterFile::read_one_cluster(){ +Cluster3x3 ClusterFile::read_one_cluster() { Cluster3x3 c; auto rc = fread(&c, sizeof(c), 1, fp); if (rc != 1) { @@ -209,13 +212,13 @@ Cluster3x3 ClusterFile::read_one_cluster(){ return c; } -ClusterVector ClusterFile::read_frame(){ +ClusterVector ClusterFile::read_frame() { if (m_mode != "r") { throw std::runtime_error(LOCATION + "File not opened for reading"); } - if (m_noise_map || m_roi){ + if (m_noise_map || m_roi) { return read_frame_with_cut(); - }else{ + } else { return read_frame_without_cut(); } } @@ -235,7 +238,8 @@ ClusterVector ClusterFile::read_frame_without_cut() { int32_t n_clusters; // Saved as 32bit integer in the cluster file if (fread(&n_clusters, sizeof(n_clusters), 1, fp) != 1) { - throw std::runtime_error(LOCATION + "Could not read number of clusters"); + throw std::runtime_error(LOCATION + + "Could not read number of clusters"); } ClusterVector clusters(3, 3, n_clusters); @@ -264,18 +268,17 @@ ClusterVector ClusterFile::read_frame_with_cut() { throw std::runtime_error("Could not read frame number"); } - if (fread(&m_num_left, sizeof(m_num_left), 1, fp) != 1) { throw std::runtime_error("Could not read number of clusters"); } - + ClusterVector clusters(3, 3); clusters.reserve(m_num_left); clusters.set_frame_number(frame_number); - while(m_num_left){ + while (m_num_left) { Cluster3x3 c = read_one_cluster(); - if(is_selected(c)){ - clusters.push_back(c.x, c.y, reinterpret_cast(c.data)); + if (is_selected(c)) { + clusters.push_back(c.x, c.y, reinterpret_cast(c.data)); } } if (m_gain_map) @@ -283,56 +286,56 @@ ClusterVector ClusterFile::read_frame_with_cut() { return clusters; } - - bool ClusterFile::is_selected(Cluster3x3 &cl) { - //Should fail fast + // Should fail fast if (m_roi) { if (!(m_roi->contains(cl.x, cl.y))) { return false; } } - if (m_noise_map){ - int32_t sum_1x1 = cl.data[4]; // central pixel + if (m_noise_map) { + int32_t sum_1x1 = cl.data[4]; // central pixel int32_t sum_2x2 = cl.sum_2x2(); // highest sum of 2x2 subclusters - int32_t sum_3x3 = cl.sum(); // sum of all pixels + int32_t sum_3x3 = cl.sum(); // sum of all pixels - auto noise = (*m_noise_map)(cl.y, cl.x); //TODO! check if this is correct + auto noise = + (*m_noise_map)(cl.y, cl.x); // TODO! check if this is correct if (sum_1x1 <= noise || sum_2x2 <= 2 * noise || sum_3x3 <= 3 * noise) { return false; } } - //we passed all checks + // we passed all checks return true; } NDArray calculate_eta2(ClusterVector &clusters) { - //TOTO! make work with 2x2 clusters + // TOTO! 
make work with 2x2 clusters NDArray eta2({static_cast(clusters.size()), 2}); - + if (clusters.cluster_size_x() == 3 || clusters.cluster_size_y() == 3) { for (size_t i = 0; i < clusters.size(); i++) { auto e = calculate_eta2(clusters.at(i)); eta2(i, 0) = e.x; eta2(i, 1) = e.y; } - }else if(clusters.cluster_size_x() == 2 || clusters.cluster_size_y() == 2){ + } else if (clusters.cluster_size_x() == 2 || + clusters.cluster_size_y() == 2) { for (size_t i = 0; i < clusters.size(); i++) { auto e = calculate_eta2(clusters.at(i)); eta2(i, 0) = e.x; eta2(i, 1) = e.y; } - }else{ + } else { throw std::runtime_error("Only 3x3 and 2x2 clusters are supported"); } - + return eta2; } -/** - * @brief Calculate the eta2 values for a 3x3 cluster and return them in a Eta2 struct - * containing etay, etax and the corner of the cluster. -*/ +/** + * @brief Calculate the eta2 values for a 3x3 cluster and return them in a Eta2 + * struct containing etay, etax and the corner of the cluster. + */ Eta2 calculate_eta2(Cluster3x3 &cl) { Eta2 eta{}; @@ -347,56 +350,46 @@ Eta2 calculate_eta2(Cluster3x3 &cl) { switch (c) { case cBottomLeft: if ((cl.data[3] + cl.data[4]) != 0) - eta.x = - static_cast(cl.data[4]) / (cl.data[3] + cl.data[4]); + eta.x = static_cast(cl.data[4]) / (cl.data[3] + cl.data[4]); if ((cl.data[1] + cl.data[4]) != 0) - eta.y = - static_cast(cl.data[4]) / (cl.data[1] + cl.data[4]); + eta.y = static_cast(cl.data[4]) / (cl.data[1] + cl.data[4]); eta.c = cBottomLeft; break; case cBottomRight: if ((cl.data[2] + cl.data[5]) != 0) - eta.x = - static_cast(cl.data[5]) / (cl.data[4] + cl.data[5]); + eta.x = static_cast(cl.data[5]) / (cl.data[4] + cl.data[5]); if ((cl.data[1] + cl.data[4]) != 0) - eta.y = - static_cast(cl.data[4]) / (cl.data[1] + cl.data[4]); + eta.y = static_cast(cl.data[4]) / (cl.data[1] + cl.data[4]); eta.c = cBottomRight; break; case cTopLeft: if ((cl.data[7] + cl.data[4]) != 0) - eta.x = - static_cast(cl.data[4]) / (cl.data[3] + cl.data[4]); + eta.x = static_cast(cl.data[4]) / (cl.data[3] + cl.data[4]); if ((cl.data[7] + cl.data[4]) != 0) - eta.y = - static_cast(cl.data[7]) / (cl.data[7] + cl.data[4]); + eta.y = static_cast(cl.data[7]) / (cl.data[7] + cl.data[4]); eta.c = cTopLeft; break; case cTopRight: if ((cl.data[5] + cl.data[4]) != 0) - eta.x = - static_cast(cl.data[5]) / (cl.data[5] + cl.data[4]); + eta.x = static_cast(cl.data[5]) / (cl.data[5] + cl.data[4]); if ((cl.data[7] + cl.data[4]) != 0) - eta.y = - static_cast(cl.data[7]) / (cl.data[7] + cl.data[4]); + eta.y = static_cast(cl.data[7]) / (cl.data[7] + cl.data[4]); eta.c = cTopRight; break; - // no default to allow compiler to warn about missing cases + // no default to allow compiler to warn about missing cases } return eta; } - Eta2 calculate_eta2(Cluster2x2 &cl) { Eta2 eta{}; if ((cl.data[0] + cl.data[1]) != 0) eta.x = static_cast(cl.data[1]) / (cl.data[0] + cl.data[1]); if ((cl.data[0] + cl.data[2]) != 0) eta.y = static_cast(cl.data[2]) / (cl.data[0] + cl.data[2]); - eta.sum = cl.data[0] + cl.data[1] + cl.data[2]+ cl.data[3]; - eta.c = cBottomLeft; //TODO! This is not correct, but need to put something + eta.sum = cl.data[0] + cl.data[1] + cl.data[2] + cl.data[3]; + eta.c = cBottomLeft; // TODO! 
This is not correct, but need to put something return eta; } - } // namespace aare \ No newline at end of file diff --git a/src/ClusterFile.test.cpp b/src/ClusterFile.test.cpp index 6254b5d..68e734d 100644 --- a/src/ClusterFile.test.cpp +++ b/src/ClusterFile.test.cpp @@ -10,9 +10,8 @@ using aare::Cluster; using aare::ClusterFile; using aare::ClusterVector; - TEST_CASE("Read one frame from a cluster file", "[.files]") { - //We know that the frame has 97 clusters + // We know that the frame has 97 clusters auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust"; REQUIRE(std::filesystem::exists(fpath)); @@ -27,7 +26,6 @@ TEST_CASE("Read one frame from a cluster file", "[.files]") { std::begin(expected_cluster_data))); } - TEST_CASE("Read one frame using ROI", "[.files]") { // We know that the frame has 97 clusters auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust"; @@ -60,8 +58,6 @@ TEST_CASE("Read one frame using ROI", "[.files]") { std::begin(expected_cluster_data))); } - - TEST_CASE("Read clusters from single frame file", "[.files]") { // frame_number, num_clusters [135] 97 diff --git a/src/CtbRawFile.cpp b/src/CtbRawFile.cpp index 4d9d895..a6a1d92 100644 --- a/src/CtbRawFile.cpp +++ b/src/CtbRawFile.cpp @@ -14,22 +14,24 @@ CtbRawFile::CtbRawFile(const std::filesystem::path &fname) : m_master(fname) { m_file.open(m_master.data_fname(0, 0), std::ios::binary); } -void CtbRawFile::read_into(std::byte *image_buf, DetectorHeader* header) { - if(m_current_frame >= m_master.frames_in_file()){ +void CtbRawFile::read_into(std::byte *image_buf, DetectorHeader *header) { + if (m_current_frame >= m_master.frames_in_file()) { throw std::runtime_error(LOCATION + " End of file reached"); } - if(m_current_frame != 0 && m_current_frame % m_master.max_frames_per_file() == 0){ - open_data_file(m_current_subfile+1); + if (m_current_frame != 0 && + m_current_frame % m_master.max_frames_per_file() == 0) { + open_data_file(m_current_subfile + 1); } - - if(header){ + + if (header) { m_file.read(reinterpret_cast(header), sizeof(DetectorHeader)); - }else{ + } else { m_file.seekg(sizeof(DetectorHeader), std::ios::cur); } - m_file.read(reinterpret_cast(image_buf), m_master.image_size_in_bytes()); + m_file.read(reinterpret_cast(image_buf), + m_master.image_size_in_bytes()); m_current_frame++; } @@ -38,13 +40,16 @@ void CtbRawFile::seek(size_t frame_number) { open_data_file(index); } size_t frame_number_in_file = frame_number % m_master.max_frames_per_file(); - m_file.seekg((sizeof(DetectorHeader)+m_master.image_size_in_bytes()) * frame_number_in_file); + m_file.seekg((sizeof(DetectorHeader) + m_master.image_size_in_bytes()) * + frame_number_in_file); m_current_frame = frame_number; } size_t CtbRawFile::tell() const { return m_current_frame; } -size_t CtbRawFile::image_size_in_bytes() const { return m_master.image_size_in_bytes(); } +size_t CtbRawFile::image_size_in_bytes() const { + return m_master.image_size_in_bytes(); +} size_t CtbRawFile::frames_in_file() const { return m_master.frames_in_file(); } @@ -63,12 +68,11 @@ void CtbRawFile::open_data_file(size_t subfile_index) { throw std::runtime_error(LOCATION + "Subfile index out of range"); } m_current_subfile = subfile_index; - m_file = std::ifstream(m_master.data_fname(0, subfile_index), std::ios::binary); // only one module for CTB + m_file = std::ifstream(m_master.data_fname(0, subfile_index), + std::ios::binary); // only one module for CTB if (!m_file.is_open()) { throw std::runtime_error(LOCATION + "Could not open 
data file"); } } - - } // namespace aare \ No newline at end of file diff --git a/src/Dtype.cpp b/src/Dtype.cpp index b818ea3..0fdffec 100644 --- a/src/Dtype.cpp +++ b/src/Dtype.cpp @@ -10,7 +10,8 @@ namespace aare { * @brief Construct a DType object from a type_info object * @param t type_info object * @throw runtime_error if the type is not supported - * @note supported types are: int8_t, uint8_t, int16_t, uint16_t, int32_t, uint32_t, int64_t, uint64_t, float, double + * @note supported types are: int8_t, uint8_t, int16_t, uint16_t, int32_t, + * uint32_t, int64_t, uint64_t, float, double * @note the type_info object is obtained using typeid (e.g. typeid(int)) */ Dtype::Dtype(const std::type_info &t) { @@ -35,7 +36,8 @@ Dtype::Dtype(const std::type_info &t) { else if (t == typeid(double)) m_type = TypeIndex::DOUBLE; else - throw std::runtime_error("Could not construct data type. Type not supported."); + throw std::runtime_error( + "Could not construct data type. Type not supported."); } /** @@ -63,7 +65,8 @@ uint8_t Dtype::bitdepth() const { case TypeIndex::NONE: return 0; default: - throw std::runtime_error(LOCATION + "Could not get bitdepth. Type not supported."); + throw std::runtime_error(LOCATION + + "Could not get bitdepth. Type not supported."); } } @@ -138,7 +141,8 @@ Dtype Dtype::from_bitdepth(uint8_t bitdepth) { case 64: return Dtype(TypeIndex::UINT64); default: - throw std::runtime_error("Could not construct data type from bitdepth."); + throw std::runtime_error( + "Could not construct data type from bitdepth."); } } /** @@ -175,17 +179,27 @@ std::string Dtype::to_string() const { case TypeIndex::DOUBLE: return "f8"; case TypeIndex::ERROR: - throw std::runtime_error("Could not get string representation. Type not supported."); + throw std::runtime_error( + "Could not get string representation. Type not supported."); case TypeIndex::NONE: - throw std::runtime_error("Could not get string representation. Type not supported."); + throw std::runtime_error( + "Could not get string representation. 
Type not supported."); } return {}; } -bool Dtype::operator==(const Dtype &other) const noexcept { return m_type == other.m_type; } -bool Dtype::operator!=(const Dtype &other) const noexcept { return !(*this == other); } +bool Dtype::operator==(const Dtype &other) const noexcept { + return m_type == other.m_type; +} +bool Dtype::operator!=(const Dtype &other) const noexcept { + return !(*this == other); +} -bool Dtype::operator==(const std::type_info &t) const { return Dtype(t) == *this; } -bool Dtype::operator!=(const std::type_info &t) const { return Dtype(t) != *this; } +bool Dtype::operator==(const std::type_info &t) const { + return Dtype(t) == *this; +} +bool Dtype::operator!=(const std::type_info &t) const { + return Dtype(t) != *this; +} } // namespace aare diff --git a/src/Dtype.test.cpp b/src/Dtype.test.cpp index b252267..256be64 100644 --- a/src/Dtype.test.cpp +++ b/src/Dtype.test.cpp @@ -51,4 +51,6 @@ TEST_CASE("Construct from string with endianess") { REQUIRE_THROWS(Dtype(">i4") == typeid(int32_t)); } -TEST_CASE("Convert to string") { REQUIRE(Dtype(typeid(int)).to_string() == "(fname, mode); - } - else if (fname.extension() == ".npy") { + } else if (fname.extension() == ".npy") { // file_impl = new NumpyFile(fname, mode, cfg); file_impl = std::make_unique(fname, mode, cfg); - }else if(fname.extension() == ".dat"){ + } else if (fname.extension() == ".dat") { file_impl = std::make_unique(fname); } else { throw std::runtime_error("Unsupported file type"); } } +File::File(File &&other) noexcept { std::swap(file_impl, other.file_impl); } -File::File(File &&other) noexcept{ - std::swap(file_impl, other.file_impl); -} - -File& File::operator=(File &&other) noexcept { +File &File::operator=(File &&other) noexcept { if (this != &other) { File tmp(std::move(other)); std::swap(file_impl, tmp.file_impl); @@ -70,15 +66,16 @@ size_t File::frame_number(size_t frame_index) { } size_t File::bytes_per_frame() const { return file_impl->bytes_per_frame(); } -size_t File::pixels_per_frame() const{ return file_impl->pixels_per_frame(); } +size_t File::pixels_per_frame() const { return file_impl->pixels_per_frame(); } void File::seek(size_t frame_index) { file_impl->seek(frame_index); } size_t File::tell() const { return file_impl->tell(); } size_t File::rows() const { return file_impl->rows(); } size_t File::cols() const { return file_impl->cols(); } size_t File::bitdepth() const { return file_impl->bitdepth(); } -size_t File::bytes_per_pixel() const { return file_impl->bitdepth() / bits_per_byte; } +size_t File::bytes_per_pixel() const { + return file_impl->bitdepth() / bits_per_byte; +} DetectorType File::detector_type() const { return file_impl->detector_type(); } - } // namespace aare \ No newline at end of file diff --git a/src/FilePtr.cpp b/src/FilePtr.cpp index e3cdb4b..f850080 100644 --- a/src/FilePtr.cpp +++ b/src/FilePtr.cpp @@ -6,10 +6,12 @@ namespace aare { -FilePtr::FilePtr(const std::filesystem::path& fname, const std::string& mode = "rb") { +FilePtr::FilePtr(const std::filesystem::path &fname, + const std::string &mode = "rb") { fp_ = fopen(fname.c_str(), mode.c_str()); if (!fp_) - throw std::runtime_error(fmt::format("Could not open: {}", fname.c_str())); + throw std::runtime_error( + fmt::format("Could not open: {}", fname.c_str())); } FilePtr::FilePtr(FilePtr &&other) { std::swap(fp_, other.fp_); } @@ -24,15 +26,16 @@ FILE *FilePtr::get() { return fp_; } ssize_t FilePtr::tell() { auto pos = ftell(fp_); if (pos == -1) - throw std::runtime_error(fmt::format("Error getting file 
position: {}", error_msg())); + throw std::runtime_error( + fmt::format("Error getting file position: {}", error_msg())); return pos; -} +} FilePtr::~FilePtr() { if (fp_) fclose(fp_); // check? } -std::string FilePtr::error_msg(){ +std::string FilePtr::error_msg() { if (feof(fp_)) { return "End of file reached"; } diff --git a/src/Fit.cpp b/src/Fit.cpp index 25000de..9d4b70b 100644 --- a/src/Fit.cpp +++ b/src/Fit.cpp @@ -1,13 +1,12 @@ #include "aare/Fit.hpp" -#include "aare/utils/task.hpp" #include "aare/utils/par.hpp" +#include "aare/utils/task.hpp" #include #include #include #include - namespace aare { namespace func { @@ -34,8 +33,10 @@ NDArray pol1(NDView x, NDView par) { return y; } -double scurve(const double x, const double * par) { - return (par[0] + par[1] * x) + 0.5 * (1 + erf((x - par[2]) / (sqrt(2) * par[3]))) * (par[4] + par[5] * (x - par[2])); +double scurve(const double x, const double *par) { + return (par[0] + par[1] * x) + + 0.5 * (1 + erf((x - par[2]) / (sqrt(2) * par[3]))) * + (par[4] + par[5] * (x - par[2])); } NDArray scurve(NDView x, NDView par) { @@ -46,8 +47,10 @@ NDArray scurve(NDView x, NDView par) { return y; } -double scurve2(const double x, const double * par) { - return (par[0] + par[1] * x) + 0.5 * (1 - erf((x - par[2]) / (sqrt(2) * par[3]))) * (par[4] + par[5] * (x - par[2])); +double scurve2(const double x, const double *par) { + return (par[0] + par[1] * x) + + 0.5 * (1 - erf((x - par[2]) / (sqrt(2) * par[3]))) * + (par[4] + par[5] * (x - par[2])); } NDArray scurve2(NDView x, NDView par) { @@ -91,7 +94,8 @@ NDArray fit_gaus(NDView x, NDView y, return result; } -std::array gaus_init_par(const NDView x, const NDView y) { +std::array gaus_init_par(const NDView x, + const NDView y) { std::array start_par{0, 0, 0}; auto e = std::max_element(y.begin(), y.end()); auto idx = std::distance(y.begin(), e); @@ -103,31 +107,29 @@ std::array gaus_init_par(const NDView x, const NDView *e / 2; }) * - delta / 2.35; + start_par[2] = std::count_if(y.begin(), y.end(), + [e](double val) { return val > *e / 2; }) * + delta / 2.35; return start_par; } +std::array pol1_init_par(const NDView x, + const NDView y) { + // Estimate the initial parameters for the fit + std::array start_par{0, 0}; -std::array pol1_init_par(const NDView x, const NDView y){ - // Estimate the initial parameters for the fit - std::array start_par{0, 0}; + auto y2 = std::max_element(y.begin(), y.end()); + auto x2 = x[std::distance(y.begin(), y2)]; + auto y1 = std::min_element(y.begin(), y.end()); + auto x1 = x[std::distance(y.begin(), y1)]; - - auto y2 = std::max_element(y.begin(), y.end()); - auto x2 = x[std::distance(y.begin(), y2)]; - auto y1 = std::min_element(y.begin(), y.end()); - auto x1 = x[std::distance(y.begin(), y1)]; - - start_par[0] = - (*y2 - *y1) / (x2 - x1); // For amplitude we use the maximum value - start_par[1] = - *y1 - ((*y2 - *y1) / (x2 - x1)) * - x1; // For the mean we use the x value of the maximum value - return start_par; + start_par[0] = + (*y2 - *y1) / (x2 - x1); // For amplitude we use the maximum value + start_par[1] = + *y1 - ((*y2 - *y1) / (x2 - x1)) * + x1; // For the mean we use the x value of the maximum value + return start_par; } void fit_gaus(NDView x, NDView y, NDView y_err, @@ -141,7 +143,6 @@ void fit_gaus(NDView x, NDView y, NDView y_err, "and par_out, par_err_out must have size 3"); } - // /* Collection of output parameters for status info. */ // typedef struct { // double fnorm; /* norm of the residue vector fvec. 
*/ @@ -153,23 +154,32 @@ void fit_gaus(NDView x, NDView y, NDView y_err, // */ // } lm_status_struct; - lm_status_struct status; par_out = gaus_init_par(x, y); - std::array cov{0, 0, 0, 0, 0, 0, 0 , 0 , 0}; + std::array cov{0, 0, 0, 0, 0, 0, 0, 0, 0}; - // void lmcurve2( const int n_par, double *par, double *parerr, double *covar, const int m_dat, const double *t, const double *y, const double *dy, double (*f)( const double ti, const double *par ), const lm_control_struct *control, lm_status_struct *status); - // n_par - Number of free variables. Length of parameter vector par. - // par - Parameter vector. On input, it must contain a reasonable guess. On output, it contains the solution found to minimize ||r||. - // parerr - Parameter uncertainties vector. Array of length n_par or NULL. On output, unless it or covar is NULL, it contains the weighted parameter uncertainties for the found parameters. - // covar - Covariance matrix. Array of length n_par * n_par or NULL. On output, unless it is NULL, it contains the covariance matrix. - // m_dat - Number of data points. Length of vectors t, y, dy. Must statisfy n_par <= m_dat. - // t - Array of length m_dat. Contains the abcissae (time, or "x") for which function f will be evaluated. - // y - Array of length m_dat. Contains the ordinate values that shall be fitted. - // dy - Array of length m_dat. Contains the standard deviations of the values y. - // f - A user-supplied parametric function f(ti;par). - // control - Parameter collection for tuning the fit procedure. In most cases, the default &lm_control_double is adequate. If f is only computed with single-precision accuracy, &lm_control_float should be used. Parameters are explained in lmmin2(3). - // status - A record used to return information about the minimization process: For details, see lmmin2(3). + // void lmcurve2( const int n_par, double *par, double *parerr, double + // *covar, const int m_dat, const double *t, const double *y, const double + // *dy, double (*f)( const double ti, const double *par ), const + // lm_control_struct *control, lm_status_struct *status); n_par - Number of + // free variables. Length of parameter vector par. par - Parameter vector. + // On input, it must contain a reasonable guess. On output, it contains the + // solution found to minimize ||r||. parerr - Parameter uncertainties + // vector. Array of length n_par or NULL. On output, unless it or covar is + // NULL, it contains the weighted parameter uncertainties for the found + // parameters. covar - Covariance matrix. Array of length n_par * n_par or + // NULL. On output, unless it is NULL, it contains the covariance matrix. + // m_dat - Number of data points. Length of vectors t, y, dy. Must statisfy + // n_par <= m_dat. t - Array of length m_dat. Contains the abcissae (time, + // or "x") for which function f will be evaluated. y - Array of length + // m_dat. Contains the ordinate values that shall be fitted. dy - Array of + // length m_dat. Contains the standard deviations of the values y. f - A + // user-supplied parametric function f(ti;par). control - Parameter + // collection for tuning the fit procedure. In most cases, the default + // &lm_control_double is adequate. If f is only computed with + // single-precision accuracy, &lm_control_float should be used. Parameters + // are explained in lmmin2(3). status - A record used to return information + // about the minimization process: For details, see lmmin2(3). 
lmcurve2(par_out.size(), par_out.data(), par_err_out.data(), cov.data(), x.size(), x.data(), y.data(), y_err.data(), aare::func::gaus, @@ -178,12 +188,14 @@ void fit_gaus(NDView x, NDView y, NDView y_err, // Calculate chi2 chi2 = 0; for (ssize_t i = 0; i < y.size(); i++) { - chi2 += std::pow((y(i) - func::gaus(x(i), par_out.data())) / y_err(i), 2); + chi2 += + std::pow((y(i) - func::gaus(x(i), par_out.data())) / y_err(i), 2); } } void fit_gaus(NDView x, NDView y, NDView y_err, - NDView par_out, NDView par_err_out, NDView chi2_out, + NDView par_out, NDView par_err_out, + NDView chi2_out, int n_threads) { @@ -197,10 +209,9 @@ void fit_gaus(NDView x, NDView y, NDView y_err, {par_out.shape(2)}); NDView par_err_out_view(&par_err_out(row, col, 0), {par_err_out.shape(2)}); - + fit_gaus(x, y_view, y_err_view, par_out_view, par_err_out_view, chi2_out(row, col)); - } } }; @@ -210,7 +221,8 @@ void fit_gaus(NDView x, NDView y, NDView y_err, } void fit_pol1(NDView x, NDView y, NDView y_err, - NDView par_out, NDView par_err_out, double& chi2) { + NDView par_out, NDView par_err_out, + double &chi2) { // Check that we have the correct sizes if (y.size() != x.size() || y.size() != y_err.size() || @@ -230,13 +242,14 @@ void fit_pol1(NDView x, NDView y, NDView y_err, // Calculate chi2 chi2 = 0; for (ssize_t i = 0; i < y.size(); i++) { - chi2 += std::pow((y(i) - func::pol1(x(i), par_out.data())) / y_err(i), 2); + chi2 += + std::pow((y(i) - func::pol1(x(i), par_out.data())) / y_err(i), 2); } } void fit_pol1(NDView x, NDView y, NDView y_err, - NDView par_out, NDView par_err_out, NDView chi2_out, - int n_threads) { + NDView par_out, NDView par_err_out, + NDView chi2_out, int n_threads) { auto process = [&](ssize_t first_row, ssize_t last_row) { for (ssize_t row = first_row; row < last_row; row++) { @@ -249,15 +262,14 @@ void fit_pol1(NDView x, NDView y, NDView y_err, NDView par_err_out_view(&par_err_out(row, col, 0), {par_err_out.shape(2)}); - fit_pol1(x, y_view, y_err_view, par_out_view, par_err_out_view, chi2_out(row, col)); - + fit_pol1(x, y_view, y_err_view, par_out_view, par_err_out_view, + chi2_out(row, col)); } } }; auto tasks = split_task(0, y.shape(0), n_threads); RunInParallel(process, tasks); - } NDArray fit_pol1(NDView x, NDView y) { @@ -300,27 +312,29 @@ NDArray fit_pol1(NDView x, NDView y, // ~~ S-CURVES ~~ // SCURVE -- -std::array scurve_init_par(const NDView x, const NDView y){ - // Estimate the initial parameters for the fit - std::array start_par{0, 0, 0, 0, 0, 0}; +std::array scurve_init_par(const NDView x, + const NDView y) { + // Estimate the initial parameters for the fit + std::array start_par{0, 0, 0, 0, 0, 0}; - auto ymax = std::max_element(y.begin(), y.end()); - auto ymin = std::min_element(y.begin(), y.end()); - start_par[4] = *ymin + (*ymax - *ymin) / 2; - - // Find the first x where the corresponding y value is above the threshold (start_par[4]) - for (ssize_t i = 0; i < y.size(); ++i) { - if (y[i] >= start_par[4]) { - start_par[2] = x[i]; - break; // Exit the loop after finding the first valid x - } + auto ymax = std::max_element(y.begin(), y.end()); + auto ymin = std::min_element(y.begin(), y.end()); + start_par[4] = *ymin + (*ymax - *ymin) / 2; + + // Find the first x where the corresponding y value is above the threshold + // (start_par[4]) + for (ssize_t i = 0; i < y.size(); ++i) { + if (y[i] >= start_par[4]) { + start_par[2] = x[i]; + break; // Exit the loop after finding the first valid x } + } - start_par[3] = 2 * sqrt(start_par[2]); - start_par[0] = 100; - start_par[1] = 
0.25; - start_par[5] = 1; - return start_par; + start_par[3] = 2 * sqrt(start_par[2]); + start_par[0] = 100; + start_par[1] = 0.25; + start_par[5] = 1; + return start_par; } // - No error @@ -334,7 +348,8 @@ NDArray fit_scurve(NDView x, NDView y) { return result; } -NDArray fit_scurve(NDView x, NDView y, int n_threads) { +NDArray fit_scurve(NDView x, NDView y, + int n_threads) { NDArray result({y.shape(0), y.shape(1), 6}, 0); auto process = [&x, &y, &result](ssize_t first_row, ssize_t last_row) { @@ -358,8 +373,9 @@ NDArray fit_scurve(NDView x, NDView y, int n_th } // - Error -void fit_scurve(NDView x, NDView y, NDView y_err, - NDView par_out, NDView par_err_out, double& chi2) { +void fit_scurve(NDView x, NDView y, + NDView y_err, NDView par_out, + NDView par_err_out, double &chi2) { // Check that we have the correct sizes if (y.size() != x.size() || y.size() != y_err.size() || @@ -380,13 +396,15 @@ void fit_scurve(NDView x, NDView y, NDView y_er // Calculate chi2 chi2 = 0; for (ssize_t i = 0; i < y.size(); i++) { - chi2 += std::pow((y(i) - func::pol1(x(i), par_out.data())) / y_err(i), 2); + chi2 += + std::pow((y(i) - func::pol1(x(i), par_out.data())) / y_err(i), 2); } } -void fit_scurve(NDView x, NDView y, NDView y_err, - NDView par_out, NDView par_err_out, NDView chi2_out, - int n_threads) { +void fit_scurve(NDView x, NDView y, + NDView y_err, NDView par_out, + NDView par_err_out, NDView chi2_out, + int n_threads) { auto process = [&](ssize_t first_row, ssize_t last_row) { for (ssize_t row = first_row; row < last_row; row++) { @@ -399,40 +417,41 @@ void fit_scurve(NDView x, NDView y, NDView y_er NDView par_err_out_view(&par_err_out(row, col, 0), {par_err_out.shape(2)}); - fit_scurve(x, y_view, y_err_view, par_out_view, par_err_out_view, chi2_out(row, col)); - + fit_scurve(x, y_view, y_err_view, par_out_view, + par_err_out_view, chi2_out(row, col)); } } }; auto tasks = split_task(0, y.shape(0), n_threads); RunInParallel(process, tasks); - } // SCURVE2 --- -std::array scurve2_init_par(const NDView x, const NDView y){ - // Estimate the initial parameters for the fit - std::array start_par{0, 0, 0, 0, 0, 0}; +std::array scurve2_init_par(const NDView x, + const NDView y) { + // Estimate the initial parameters for the fit + std::array start_par{0, 0, 0, 0, 0, 0}; - auto ymax = std::max_element(y.begin(), y.end()); - auto ymin = std::min_element(y.begin(), y.end()); - start_par[4] = *ymin + (*ymax - *ymin) / 2; - - // Find the first x where the corresponding y value is above the threshold (start_par[4]) - for (ssize_t i = 0; i < y.size(); ++i) { - if (y[i] <= start_par[4]) { - start_par[2] = x[i]; - break; // Exit the loop after finding the first valid x - } + auto ymax = std::max_element(y.begin(), y.end()); + auto ymin = std::min_element(y.begin(), y.end()); + start_par[4] = *ymin + (*ymax - *ymin) / 2; + + // Find the first x where the corresponding y value is above the threshold + // (start_par[4]) + for (ssize_t i = 0; i < y.size(); ++i) { + if (y[i] <= start_par[4]) { + start_par[2] = x[i]; + break; // Exit the loop after finding the first valid x } + } - start_par[3] = 2 * sqrt(start_par[2]); - start_par[0] = 100; - start_par[1] = 0.25; - start_par[5] = -1; - return start_par; + start_par[3] = 2 * sqrt(start_par[2]); + start_par[0] = 100; + start_par[1] = 0.25; + start_par[5] = -1; + return start_par; } // - No error @@ -446,7 +465,8 @@ NDArray fit_scurve2(NDView x, NDView y) { return result; } -NDArray fit_scurve2(NDView x, NDView y, int n_threads) { +NDArray fit_scurve2(NDView x, 
NDView y, + int n_threads) { NDArray result({y.shape(0), y.shape(1), 6}, 0); auto process = [&x, &y, &result](ssize_t first_row, ssize_t last_row) { @@ -470,8 +490,9 @@ NDArray fit_scurve2(NDView x, NDView y, int n_t } // - Error -void fit_scurve2(NDView x, NDView y, NDView y_err, - NDView par_out, NDView par_err_out, double& chi2) { +void fit_scurve2(NDView x, NDView y, + NDView y_err, NDView par_out, + NDView par_err_out, double &chi2) { // Check that we have the correct sizes if (y.size() != x.size() || y.size() != y_err.size() || @@ -492,13 +513,15 @@ void fit_scurve2(NDView x, NDView y, NDView y_e // Calculate chi2 chi2 = 0; for (ssize_t i = 0; i < y.size(); i++) { - chi2 += std::pow((y(i) - func::pol1(x(i), par_out.data())) / y_err(i), 2); + chi2 += + std::pow((y(i) - func::pol1(x(i), par_out.data())) / y_err(i), 2); } } -void fit_scurve2(NDView x, NDView y, NDView y_err, - NDView par_out, NDView par_err_out, NDView chi2_out, - int n_threads) { +void fit_scurve2(NDView x, NDView y, + NDView y_err, NDView par_out, + NDView par_err_out, NDView chi2_out, + int n_threads) { auto process = [&](ssize_t first_row, ssize_t last_row) { for (ssize_t row = first_row; row < last_row; row++) { @@ -511,15 +534,14 @@ void fit_scurve2(NDView x, NDView y, NDView y_e NDView par_err_out_view(&par_err_out(row, col, 0), {par_err_out.shape(2)}); - fit_scurve2(x, y_view, y_err_view, par_out_view, par_err_out_view, chi2_out(row, col)); - + fit_scurve2(x, y_view, y_err_view, par_out_view, + par_err_out_view, chi2_out(row, col)); } } }; auto tasks = split_task(0, y.shape(0), n_threads); RunInParallel(process, tasks); - } } // namespace aare \ No newline at end of file diff --git a/src/Frame.cpp b/src/Frame.cpp index d44bed5..ef7675f 100644 --- a/src/Frame.cpp +++ b/src/Frame.cpp @@ -29,8 +29,7 @@ uint64_t Frame::size() const { return m_rows * m_cols; } size_t Frame::bytes() const { return m_rows * m_cols * m_dtype.bytes(); } std::byte *Frame::data() const { return m_data; } - -std::byte *Frame::pixel_ptr(uint32_t row, uint32_t col) const{ +std::byte *Frame::pixel_ptr(uint32_t row, uint32_t col) const { if ((row >= m_rows) || (col >= m_cols)) { std::cerr << "Invalid row or column index" << '\n'; return nullptr; @@ -38,7 +37,6 @@ std::byte *Frame::pixel_ptr(uint32_t row, uint32_t col) const{ return m_data + (row * m_cols + col) * (m_dtype.bytes()); } - Frame &Frame::operator=(Frame &&other) noexcept { if (this == &other) { return *this; @@ -70,5 +68,4 @@ Frame Frame::clone() const { return frame; } - } // namespace aare diff --git a/src/Frame.test.cpp b/src/Frame.test.cpp index 4063701..bafb39c 100644 --- a/src/Frame.test.cpp +++ b/src/Frame.test.cpp @@ -65,7 +65,8 @@ TEST_CASE("Set a value in a 64 bit frame") { // only the value we did set should be non-zero for (size_t i = 0; i < rows; i++) { for (size_t j = 0; j < cols; j++) { - uint64_t *data = reinterpret_cast(frame.pixel_ptr(i, j)); + uint64_t *data = + reinterpret_cast(frame.pixel_ptr(i, j)); REQUIRE(data != nullptr); if (i == 5 && j == 7) { REQUIRE(*data == value); @@ -150,4 +151,3 @@ TEST_CASE("test explicit copy constructor") { REQUIRE(frame2.bytes() == rows * cols * bitdepth / 8); REQUIRE(frame2.data() != data); } - diff --git a/src/JungfrauDataFile.cpp b/src/JungfrauDataFile.cpp index 59a1a0a..5fb99a6 100644 --- a/src/JungfrauDataFile.cpp +++ b/src/JungfrauDataFile.cpp @@ -19,16 +19,15 @@ JungfrauDataFile::JungfrauDataFile(const std::filesystem::path &fname) { open_file(m_current_file_index); } - // FileInterface -Frame 
JungfrauDataFile::read_frame(){ +Frame JungfrauDataFile::read_frame() { Frame f(rows(), cols(), Dtype::UINT16); read_into(reinterpret_cast(f.data()), nullptr); return f; } -Frame JungfrauDataFile::read_frame(size_t frame_number){ +Frame JungfrauDataFile::read_frame(size_t frame_number) { seek(frame_number); Frame f(rows(), cols(), Dtype::UINT16); read_into(reinterpret_cast(f.data()), nullptr); @@ -37,7 +36,7 @@ Frame JungfrauDataFile::read_frame(size_t frame_number){ std::vector JungfrauDataFile::read_n(size_t n_frames) { std::vector frames; - for(size_t i = 0; i < n_frames; ++i){ + for (size_t i = 0; i < n_frames; ++i) { frames.push_back(read_frame()); } return frames; @@ -48,7 +47,7 @@ void JungfrauDataFile::read_into(std::byte *image_buf) { } void JungfrauDataFile::read_into(std::byte *image_buf, size_t n_frames) { read_into(image_buf, n_frames, nullptr); -} +} size_t JungfrauDataFile::frame_number(size_t frame_index) { seek(frame_index); @@ -59,7 +58,9 @@ std::array JungfrauDataFile::shape() const { return {static_cast(rows()), static_cast(cols())}; } -DetectorType JungfrauDataFile::detector_type() const { return DetectorType::Jungfrau; } +DetectorType JungfrauDataFile::detector_type() const { + return DetectorType::Jungfrau; +} std::string JungfrauDataFile::base_name() const { return m_base_name; } @@ -195,22 +196,23 @@ void JungfrauDataFile::read_into(std::byte *image_buf, size_t n_frames, JungfrauDataHeader *header) { if (header) { for (size_t i = 0; i < n_frames; ++i) - read_into(image_buf + i * m_bytes_per_frame, header + i); - }else{ + read_into(image_buf + i * m_bytes_per_frame, header + i); + } else { for (size_t i = 0; i < n_frames; ++i) read_into(image_buf + i * m_bytes_per_frame, nullptr); } } -void JungfrauDataFile::read_into(NDArray* image, JungfrauDataHeader* header) { - if(image->shape()!=shape()){ - throw std::runtime_error(LOCATION + - "Image shape does not match file size: " + std::to_string(rows()) + "x" + std::to_string(cols())); +void JungfrauDataFile::read_into(NDArray *image, + JungfrauDataHeader *header) { + if (image->shape() != shape()) { + throw std::runtime_error( + LOCATION + "Image shape does not match file size: " + + std::to_string(rows()) + "x" + std::to_string(cols())); } read_into(reinterpret_cast(image->data()), header); } - JungfrauDataHeader JungfrauDataFile::read_header() { JungfrauDataHeader header; if (auto rc = fread(&header, 1, sizeof(header), m_fp.get()); diff --git a/src/JungfrauDataFile.test.cpp b/src/JungfrauDataFile.test.cpp index ce51168..21a4e32 100644 --- a/src/JungfrauDataFile.test.cpp +++ b/src/JungfrauDataFile.test.cpp @@ -1,21 +1,21 @@ #include "aare/JungfrauDataFile.hpp" -#include #include "test_config.hpp" +#include using aare::JungfrauDataFile; using aare::JungfrauDataHeader; TEST_CASE("Open a Jungfrau data file", "[.files]") { - //we know we have 4 files with 7, 7, 7, and 3 frames - //firs frame number if 1 and the bunch id is frame_number**2 - //so we can check the header + // we know we have 4 files with 7, 7, 7, and 3 frames + // firs frame number if 1 and the bunch id is frame_number**2 + // so we can check the header auto fpath = test_data_path() / "dat" / "AldoJF500k_000000.dat"; REQUIRE(std::filesystem::exists(fpath)); JungfrauDataFile f(fpath); REQUIRE(f.rows() == 512); REQUIRE(f.cols() == 1024); - REQUIRE(f.bytes_per_frame() == 1048576); + REQUIRE(f.bytes_per_frame() == 1048576); REQUIRE(f.pixels_per_frame() == 524288); REQUIRE(f.bytes_per_pixel() == 2); REQUIRE(f.bitdepth() == 16); @@ -25,7 +25,7 @@ TEST_CASE("Open a 
Jungfrau data file", "[.files]") { REQUIRE(f.total_frames() == 24); REQUIRE(f.current_file() == fpath); - //Check that the frame number and buch id is read correctly + // Check that the frame number and buch id is read correctly for (size_t i = 0; i < 24; ++i) { JungfrauDataHeader header; aare::NDArray image(f.shape()); @@ -37,65 +37,64 @@ TEST_CASE("Open a Jungfrau data file", "[.files]") { } } -TEST_CASE("Seek in a JungfrauDataFile", "[.files]"){ +TEST_CASE("Seek in a JungfrauDataFile", "[.files]") { auto fpath = test_data_path() / "dat" / "AldoJF65k_000000.dat"; REQUIRE(std::filesystem::exists(fpath)); JungfrauDataFile f(fpath); - //The file should have 113 frames + // The file should have 113 frames f.seek(19); REQUIRE(f.tell() == 19); auto h = f.read_header(); - REQUIRE(h.framenum == 19+1); + REQUIRE(h.framenum == 19 + 1); - //Reading again does not change the file pointer + // Reading again does not change the file pointer auto h2 = f.read_header(); - REQUIRE(h2.framenum == 19+1); + REQUIRE(h2.framenum == 19 + 1); f.seek(59); REQUIRE(f.tell() == 59); auto h3 = f.read_header(); - REQUIRE(h3.framenum == 59+1); + REQUIRE(h3.framenum == 59 + 1); JungfrauDataHeader h4; aare::NDArray image(f.shape()); f.read_into(&image, &h4); - REQUIRE(h4.framenum == 59+1); + REQUIRE(h4.framenum == 59 + 1); - //now we should be on the next frame + // now we should be on the next frame REQUIRE(f.tell() == 60); - REQUIRE(f.read_header().framenum == 60+1); + REQUIRE(f.read_header().framenum == 60 + 1); - REQUIRE_THROWS(f.seek(86356)); //out of range + REQUIRE_THROWS(f.seek(86356)); // out of range } -TEST_CASE("Open a Jungfrau data file with non zero file index", "[.files]"){ +TEST_CASE("Open a Jungfrau data file with non zero file index", "[.files]") { auto fpath = test_data_path() / "dat" / "AldoJF65k_000003.dat"; REQUIRE(std::filesystem::exists(fpath)); JungfrauDataFile f(fpath); - //18 files per data file, opening the 3rd file we ignore the first 3 - REQUIRE(f.total_frames() == 113-18*3); + // 18 files per data file, opening the 3rd file we ignore the first 3 + REQUIRE(f.total_frames() == 113 - 18 * 3); REQUIRE(f.tell() == 0); - //Frame numbers start at 1 in the first file - REQUIRE(f.read_header().framenum == 18*3+1); + // Frame numbers start at 1 in the first file + REQUIRE(f.read_header().framenum == 18 * 3 + 1); // moving relative to the third file f.seek(5); - REQUIRE(f.read_header().framenum == 18*3+1+5); + REQUIRE(f.read_header().framenum == 18 * 3 + 1 + 5); // ignoring the first 3 files REQUIRE(f.n_files() == 4); REQUIRE(f.current_file().stem() == "AldoJF65k_000003"); - } -TEST_CASE("Read into throws if size doesn't match", "[.files]"){ +TEST_CASE("Read into throws if size doesn't match", "[.files]") { auto fpath = test_data_path() / "dat" / "AldoJF65k_000000.dat"; REQUIRE(std::filesystem::exists(fpath)); @@ -109,6 +108,4 @@ TEST_CASE("Read into throws if size doesn't match", "[.files]"){ REQUIRE_THROWS(f.read_into(&image)); REQUIRE(f.tell() == 0); - - } \ No newline at end of file diff --git a/src/NDArray.test.cpp b/src/NDArray.test.cpp index 819a1a9..91b5933 100644 --- a/src/NDArray.test.cpp +++ b/src/NDArray.test.cpp @@ -35,7 +35,7 @@ TEST_CASE("Construct from an NDView") { } } -TEST_CASE("3D NDArray from NDView"){ +TEST_CASE("3D NDArray from NDView") { std::vector data(27); std::iota(data.begin(), data.end(), 0); NDView view(data.data(), Shape<3>{3, 3, 3}); @@ -44,9 +44,9 @@ TEST_CASE("3D NDArray from NDView"){ REQUIRE(image.size() == view.size()); REQUIRE(image.data() != view.data()); - 
for(ssize_t i=0; i MultiplyNDArrayUsingOperator(NDArray &a, NDArray &b) { // return a * a * b * b; - NDArrayc = a*b; + NDArray c = a * b; return c; } @@ -162,7 +162,6 @@ NDArray AddNDArrayUsingIndex(NDArray &a, NDArray &b) { return res; } - TEST_CASE("Compare two images") { NDArray a; NDArray b; @@ -222,7 +221,6 @@ TEST_CASE("Bitwise and on data") { REQUIRE(a(2) == 384); } - TEST_CASE("Elementwise operations on images") { std::array shape{5, 5}; double a_val = 3.0; @@ -258,7 +256,8 @@ TEST_CASE("Elementwise operations on images") { NDArray A(shape, a_val); NDArray B(shape, b_val); NDArray C = A - B; - // auto C = A - B; // This works but the result is a lazy ArraySub object + // auto C = A - B; // This works but the result is a lazy ArraySub + // object // Value of C matches for (uint32_t i = 0; i < C.size(); ++i) { @@ -282,7 +281,8 @@ TEST_CASE("Elementwise operations on images") { SECTION("Multiply two images") { NDArray A(shape, a_val); NDArray B(shape, b_val); - // auto C = A * B; // This works but the result is a lazy ArrayMul object + // auto C = A * B; // This works but the result is a lazy ArrayMul + // object NDArray C = A * B; // Value of C matches @@ -307,7 +307,8 @@ TEST_CASE("Elementwise operations on images") { SECTION("Divide two images") { NDArray A(shape, a_val); NDArray B(shape, b_val); - // auto C = A / B; // This works but the result is a lazy ArrayDiv object + // auto C = A / B; // This works but the result is a lazy ArrayDiv + // object NDArray C = A / B; // Value of C matches diff --git a/src/NDView.test.cpp b/src/NDView.test.cpp index 89e76e9..a65758c 100644 --- a/src/NDView.test.cpp +++ b/src/NDView.test.cpp @@ -2,8 +2,8 @@ #include #include -#include #include +#include using aare::NDView; using aare::Shape; @@ -151,8 +151,10 @@ TEST_CASE("divide with another span") { std::vector vec1{3, 2, 1}; std::vector result{3, 6, 3}; - NDView data0(vec0.data(), Shape<1>{static_cast(vec0.size())}); - NDView data1(vec1.data(), Shape<1>{static_cast(vec1.size())}); + NDView data0(vec0.data(), + Shape<1>{static_cast(vec0.size())}); + NDView data1(vec1.data(), + Shape<1>{static_cast(vec1.size())}); data0 /= data1; @@ -181,8 +183,7 @@ TEST_CASE("compare two views") { REQUIRE((view1 == view2)); } - -TEST_CASE("Create a view over a vector"){ +TEST_CASE("Create a view over a vector") { std::vector vec(12); std::iota(vec.begin(), vec.end(), 0); auto v = aare::make_view(vec); diff --git a/src/NumpyFile.cpp b/src/NumpyFile.cpp index e375ce3..4e0c215 100644 --- a/src/NumpyFile.cpp +++ b/src/NumpyFile.cpp @@ -4,16 +4,16 @@ namespace aare { - - -NumpyFile::NumpyFile(const std::filesystem::path &fname, const std::string &mode, FileConfig cfg) { +NumpyFile::NumpyFile(const std::filesystem::path &fname, + const std::string &mode, FileConfig cfg) { // TODO! 
add opts to constructor m_mode = mode; if (mode == "r") { fp = fopen(fname.string().c_str(), "rb"); if (!fp) { - throw std::runtime_error(fmt::format("Could not open: {} for reading", fname.string())); + throw std::runtime_error( + fmt::format("Could not open: {} for reading", fname.string())); } load_metadata(); } else if (mode == "w") { @@ -24,11 +24,15 @@ NumpyFile::NumpyFile(const std::filesystem::path &fname, const std::string &mode m_header.shape = {0, cfg.rows, cfg.cols}; fp = fopen(fname.string().c_str(), "wb"); if (!fp) { - throw std::runtime_error(fmt::format("Could not open: {} for reading", fname.string())); + throw std::runtime_error( + fmt::format("Could not open: {} for reading", fname.string())); } - initial_header_len = aare::NumpyHelpers::write_header(std::filesystem::path(fname.c_str()), m_header); + initial_header_len = aare::NumpyHelpers::write_header( + std::filesystem::path(fname.c_str()), m_header); } - m_pixels_per_frame = std::accumulate(m_header.shape.begin() + 1, m_header.shape.end(), 1, std::multiplies<>()); + m_pixels_per_frame = + std::accumulate(m_header.shape.begin() + 1, m_header.shape.end(), 1, + std::multiplies<>()); m_bytes_per_frame = m_header.dtype.bitdepth() / 8 * m_pixels_per_frame; } @@ -63,7 +67,8 @@ void NumpyFile::get_frame_into(size_t frame_number, std::byte *image_buf) { if (frame_number > m_header.shape[0]) { throw std::invalid_argument("Frame number out of range"); } - if (fseek(fp, header_size + frame_number * m_bytes_per_frame, SEEK_SET)) // NOLINT + if (fseek(fp, header_size + frame_number * m_bytes_per_frame, + SEEK_SET)) // NOLINT throw std::runtime_error("Could not seek to frame"); size_t const rc = fread(image_buf, m_bytes_per_frame, 1, fp); @@ -113,7 +118,8 @@ NumpyFile::~NumpyFile() noexcept { // write header size_t const rc = fwrite(header_str.c_str(), header_str.size(), 1, fp); if (rc != 1) { - std::cout << "Error writing header to numpy file in destructor" << std::endl; + std::cout << "Error writing header to numpy file in destructor" + << std::endl; } } @@ -140,8 +146,10 @@ void NumpyFile::load_metadata() { } // read version - rc = fread(reinterpret_cast(&major_ver_), sizeof(major_ver_), 1, fp); - rc += fread(reinterpret_cast(&minor_ver_), sizeof(minor_ver_), 1, fp); + rc = + fread(reinterpret_cast(&major_ver_), sizeof(major_ver_), 1, fp); + rc += + fread(reinterpret_cast(&minor_ver_), sizeof(minor_ver_), 1, fp); if (rc != 2) { throw std::runtime_error("Error reading numpy version"); } @@ -159,7 +167,8 @@ void NumpyFile::load_metadata() { if (rc != 1) { throw std::runtime_error("Error reading header length"); } - header_size = aare::NumpyHelpers::magic_string_length + 2 + header_len_size + header_len; + header_size = aare::NumpyHelpers::magic_string_length + 2 + + header_len_size + header_len; if (header_size % 16 != 0) { fmt::print("Warning: header length is not a multiple of 16\n"); } diff --git a/src/NumpyFile.test.cpp b/src/NumpyFile.test.cpp index bdd6451..e687bea 100644 --- a/src/NumpyFile.test.cpp +++ b/src/NumpyFile.test.cpp @@ -1,8 +1,8 @@ #include "aare/NumpyFile.hpp" #include "aare/NDArray.hpp" -#include #include "test_config.hpp" +#include using aare::Dtype; using aare::NumpyFile; @@ -23,7 +23,7 @@ TEST_CASE("Read a 1D numpy file with int32 data type", "[.integration]") { REQUIRE(data(i) == i); } } - + TEST_CASE("Read a 3D numpy file with np.double data type", "[.integration]") { auto fpath = test_data_path() / "numpy" / "test_3d_double.npy"; diff --git a/src/NumpyHelpers.cpp b/src/NumpyHelpers.cpp index 
b8414d9..106327e 100644 --- a/src/NumpyHelpers.cpp +++ b/src/NumpyHelpers.cpp @@ -29,7 +29,8 @@ namespace aare { std::string NumpyHeader::to_string() const { std::stringstream sstm; - sstm << "dtype: " << dtype.to_string() << ", fortran_order: " << fortran_order << ' '; + sstm << "dtype: " << dtype.to_string() + << ", fortran_order: " << fortran_order << ' '; sstm << "shape: ("; for (auto item : shape) sstm << item << ','; @@ -37,10 +38,10 @@ std::string NumpyHeader::to_string() const { return sstm.str(); } - namespace NumpyHelpers { -std::unordered_map parse_dict(std::string in, const std::vector &keys) { +std::unordered_map +parse_dict(std::string in, const std::vector &keys) { std::unordered_map map; if (keys.empty()) return map; @@ -100,7 +101,8 @@ aare::Dtype parse_descr(std::string typestring) { constexpr char little_endian_char = '<'; constexpr char big_endian_char = '>'; constexpr char no_endian_char = '|'; - constexpr std::array endian_chars = {little_endian_char, big_endian_char, no_endian_char}; + constexpr std::array endian_chars = { + little_endian_char, big_endian_char, no_endian_char}; constexpr std::array numtype_chars = {'f', 'i', 'u', 'c'}; const char byteorder_c = typestring[0]; @@ -139,7 +141,9 @@ std::string get_value_from_map(const std::string &mapstr) { return trim(tmp); } -bool is_digits(const std::string &str) { return std::all_of(str.begin(), str.end(), ::isdigit); } +bool is_digits(const std::string &str) { + return std::all_of(str.begin(), str.end(), ::isdigit); +} std::vector parse_tuple(std::string in) { std::vector v; @@ -215,20 +219,25 @@ inline std::string write_boolean(bool b) { return "False"; } -inline std::string write_header_dict(const std::string &descr, bool fortran_order, const std::vector &shape) { +inline std::string write_header_dict(const std::string &descr, + bool fortran_order, + const std::vector &shape) { std::string const s_fortran_order = write_boolean(fortran_order); std::string const shape_s = write_tuple(shape); - return "{'descr': '" + descr + "', 'fortran_order': " + s_fortran_order + ", 'shape': " + shape_s + ", }"; + return "{'descr': '" + descr + "', 'fortran_order': " + s_fortran_order + + ", 'shape': " + shape_s + ", }"; } -size_t write_header(const std::filesystem::path &fname, const NumpyHeader &header) { +size_t write_header(const std::filesystem::path &fname, + const NumpyHeader &header) { std::ofstream out(fname, std::ios::binary | std::ios::out); return write_header(out, header); } size_t write_header(std::ostream &out, const NumpyHeader &header) { - std::string const header_dict = write_header_dict(header.dtype.to_string(), header.fortran_order, header.shape); + std::string const header_dict = write_header_dict( + header.dtype.to_string(), header.fortran_order, header.shape); size_t length = magic_string_length + 2 + 2 + header_dict.length() + 1; @@ -247,17 +256,22 @@ size_t write_header(std::ostream &out, const NumpyHeader &header) { // write header length if (version_major == 1 && version_minor == 0) { - auto header_len = static_cast(header_dict.length() + padding.length() + 1); + auto header_len = + static_cast(header_dict.length() + padding.length() + 1); - std::array header_len_le16{static_cast((header_len >> 0) & 0xff), - static_cast((header_len >> 8) & 0xff)}; + std::array header_len_le16{ + static_cast((header_len >> 0) & 0xff), + static_cast((header_len >> 8) & 0xff)}; out.write(reinterpret_cast(header_len_le16.data()), 2); } else { - auto header_len = static_cast(header_dict.length() + padding.length() + 1); + 
auto header_len = + static_cast(header_dict.length() + padding.length() + 1); std::array header_len_le32{ - static_cast((header_len >> 0) & 0xff), static_cast((header_len >> 8) & 0xff), - static_cast((header_len >> 16) & 0xff), static_cast((header_len >> 24) & 0xff)}; + static_cast((header_len >> 0) & 0xff), + static_cast((header_len >> 8) & 0xff), + static_cast((header_len >> 16) & 0xff), + static_cast((header_len >> 24) & 0xff)}; out.write(reinterpret_cast(header_len_le32.data()), 4); } diff --git a/src/NumpyHelpers.test.cpp b/src/NumpyHelpers.test.cpp index 36fcfe6..ad55b17 100644 --- a/src/NumpyHelpers.test.cpp +++ b/src/NumpyHelpers.test.cpp @@ -19,7 +19,9 @@ TEST_CASE("Check for quotes and return stripped string") { REQUIRE(parse_str("''") == ""); } -TEST_CASE("parsing a string without quotes throws") { REQUIRE_THROWS(parse_str("hej")); } +TEST_CASE("parsing a string without quotes throws") { + REQUIRE_THROWS(parse_str("hej")); +} TEST_CASE("trim whitespace") { REQUIRE(trim(" hej ") == "hej"); @@ -53,7 +55,8 @@ TEST_CASE("is element in array") { } TEST_CASE("Parse numpy dict") { - std::string in = "{'descr': ' keys{"descr", "fortran_order", "shape"}; auto map = parse_dict(in, keys); REQUIRE(map["descr"] == "' #include +#include #include #include @@ -58,7 +57,8 @@ TEST_CASE("test pedestal push") { if (k < 5) { REQUIRE(pedestal.cur_samples()(i, j) == k + 1); REQUIRE(pedestal.get_sum()(i, j) == (k + 1) * (i + j)); - REQUIRE(pedestal.get_sum2()(i, j) == (k + 1) * (i + j) * (i + j)); + REQUIRE(pedestal.get_sum2()(i, j) == + (k + 1) * (i + j) * (i + j)); } else { REQUIRE(pedestal.cur_samples()(i, j) == 5); REQUIRE(pedestal.get_sum()(i, j) == 5 * (i + j)); @@ -95,9 +95,12 @@ TEST_CASE("test pedestal with normal distribution") { for (int i = 0; i < 3; i++) { for (int j = 0; j < 5; j++) { - REQUIRE_THAT(mean(i, j), Catch::Matchers::WithinAbs(MEAN, MEAN * TOLERANCE)); - REQUIRE_THAT(variance(i, j), Catch::Matchers::WithinAbs(VAR, VAR * TOLERANCE)); - REQUIRE_THAT(standard_deviation(i, j), Catch::Matchers::WithinAbs(STD, STD * TOLERANCE)); + REQUIRE_THAT(mean(i, j), + Catch::Matchers::WithinAbs(MEAN, MEAN * TOLERANCE)); + REQUIRE_THAT(variance(i, j), + Catch::Matchers::WithinAbs(VAR, VAR * TOLERANCE)); + REQUIRE_THAT(standard_deviation(i, j), + Catch::Matchers::WithinAbs(STD, STD * TOLERANCE)); } } } \ No newline at end of file diff --git a/src/PixelMap.cpp b/src/PixelMap.cpp index d62759a..5be5ed4 100644 --- a/src/PixelMap.cpp +++ b/src/PixelMap.cpp @@ -31,7 +31,7 @@ NDArray GenerateMoench03PixelMap() { } NDArray GenerateMoench05PixelMap() { - std::array adc_numbers = {5, 9, 1}; + std::array adc_numbers = {5, 9, 1}; NDArray order_map({160, 150}); int n_pixel = 0; for (int row = 0; row < 160; row++) { @@ -40,11 +40,11 @@ NDArray GenerateMoench05PixelMap() { for (int i_sc = 0; i_sc < 3; i_sc++) { int col = 50 * i_sc + i_col; int adc_nr = adc_numbers[i_sc]; - int i_analog = n_pixel * 12 + adc_nr; + int i_analog = n_pixel * 12 + adc_nr; - // analog_frame[row * 150 + col] = analog_data[i_analog] & 0x3FFF; + // analog_frame[row * 150 + col] = analog_data[i_analog] & + // 0x3FFF; order_map(row, col) = i_analog; - } } } @@ -52,7 +52,7 @@ NDArray GenerateMoench05PixelMap() { } NDArray GenerateMoench05PixelMap1g() { - std::array adc_numbers = {1, 2, 0}; + std::array adc_numbers = {1, 2, 0}; NDArray order_map({160, 150}); int n_pixel = 0; for (int row = 0; row < 160; row++) { @@ -61,12 +61,11 @@ NDArray GenerateMoench05PixelMap1g() { for (int i_sc = 0; i_sc < 3; i_sc++) { int col = 50 * i_sc + i_col; int 
adc_nr = adc_numbers[i_sc]; - int i_analog = n_pixel * 3 + adc_nr; + int i_analog = n_pixel * 3 + adc_nr; - - // analog_frame[row * 150 + col] = analog_data[i_analog] & 0x3FFF; + // analog_frame[row * 150 + col] = analog_data[i_analog] & + // 0x3FFF; order_map(row, col) = i_analog; - } } } @@ -85,42 +84,42 @@ NDArray GenerateMoench05PixelMapOld() { int adc_nr = adc_numbers[i_sc]; int i_analog = n_pixel * 32 + adc_nr; - - // analog_frame[row * 150 + col] = analog_data[i_analog] & 0x3FFF; + // analog_frame[row * 150 + col] = analog_data[i_analog] & + // 0x3FFF; order_map(row, col) = i_analog; - } } } return order_map; } -NDArrayGenerateEigerFlipRowsPixelMap(){ +NDArray GenerateEigerFlipRowsPixelMap() { NDArray order_map({256, 512}); - for(int row = 0; row < 256; row++){ - for(int col = 0; col < 512; col++){ - order_map(row, col) = 255*512-row*512 + col; + for (int row = 0; row < 256; row++) { + for (int col = 0; col < 512; col++) { + order_map(row, col) = 255 * 512 - row * 512 + col; } } return order_map; } -NDArrayGenerateMH02SingleCounterPixelMap(){ +NDArray GenerateMH02SingleCounterPixelMap() { NDArray order_map({48, 48}); - for(int row = 0; row < 48; row++){ - for(int col = 0; col < 48; col++){ - order_map(row, col) = row*48 + col; + for (int row = 0; row < 48; row++) { + for (int col = 0; col < 48; col++) { + order_map(row, col) = row * 48 + col; } } return order_map; } -NDArray GenerateMH02FourCounterPixelMap(){ +NDArray GenerateMH02FourCounterPixelMap() { NDArray order_map({4, 48, 48}); - for (int counter=0; counter<4; counter++){ - for(int row = 0; row < 48; row++){ - for(int col = 0; col < 48; col++){ - order_map(counter, row, col) = counter*48*48 + row*48 + col; + for (int counter = 0; counter < 4; counter++) { + for (int row = 0; row < 48; row++) { + for (int col = 0; col < 48; col++) { + order_map(counter, row, col) = + counter * 48 * 48 + row * 48 + col; } } } diff --git a/src/RawFile.cpp b/src/RawFile.cpp index 122cf96..22bf8dd 100644 --- a/src/RawFile.cpp +++ b/src/RawFile.cpp @@ -1,9 +1,9 @@ #include "aare/RawFile.hpp" -#include "aare/algorithm.hpp" #include "aare/PixelMap.hpp" +#include "aare/algorithm.hpp" #include "aare/defs.hpp" -#include "aare/logger.hpp" #include "aare/geo_helpers.hpp" +#include "aare/logger.hpp" #include #include @@ -17,8 +17,9 @@ RawFile::RawFile(const std::filesystem::path &fname, const std::string &mode) m_mode = mode; if (mode == "r") { find_geometry(); - if (m_master.roi()){ - m_geometry = update_geometry_with_roi(m_geometry, m_master.roi().value()); + if (m_master.roi()) { + m_geometry = + update_geometry_with_roi(m_geometry, m_master.roi().value()); } open_subfiles(); } else { @@ -47,32 +48,31 @@ void RawFile::read_into(std::byte *image_buf) { return get_frame_into(m_current_frame++, image_buf); } - void RawFile::read_into(std::byte *image_buf, DetectorHeader *header) { return get_frame_into(m_current_frame++, image_buf, header); } -void RawFile::read_into(std::byte *image_buf, size_t n_frames, DetectorHeader *header) { +void RawFile::read_into(std::byte *image_buf, size_t n_frames, + DetectorHeader *header) { // return get_frame_into(m_current_frame++, image_buf, header); for (size_t i = 0; i < n_frames; i++) { this->get_frame_into(m_current_frame++, image_buf, header); image_buf += bytes_per_frame(); - if(header) - header+=n_modules(); + if (header) + header += n_modules(); } - } size_t RawFile::n_modules() const { return m_master.n_modules(); } - size_t RawFile::bytes_per_frame() { - return m_geometry.pixels_x * m_geometry.pixels_y * 
m_master.bitdepth() / bits_per_byte; + return m_geometry.pixels_x * m_geometry.pixels_y * m_master.bitdepth() / + bits_per_byte; } -size_t RawFile::pixels_per_frame() { - // return m_rows * m_cols; +size_t RawFile::pixels_per_frame() { + // return m_rows * m_cols; return m_geometry.pixels_x * m_geometry.pixels_y; } @@ -128,27 +128,25 @@ DetectorHeader RawFile::read_header(const std::filesystem::path &fname) { return h; } - RawMasterFile RawFile::master() const { return m_master; } /** * @brief Find the geometry of the detector by opening all the subfiles and - * reading the headers. + * reading the headers. */ void RawFile::find_geometry() { - - //Hold the maximal row and column number found - //Later used for calculating the total number of rows and columns + + // Hold the maximal row and column number found + // Later used for calculating the total number of rows and columns uint16_t r{}; uint16_t c{}; - for (size_t i = 0; i < n_modules(); i++) { auto h = read_header(m_master.data_fname(i, 0)); r = std::max(r, h.row); c = std::max(c, h.column); // positions.push_back({h.row, h.column}); - + ModuleGeometry g; g.origin_x = h.column * m_master.pixels_x(); g.origin_y = h.row * m_master.pixels_y(); @@ -157,34 +155,30 @@ void RawFile::find_geometry() { g.width = m_master.pixels_x(); g.height = m_master.pixels_y(); m_geometry.module_pixel_0.push_back(g); - } r++; c++; m_geometry.pixels_y = (r * m_master.pixels_y()); - m_geometry.pixels_x = (c * m_master.pixels_x()); + m_geometry.pixels_x = (c * m_master.pixels_x()); m_geometry.modules_x = c; m_geometry.modules_y = r; m_geometry.pixels_y += static_cast((r - 1) * cfg.module_gap_row); - } - Frame RawFile::get_frame(size_t frame_index) { - auto f = Frame(m_geometry.pixels_y, m_geometry.pixels_x, Dtype::from_bitdepth(m_master.bitdepth())); + auto f = Frame(m_geometry.pixels_y, m_geometry.pixels_x, + Dtype::from_bitdepth(m_master.bitdepth())); std::byte *frame_buffer = f.data(); get_frame_into(frame_index, frame_buffer); return f; } +size_t RawFile::bytes_per_pixel() const { return m_master.bitdepth() / 8; } -size_t RawFile::bytes_per_pixel() const { - return m_master.bitdepth() / 8; -} - -void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, DetectorHeader *header) { +void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, + DetectorHeader *header) { LOG(logDEBUG) << "RawFile::get_frame_into(" << frame_index << ")"; if (frame_index >= total_frames()) { throw std::runtime_error(LOCATION + "Frame number out of range"); @@ -192,12 +186,12 @@ void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, Detect std::vector frame_numbers(n_modules()); std::vector frame_indices(n_modules(), frame_index); - // sync the frame numbers - if (n_modules() != 1) { //if we have more than one module + if (n_modules() != 1) { // if we have more than one module for (size_t part_idx = 0; part_idx != n_modules(); ++part_idx) { - frame_numbers[part_idx] = m_subfiles[part_idx]->frame_number(frame_index); + frame_numbers[part_idx] = + m_subfiles[part_idx]->frame_number(frame_index); } // 1. 
if frame number vector is the same break @@ -218,7 +212,8 @@ void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, Detect } frame_numbers[min_frame_idx] = - m_subfiles[min_frame_idx]->frame_number(frame_indices[min_frame_idx]); + m_subfiles[min_frame_idx]->frame_number( + frame_indices[min_frame_idx]); } } @@ -226,15 +221,18 @@ void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, Detect // get the part from each subfile and copy it to the frame for (size_t part_idx = 0; part_idx != n_modules(); ++part_idx) { auto corrected_idx = frame_indices[part_idx]; - - // This is where we start writing - auto offset = (m_geometry.module_pixel_0[part_idx].origin_y * m_geometry.pixels_x + - m_geometry.module_pixel_0[part_idx].origin_x)*m_master.bitdepth()/8; - if (m_geometry.module_pixel_0[part_idx].origin_x!=0) - throw std::runtime_error(LOCATION + " Implementation error. x pos not 0."); - - //TODO! What if the files don't match? + // This is where we start writing + auto offset = (m_geometry.module_pixel_0[part_idx].origin_y * + m_geometry.pixels_x + + m_geometry.module_pixel_0[part_idx].origin_x) * + m_master.bitdepth() / 8; + + if (m_geometry.module_pixel_0[part_idx].origin_x != 0) + throw std::runtime_error(LOCATION + + " Implementation error. x pos not 0."); + + // TODO! What if the files don't match? m_subfiles[part_idx]->seek(corrected_idx); m_subfiles[part_idx]->read_into(frame_buffer + offset, header); if (header) @@ -242,7 +240,7 @@ void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, Detect } } else { - //TODO! should we read row by row? + // TODO! should we read row by row? // create a buffer large enough to hold a full module auto bytes_per_part = m_master.pixels_y() * m_master.pixels_x() * @@ -260,7 +258,7 @@ void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, Detect m_subfiles[part_idx]->seek(corrected_idx); m_subfiles[part_idx]->read_into(part_buffer, header); - if(header) + if (header) ++header; for (size_t cur_row = 0; cur_row < static_cast(pos.height); @@ -271,15 +269,13 @@ void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, Detect auto dest = (irow * this->m_geometry.pixels_x + icol); dest = dest * m_master.bitdepth() / 8; memcpy(frame_buffer + dest, - part_buffer + cur_row * pos.width * - m_master.bitdepth() / 8, + part_buffer + + cur_row * pos.width * m_master.bitdepth() / 8, pos.width * m_master.bitdepth() / 8); - } } delete[] part_buffer; } - } std::vector RawFile::read_n(size_t n_frames) { @@ -299,5 +295,4 @@ size_t RawFile::frame_number(size_t frame_index) { return m_subfiles[0]->frame_number(frame_index); } - } // namespace aare diff --git a/src/RawFile.test.cpp b/src/RawFile.test.cpp index 9109985..1fb441f 100644 --- a/src/RawFile.test.cpp +++ b/src/RawFile.test.cpp @@ -1,18 +1,18 @@ +#include "aare/RawFile.hpp" #include "aare/File.hpp" #include "aare/RawMasterFile.hpp" //needed for ROI -#include "aare/RawFile.hpp" #include #include #include "test_config.hpp" - using aare::File; TEST_CASE("Read number of frames from a jungfrau raw file", "[.integration]") { - auto fpath = test_data_path() / "jungfrau" / "jungfrau_single_master_0.json"; + auto fpath = + test_data_path() / "jungfrau" / "jungfrau_single_master_0.json"; REQUIRE(std::filesystem::exists(fpath)); File f(fpath, "r"); @@ -20,7 +20,8 @@ TEST_CASE("Read number of frames from a jungfrau raw file", "[.integration]") { } TEST_CASE("Read frame numbers from a jungfrau raw file", "[.integration]") { - auto fpath = 
test_data_path() / "jungfrau" / "jungfrau_single_master_0.json"; + auto fpath = + test_data_path() / "jungfrau" / "jungfrau_single_master_0.json"; REQUIRE(std::filesystem::exists(fpath)); File f(fpath, "r"); @@ -36,7 +37,8 @@ TEST_CASE("Read frame numbers from a jungfrau raw file", "[.integration]") { } TEST_CASE("Read a frame number too high throws", "[.integration]") { - auto fpath = test_data_path() / "jungfrau" / "jungfrau_single_master_0.json"; + auto fpath = + test_data_path() / "jungfrau" / "jungfrau_single_master_0.json"; REQUIRE(std::filesystem::exists(fpath)); File f(fpath, "r"); @@ -49,8 +51,10 @@ TEST_CASE("Read a frame number too high throws", "[.integration]") { REQUIRE_THROWS(f.frame_number(10)); } -TEST_CASE("Read a frame numbers where the subfile is missing throws", "[.integration]") { - auto fpath = test_data_path() / "jungfrau" / "jungfrau_missing_subfile_master_0.json"; +TEST_CASE("Read a frame numbers where the subfile is missing throws", + "[.integration]") { + auto fpath = test_data_path() / "jungfrau" / + "jungfrau_missing_subfile_master_0.json"; REQUIRE(std::filesystem::exists(fpath)); File f(fpath, "r"); @@ -58,7 +62,7 @@ TEST_CASE("Read a frame numbers where the subfile is missing throws", "[.integra // we know this file has 10 frames with frame numbers 1 to 10 // f0 1,2,3 // f1 4,5,6 - but files f1-f3 are missing - // f2 7,8,9 - gone + // f2 7,8,9 - gone // f3 10 - gone REQUIRE(f.frame_number(0) == 1); REQUIRE(f.frame_number(1) == 2); @@ -69,15 +73,18 @@ TEST_CASE("Read a frame numbers where the subfile is missing throws", "[.integra REQUIRE_THROWS(f.frame_number(10)); } - -TEST_CASE("Read data from a jungfrau 500k single port raw file", "[.integration]") { - auto fpath = test_data_path() / "jungfrau" / "jungfrau_single_master_0.json"; +TEST_CASE("Read data from a jungfrau 500k single port raw file", + "[.integration]") { + auto fpath = + test_data_path() / "jungfrau" / "jungfrau_single_master_0.json"; REQUIRE(std::filesystem::exists(fpath)); File f(fpath, "r"); - // we know this file has 10 frames with pixel 0,0 being: 2123, 2051, 2109, 2117, 2089, 2095, 2072, 2126, 2097, 2102 - std::vector pixel_0_0 = {2123, 2051, 2109, 2117, 2089, 2095, 2072, 2126, 2097, 2102}; + // we know this file has 10 frames with pixel 0,0 being: 2123, 2051, 2109, + // 2117, 2089, 2095, 2072, 2126, 2097, 2102 + std::vector pixel_0_0 = {2123, 2051, 2109, 2117, 2089, + 2095, 2072, 2126, 2097, 2102}; for (size_t i = 0; i < 10; i++) { auto frame = f.read_frame(); CHECK(frame.rows() == 512); @@ -100,10 +107,12 @@ TEST_CASE("Read frame numbers from a raw file", "[.integration]") { } TEST_CASE("Compare reading from a numpy file with a raw file", "[.files]") { - auto fpath_raw = test_data_path() / "raw/jungfrau" / "jungfrau_single_master_0.json"; + auto fpath_raw = + test_data_path() / "raw/jungfrau" / "jungfrau_single_master_0.json"; REQUIRE(std::filesystem::exists(fpath_raw)); - auto fpath_npy = test_data_path() / "raw/jungfrau" / "jungfrau_single_0.npy"; + auto fpath_npy = + test_data_path() / "raw/jungfrau" / "jungfrau_single_0.npy"; REQUIRE(std::filesystem::exists(fpath_npy)); File raw(fpath_raw, "r"); @@ -121,17 +130,23 @@ TEST_CASE("Compare reading from a numpy file with a raw file", "[.files]") { } TEST_CASE("Read multipart files", "[.integration]") { - auto fpath = test_data_path() / "jungfrau" / "jungfrau_double_master_0.json"; + auto fpath = + test_data_path() / "jungfrau" / "jungfrau_double_master_0.json"; REQUIRE(std::filesystem::exists(fpath)); File f(fpath, "r"); // we know this 
file has 10 frames check read_multiport.py for the values - std::vector pixel_0_0 = {2099, 2121, 2108, 2084, 2084, 2118, 2066, 2108, 2112, 2116}; - std::vector pixel_0_1 = {2842, 2796, 2865, 2798, 2805, 2817, 2852, 2789, 2792, 2833}; - std::vector pixel_255_1023 = {2149, 2037, 2115, 2102, 2118, 2090, 2036, 2071, 2073, 2142}; - std::vector pixel_511_1023 = {3231, 3169, 3167, 3162, 3168, 3160, 3171, 3171, 3169, 3171}; - std::vector pixel_1_0 = {2748, 2614, 2665, 2629, 2618, 2630, 2631, 2634, 2577, 2598}; + std::vector pixel_0_0 = {2099, 2121, 2108, 2084, 2084, + 2118, 2066, 2108, 2112, 2116}; + std::vector pixel_0_1 = {2842, 2796, 2865, 2798, 2805, + 2817, 2852, 2789, 2792, 2833}; + std::vector pixel_255_1023 = {2149, 2037, 2115, 2102, 2118, + 2090, 2036, 2071, 2073, 2142}; + std::vector pixel_511_1023 = {3231, 3169, 3167, 3162, 3168, + 3160, 3171, 3171, 3169, 3171}; + std::vector pixel_1_0 = {2748, 2614, 2665, 2629, 2618, + 2630, 2631, 2634, 2577, 2598}; for (size_t i = 0; i < 10; i++) { auto frame = f.read_frame(); @@ -146,11 +161,9 @@ TEST_CASE("Read multipart files", "[.integration]") { } TEST_CASE("Read file with unordered frames", "[.integration]") { - //TODO! Better explanation and error message + // TODO! Better explanation and error message auto fpath = test_data_path() / "mythen" / "scan242_master_3.raw"; REQUIRE(std::filesystem::exists(fpath)); File f(fpath); REQUIRE_THROWS((f.read_frame())); } - - diff --git a/src/RawMasterFile.cpp b/src/RawMasterFile.cpp index 8a2db87..508b396 100644 --- a/src/RawMasterFile.cpp +++ b/src/RawMasterFile.cpp @@ -1,5 +1,5 @@ #include "aare/RawMasterFile.hpp" -#include +#include namespace aare { RawFileNameComponents::RawFileNameComponents( @@ -37,18 +37,15 @@ std::filesystem::path RawFileNameComponents::master_fname() const { } std::filesystem::path RawFileNameComponents::data_fname(size_t mod_id, - size_t file_id - ) const { - - - + size_t file_id) const { + std::string fmt = "{}_d{}_f{}_{}.raw"; - //Before version X we used to name the data files f000000000000 + // Before version X we used to name the data files f000000000000 if (m_old_scheme) { fmt = "{}_d{}_f{:012}_{}.raw"; } - return m_base_path / fmt::format(fmt, m_base_name, mod_id, - file_id, m_file_index); + return m_base_path / + fmt::format(fmt, m_base_name, mod_id, file_id, m_file_index); } void RawFileNameComponents::set_old_scheme(bool old_scheme) { @@ -65,19 +62,19 @@ const std::string &RawFileNameComponents::ext() const { return m_ext; } int RawFileNameComponents::file_index() const { return m_file_index; } // "[enabled\ndac dac 4\nstart 500\nstop 2200\nstep 5\nsettleTime 100us\n]" -ScanParameters::ScanParameters(const std::string& par){ - std::istringstream iss(par.substr(1, par.size()-2)); +ScanParameters::ScanParameters(const std::string &par) { + std::istringstream iss(par.substr(1, par.size() - 2)); std::string line; - while(std::getline(iss, line)){ - if(line == "enabled"){ + while (std::getline(iss, line)) { + if (line == "enabled") { m_enabled = true; - }else if(line.find("dac") != std::string::npos){ + } else if (line.find("dac") != std::string::npos) { m_dac = line.substr(4); - }else if(line.find("start") != std::string::npos){ + } else if (line.find("start") != std::string::npos) { m_start = std::stoi(line.substr(6)); - }else if(line.find("stop") != std::string::npos){ + } else if (line.find("stop") != std::string::npos) { m_stop = std::stoi(line.substr(5)); - }else if(line.find("step") != std::string::npos){ + } else if (line.find("step") != std::string::npos) { m_step = 
std::stoi(line.substr(5)); } } @@ -85,14 +82,11 @@ ScanParameters::ScanParameters(const std::string& par){ int ScanParameters::start() const { return m_start; } int ScanParameters::stop() const { return m_stop; } -void ScanParameters::increment_stop(){ - m_stop += 1; -} +void ScanParameters::increment_stop() { m_stop += 1; } int ScanParameters::step() const { return m_step; } const std::string &ScanParameters::dac() const { return m_dac; } bool ScanParameters::enabled() const { return m_enabled; } - RawMasterFile::RawMasterFile(const std::filesystem::path &fpath) : m_fnc(fpath) { if (!std::filesystem::exists(fpath)) { @@ -163,10 +157,8 @@ ScanParameters RawMasterFile::scan_parameters() const { return m_scan_parameters; } - std::optional RawMasterFile::roi() const { return m_roi; } - void RawMasterFile::parse_json(const std::filesystem::path &fpath) { std::ifstream ifs(fpath); json j; @@ -205,17 +197,16 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) { // keep the optional empty } - // ---------------------------------------------------------------- // Special treatment of analog flag because of Moench03 - try{ + try { m_analog_flag = j.at("Analog Flag"); - }catch (const json::out_of_range &e) { + } catch (const json::out_of_range &e) { // if it doesn't work still set it to one // to try to decode analog samples (Old Moench03) m_analog_flag = 1; } - try { + try { if (m_analog_flag) { m_analog_samples = j.at("Analog Samples"); } @@ -248,27 +239,27 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) { // keep the optional empty } - try{ + try { m_transceiver_flag = j.at("Transceiver Flag"); - if(m_transceiver_flag){ + if (m_transceiver_flag) { m_transceiver_samples = j.at("Transceiver Samples"); } - }catch (const json::out_of_range &e) { + } catch (const json::out_of_range &e) { // keep the optional empty } - try{ + try { std::string scan_parameters = j.at("Scan Parameters"); m_scan_parameters = ScanParameters(scan_parameters); - if(v<7.21){ - m_scan_parameters.increment_stop(); //adjust for endpoint being included - } - }catch (const json::out_of_range &e) { + if (v < 7.21) { + m_scan_parameters + .increment_stop(); // adjust for endpoint being included + } + } catch (const json::out_of_range &e) { // not a scan } - - try{ + try { ROI tmp_roi; auto obj = j.at("Receiver Roi"); tmp_roi.xmin = obj.at("xmin"); @@ -276,37 +267,32 @@ void RawMasterFile::parse_json(const std::filesystem::path &fpath) { tmp_roi.ymin = obj.at("ymin"); tmp_roi.ymax = obj.at("ymax"); - //if any of the values are set update the roi + // if any of the values are set update the roi if (tmp_roi.xmin != 4294967295 || tmp_roi.xmax != 4294967295 || tmp_roi.ymin != 4294967295 || tmp_roi.ymax != 4294967295) { - - if(v<7.21){ + + if (v < 7.21) { tmp_roi.xmax++; tmp_roi.ymax++; } - + m_roi = tmp_roi; } - - }catch (const json::out_of_range &e) { + } catch (const json::out_of_range &e) { // leave the optional empty } - //if we have an roi we need to update the geometry for the subfiles - if (m_roi){ - + // if we have an roi we need to update the geometry for the subfiles + if (m_roi) { } - - - - // Update detector type for Moench - // TODO! How does this work with old .raw master files? - #ifdef AARE_VERBOSE +// Update detector type for Moench +// TODO! How does this work with old .raw master files? 
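+    // As far as can be read from the condition that follows: the block below
+    // narrows the generic Moench type using fields already parsed from the
+    // master file. In verbose builds the inputs are printed first, and an
+    // entry of type Moench with no "Analog Samples" value and
+    // m_pixels_y == 400 is re-tagged as DetectorType::Moench03.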
+#ifdef AARE_VERBOSE fmt::print("Detecting Moench03: m_pixels_y: {}, m_analog_samples: {}\n", m_pixels_y, m_analog_samples.value_or(0)); - #endif +#endif if (m_type == DetectorType::Moench && !m_analog_samples && m_pixels_y == 400) { m_type = DetectorType::Moench03; @@ -332,19 +318,19 @@ void RawMasterFile::parse_raw(const std::filesystem::path &fpath) { if (key == "Version") { m_version = value; - //TODO!: How old versions can we handle? + // TODO!: How old versions can we handle? auto v = std::stod(value); - //TODO! figure out exactly when we did the change - //This enables padding of f to 12 digits - if (v<4.0) + // TODO! figure out exactly when we did the change + // This enables padding of f to 12 digits + if (v < 4.0) m_fnc.set_old_scheme(true); } else if (key == "TimeStamp") { } else if (key == "Detector Type") { m_type = StringTo(value); - if(m_type==DetectorType::Moench){ + if (m_type == DetectorType::Moench) { m_type = DetectorType::Moench03_old; } } else if (key == "Timing Mode") { @@ -381,10 +367,10 @@ void RawMasterFile::parse_raw(const std::filesystem::path &fpath) { pos = value.find(','); m_pixels_x = std::stoi(value.substr(1, pos)); m_pixels_y = std::stoi(value.substr(pos + 1)); - }else if(key == "row"){ + } else if (key == "row") { pos = value.find('p'); m_pixels_y = std::stoi(value.substr(0, pos)); - }else if(key == "col"){ + } else if (key == "col") { pos = value.find('p'); m_pixels_x = std::stoi(value.substr(0, pos)); } else if (key == "Total Frames") { @@ -395,8 +381,8 @@ void RawMasterFile::parse_raw(const std::filesystem::path &fpath) { m_quad = std::stoi(value); } else if (key == "Max Frames Per File") { m_max_frames_per_file = std::stoi(value); - }else if(key == "Max. Frames Per File"){ - //Version 3.0 way of writing it + } else if (key == "Max. Frames Per File") { + // Version 3.0 way of writing it m_max_frames_per_file = std::stoi(value); } else if (key == "Geometry") { pos = value.find(','); @@ -410,15 +396,14 @@ void RawMasterFile::parse_raw(const std::filesystem::path &fpath) { m_type = DetectorType::Moench03_old; } - - //TODO! Look for d0, d1...dn and update geometry - if(m_geometry.col == 0 && m_geometry.row == 0){ - m_geometry = {1,1}; + // TODO! Look for d0, d1...dn and update geometry + if (m_geometry.col == 0 && m_geometry.row == 0) { + m_geometry = {1, 1}; fmt::print("Warning: No geometry found in master file. Assuming 1x1\n"); } - //TODO! Read files and find actual frames - if(m_frames_in_file==0) + // TODO! 
Read files and find actual frames + if (m_frames_in_file == 0) m_frames_in_file = m_total_frames_expected; } } // namespace aare diff --git a/src/RawMasterFile.test.cpp b/src/RawMasterFile.test.cpp index 560217f..0e75ec4 100644 --- a/src/RawMasterFile.test.cpp +++ b/src/RawMasterFile.test.cpp @@ -1,12 +1,11 @@ #include "aare/RawMasterFile.hpp" -#include #include "test_config.hpp" +#include using namespace aare; - -TEST_CASE("Parse a master file fname"){ +TEST_CASE("Parse a master file fname") { RawFileNameComponents m("test_master_1.json"); REQUIRE(m.base_name() == "test"); REQUIRE(m.ext() == ".json"); @@ -14,7 +13,7 @@ TEST_CASE("Parse a master file fname"){ REQUIRE(m.base_path() == ""); } -TEST_CASE("Extraction of base path works"){ +TEST_CASE("Extraction of base path works") { RawFileNameComponents m("some/path/test_master_73.json"); REQUIRE(m.base_name() == "test"); REQUIRE(m.ext() == ".json"); @@ -22,7 +21,7 @@ TEST_CASE("Extraction of base path works"){ REQUIRE(m.base_path() == "some/path"); } -TEST_CASE("Construction of master file name and data files"){ +TEST_CASE("Construction of master file name and data files") { RawFileNameComponents m("test_master_1.json"); REQUIRE(m.master_fname() == "test_master_1.json"); REQUIRE(m.data_fname(0, 0) == "test_d0_f0_1.raw"); @@ -31,7 +30,7 @@ TEST_CASE("Construction of master file name and data files"){ REQUIRE(m.data_fname(1, 1) == "test_d1_f1_1.raw"); } -TEST_CASE("Construction of master file name and data files using old scheme"){ +TEST_CASE("Construction of master file name and data files using old scheme") { RawFileNameComponents m("test_master_1.raw"); m.set_old_scheme(true); REQUIRE(m.master_fname() == "test_master_1.raw"); @@ -41,16 +40,15 @@ TEST_CASE("Construction of master file name and data files using old scheme"){ REQUIRE(m.data_fname(1, 1) == "test_d1_f000000000001_1.raw"); } -TEST_CASE("Master file name does not fit pattern"){ +TEST_CASE("Master file name does not fit pattern") { REQUIRE_THROWS(RawFileNameComponents("somefile.json")); REQUIRE_THROWS(RawFileNameComponents("another_test_d0_f0_1.raw")); REQUIRE_THROWS(RawFileNameComponents("test_master_1.txt")); } - - -TEST_CASE("Parse scan parameters"){ - ScanParameters s("[enabled\ndac dac 4\nstart 500\nstop 2200\nstep 5\nsettleTime 100us\n]"); +TEST_CASE("Parse scan parameters") { + ScanParameters s("[enabled\ndac dac 4\nstart 500\nstop 2200\nstep " + "5\nsettleTime 100us\n]"); REQUIRE(s.enabled()); REQUIRE(s.dac() == "dac 4"); REQUIRE(s.start() == 500); @@ -58,7 +56,7 @@ TEST_CASE("Parse scan parameters"){ REQUIRE(s.step() == 5); } -TEST_CASE("A disabled scan"){ +TEST_CASE("A disabled scan") { ScanParameters s("[disabled]"); REQUIRE_FALSE(s.enabled()); REQUIRE(s.dac() == ""); @@ -67,9 +65,9 @@ TEST_CASE("A disabled scan"){ REQUIRE(s.step() == 0); } - -TEST_CASE("Parse a master file in .json format", "[.integration]"){ - auto fpath = test_data_path() / "jungfrau" / "jungfrau_single_master_0.json"; +TEST_CASE("Parse a master file in .json format", "[.integration]") { + auto fpath = + test_data_path() / "jungfrau" / "jungfrau_single_master_0.json"; REQUIRE(std::filesystem::exists(fpath)); RawMasterFile f(fpath); @@ -80,7 +78,7 @@ TEST_CASE("Parse a master file in .json format", "[.integration]"){ REQUIRE(f.detector_type() == DetectorType::Jungfrau); // "Timing Mode": "auto", REQUIRE(f.timing_mode() == TimingMode::Auto); - + // "Geometry": { // "x": 1, // "y": 1 @@ -100,10 +98,9 @@ TEST_CASE("Parse a master file in .json format", "[.integration]"){ // "Max Frames Per File": 3, 
REQUIRE(f.max_frames_per_file() == 3); - //Jungfrau doesn't write but it is 16 + // Jungfrau doesn't write but it is 16 REQUIRE(f.bitdepth() == 16); - // "Frame Discard Policy": "nodiscard", // "Frame Padding": 1, @@ -125,33 +122,35 @@ TEST_CASE("Parse a master file in .json format", "[.integration]"){ // "Frames in File": 10, REQUIRE(f.frames_in_file() == 10); - //TODO! Should we parse this? - // "Frame Header Format": { - // "Frame Number": "8 bytes", - // "SubFrame Number/ExpLength": "4 bytes", - // "Packet Number": "4 bytes", - // "Bunch ID": "8 bytes", - // "Timestamp": "8 bytes", - // "Module Id": "2 bytes", - // "Row": "2 bytes", - // "Column": "2 bytes", - // "Reserved": "2 bytes", - // "Debug": "4 bytes", - // "Round Robin Number": "2 bytes", - // "Detector Type": "1 byte", - // "Header Version": "1 byte", - // "Packets Caught Mask": "64 bytes" - // } - // } + // TODO! Should we parse this? + // "Frame Header Format": { + // "Frame Number": "8 bytes", + // "SubFrame Number/ExpLength": "4 bytes", + // "Packet Number": "4 bytes", + // "Bunch ID": "8 bytes", + // "Timestamp": "8 bytes", + // "Module Id": "2 bytes", + // "Row": "2 bytes", + // "Column": "2 bytes", + // "Reserved": "2 bytes", + // "Debug": "4 bytes", + // "Round Robin Number": "2 bytes", + // "Detector Type": "1 byte", + // "Header Version": "1 byte", + // "Packets Caught Mask": "64 bytes" + // } + // } REQUIRE_FALSE(f.analog_samples()); - REQUIRE_FALSE(f.digital_samples()); - + REQUIRE_FALSE(f.digital_samples()); } -TEST_CASE("Parse a master file in .raw format", "[.integration]"){ - - auto fpath = test_data_path() / "moench/moench04_noise_200V_sto_both_100us_no_light_thresh_900_master_0.raw"; +TEST_CASE("Parse a master file in .raw format", "[.integration]") { + + auto fpath = + test_data_path() / + "moench/" + "moench04_noise_200V_sto_both_100us_no_light_thresh_900_master_0.raw"; REQUIRE(std::filesystem::exists(fpath)); RawMasterFile f(fpath); @@ -209,80 +208,74 @@ TEST_CASE("Parse a master file in .raw format", "[.integration]"){ // Detector Type : 1 byte // Header Version : 1 byte // Packets Caught Mask : 64 bytes - - } - -TEST_CASE("Read eiger master file", "[.integration]"){ -auto fpath = test_data_path() / "eiger" / "eiger_500k_32bit_master_0.json"; +TEST_CASE("Read eiger master file", "[.integration]") { + auto fpath = test_data_path() / "eiger" / "eiger_500k_32bit_master_0.json"; REQUIRE(std::filesystem::exists(fpath)); RawMasterFile f(fpath); - -// { -// "Version": 7.2, -REQUIRE(f.version() == "7.2"); -// "Timestamp": "Tue Mar 26 17:24:34 2024", -// "Detector Type": "Eiger", -REQUIRE(f.detector_type() == DetectorType::Eiger); -// "Timing Mode": "auto", -REQUIRE(f.timing_mode() == TimingMode::Auto); -// "Geometry": { -// "x": 2, -// "y": 2 -// }, -// "Image Size in bytes": 524288, -REQUIRE(f.image_size_in_bytes() == 524288); -// "Pixels": { -// "x": 512, -REQUIRE(f.pixels_x() == 512); -// "y": 256 -REQUIRE(f.pixels_y() == 256); -// }, -// "Max Frames Per File": 10000, -REQUIRE(f.max_frames_per_file() == 10000); -// "Frame Discard Policy": "nodiscard", -REQUIRE(f.frame_discard_policy() == FrameDiscardPolicy::NoDiscard); -// "Frame Padding": 1, -REQUIRE(f.frame_padding() == 1); - -// "Scan Parameters": "[disabled]", -// "Total Frames": 3, -// "Receiver Roi": { -// "xmin": 4294967295, -// "xmax": 4294967295, -// "ymin": 4294967295, -// "ymax": 4294967295 -// }, -// "Dynamic Range": 32, -// "Ten Giga": 0, -// "Exptime": "5s", -// "Period": "1s", -// "Threshold Energy": -1, -// "Sub Exptime": "2.62144ms", -// "Sub 
Period": "2.62144ms", -// "Quad": 0, -// "Number of rows": 256, -// "Rate Corrections": "[0, 0]", -// "Frames in File": 3, -// "Frame Header Format": { -// "Frame Number": "8 bytes", -// "SubFrame Number/ExpLength": "4 bytes", -// "Packet Number": "4 bytes", -// "Bunch ID": "8 bytes", -// "Timestamp": "8 bytes", -// "Module Id": "2 bytes", -// "Row": "2 bytes", -// "Column": "2 bytes", -// "Reserved": "2 bytes", -// "Debug": "4 bytes", -// "Round Robin Number": "2 bytes", -// "Detector Type": "1 byte", -// "Header Version": "1 byte", -// "Packets Caught Mask": "64 bytes" -// } -// } - + // { + // "Version": 7.2, + REQUIRE(f.version() == "7.2"); + // "Timestamp": "Tue Mar 26 17:24:34 2024", + // "Detector Type": "Eiger", + REQUIRE(f.detector_type() == DetectorType::Eiger); + // "Timing Mode": "auto", + REQUIRE(f.timing_mode() == TimingMode::Auto); + // "Geometry": { + // "x": 2, + // "y": 2 + // }, + // "Image Size in bytes": 524288, + REQUIRE(f.image_size_in_bytes() == 524288); + // "Pixels": { + // "x": 512, + REQUIRE(f.pixels_x() == 512); + // "y": 256 + REQUIRE(f.pixels_y() == 256); + // }, + // "Max Frames Per File": 10000, + REQUIRE(f.max_frames_per_file() == 10000); + // "Frame Discard Policy": "nodiscard", + REQUIRE(f.frame_discard_policy() == FrameDiscardPolicy::NoDiscard); + // "Frame Padding": 1, + REQUIRE(f.frame_padding() == 1); + // "Scan Parameters": "[disabled]", + // "Total Frames": 3, + // "Receiver Roi": { + // "xmin": 4294967295, + // "xmax": 4294967295, + // "ymin": 4294967295, + // "ymax": 4294967295 + // }, + // "Dynamic Range": 32, + // "Ten Giga": 0, + // "Exptime": "5s", + // "Period": "1s", + // "Threshold Energy": -1, + // "Sub Exptime": "2.62144ms", + // "Sub Period": "2.62144ms", + // "Quad": 0, + // "Number of rows": 256, + // "Rate Corrections": "[0, 0]", + // "Frames in File": 3, + // "Frame Header Format": { + // "Frame Number": "8 bytes", + // "SubFrame Number/ExpLength": "4 bytes", + // "Packet Number": "4 bytes", + // "Bunch ID": "8 bytes", + // "Timestamp": "8 bytes", + // "Module Id": "2 bytes", + // "Row": "2 bytes", + // "Column": "2 bytes", + // "Reserved": "2 bytes", + // "Debug": "4 bytes", + // "Round Robin Number": "2 bytes", + // "Detector Type": "1 byte", + // "Header Version": "1 byte", + // "Packets Caught Mask": "64 bytes" + // } + // } } \ No newline at end of file diff --git a/src/RawSubFile.cpp b/src/RawSubFile.cpp index a8d29ce..3ed2c6f 100644 --- a/src/RawSubFile.cpp +++ b/src/RawSubFile.cpp @@ -1,36 +1,30 @@ #include "aare/RawSubFile.hpp" #include "aare/PixelMap.hpp" #include "aare/algorithm.hpp" -#include "aare/utils/ifstream_helpers.hpp" #include "aare/logger.hpp" - +#include "aare/utils/ifstream_helpers.hpp" #include // memcpy #include #include #include - - - namespace aare { RawSubFile::RawSubFile(const std::filesystem::path &fname, DetectorType detector, size_t rows, size_t cols, size_t bitdepth, uint32_t pos_row, uint32_t pos_col) - : m_detector_type(detector), m_bitdepth(bitdepth), - m_rows(rows), m_cols(cols), - m_bytes_per_frame((m_bitdepth / 8) * m_rows * m_cols), m_pos_row(pos_row), - m_pos_col(pos_col) { + : m_detector_type(detector), m_bitdepth(bitdepth), m_rows(rows), + m_cols(cols), m_bytes_per_frame((m_bitdepth / 8) * m_rows * m_cols), + m_pos_row(pos_row), m_pos_col(pos_col) { - LOG(logDEBUG) << "RawSubFile::RawSubFile()"; + LOG(logDEBUG) << "RawSubFile::RawSubFile()"; if (m_detector_type == DetectorType::Moench03_old) { m_pixel_map = GenerateMoench03PixelMap(); } else if (m_detector_type == DetectorType::Eiger && 
m_pos_row % 2 == 0) { m_pixel_map = GenerateEigerFlipRowsPixelMap(); } - parse_fname(fname); scan_files(); open_file(m_current_file_index); // open the first file @@ -51,7 +45,8 @@ void RawSubFile::seek(size_t frame_index) { auto frame_offset = (file_index) ? frame_index - m_last_frame_in_file[file_index - 1] : frame_index; - auto byte_offset = frame_offset * (m_bytes_per_frame + sizeof(DetectorHeader)); + auto byte_offset = + frame_offset * (m_bytes_per_frame + sizeof(DetectorHeader)); m_file.seekg(byte_offset); } @@ -69,7 +64,7 @@ void RawSubFile::read_into(std::byte *image_buf, DetectorHeader *header) { m_file.seekg(sizeof(DetectorHeader), std::ios::cur); } - if (m_file.fail()){ + if (m_file.fail()) { throw std::runtime_error(LOCATION + ifstream_error_msg(m_file)); } @@ -78,14 +73,15 @@ void RawSubFile::read_into(std::byte *image_buf, DetectorHeader *header) { // read into a temporary buffer and then copy the data to the buffer // in the correct order // TODO! add 4 bit support - if(m_bitdepth == 8){ + if (m_bitdepth == 8) { read_with_map(image_buf); - }else if (m_bitdepth == 16) { + } else if (m_bitdepth == 16) { read_with_map(image_buf); } else if (m_bitdepth == 32) { read_with_map(image_buf); - }else{ - throw std::runtime_error("Unsupported bitdepth for read with pixel map"); + } else { + throw std::runtime_error( + "Unsupported bitdepth for read with pixel map"); } } else { @@ -93,11 +89,11 @@ void RawSubFile::read_into(std::byte *image_buf, DetectorHeader *header) { m_file.read(reinterpret_cast(image_buf), bytes_per_frame()); } - if (m_file.fail()){ + if (m_file.fail()) { throw std::runtime_error(LOCATION + ifstream_error_msg(m_file)); } - ++ m_current_frame_index; + ++m_current_frame_index; if (m_current_frame_index >= m_last_frame_in_file[m_current_file_index] && (m_current_frame_index < m_total_frames)) { ++m_current_file_index; @@ -105,7 +101,8 @@ void RawSubFile::read_into(std::byte *image_buf, DetectorHeader *header) { } } -void RawSubFile::read_into(std::byte *image_buf, size_t n_frames, DetectorHeader *header) { +void RawSubFile::read_into(std::byte *image_buf, size_t n_frames, + DetectorHeader *header) { for (size_t i = 0; i < n_frames; i++) { read_into(image_buf, header); image_buf += bytes_per_frame(); @@ -115,10 +112,7 @@ void RawSubFile::read_into(std::byte *image_buf, size_t n_frames, DetectorHeader } } - - -template -void RawSubFile::read_with_map(std::byte *image_buf) { +template void RawSubFile::read_with_map(std::byte *image_buf) { auto part_buffer = new std::byte[bytes_per_frame()]; m_file.read(reinterpret_cast(part_buffer), bytes_per_frame()); auto *data = reinterpret_cast(image_buf); @@ -157,14 +151,17 @@ void RawSubFile::parse_fname(const std::filesystem::path &fname) { std::smatch match; if (std::regex_match(m_base_name, match, pattern)) { - m_offset = std::stoi(match[4].str()); // find the first file index in case of a truncated series - m_base_name = match[1].str() + match[2].str() + match[3].str() + "{}" + match[5].str(); + m_offset = std::stoi(match[4].str()); // find the first file index in + // case of a truncated series + m_base_name = match[1].str() + match[2].str() + match[3].str() + "{}" + + match[5].str(); LOG(logDEBUG) << "Base name: " << m_base_name; LOG(logDEBUG) << "Offset: " << m_offset; LOG(logDEBUG) << "Path: " << m_path.string(); } else { throw std::runtime_error( - LOCATION + fmt::format("Could not parse file name {}", fname.string())); + LOCATION + + fmt::format("Could not parse file name {}", fname.string())); } } @@ -175,12 +172,13 @@ 
std::filesystem::path RawSubFile::fpath(size_t file_index) const { void RawSubFile::open_file(size_t file_index) { m_file.close(); - auto fname = fpath(file_index+m_offset); + auto fname = fpath(file_index + m_offset); LOG(logDEBUG) << "RawSubFile::open_file(): " << fname.string(); m_file.open(fname, std::ios::binary); if (!m_file.is_open()) { throw std::runtime_error( - LOCATION + fmt::format("Could not open file {}", fpath(file_index).string())); + LOCATION + + fmt::format("Could not open file {}", fpath(file_index).string())); } m_current_file_index = file_index; } @@ -190,20 +188,21 @@ void RawSubFile::scan_files() { // find how many files we have and the number of frames in each file m_last_frame_in_file.clear(); size_t file_index = m_offset; - + while (std::filesystem::exists(fpath(file_index))) { auto n_frames = std::filesystem::file_size(fpath(file_index)) / (m_bytes_per_frame + sizeof(DetectorHeader)); m_last_frame_in_file.push_back(n_frames); - LOG(logDEBUG) << "Found: " << n_frames << " frames in file: " << fpath(file_index).string(); + LOG(logDEBUG) << "Found: " << n_frames + << " frames in file: " << fpath(file_index).string(); ++file_index; } // find where we need to open the next file and total number of frames m_last_frame_in_file = cumsum(m_last_frame_in_file); - if(m_last_frame_in_file.empty()){ + if (m_last_frame_in_file.empty()) { m_total_frames = 0; - }else{ + } else { m_total_frames = m_last_frame_in_file.back(); } } diff --git a/src/RawSubFile.test.cpp b/src/RawSubFile.test.cpp index 89cf858..37d071b 100644 --- a/src/RawSubFile.test.cpp +++ b/src/RawSubFile.test.cpp @@ -1,13 +1,14 @@ #include "aare/RawSubFile.hpp" #include "aare/File.hpp" #include "aare/NDArray.hpp" -#include #include "test_config.hpp" +#include using namespace aare; -TEST_CASE("Read frames directly from a RawSubFile", "[.files]"){ - auto fpath_raw = test_data_path() / "raw/jungfrau" / "jungfrau_single_d0_f0_0.raw"; +TEST_CASE("Read frames directly from a RawSubFile", "[.files]") { + auto fpath_raw = + test_data_path() / "raw/jungfrau" / "jungfrau_single_d0_f0_0.raw"; REQUIRE(std::filesystem::exists(fpath_raw)); RawSubFile f(fpath_raw, DetectorType::Jungfrau, 512, 1024, 16); @@ -17,19 +18,19 @@ TEST_CASE("Read frames directly from a RawSubFile", "[.files]"){ REQUIRE(f.bytes_per_frame() == 512 * 1024 * 2); REQUIRE(f.bytes_per_pixel() == 2); - - auto fpath_npy = test_data_path() / "raw/jungfrau" / "jungfrau_single_0.npy"; + auto fpath_npy = + test_data_path() / "raw/jungfrau" / "jungfrau_single_0.npy"; REQUIRE(std::filesystem::exists(fpath_npy)); - //Numpy file with the same data to use as reference + // Numpy file with the same data to use as reference File npy(fpath_npy, "r"); CHECK(f.frames_in_file() == 10); CHECK(npy.total_frames() == 10); - DetectorHeader header{}; - NDArray image({static_cast(f.rows()), static_cast(f.cols())}); + NDArray image( + {static_cast(f.rows()), static_cast(f.cols())}); for (size_t i = 0; i < 10; ++i) { CHECK(f.tell() == i); f.read_into(image.buffer(), &header); @@ -38,38 +39,40 @@ TEST_CASE("Read frames directly from a RawSubFile", "[.files]"){ } } -TEST_CASE("Read frames directly from a RawSubFile starting at the second file", "[.files]"){ +TEST_CASE("Read frames directly from a RawSubFile starting at the second file", + "[.files]") { // we know this file has 10 frames with frame numbers 1 to 10 // f0 1,2,3 // f1 4,5,6 <-- starting here // f2 7,8,9 // f3 10 - - auto fpath_raw = test_data_path() / "raw/jungfrau" / "jungfrau_single_d0_f1_0.raw"; + + auto fpath_raw = + 
test_data_path() / "raw/jungfrau" / "jungfrau_single_d0_f1_0.raw"; REQUIRE(std::filesystem::exists(fpath_raw)); RawSubFile f(fpath_raw, DetectorType::Jungfrau, 512, 1024, 16); - - auto fpath_npy = test_data_path() / "raw/jungfrau" / "jungfrau_single_0.npy"; + auto fpath_npy = + test_data_path() / "raw/jungfrau" / "jungfrau_single_0.npy"; REQUIRE(std::filesystem::exists(fpath_npy)); - //Numpy file with the same data to use as reference + // Numpy file with the same data to use as reference File npy(fpath_npy, "r"); npy.seek(3); CHECK(f.frames_in_file() == 7); CHECK(npy.total_frames() == 10); - DetectorHeader header{}; - NDArray image({static_cast(f.rows()), static_cast(f.cols())}); + NDArray image( + {static_cast(f.rows()), static_cast(f.cols())}); for (size_t i = 0; i < 7; ++i) { CHECK(f.tell() == i); f.read_into(image.buffer(), &header); - // frame numbers start at 1 frame index at 0 + // frame numbers start at 1 frame index at 0 // adding 3 + 1 to verify the frame number - CHECK(header.frameNumber == i + 4); + CHECK(header.frameNumber == i + 4); auto npy_frame = npy.read_frame(); CHECK((image.view() == npy_frame.view())); } diff --git a/src/algorithm.test.cpp b/src/algorithm.test.cpp index bf49c52..3706aba 100644 --- a/src/algorithm.test.cpp +++ b/src/algorithm.test.cpp @@ -161,12 +161,10 @@ TEST_CASE("cumsum works with negative numbers", "[algorithm]") { REQUIRE(result[4] == -10); } - TEST_CASE("cumsum on an empty vector", "[algorithm]") { std::vector vec = {}; auto result = aare::cumsum(vec); REQUIRE(result.size() == 0); - } TEST_CASE("All equal on an empty vector is false", "[algorithm]") { @@ -184,7 +182,8 @@ TEST_CASE("All equal on a vector with 2 elements is true", "[algorithm]") { REQUIRE(aare::all_equal(vec) == true); } -TEST_CASE("All equal on a vector with two different elements is false", "[algorithm]") { +TEST_CASE("All equal on a vector with two different elements is false", + "[algorithm]") { std::vector vec = {1, 2}; REQUIRE(aare::all_equal(vec) == false); } diff --git a/src/decode.cpp b/src/decode.cpp index 436ad7b..0c78b70 100644 --- a/src/decode.cpp +++ b/src/decode.cpp @@ -2,9 +2,9 @@ #include namespace aare { -uint16_t adc_sar_05_decode64to16(uint64_t input){ +uint16_t adc_sar_05_decode64to16(uint64_t input) { - //we want bits 29,19,28,18,31,21,27,20,24,23,25,22 and then pad to 16 + // we want bits 29,19,28,18,31,21,27,20,24,23,25,22 and then pad to 16 uint16_t output = 0; output |= ((input >> 22) & 1) << 11; output |= ((input >> 25) & 1) << 10; @@ -21,19 +21,21 @@ uint16_t adc_sar_05_decode64to16(uint64_t input){ return output; } -void adc_sar_05_decode64to16(NDView input, NDView output){ - if(input.shape() != output.shape()){ - throw std::invalid_argument(LOCATION + " input and output shapes must match"); +void adc_sar_05_decode64to16(NDView input, + NDView output) { + if (input.shape() != output.shape()) { + throw std::invalid_argument(LOCATION + + " input and output shapes must match"); } - for(ssize_t i = 0; i < input.shape(0); i++){ - for(ssize_t j = 0; j < input.shape(1); j++){ - output(i,j) = adc_sar_05_decode64to16(input(i,j)); + for (ssize_t i = 0; i < input.shape(0); i++) { + for (ssize_t j = 0; j < input.shape(1); j++) { + output(i, j) = adc_sar_05_decode64to16(input(i, j)); } } } -uint16_t adc_sar_04_decode64to16(uint64_t input){ +uint16_t adc_sar_04_decode64to16(uint64_t input) { // bit_map = array([15,17,19,21,23,4,6,8,10,12,14,16] LSB->MSB uint16_t output = 0; @@ -52,20 +54,23 @@ uint16_t adc_sar_04_decode64to16(uint64_t input){ return output; } -void 
adc_sar_04_decode64to16(NDView input, NDView output){ - if(input.shape() != output.shape()){ - throw std::invalid_argument(LOCATION + " input and output shapes must match"); +void adc_sar_04_decode64to16(NDView input, + NDView output) { + if (input.shape() != output.shape()) { + throw std::invalid_argument(LOCATION + + " input and output shapes must match"); } - for(ssize_t i = 0; i < input.shape(0); i++){ - for(ssize_t j = 0; j < input.shape(1); j++){ - output(i,j) = adc_sar_04_decode64to16(input(i,j)); + for (ssize_t i = 0; i < input.shape(0); i++) { + for (ssize_t j = 0; j < input.shape(1); j++) { + output(i, j) = adc_sar_04_decode64to16(input(i, j)); } } } double apply_custom_weights(uint16_t input, const NDView weights) { - if(weights.size() > 16){ - throw std::invalid_argument("weights size must be less than or equal to 16"); + if (weights.size() > 16) { + throw std::invalid_argument( + "weights size must be less than or equal to 16"); } double result = 0.0; @@ -73,30 +78,30 @@ double apply_custom_weights(uint16_t input, const NDView weights) { result += ((input >> i) & 1) * std::pow(weights[i], i); } return result; - } -void apply_custom_weights(NDView input, NDView output, const NDView weights) { - if(input.shape() != output.shape()){ - throw std::invalid_argument(LOCATION + " input and output shapes must match"); +void apply_custom_weights(NDView input, NDView output, + const NDView weights) { + if (input.shape() != output.shape()) { + throw std::invalid_argument(LOCATION + + " input and output shapes must match"); } - //Calculate weights to avoid repeatedly calling std::pow + // Calculate weights to avoid repeatedly calling std::pow std::vector weights_powers(weights.size()); for (ssize_t i = 0; i < weights.size(); ++i) { weights_powers[i] = std::pow(weights[i], i); } // Apply custom weights to each element in the input array - for (ssize_t i = 0; i < input.shape(0); i++) { + for (ssize_t i = 0; i < input.shape(0); i++) { double result = 0.0; - for (size_t bit_index = 0; bit_index < weights_powers.size(); ++bit_index) { + for (size_t bit_index = 0; bit_index < weights_powers.size(); + ++bit_index) { result += ((input(i) >> bit_index) & 1) * weights_powers[bit_index]; } output(i) = result; } } - - } // namespace aare diff --git a/src/decode.test.cpp b/src/decode.test.cpp index 1e4b2fc..b4310ca 100644 --- a/src/decode.test.cpp +++ b/src/decode.test.cpp @@ -1,17 +1,16 @@ #include "aare/decode.hpp" -#include -#include #include "aare/NDArray.hpp" +#include +#include using Catch::Matchers::WithinAbs; #include -TEST_CASE("test_adc_sar_05_decode64to16"){ +TEST_CASE("test_adc_sar_05_decode64to16") { uint64_t input = 0; uint16_t output = aare::adc_sar_05_decode64to16(input); CHECK(output == 0); - // bit 29 on th input is bit 0 on the output input = 1UL << 29; output = aare::adc_sar_05_decode64to16(input); @@ -25,7 +24,6 @@ TEST_CASE("test_adc_sar_05_decode64to16"){ CHECK(output == (1 << i)); } - // test a few "random" values input = 0; input |= (1UL << 29); @@ -34,7 +32,6 @@ TEST_CASE("test_adc_sar_05_decode64to16"){ output = aare::adc_sar_05_decode64to16(input); CHECK(output == 7UL); - input = 0; input |= (1UL << 18); input |= (1UL << 27); @@ -47,10 +44,9 @@ TEST_CASE("test_adc_sar_05_decode64to16"){ input |= (1UL << 22); output = aare::adc_sar_05_decode64to16(input); CHECK(output == 3072UL); - } +} - - TEST_CASE("test_apply_custom_weights") { +TEST_CASE("test_apply_custom_weights") { uint16_t input = 1; aare::NDArray weights_data({3}, 0.0); @@ -60,7 +56,6 @@ 
TEST_CASE("test_adc_sar_05_decode64to16"){ auto weights = weights_data.view(); - double output = aare::apply_custom_weights(input, weights); CHECK_THAT(output, WithinAbs(1.0, 0.001)); @@ -68,7 +63,6 @@ TEST_CASE("test_adc_sar_05_decode64to16"){ output = aare::apply_custom_weights(input, weights); CHECK_THAT(output, WithinAbs(2.1, 0.001)); - input = 1 << 2; output = aare::apply_custom_weights(input, weights); CHECK_THAT(output, WithinAbs(3.24, 0.001)); @@ -76,5 +70,4 @@ TEST_CASE("test_adc_sar_05_decode64to16"){ input = 0b111; output = aare::apply_custom_weights(input, weights); CHECK_THAT(output, WithinAbs(6.34, 0.001)); - - } \ No newline at end of file +} \ No newline at end of file diff --git a/src/defs.cpp b/src/defs.cpp index 7c7cc45..b93ff06 100644 --- a/src/defs.cpp +++ b/src/defs.cpp @@ -5,15 +5,11 @@ #include namespace aare { - -void assert_failed(const std::string &msg) - { +void assert_failed(const std::string &msg) { fmt::print(msg); exit(1); } - - /** * @brief Convert a DetectorType to a string * @param type DetectorType @@ -40,7 +36,7 @@ template <> std::string ToString(DetectorType arg) { case DetectorType::Xilinx_ChipTestBoard: return "Xilinx_ChipTestBoard"; - //Custom ones + // Custom ones case DetectorType::Moench03: return "Moench03"; case DetectorType::Moench03_old: @@ -48,8 +44,8 @@ template <> std::string ToString(DetectorType arg) { case DetectorType::Unknown: return "Unknown"; - //no default case to trigger compiler warning if not all - //enum values are handled + // no default case to trigger compiler warning if not all + // enum values are handled } throw std::runtime_error("Could not decode detector to string"); } @@ -80,14 +76,14 @@ template <> DetectorType StringTo(const std::string &arg) { if (arg == "Xilinx_ChipTestBoard") return DetectorType::Xilinx_ChipTestBoard; - //Custom ones + // Custom ones if (arg == "Moench03") return DetectorType::Moench03; if (arg == "Moench03_old") return DetectorType::Moench03_old; if (arg == "Unknown") return DetectorType::Unknown; - + throw std::runtime_error("Could not decode detector from: \"" + arg + "\""); } @@ -102,7 +98,8 @@ template <> TimingMode StringTo(const std::string &arg) { return TimingMode::Auto; if (arg == "trigger") return TimingMode::Trigger; - throw std::runtime_error("Could not decode timing mode from: \"" + arg + "\""); + throw std::runtime_error("Could not decode timing mode from: \"" + arg + + "\""); } template <> FrameDiscardPolicy StringTo(const std::string &arg) { @@ -112,7 +109,8 @@ template <> FrameDiscardPolicy StringTo(const std::string &arg) { return FrameDiscardPolicy::Discard; if (arg == "discardpartial") return FrameDiscardPolicy::DiscardPartial; - throw std::runtime_error("Could not decode frame discard policy from: \"" + arg + "\""); + throw std::runtime_error("Could not decode frame discard policy from: \"" + + arg + "\""); } // template <> TimingMode StringTo(std::string mode); diff --git a/src/defs.test.cpp b/src/defs.test.cpp index 6ab9394..2106d86 100644 --- a/src/defs.test.cpp +++ b/src/defs.test.cpp @@ -3,12 +3,12 @@ #include #include -using aare::ToString; using aare::StringTo; +using aare::ToString; TEST_CASE("Enum to string conversion") { - // TODO! By the way I don't think the enum string conversions should be in the defs.hpp file - // but let's use this to show a test + // TODO! 
By the way I don't think the enum string conversions should be in + // the defs.hpp file but let's use this to show a test REQUIRE(ToString(aare::DetectorType::Generic) == "Generic"); REQUIRE(ToString(aare::DetectorType::Eiger) == "Eiger"); REQUIRE(ToString(aare::DetectorType::Gotthard) == "Gotthard"); @@ -17,30 +17,42 @@ TEST_CASE("Enum to string conversion") { REQUIRE(ToString(aare::DetectorType::Moench) == "Moench"); REQUIRE(ToString(aare::DetectorType::Mythen3) == "Mythen3"); REQUIRE(ToString(aare::DetectorType::Gotthard2) == "Gotthard2"); - REQUIRE(ToString(aare::DetectorType::Xilinx_ChipTestBoard) == "Xilinx_ChipTestBoard"); + REQUIRE(ToString(aare::DetectorType::Xilinx_ChipTestBoard) == + "Xilinx_ChipTestBoard"); REQUIRE(ToString(aare::DetectorType::Moench03) == "Moench03"); REQUIRE(ToString(aare::DetectorType::Moench03_old) == "Moench03_old"); REQUIRE(ToString(aare::DetectorType::Unknown) == "Unknown"); } -TEST_CASE("String to enum"){ - REQUIRE(StringTo("Generic") == aare::DetectorType::Generic); +TEST_CASE("String to enum") { + REQUIRE(StringTo("Generic") == + aare::DetectorType::Generic); REQUIRE(StringTo("Eiger") == aare::DetectorType::Eiger); - REQUIRE(StringTo("Gotthard") == aare::DetectorType::Gotthard); - REQUIRE(StringTo("Jungfrau") == aare::DetectorType::Jungfrau); - REQUIRE(StringTo("ChipTestBoard") == aare::DetectorType::ChipTestBoard); - REQUIRE(StringTo("Moench") == aare::DetectorType::Moench); - REQUIRE(StringTo("Mythen3") == aare::DetectorType::Mythen3); - REQUIRE(StringTo("Gotthard2") == aare::DetectorType::Gotthard2); - REQUIRE(StringTo("Xilinx_ChipTestBoard") == aare::DetectorType::Xilinx_ChipTestBoard); - REQUIRE(StringTo("Moench03") == aare::DetectorType::Moench03); - REQUIRE(StringTo("Moench03_old") == aare::DetectorType::Moench03_old); - REQUIRE(StringTo("Unknown") == aare::DetectorType::Unknown); + REQUIRE(StringTo("Gotthard") == + aare::DetectorType::Gotthard); + REQUIRE(StringTo("Jungfrau") == + aare::DetectorType::Jungfrau); + REQUIRE(StringTo("ChipTestBoard") == + aare::DetectorType::ChipTestBoard); + REQUIRE(StringTo("Moench") == + aare::DetectorType::Moench); + REQUIRE(StringTo("Mythen3") == + aare::DetectorType::Mythen3); + REQUIRE(StringTo("Gotthard2") == + aare::DetectorType::Gotthard2); + REQUIRE(StringTo("Xilinx_ChipTestBoard") == + aare::DetectorType::Xilinx_ChipTestBoard); + REQUIRE(StringTo("Moench03") == + aare::DetectorType::Moench03); + REQUIRE(StringTo("Moench03_old") == + aare::DetectorType::Moench03_old); + REQUIRE(StringTo("Unknown") == + aare::DetectorType::Unknown); } -TEST_CASE("Enum values"){ - //Since some of the enums are written to file we need to make sure - //they match the value in the slsDetectorPackage +TEST_CASE("Enum values") { + // Since some of the enums are written to file we need to make sure + // they match the value in the slsDetectorPackage REQUIRE(static_cast(aare::DetectorType::Generic) == 0); REQUIRE(static_cast(aare::DetectorType::Eiger) == 1); @@ -52,7 +64,7 @@ TEST_CASE("Enum values"){ REQUIRE(static_cast(aare::DetectorType::Gotthard2) == 7); REQUIRE(static_cast(aare::DetectorType::Xilinx_ChipTestBoard) == 8); - //Not included + // Not included REQUIRE(static_cast(aare::DetectorType::Moench03) == 100); } @@ -85,5 +97,6 @@ TEST_CASE("DynamicCluster creation") { // double v3 = c2.get(33 * 44 - 1); // REQUIRE(aare::compare_floats(123.11, v3)); -// REQUIRE_THROWS_AS(c2.set(0, 1), std::invalid_argument); // set int to double +// REQUIRE_THROWS_AS(c2.set(0, 1), std::invalid_argument); // set int to +// double // } \ 
No newline at end of file diff --git a/src/geo_helpers.cpp b/src/geo_helpers.cpp index 39086ec..96a9056 100644 --- a/src/geo_helpers.cpp +++ b/src/geo_helpers.cpp @@ -2,15 +2,16 @@ #include "aare/geo_helpers.hpp" #include "fmt/core.h" -namespace aare{ +namespace aare { DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI roi) { - #ifdef AARE_VERBOSE +#ifdef AARE_VERBOSE fmt::println("update_geometry_with_roi() called with ROI: {} {} {} {}", roi.xmin, roi.xmax, roi.ymin, roi.ymax); - fmt::println("Geometry: {} {} {} {} {} {}", - geo.modules_x, geo.modules_y, geo.pixels_x, geo.pixels_y, geo.module_gap_row, geo.module_gap_col); - #endif + fmt::println("Geometry: {} {} {} {} {} {}", geo.modules_x, geo.modules_y, + geo.pixels_x, geo.pixels_y, geo.module_gap_row, + geo.module_gap_col); +#endif int pos_y = 0; int pos_y_increment = 0; for (int row = 0; row < geo.modules_y; row++) { @@ -41,9 +42,9 @@ DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI roi) { if (m.origin_y + m.height < roi.ymin) { m.height = 0; } else { - if ((roi.ymin > m.origin_y) && (roi.ymin < m.origin_y + m.height)) { + if ((roi.ymin > m.origin_y) && + (roi.ymin < m.origin_y + m.height)) { m.height -= roi.ymin - m.origin_y; - } if (roi.ymax < m.origin_y + original_height) { m.height -= m.origin_y + original_height - roi.ymax; @@ -51,9 +52,10 @@ DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI roi) { m.origin_y = pos_y; pos_y_increment = m.height; } - #ifdef AARE_VERBOSE - fmt::println("Module {} {} {} {}", m.origin_x, m.origin_y, m.width, m.height); - #endif +#ifdef AARE_VERBOSE + fmt::println("Module {} {} {} {}", m.origin_x, m.origin_y, m.width, + m.height); +#endif } // increment pos_y pos_y += pos_y_increment; @@ -65,7 +67,6 @@ DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI roi) { geo.pixels_y = roi.height(); return geo; - } } // namespace aare \ No newline at end of file diff --git a/src/geo_helpers.test.cpp b/src/geo_helpers.test.cpp index 08ee96c..48ae9cf 100644 --- a/src/geo_helpers.test.cpp +++ b/src/geo_helpers.test.cpp @@ -1,6 +1,6 @@ #include "aare/File.hpp" -#include "aare/RawMasterFile.hpp" //needed for ROI #include "aare/RawFile.hpp" +#include "aare/RawMasterFile.hpp" //needed for ROI #include #include @@ -8,26 +8,24 @@ #include "aare/geo_helpers.hpp" #include "test_config.hpp" -TEST_CASE("Simple ROIs on one module"){ - // DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI roi) +TEST_CASE("Simple ROIs on one module") { + // DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI + // roi) aare::DetectorGeometry geo; - - aare::ModuleGeometry mod; + aare::ModuleGeometry mod; mod.origin_x = 0; mod.origin_y = 0; mod.width = 1024; mod.height = 512; - - geo.pixels_x = 1024; geo.pixels_y = 512; geo.modules_x = 1; geo.modules_y = 1; geo.module_pixel_0.push_back(mod); - SECTION("ROI is the whole module"){ + SECTION("ROI is the whole module") { aare::ROI roi; roi.xmin = 0; roi.xmax = 1024; @@ -42,7 +40,7 @@ TEST_CASE("Simple ROIs on one module"){ REQUIRE(updated_geo.module_pixel_0[0].height == 512); REQUIRE(updated_geo.module_pixel_0[0].width == 1024); } - SECTION("ROI is the top left corner of the module"){ + SECTION("ROI is the top left corner of the module") { aare::ROI roi; roi.xmin = 100; roi.xmax = 200; @@ -58,7 +56,7 @@ TEST_CASE("Simple ROIs on one module"){ REQUIRE(updated_geo.module_pixel_0[0].width == 100); } - SECTION("ROI is a small square"){ + SECTION("ROI is a small 
square") { aare::ROI roi; roi.xmin = 1000; roi.xmax = 1010; @@ -73,7 +71,7 @@ TEST_CASE("Simple ROIs on one module"){ REQUIRE(updated_geo.module_pixel_0[0].height == 10); REQUIRE(updated_geo.module_pixel_0[0].width == 10); } - SECTION("ROI is a few columns"){ + SECTION("ROI is a few columns") { aare::ROI roi; roi.xmin = 750; roi.xmax = 800; @@ -90,14 +88,12 @@ TEST_CASE("Simple ROIs on one module"){ } } - - -TEST_CASE("Two modules side by side"){ - // DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI roi) +TEST_CASE("Two modules side by side") { + // DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI + // roi) aare::DetectorGeometry geo; - - aare::ModuleGeometry mod; + aare::ModuleGeometry mod; mod.origin_x = 0; mod.origin_y = 0; mod.width = 1024; @@ -112,7 +108,7 @@ TEST_CASE("Two modules side by side"){ mod.origin_x = 1024; geo.module_pixel_0.push_back(mod); - SECTION("ROI is the whole image"){ + SECTION("ROI is the whole image") { aare::ROI roi; roi.xmin = 0; roi.xmax = 2048; @@ -125,7 +121,7 @@ TEST_CASE("Two modules side by side"){ REQUIRE(updated_geo.modules_x == 2); REQUIRE(updated_geo.modules_y == 1); } - SECTION("rectangle on both modules"){ + SECTION("rectangle on both modules") { aare::ROI roi; roi.xmin = 800; roi.xmax = 1300; @@ -141,11 +137,12 @@ TEST_CASE("Two modules side by side"){ REQUIRE(updated_geo.module_pixel_0[0].width == 224); REQUIRE(updated_geo.module_pixel_0[1].height == 299); REQUIRE(updated_geo.module_pixel_0[1].width == 276); - } + } } -TEST_CASE("Three modules side by side"){ - // DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI roi) +TEST_CASE("Three modules side by side") { + // DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI + // roi) aare::DetectorGeometry geo; aare::ROI roi; roi.xmin = 700; @@ -153,7 +150,7 @@ TEST_CASE("Three modules side by side"){ roi.ymin = 0; roi.ymax = 123; - aare::ModuleGeometry mod; + aare::ModuleGeometry mod; mod.origin_x = 0; mod.origin_y = 0; mod.width = 1024; @@ -184,8 +181,9 @@ TEST_CASE("Three modules side by side"){ REQUIRE(updated_geo.module_pixel_0[2].width == 452); } -TEST_CASE("Four modules as a square"){ - // DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI roi) +TEST_CASE("Four modules as a square") { + // DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI + // roi) aare::DetectorGeometry geo; aare::ROI roi; roi.xmin = 500; @@ -193,7 +191,7 @@ TEST_CASE("Four modules as a square"){ roi.ymin = 500; roi.ymax = 600; - aare::ModuleGeometry mod; + aare::ModuleGeometry mod; mod.origin_x = 0; mod.origin_y = 0; mod.width = 1024; diff --git a/src/utils/ifstream_helpers.cpp b/src/utils/ifstream_helpers.cpp index 74c56f3..79f006d 100644 --- a/src/utils/ifstream_helpers.cpp +++ b/src/utils/ifstream_helpers.cpp @@ -10,7 +10,7 @@ std::string ifstream_error_msg(std::ifstream &ifs) { return " Bad file stream"; } else if (state & std::ios_base::failbit) { return " File read failed"; - }else{ + } else { return " Unknown/no error"; } } diff --git a/src/utils/task.test.cpp b/src/utils/task.test.cpp index e19994a..3ca71c7 100644 --- a/src/utils/task.test.cpp +++ b/src/utils/task.test.cpp @@ -1,10 +1,9 @@ #include "aare/utils/task.hpp" -#include #include +#include - -TEST_CASE("Split a range into multiple tasks"){ +TEST_CASE("Split a range into multiple tasks") { auto tasks = aare::split_task(0, 10, 3); REQUIRE(tasks.size() == 3); @@ -22,11 +21,8 @@ TEST_CASE("Split a range into multiple tasks"){ 
tasks = aare::split_task(0, 10, 10); REQUIRE(tasks.size() == 10); - for (int i = 0; i < 10; i++){ + for (int i = 0; i < 10; i++) { REQUIRE(tasks[i].first == i); - REQUIRE(tasks[i].second == i+1); + REQUIRE(tasks[i].second == i + 1); } - - - } \ No newline at end of file diff --git a/tests/test.cpp b/tests/test.cpp index 513f690..1d4456f 100644 --- a/tests/test.cpp +++ b/tests/test.cpp @@ -2,8 +2,8 @@ #include #include #include -#include #include +#include TEST_CASE("Test suite can find data assets", "[.integration]") { auto fpath = test_data_path() / "numpy" / "test_numpy_file.npy"; From 3cc44f780f255c9f110ff16b91baf7b4091e0581 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Wed, 11 Jun 2025 13:21:21 +0200 Subject: [PATCH 10/13] Added branching strategy etc. to docs (#191) Added a section on the ideas behind the library and also explaining the branching strategy. --------- Co-authored-by: Dhanya Thattil --- .github/workflows/build_docs.yml | 7 ++- RELEASE.md | 22 ++++++++ docs/src/Philosophy.rst | 47 +++++++++++++++++ docs/src/Requirements.rst | 9 ++-- docs/src/Workflow.rst | 86 ++++++++++++++++++++++++++++++++ docs/src/index.rst | 2 + 6 files changed, 168 insertions(+), 5 deletions(-) create mode 100644 RELEASE.md create mode 100644 docs/src/Philosophy.rst create mode 100644 docs/src/Workflow.rst diff --git a/.github/workflows/build_docs.yml b/.github/workflows/build_docs.yml index 153c210..57bcfb7 100644 --- a/.github/workflows/build_docs.yml +++ b/.github/workflows/build_docs.yml @@ -2,7 +2,10 @@ name: Build the package using cmake then documentation on: workflow_dispatch: - push: + pull_request: + release: + types: + - published permissions: @@ -55,7 +58,7 @@ jobs: url: ${{ steps.deployment.outputs.page_url }} runs-on: ubuntu-latest needs: build - if: github.event_name == 'release' && github.event.action == 'published' + if: (github.event_name == 'release' && github.event.action == 'published') || (github.event_name == 'workflow_dispatch' ) steps: - name: Deploy to GitHub Pages id: deployment diff --git a/RELEASE.md b/RELEASE.md new file mode 100644 index 0000000..afda148 --- /dev/null +++ b/RELEASE.md @@ -0,0 +1,22 @@ +# Release notes + + +### head + +Features: + +- Cluster finder now works with 5x5, 7x7 and 9x9 clusters + + +### 2025.05.22 + +Features: + +- Added scurve fitting + +Bugfixes: + +- Fixed crash when opening raw files with large number of data files + + + diff --git a/docs/src/Philosophy.rst b/docs/src/Philosophy.rst new file mode 100644 index 0000000..f187bad --- /dev/null +++ b/docs/src/Philosophy.rst @@ -0,0 +1,47 @@ +**************** +Philosophy +**************** + + +Fast code with a simple interface +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Aare should be fast and efficient, but also easy to use. We strive to keep a simple interface that feels intuitive. +Internally we use C++ for performance and the ability to integrate the library in other programs, but we see most +users using the Python interface. + +Live at head +~~~~~~~~~~~~~~~~~~ + +As a user of the library you should be able to, and is expected to, use the latest version. Bug fixes will rarely be backported +to older releases. By upgrading frequently you will benefit from the latest features and minimize the effort to maintain your scripts/code +by doing several small upgrades instead of one big upgrade. + +API +~~~~~~~~~~~~~~~~~~ + +We aim to keep the API stable and only break it for good reasons. But specially now in the early stages of development +the API will change. 
On those occasions it will be clearly stated in the release notes. However, the norm should be a +backward compatible API. + +Documentation +~~~~~~~~~~~~~~~~~~ + +Being a library it is important to have a well documented API. We use Doxygen to generate the C++ documentation +and Sphinx for the Python part. Breathe is used to integrate the two into one Sphinx html site. The documentation is built +automatically on release by the CI and published to GitHub pages. In addition to the generated API documentation, +certain classes might need more descriptions of the usage. This is then placed in the .rst files in the docs/src directory. + +.. attention:: + + The code should be well documented, but using descriptive names is more important. In the same spirit + if a function is called `getNumberOfFrames()` you don't need to write a comment saying that it gets the + number of frames. + + +Dependencies +~~~~~~~~~~~~~~~~~~ + +Deployment in the scientific community is often tricky. Either due to old OS versions or the lack of package managers. +We strive to keep the dependencies to a minimum and will vendor some libraries to simplify deployment even though it comes +at a cost of build time. \ No newline at end of file diff --git a/docs/src/Requirements.rst b/docs/src/Requirements.rst index c962f73..b0c370f 100644 --- a/docs/src/Requirements.rst +++ b/docs/src/Requirements.rst @@ -2,18 +2,21 @@ Requirements ============================================== - C++17 compiler (gcc 8/clang 7) -- CMake 3.14+ +- CMake 3.15+ **Internally used libraries** .. note :: - These can also be picked up from the system/conda environment by specifying: + To save compile time some of the dependencies can also be picked up from the system/conda environment by specifying: -DAARE_SYSTEM_LIBRARIES=ON during the cmake configuration. -- pybind11 +To simplify deployment we build and statically link a few libraries. + - fmt +- lmfit - https://jugit.fz-juelich.de/mlz/lmfit - nlohmann_json +- pybind11 - ZeroMQ **Extra dependencies for building documentation** diff --git a/docs/src/Workflow.rst b/docs/src/Workflow.rst new file mode 100644 index 0000000..9b41e74 --- /dev/null +++ b/docs/src/Workflow.rst @@ -0,0 +1,86 @@ +**************** +Workflow +**************** + +This page describes how we develop aare. + +GitHub centric +~~~~~~~~~~~~~~~~~~ + +We use GitHub for all development. Issues and pull requests provide a platform for collaboration as well +as a record of the development process. Even if we discuss things in person, we record the outcome in an issue. +If a particular implementation is chosen over another, the reason should be recorded in the pull request. + + +Branches +~~~~~~~~~~~~~~~~~~ + +We aim for an as lightweight branching strategy as possible. Short-lived feature branches are merged back into main. +The main branch is expected to always be in a releasable state. A release is simply a tag on main which provides a +reference and triggers the CI to build the release artifacts (conda, pypi etc.). For large features consider merging +smaller chunks into main as they are completed, rather than waiting for the entire feature to be finished. Worst case +make sure your feature branch merges with main regularly to avoid large merge conflicts later on. + +.. note:: + + The main branch is expected to always work. Feel free to pull from main instead of sticking to a + release + + +Releases +~~~~~~~~~~~~~~~~~~ + +Release early, release often. As soon as "enough" new features have been implemented, a release is created. 
+A release should not be a big thing, rather a routine part of development that does not require any special person or +unfamiliar steps. + + + +Checklists for deployment +~~~~~~~~~~~~~~~~~~ + +**Feature:** + +#. Create a new issue for the feature (label feature) +#. Create a new branch from main. +#. Implement the feature including test and documentation +#. Add the feature to RELEASE.md under head +#. Create a pull request linked to the issue +#. Code is reviewed by at least one other person +#. Once approved, the branch is merged into main + + +**BugFix:** + +Essentially the same as for a feature, if possible start with +a failing test that demonstrates the bug. + +#. Create a new issue for the bug (label bug) +#. Create a new branch from main. +#. **Write a test that fails for the bug** +#. Implement the fix +#. **Run the test to ensure it passes** +#. Add the bugfix to RELEASE.md under head +#. Create a pull request linked to the issue. +#. Code is reviewed by at least one other person +#. Once approved, the branch is merged into main + +**Release:** + +#. Once "enough" new features have been implemented, a release is created +#. Update RELEASE.md with the tag of the release and verify that it is complete +#. Create the release in GitHub describing the new features and bug fixes +#. CI makes magic + + +**Update documentation only:** + +.. attention:: + + It's possible to update the documentation without changing the code, but take + care since the docs will reflect the code in main and not the latest release. + +#. Create a PR to main with the documentation changes +#. Create a pull request linked to the issue. +#. Code is reviewed by at least one other person +#. Once merged you can manually trigger the CI workflow for documentation \ No newline at end of file diff --git a/docs/src/index.rst b/docs/src/index.rst index af5e99a..a33b4df 100644 --- a/docs/src/index.rst +++ b/docs/src/index.rst @@ -63,4 +63,6 @@ AARE :caption: Developer :maxdepth: 3 + Philosophy + Workflow Tests \ No newline at end of file From 4976ec165110bfc7988f54ff3ff39eda9e2db66c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Thu, 12 Jun 2025 09:32:42 +0200 Subject: [PATCH 11/13] added back chunk_size in python (#199) When refactoring the dispatch of the python binding for ClusterFile I forgot chunk_size. Adding it back in. Excluded from release notes since the bug was introduced after the last release and now fixed before the next release. 1. added back chunk_size 2. removed a few commented out lines closes #197 --- python/aare/ClusterFinder.py | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/python/aare/ClusterFinder.py b/python/aare/ClusterFinder.py index 99bcc5f..5509e51 100644 --- a/python/aare/ClusterFinder.py +++ b/python/aare/ClusterFinder.py @@ -1,16 +1,8 @@ - -# from ._aare import ClusterFinder_Cluster3x3i, ClusterFinder_Cluster2x2i, ClusterFinderMT_Cluster3x3i, ClusterFinderMT_Cluster2x2i, ClusterCollector_Cluster3x3i, ClusterCollector_Cluster2x2i - - -# from ._aare import ClusterFileSink_Cluster3x3i, ClusterFileSink_Cluster2x2i - from . 
import _aare import numpy as np _supported_cluster_sizes = [(2,2), (3,3), (5,5), (7,7), (9,9),] -# def _get_class() - def _type_to_char(dtype): if dtype == np.int32: return 'i' @@ -74,11 +66,11 @@ def ClusterFileSink(clusterfindermt, cluster_file, dtype=np.int32): return cls(clusterfindermt, cluster_file) -def ClusterFile(fname, cluster_size=(3,3), dtype=np.int32): +def ClusterFile(fname, cluster_size=(3,3), dtype=np.int32, chunk_size = 1000): """ Factory function to create a ClusterFile object. Provides a cleaner syntax for the templated ClusterFile in C++. """ cls = _get_class("ClusterFile", cluster_size, dtype) - return cls(fname) + return cls(fname, chunk_size=chunk_size) From 11fa95b23c649d3a39c8ac692be650885c7e47f9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Fri, 13 Jun 2025 10:41:39 +0200 Subject: [PATCH 12/13] Improved documentation for ClusterFile on the python side (#201) - Fixed CI job not doing python docs - added more docs on cluster file - fixed generating docs on cluster vector --- .github/workflows/build_docs.yml | 2 +- docs/src/ClusterFile.rst | 3 ++- docs/src/pyClusterFile.rst | 15 +++++++++++++++ docs/src/pyClusterVector.rst | 9 ++++++--- etc/dev-env.yml | 1 + python/aare/ClusterFinder.py | 11 +++++++++++ python/src/bind_ClusterFile.hpp | 7 ++++--- 7 files changed, 40 insertions(+), 8 deletions(-) diff --git a/.github/workflows/build_docs.yml b/.github/workflows/build_docs.yml index 57bcfb7..4fd23e7 100644 --- a/.github/workflows/build_docs.yml +++ b/.github/workflows/build_docs.yml @@ -43,7 +43,7 @@ jobs: run: | mkdir build cd build - cmake .. -DAARE_SYSTEM_LIBRARIES=ON -DAARE_DOCS=ON + cmake .. -DAARE_SYSTEM_LIBRARIES=ON -DAARE_PYTHON_BINDINGS=ON -DAARE_DOCS=ON make -j 2 make docs diff --git a/docs/src/ClusterFile.rst b/docs/src/ClusterFile.rst index 79de086..a2ee162 100644 --- a/docs/src/ClusterFile.rst +++ b/docs/src/ClusterFile.rst @@ -4,4 +4,5 @@ ClusterFile .. doxygenclass:: aare::ClusterFile :members: :undoc-members: - :private-members: \ No newline at end of file + :private-members: + diff --git a/docs/src/pyClusterFile.rst b/docs/src/pyClusterFile.rst index bdf898c..cd391e0 100644 --- a/docs/src/pyClusterFile.rst +++ b/docs/src/pyClusterFile.rst @@ -2,9 +2,24 @@ ClusterFile ============ + +The :class:`ClusterFile` class is the main interface to read and write clusters in aare. Unfortunately the +old file format does not include metadata like the cluster size and the data type. This means that the +user has to know this information from other sources. Specifying the wrong cluster size or data type +will lead to garbage data being read. + .. py:currentmodule:: aare .. autoclass:: ClusterFile + :members: + :undoc-members: + :inherited-members: + + +Below is the API of the ClusterFile_Cluster3x3i but all variants share the same API. + +.. autoclass:: aare._aare.ClusterFile_Cluster3x3i + :special-members: __init__ :members: :undoc-members: :show-inheritance: diff --git a/docs/src/pyClusterVector.rst b/docs/src/pyClusterVector.rst index 4277920..ff115c9 100644 --- a/docs/src/pyClusterVector.rst +++ b/docs/src/pyClusterVector.rst @@ -2,8 +2,10 @@ ClusterVector ================ The ClusterVector, holds clusters from the ClusterFinder. Since it is templated -in C++ we use a suffix indicating the data type in python. The suffix is -``_i`` for integer, ``_f`` for float, and ``_d`` for double. +in C++ we use a suffix indicating the type of cluster it holds. The suffix follows +the same pattern as for ClusterFile i.e. 
``ClusterVector_Cluster3x3i`` +for a vector holding 3x3 integer clusters. + At the moment the functionality from python is limited and it is not supported to push_back clusters to the vector. The intended use case is to pass it to @@ -26,7 +28,8 @@ C++ functions that support the ClusterVector or to view it as a numpy array. .. py:currentmodule:: aare -.. autoclass:: ClusterVector_i +.. autoclass:: aare._aare.ClusterVector_Cluster3x3i + :special-members: __init__ :members: :undoc-members: :show-inheritance: diff --git a/etc/dev-env.yml b/etc/dev-env.yml index e580c81..4e4d08f 100644 --- a/etc/dev-env.yml +++ b/etc/dev-env.yml @@ -10,4 +10,5 @@ dependencies: - sphinx_rtd_theme - furo - zeromq + - pybind11 diff --git a/python/aare/ClusterFinder.py b/python/aare/ClusterFinder.py index 5509e51..251d938 100644 --- a/python/aare/ClusterFinder.py +++ b/python/aare/ClusterFinder.py @@ -70,6 +70,17 @@ def ClusterFile(fname, cluster_size=(3,3), dtype=np.int32, chunk_size = 1000): """ Factory function to create a ClusterFile object. Provides a cleaner syntax for the templated ClusterFile in C++. + + .. code-block:: python + + from aare import ClusterFile + + with ClusterFile("clusters.clust", cluster_size=(3,3), dtype=np.int32) as cf: + # cf is now a ClusterFile_Cluster3x3i object but you don't need to know that. + for clusters in cf: + # Loop over clusters in chunks of 1000 + # The type of clusters will be a ClusterVector_Cluster3x3i in this case + """ cls = _get_class("ClusterFile", cluster_size, dtype) diff --git a/python/src/bind_ClusterFile.hpp b/python/src/bind_ClusterFile.hpp index c2c801d..5d8aa88 100644 --- a/python/src/bind_ClusterFile.hpp +++ b/python/src/bind_ClusterFile.hpp @@ -38,19 +38,20 @@ void define_ClusterFile(py::module &m, const std::string &typestr) { self.read_clusters(n_clusters)); return v; }, - py::return_value_policy::take_ownership) + py::return_value_policy::take_ownership, py::arg("n_clusters")) .def("read_frame", [](ClusterFile &self) { auto v = new ClusterVector(self.read_frame()); return v; }) - .def("set_roi", &ClusterFile::set_roi) + .def("set_roi", &ClusterFile::set_roi, + py::arg("roi")) .def( "set_noise_map", [](ClusterFile &self, py::array_t noise_map) { auto view = make_view_2d(noise_map); self.set_noise_map(view); - }) + }, py::arg("noise_map")) .def("set_gain_map", [](ClusterFile &self, py::array_t gain_map) { From 83544396050bdf014e4a5d1c80ec356c0fccd7d9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Fri, 13 Jun 2025 15:25:43 +0200 Subject: [PATCH 13/13] droping version spec on sphinx (#202) - Removing the version requirement on sphinx since the latest version works again - added numpy and matplotlib do the etc/dev-env.yml since they are needed to import aare --- etc/dev-env.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/etc/dev-env.yml b/etc/dev-env.yml index 4e4d08f..2edfc46 100644 --- a/etc/dev-env.yml +++ b/etc/dev-env.yml @@ -5,10 +5,12 @@ dependencies: - anaconda-client - conda-build - doxygen - - sphinx=7.1.2 + - sphinx - breathe - sphinx_rtd_theme - furo - zeromq - pybind11 + - numpy + - matplotlib
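
A minimal usage sketch of the Python ``ClusterFile`` factory touched above (``chunk_size`` restored in #199, documented in #201). It mirrors the docstring added in ``python/aare/ClusterFinder.py``; the file name is a placeholder, and the cluster size and dtype are assumptions the caller must supply, since the old cluster file format stores no such metadata.

.. code-block:: python

    import numpy as np
    from aare import ClusterFile

    # "clusters.clust" is a placeholder path; cluster_size and dtype must be
    # known from elsewhere -- the old file format carries no metadata.
    with ClusterFile("clusters.clust", cluster_size=(3, 3), dtype=np.int32,
                     chunk_size=1000) as cf:
        for clusters in cf:
            # each chunk is a ClusterVector_Cluster3x3i holding up to
            # chunk_size clusters, ready to pass on to the C++ helpers
            ...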
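
The reformatted ``aare::apply_custom_weights()`` in ``src/decode.cpp`` weights bit ``i`` of a 16-bit word by ``weights[i]**i`` (pre-computed once as ``weights_powers``). Below is a pure-Python cross-check of that rule, not part of the aare API; the weight values are chosen to be consistent with the expected results in ``decode.test.cpp``.

.. code-block:: python

    def apply_custom_weights(value: int, weights) -> float:
        # bit i of the input contributes weights[i]**i, as in src/decode.cpp
        return sum(((value >> i) & 1) * (w ** i) for i, w in enumerate(weights))

    weights = [1.0, 2.1, 1.8]  # consistent with the CHECK_THAT values in decode.test.cpp
    assert abs(apply_custom_weights(1, weights) - 1.0) < 1e-3        # bit 0 -> 1.0**0
    assert abs(apply_custom_weights(1 << 2, weights) - 3.24) < 1e-3  # bit 2 -> 1.8**2
    assert abs(apply_custom_weights(0b111, weights) - 6.34) < 1e-3   # 1 + 2.1 + 3.24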
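
The ``RawSubFile`` changes rely on a cumulative frame count per file (``m_last_frame_in_file = cumsum(...)``) to turn a global frame index into a file index plus an in-file offset, which ``seek()`` then scales by ``m_bytes_per_frame + sizeof(DetectorHeader)``. A small sketch of that arithmetic, using the frame layout from the comment in ``RawSubFile.test.cpp`` (f0 holds frames 1-3, f1 4-6, f2 7-9, f3 10); the index lookup itself is illustrative, not the library's exact implementation.

.. code-block:: python

    import bisect

    frames_per_file = [3, 3, 3, 1]        # f0: 1,2,3  f1: 4,5,6  f2: 7,8,9  f3: 10
    last_frame_in_file = []               # running total, like aare::cumsum()
    total = 0
    for n in frames_per_file:
        total += n
        last_frame_in_file.append(total)  # -> [3, 6, 9, 10]

    def locate(frame_index):
        """Map a global frame index to (file index, frame offset within that file)."""
        file_index = bisect.bisect_right(last_frame_in_file, frame_index)
        offset = frame_index - (last_frame_in_file[file_index - 1] if file_index else 0)
        # seek() then jumps to offset * (bytes_per_frame + sizeof(DetectorHeader))
        return file_index, offset

    assert locate(0) == (0, 0)   # frame number 1 -> first frame of f0
    assert locate(3) == (1, 0)   # frame number 4 -> first frame of f1
    assert locate(9) == (3, 0)   # frame number 10 -> only frame of f3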