diff --git a/.clang-tidy b/.clang-tidy new file mode 100644 index 0000000..a2ab6c1 --- /dev/null +++ b/.clang-tidy @@ -0,0 +1,42 @@ + +--- +Checks: '*, + -altera-*, + -android-cloexec-fopen, + -cppcoreguidelines-pro-bounds-array-to-pointer-decay, + -cppcoreguidelines-pro-bounds-pointer-arithmetic, + -fuchsia*, + -readability-else-after-return, + -readability-avoid-const-params-in-decls, + -readability-identifier-length, + -cppcoreguidelines-pro-bounds-constant-array-index, + -cppcoreguidelines-pro-type-reinterpret-cast, + -llvm-header-guard, + -modernize-use-nodiscard, + -misc-non-private-member-variables-in-classes, + -readability-static-accessed-through-instance, + -readability-braces-around-statements, + -readability-isolate-declaration, + -readability-implicit-bool-conversion, + -readability-identifier-length, + -readability-identifier-naming, + -hicpp-signed-bitwise, + -hicpp-no-array-decay, + -hicpp-braces-around-statements, + -google-runtime-references, + -google-readability-todo, + -google-readability-braces-around-statements, + -modernize-use-trailing-return-type, + -llvmlibc-*' + +HeaderFilterRegex: \.hpp +FormatStyle: none +CheckOptions: + - { key: readability-identifier-naming.NamespaceCase, value: lower_case } + # - { key: readability-identifier-naming.FunctionCase, value: lower_case } + - { key: readability-identifier-naming.ClassCase, value: CamelCase } + # - { key: readability-identifier-naming.MethodCase, value: CamelCase } + # - { key: readability-identifier-naming.StructCase, value: CamelCase } + # - { key: readability-identifier-naming.VariableCase, value: lower_case } + - { key: readability-identifier-naming.GlobalConstantCase, value: UPPER_CASE } +... 
diff --git a/.gitea/workflows/cmake_build.yml b/.gitea/workflows/cmake_build.yml new file mode 100644 index 0000000..aa7a297 --- /dev/null +++ b/.gitea/workflows/cmake_build.yml @@ -0,0 +1,58 @@ +name: Build the package using cmake then documentation + +on: + workflow_dispatch: + + + +permissions: + contents: read + pages: write + id-token: write + +jobs: + build: + strategy: + fail-fast: false + matrix: + platform: [ubuntu-latest, ] + python-version: ["3.12", ] + + runs-on: ${{ matrix.platform }} + + + defaults: + run: + shell: "bash -l {0}" + + steps: + - uses: actions/checkout@v4 + + - name: Setup dev env + run: | + sudo apt-get update + sudo apt-get -y install cmake gcc g++ + + - name: Get conda + uses: conda-incubator/setup-miniconda@v3 + with: + python-version: ${{ matrix.python-version }} + environment-file: etc/dev-env.yml + miniforge-version: latest + channels: conda-forge + conda-remove-defaults: "true" + + - name: Build library + run: | + mkdir build + cd build + cmake .. -DAARE_SYSTEM_LIBRARIES=ON -DAARE_DOCS=ON + make -j 2 + make docs + + + + + + + diff --git a/.gitea/workflows/rh8-native.yml b/.gitea/workflows/rh8-native.yml new file mode 100644 index 0000000..1c64161 --- /dev/null +++ b/.gitea/workflows/rh8-native.yml @@ -0,0 +1,36 @@ +name: Build on RHEL8 + +on: + push: + workflow_dispatch: + +permissions: + contents: read + +jobs: + build: + runs-on: "ubuntu-latest" + container: + image: gitea.psi.ch/images/rhel8-developer-gitea-actions + steps: + # workaround until actions/checkout@v4 is available for RH8 + # - uses: actions/checkout@v4 + - name: Clone repository + run: | + echo Cloning ${{ github.ref_name }} + git clone https://${{secrets.GITHUB_TOKEN}}@gitea.psi.ch/${{ github.repository }}.git --branch=${{ github.ref_name }} . + + + - name: Install dependencies + run: | + dnf install -y cmake python3.12 python3.12-devel python3.12-pip + + - name: Build library + run: | + mkdir build && cd build + cmake .. 
-DAARE_PYTHON_BINDINGS=ON -DAARE_TESTS=ON -DPython_FIND_VIRTUALENV=FIRST + make -j 2 + + - name: C++ unit tests + working-directory: ${{gitea.workspace}}/build + run: ctest \ No newline at end of file diff --git a/.gitea/workflows/rh9-native.yml b/.gitea/workflows/rh9-native.yml new file mode 100644 index 0000000..5027365 --- /dev/null +++ b/.gitea/workflows/rh9-native.yml @@ -0,0 +1,31 @@ +name: Build on RHEL9 + +on: + push: + workflow_dispatch: + +permissions: + contents: read + +jobs: + build: + runs-on: "ubuntu-latest" + container: + image: gitea.psi.ch/images/rhel9-developer-gitea-actions + steps: + - uses: actions/checkout@v4 + + + - name: Install dependencies + run: | + dnf install -y cmake python3.12 python3.12-devel python3.12-pip + + - name: Build library + run: | + mkdir build && cd build + cmake .. -DAARE_PYTHON_BINDINGS=ON -DAARE_TESTS=ON + make -j 2 + + - name: C++ unit tests + working-directory: ${{gitea.workspace}}/build + run: ctest \ No newline at end of file diff --git a/.github/workflows/deploy.yml b/.github/workflows/build_and_deploy_conda.yml similarity index 75% rename from .github/workflows/deploy.yml rename to .github/workflows/build_and_deploy_conda.yml index 81edde3..8917419 100644 --- a/.github/workflows/deploy.yml +++ b/.github/workflows/build_and_deploy_conda.yml @@ -1,10 +1,9 @@ name: Build pkgs and deploy if on main on: - push: - branches: - - main - - developer + release: + types: + - published jobs: build: @@ -25,16 +24,15 @@ jobs: - uses: actions/checkout@v4 - name: Get conda - uses: conda-incubator/setup-miniconda@v3.0.4 + uses: conda-incubator/setup-miniconda@v3 with: python-version: ${{ matrix.python-version }} + environment-file: etc/dev-env.yml + miniforge-version: latest channels: conda-forge - - - name: Prepare - run: conda install conda-build=24.9 conda-verify pytest anaconda-client + conda-remove-defaults: "true" - name: Enable upload - if: github.ref == 'refs/heads/main' run: conda config --set anaconda_upload yes - 
name: Build diff --git a/.github/workflows/build_conda.yml b/.github/workflows/build_conda.yml new file mode 100644 index 0000000..3bd465e --- /dev/null +++ b/.github/workflows/build_conda.yml @@ -0,0 +1,41 @@ +name: Build pkgs and deploy if on main + +on: + push: + branches: + - developer + +jobs: + build: + strategy: + fail-fast: false + matrix: + platform: [ubuntu-latest, ] # macos-12, windows-2019] + python-version: ["3.12",] + + runs-on: ${{ matrix.platform }} + + # The setup-miniconda action needs this to activate miniconda + defaults: + run: + shell: "bash -l {0}" + + steps: + - uses: actions/checkout@v4 + + - name: Get conda + uses: conda-incubator/setup-miniconda@v3 + with: + python-version: ${{ matrix.python-version }} + environment-file: etc/dev-env.yml + miniforge-version: latest + channels: conda-forge + conda-remove-defaults: "true" + + + - name: Disable upload + run: conda config --set anaconda_upload no + + - name: Build + run: conda build conda-recipe + diff --git a/.github/workflows/build_docs.yml b/.github/workflows/build_docs.yml index 959ab70..24050a3 100644 --- a/.github/workflows/build_docs.yml +++ b/.github/workflows/build_docs.yml @@ -5,7 +5,6 @@ on: push: - permissions: contents: read pages: write @@ -16,12 +15,11 @@ jobs: strategy: fail-fast: false matrix: - platform: [ubuntu-latest, ] # macos-12, windows-2019] + platform: [ubuntu-latest, ] python-version: ["3.12",] runs-on: ${{ matrix.platform }} - # The setup-miniconda action needs this to activate miniconda defaults: run: shell: "bash -l {0}" @@ -30,13 +28,13 @@ jobs: - uses: actions/checkout@v4 - name: Get conda - uses: conda-incubator/setup-miniconda@v3.0.4 + uses: conda-incubator/setup-miniconda@v3 with: python-version: ${{ matrix.python-version }} + environment-file: etc/dev-env.yml + miniforge-version: latest channels: conda-forge - - - name: Prepare - run: conda install doxygen sphinx=7.1.2 breathe pybind11 sphinx_rtd_theme furo nlohmann_json zeromq fmt numpy + 
conda-remove-defaults: "true" - name: Build library run: | diff --git a/.github/workflows/build_wheel.yml b/.github/workflows/build_wheel.yml new file mode 100644 index 0000000..f131e77 --- /dev/null +++ b/.github/workflows/build_wheel.yml @@ -0,0 +1,64 @@ +name: Build wheel + +on: + workflow_dispatch: + pull_request: + push: + branches: + - main + release: + types: + - published + + +jobs: + build_wheels: + name: Build wheels on ${{ matrix.os }} + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest,] + + steps: + - uses: actions/checkout@v4 + + - name: Build wheels + run: pipx run cibuildwheel==2.23.0 + + - uses: actions/upload-artifact@v4 + with: + name: cibw-wheels-${{ matrix.os }}-${{ strategy.job-index }} + path: ./wheelhouse/*.whl + + build_sdist: + name: Build source distribution + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Build sdist + run: pipx run build --sdist + + - uses: actions/upload-artifact@v4 + with: + name: cibw-sdist + path: dist/*.tar.gz + + upload_pypi: + needs: [build_wheels, build_sdist] + runs-on: ubuntu-latest + environment: pypi + permissions: + id-token: write + if: github.event_name == 'release' && github.event.action == 'published' + # or, alternatively, upload to PyPI on every tag starting with 'v' (remove on: release above to use this) + # if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v') + steps: + - uses: actions/download-artifact@v4 + with: + # unpacks all CIBW artifacts into dist/ + pattern: cibw-* + path: dist + merge-multiple: true + + - uses: pypa/gh-action-pypi-publish@release/v1 diff --git a/.gitignore b/.gitignore index af3e3b7..5982f7f 100644 --- a/.gitignore +++ b/.gitignore @@ -17,7 +17,8 @@ Testing/ ctbDict.cpp ctbDict.h - +wheelhouse/ +dist/ *.pyc */__pycache__/* diff --git a/CMakeLists.txt b/CMakeLists.txt index 67aaba9..09d7620 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,16 +1,29 @@ -cmake_minimum_required(VERSION 3.14) 
+cmake_minimum_required(VERSION 3.15) project(aare - VERSION 1.0.0 DESCRIPTION "Data processing library for PSI detectors" HOMEPAGE_URL "https://github.com/slsdetectorgroup/aare" LANGUAGES C CXX ) +# Read VERSION file into project version +set(VERSION_FILE "${CMAKE_CURRENT_SOURCE_DIR}/VERSION") +file(READ "${VERSION_FILE}" VERSION_CONTENT) +string(STRIP "${VERSION_CONTENT}" PROJECT_VERSION_STRING) +set(PROJECT_VERSION ${PROJECT_VERSION_STRING}) + set(CMAKE_CXX_STANDARD 17) set(CMAKE_CXX_STANDARD_REQUIRED ON) set(CMAKE_CXX_EXTENSIONS OFF) +execute_process( + COMMAND git log -1 --format=%h + WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} + OUTPUT_VARIABLE GIT_HASH + OUTPUT_STRIP_TRAILING_WHITESPACE + ) +message(STATUS "Building from git hash: ${GIT_HASH}") + if (${CMAKE_VERSION} VERSION_GREATER "3.24") cmake_policy(SET CMP0135 NEW) #Fetch content download timestamp endif() @@ -31,7 +44,7 @@ set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake" ${CMAKE_MODULE_PATH}) # General options -option(AARE_PYTHON_BINDINGS "Build python bindings" ON) +option(AARE_PYTHON_BINDINGS "Build python bindings" OFF) option(AARE_TESTS "Build tests" OFF) option(AARE_BENCHMARKS "Build benchmarks" OFF) option(AARE_EXAMPLES "Build examples" OFF) @@ -41,7 +54,7 @@ option(AARE_VERBOSE "Verbose output" OFF) option(AARE_CUSTOM_ASSERT "Use custom assert" OFF) option(AARE_INSTALL_PYTHONEXT "Install the python extension in the install tree under CMAKE_INSTALL_PREFIX/aare/" OFF) option(AARE_HDF5 "Hdf5 File Format" OFF) - +option(AARE_ASAN "Enable AddressSanitizer" OFF) # Configure which of the dependencies to use FetchContent for option(AARE_FETCH_FMT "Use FetchContent to download fmt" ON) @@ -49,6 +62,7 @@ option(AARE_FETCH_PYBIND11 "Use FetchContent to download pybind11" ON) option(AARE_FETCH_CATCH "Use FetchContent to download catch2" ON) option(AARE_FETCH_JSON "Use FetchContent to download nlohmann::json" ON) option(AARE_FETCH_ZMQ "Use FetchContent to download libzmq" ON) 
+option(AARE_FETCH_LMFIT "Use FetchContent to download lmfit" ON) #Convenience option to use system libraries only (no FetchContent) @@ -60,10 +74,15 @@ if(AARE_SYSTEM_LIBRARIES) set(AARE_FETCH_CATCH OFF CACHE BOOL "Disabled FetchContent for catch2" FORCE) set(AARE_FETCH_JSON OFF CACHE BOOL "Disabled FetchContent for nlohmann::json" FORCE) set(AARE_FETCH_ZMQ OFF CACHE BOOL "Disabled FetchContent for libzmq" FORCE) + # Still fetch lmfit when setting AARE_SYSTEM_LIBRARIES since this is not available + # on conda-forge endif() if(AARE_VERBOSE) add_compile_definitions(AARE_VERBOSE) + add_compile_definitions(AARE_LOG_LEVEL=aare::logDEBUG5) +else() + add_compile_definitions(AARE_LOG_LEVEL=aare::logERROR) endif() if(AARE_CUSTOM_ASSERT) @@ -75,18 +94,70 @@ if(AARE_BENCHMARKS) endif() + set(CMAKE_EXPORT_COMPILE_COMMANDS ON) +if(AARE_FETCH_LMFIT) + #TODO! Should we fetch lmfit from the web or inlcude a tar.gz in the repo? + set(LMFIT_PATCH_COMMAND git apply ${CMAKE_CURRENT_SOURCE_DIR}/patches/lmfit.patch) + + # For cmake < 3.28 we can't supply EXCLUDE_FROM_ALL to FetchContent_Declare + # so we need this workaround + if (${CMAKE_VERSION} VERSION_LESS "3.28") + FetchContent_Declare( + lmfit + GIT_REPOSITORY https://jugit.fz-juelich.de/mlz/lmfit.git + GIT_TAG main + PATCH_COMMAND ${LMFIT_PATCH_COMMAND} + UPDATE_DISCONNECTED 1 + ) + else() + FetchContent_Declare( + lmfit + GIT_REPOSITORY https://jugit.fz-juelich.de/mlz/lmfit.git + GIT_TAG main + PATCH_COMMAND ${LMFIT_PATCH_COMMAND} + UPDATE_DISCONNECTED 1 + EXCLUDE_FROM_ALL 1 + ) + endif() + + + #Disable what we don't need from lmfit + set(BUILD_TESTING OFF CACHE BOOL "") + set(LMFIT_CPPTEST OFF CACHE BOOL "") + set(LIB_MAN OFF CACHE BOOL "") + set(LMFIT_CPPTEST OFF CACHE BOOL "") + set(BUILD_SHARED_LIBS OFF CACHE BOOL "") + + if (${CMAKE_VERSION} VERSION_LESS "3.28") + if(NOT lmfit_POPULATED) + FetchContent_Populate(lmfit) + add_subdirectory(${lmfit_SOURCE_DIR} ${lmfit_BINARY_DIR} EXCLUDE_FROM_ALL) + endif() + else() + 
FetchContent_MakeAvailable(lmfit) + endif() + + set_property(TARGET lmfit PROPERTY POSITION_INDEPENDENT_CODE ON) +else() + find_package(lmfit REQUIRED) +endif() + + if(AARE_FETCH_ZMQ) # Fetchcontent_Declare is deprecated need to find a way to update this # for now setting the policy to old is enough if (${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.30") cmake_policy(SET CMP0169 OLD) endif() + set(ZMQ_PATCH_COMMAND git apply ${CMAKE_CURRENT_SOURCE_DIR}/patches/libzmq_cmake_version.patch) FetchContent_Declare( libzmq GIT_REPOSITORY https://github.com/zeromq/libzmq.git GIT_TAG v4.3.4 + PATCH_COMMAND ${ZMQ_PATCH_COMMAND} + UPDATE_DISCONNECTED 1 ) # Disable unwanted options from libzmq set(BUILD_TESTS OFF CACHE BOOL "Switch off libzmq test build") @@ -128,8 +199,8 @@ if (AARE_FETCH_FMT) LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} - INCLUDES DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} -) + INCLUDES DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} + ) else() find_package(fmt 6 REQUIRED) endif() @@ -147,7 +218,6 @@ if (AARE_FETCH_JSON) install( TARGETS nlohmann_json EXPORT "${TARGETS_EXPORT_NAME}" - ) message(STATUS "target: ${NLOHMANN_JSON_TARGET_NAME}") else() @@ -226,13 +296,6 @@ if(CMAKE_BUILD_TYPE STREQUAL "Release") target_compile_options(aare_compiler_flags INTERFACE -O3) else() message(STATUS "Debug build") - target_compile_options( - aare_compiler_flags - INTERFACE - -Og - -ggdb3 - ) - endif() # Common flags for GCC and Clang @@ -257,7 +320,21 @@ target_compile_options( endif() #GCC/Clang specific - +if(AARE_ASAN) + message(STATUS "AddressSanitizer enabled") + target_compile_options( + aare_compiler_flags + INTERFACE + -fsanitize=address,undefined,pointer-compare + -fno-omit-frame-pointer + ) + target_link_libraries( + aare_compiler_flags + INTERFACE + -fsanitize=address,undefined,pointer-compare + -fno-omit-frame-pointer + ) +endif() @@ -273,14 +350,23 @@ endif() set(PUBLICHEADERS 
include/aare/ArrayExpr.hpp + include/aare/CalculateEta.hpp + include/aare/Cluster.hpp include/aare/ClusterFinder.hpp include/aare/ClusterFile.hpp include/aare/CtbRawFile.hpp + include/aare/ClusterVector.hpp + include/aare/decode.hpp include/aare/defs.hpp include/aare/Dtype.hpp include/aare/File.hpp + include/aare/Fit.hpp include/aare/FileInterface.hpp + include/aare/FilePtr.hpp include/aare/Frame.hpp + include/aare/GainMap.hpp + include/aare/geo_helpers.hpp + include/aare/JungfrauDataFile.hpp include/aare/NDArray.hpp include/aare/NDView.hpp include/aare/NumpyFile.hpp @@ -291,23 +377,30 @@ set(PUBLICHEADERS include/aare/RawMasterFile.hpp include/aare/RawSubFile.hpp include/aare/VarClusterFinder.hpp - + include/aare/utils/task.hpp ) set(SourceFiles ${CMAKE_CURRENT_SOURCE_DIR}/src/CtbRawFile.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/defs.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Dtype.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/decode.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Frame.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/File.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/FilePtr.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/Fit.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/geo_helpers.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/JungfrauDataFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyHelpers.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/Interpolator.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/PixelMap.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/RawFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/RawSubFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/RawMasterFile.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/utils/task.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/utils/ifstream_helpers.cpp ) # HDF5 @@ -329,9 +422,12 @@ endif (AARE_HDF5) add_library(aare_core STATIC ${SourceFiles}) target_include_directories(aare_core PUBLIC "$" - "$" + "$" ) +set(THREADS_PREFER_PTHREAD_FLAG ON) +find_package(Threads REQUIRED) + target_link_libraries( aare_core PUBLIC @@ -340,6 +436,9 @@ 
target_link_libraries( ${STD_FS_LIB} # from helpers.cmake PRIVATE aare_compiler_flags + Threads::Threads + $ + ) if (AARE_HDF5 AND HDF5_FOUND) @@ -363,17 +462,29 @@ endif() if(AARE_TESTS) set(TestSources + ${CMAKE_CURRENT_SOURCE_DIR}/src/algorithm.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/defs.test.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/decode.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Dtype.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Frame.test.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/geo_helpers.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/RawMasterFile.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NDArray.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NDView.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFinder.test.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterVector.test.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/Cluster.test.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/CalculateEta.test.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFile.test.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFinderMT.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Pedestal.test.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/JungfrauDataFile.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyHelpers.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/RawFile.test.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/RawSubFile.test.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/utils/task.test.cpp + ) if(HDF5_FOUND) list (APPEND TestSources @@ -457,4 +568,4 @@ if(AARE_MASTER_PROJECT) set(CMAKE_INSTALL_DIR "share/cmake/${PROJECT_NAME}") set(PROJECT_LIBRARIES aare-core aare-compiler-flags ) include(cmake/package_config.cmake) -endif() \ No newline at end of file +endif() diff --git a/VERSION b/VERSION new file mode 100644 index 0000000..ae365e4 --- /dev/null +++ b/VERSION @@ -0,0 +1 @@ +2025.5.22 \ No newline at end of file diff --git a/benchmarks/CMakeLists.txt b/benchmarks/CMakeLists.txt index d083bab..699b4c6 100644 --- a/benchmarks/CMakeLists.txt +++ b/benchmarks/CMakeLists.txt @@ -1,11 +1,27 @@ -find_package(benchmark 
REQUIRED) -add_executable(ndarray_benchmark ndarray_benchmark.cpp) +include(FetchContent) -target_link_libraries(ndarray_benchmark benchmark::benchmark aare_core aare_compiler_flags) -# target_link_libraries(tests PRIVATE aare_core aare_compiler_flags) -set_target_properties(ndarray_benchmark PROPERTIES - RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR} - # OUTPUT_NAME run_tests +FetchContent_Declare( + benchmark + GIT_REPOSITORY https://github.com/google/benchmark.git + GIT_TAG v1.8.3 # Change to the latest version if needed +) + +# Ensure Google Benchmark is built correctly +set(BENCHMARK_ENABLE_TESTING OFF CACHE BOOL "" FORCE) + +FetchContent_MakeAvailable(benchmark) + +add_executable(benchmarks) + +target_sources(benchmarks PRIVATE ndarray_benchmark.cpp calculateeta_benchmark.cpp) + +# Link Google Benchmark and other necessary libraries +target_link_libraries(benchmarks PRIVATE benchmark::benchmark aare_core aare_compiler_flags) + +# Set output properties +set_target_properties(benchmarks PROPERTIES + RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR} + OUTPUT_NAME run_benchmarks ) \ No newline at end of file diff --git a/benchmarks/calculateeta_benchmark.cpp b/benchmarks/calculateeta_benchmark.cpp new file mode 100644 index 0000000..a320188 --- /dev/null +++ b/benchmarks/calculateeta_benchmark.cpp @@ -0,0 +1,70 @@ +#include "aare/CalculateEta.hpp" +#include "aare/ClusterFile.hpp" +#include + +using namespace aare; + +class ClusterFixture : public benchmark::Fixture { + public: + Cluster cluster_2x2{}; + Cluster cluster_3x3{}; + + private: + using benchmark::Fixture::SetUp; + + void SetUp([[maybe_unused]] const benchmark::State &state) override { + int temp_data[4] = {1, 2, 3, 1}; + std::copy(std::begin(temp_data), std::end(temp_data), + std::begin(cluster_2x2.data)); + + cluster_2x2.x = 0; + cluster_2x2.y = 0; + + int temp_data2[9] = {1, 2, 3, 1, 3, 4, 5, 1, 20}; + std::copy(std::begin(temp_data2), std::end(temp_data2), + std::begin(cluster_3x3.data)); + + cluster_3x3.x 
= 0; + cluster_3x3.y = 0; + } + + // void TearDown(::benchmark::State& state) { + // } +}; + +BENCHMARK_F(ClusterFixture, Calculate2x2Eta)(benchmark::State &st) { + for (auto _ : st) { + // This code gets timed + Eta2 eta = calculate_eta2(cluster_2x2); + benchmark::DoNotOptimize(eta); + } +} + +// almost takes double the time +BENCHMARK_F(ClusterFixture, + CalculateGeneralEtaFor2x2Cluster)(benchmark::State &st) { + for (auto _ : st) { + // This code gets timed + Eta2 eta = calculate_eta2(cluster_2x2); + benchmark::DoNotOptimize(eta); + } +} + +BENCHMARK_F(ClusterFixture, Calculate3x3Eta)(benchmark::State &st) { + for (auto _ : st) { + // This code gets timed + Eta2 eta = calculate_eta2(cluster_3x3); + benchmark::DoNotOptimize(eta); + } +} + +// almost takes double the time +BENCHMARK_F(ClusterFixture, + CalculateGeneralEtaFor3x3Cluster)(benchmark::State &st) { + for (auto _ : st) { + // This code gets timed + Eta2 eta = calculate_eta2(cluster_3x3); + benchmark::DoNotOptimize(eta); + } +} +// BENCHMARK_MAIN(); \ No newline at end of file diff --git a/conda-recipe/conda_build_config.yaml b/conda-recipe/conda_build_config.yaml index 36a7465..6d3d479 100644 --- a/conda-recipe/conda_build_config.yaml +++ b/conda-recipe/conda_build_config.yaml @@ -1,28 +1,5 @@ python: - 3.11 - - 3.11 - - 3.11 - - 3.12 - - 3.12 - 3.12 - 3.13 - - -numpy: - - 1.26 - - 2.0 - - 2.1 - - 1.26 - - 2.0 - - 2.1 - - 2.1 - - -zip_keys: - - python - - numpy - -pin_run_as_build: - numpy: x.x - python: x.x \ No newline at end of file diff --git a/conda-recipe/meta.yaml b/conda-recipe/meta.yaml index 86fc9a8..8fea745 100644 --- a/conda-recipe/meta.yaml +++ b/conda-recipe/meta.yaml @@ -1,7 +1,10 @@ +source: + path: ../ + +{% set version = load_file_regex(load_file = 'VERSION', regex_pattern = '(\d+(?:\.\d+)*(?:[\+\w\.]+))').group(1) %} package: name: aare - version: 2024.11.28.dev0 #TODO! how to not duplicate this? - + version: {{version}} source: path: .. 
@@ -9,44 +12,39 @@ source: build: number: 0 script: - - unset CMAKE_GENERATOR && {{ PYTHON }} -m pip install . -vv # [not win] - - {{ PYTHON }} -m pip install . -vv # [win] + - unset CMAKE_GENERATOR && {{ PYTHON }} -m pip install . -vv requirements: build: - - python {{python}} - - numpy {{ numpy }} - {{ compiler('cxx') }} - - - host: - cmake - ninja - - python {{python}} - - numpy {{ numpy }} + + host: + - python - pip + - numpy=2.1 - scikit-build-core - pybind11 >=2.13.0 - - fmt - - zeromq - - nlohmann_json - - catch2 + - matplotlib # needed in host to solve the environment for run run: - - python {{python}} - - numpy {{ numpy }} + - python + - {{ pin_compatible('numpy') }} + - matplotlib + test: imports: - aare - # requires: - # - pytest - # source_files: - # - tests - # commands: - # - pytest tests + requires: + - pytest + - boost-histogram + source_files: + - python/tests + commands: + - python -m pytest python/tests about: - summary: An example project built with pybind11 and scikit-build. 
- # license_file: LICENSE \ No newline at end of file + summary: Data analysis library for hybrid pixel detectors from PSI diff --git a/docs/CMakeLists.txt b/docs/CMakeLists.txt index 4b99470..3b4442a 100644 --- a/docs/CMakeLists.txt +++ b/docs/CMakeLists.txt @@ -12,33 +12,6 @@ set(SPHINX_BUILD ${CMAKE_CURRENT_BINARY_DIR}) file(GLOB SPHINX_SOURCE_FILES CONFIGURE_DEPENDS "src/*.rst") -# set(SPHINX_SOURCE_FILES -# src/index.rst -# src/Installation.rst -# src/Requirements.rst -# src/NDArray.rst -# src/NDView.rst -# src/File.rst -# src/Frame.rst -# src/Dtype.rst -# src/ClusterFinder.rst -# src/ClusterFile.rst -# src/Pedestal.rst -# src/RawFile.rst -# src/RawSubFile.rst -# src/RawMasterFile.rst -# src/Hdf5File.rst -# src/Hdf5SubFile.rst -# src/Hdf5MasterFile.rst -# src/VarClusterFinder.rst -# src/pyVarClusterFinder.rst -# src/pyFile.rst -# src/pyCtbRawFile.rst -# src/pyRawFile.rst -# src/pyRawMasterFile.rst -# src/pyHdf5File.rst -# src/pyHdf5MasterFile.rst -# ) foreach(filename ${SPHINX_SOURCE_FILES}) diff --git a/docs/conf.py.in b/docs/conf.py.in index 3702330..ad73575 100644 --- a/docs/conf.py.in +++ b/docs/conf.py.in @@ -29,7 +29,6 @@ version = '@PROJECT_VERSION@' # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = ['breathe', - 'sphinx_rtd_theme', 'sphinx.ext.autodoc', 'sphinx.ext.napoleon', ] diff --git a/docs/src/ClusterFinderMT.rst b/docs/src/ClusterFinderMT.rst new file mode 100644 index 0000000..b15eb8b --- /dev/null +++ b/docs/src/ClusterFinderMT.rst @@ -0,0 +1,7 @@ +ClusterFinderMT +================== + + +.. doxygenclass:: aare::ClusterFinderMT + :members: + :undoc-members: \ No newline at end of file diff --git a/docs/src/ClusterVector.rst b/docs/src/ClusterVector.rst new file mode 100644 index 0000000..bb2a0d8 --- /dev/null +++ b/docs/src/ClusterVector.rst @@ -0,0 +1,6 @@ +ClusterVector +============= + +.. 
doxygenclass:: aare::ClusterVector + :members: + :undoc-members: \ No newline at end of file diff --git a/docs/src/JungfrauDataFile.rst b/docs/src/JungfrauDataFile.rst new file mode 100644 index 0000000..78d473f --- /dev/null +++ b/docs/src/JungfrauDataFile.rst @@ -0,0 +1,25 @@ +JungfrauDataFile +================== + +JungfrauDataFile is a class to read the .dat files that are produced by Aldo's receiver. +It is mostly used for calibration. + +The structure of the file is: + +* JungfrauDataHeader +* Binary data (256x256, 256x1024 or 512x1024) +* JungfrauDataHeader +* ... + +There is no metadata indicating number of frames or the size of the image, but this +will be infered by this reader. + +.. doxygenstruct:: aare::JungfrauDataHeader + :members: + :undoc-members: + :private-members: + +.. doxygenclass:: aare::JungfrauDataFile + :members: + :undoc-members: + :private-members: \ No newline at end of file diff --git a/docs/src/Tests.rst b/docs/src/Tests.rst new file mode 100644 index 0000000..da98001 --- /dev/null +++ b/docs/src/Tests.rst @@ -0,0 +1,47 @@ +**************** +Tests +**************** + +We test the code both from the C++ and Python API. By default only tests that does not require image data is run. + +C++ +~~~~~~~~~~~~~~~~~~ + +.. code-block:: bash + + mkdir build + cd build + cmake .. -DAARE_TESTS=ON + make -j 4 + + export AARE_TEST_DATA=/path/to/test/data + ./run_test [.files] #or using ctest, [.files] is the option to include tests needing data + + + +Python +~~~~~~~~~~~~~~~~~~ + +.. code-block:: bash + + #From the root dir of the library + python -m pytest python/tests --files # passing --files will run the tests needing data + + + +Getting the test data +~~~~~~~~~~~~~~~~~~~~~~~~ + +.. attention :: + + The tests needing the test data are not run by default. To make the data available, you need to set the environment variable + AARE_TEST_DATA to the path of the test data directory. 
Then pass either [.files] for the C++ tests or --files for Python + +The image files needed for the test are large and are not included in the repository. They are stored +using GIT LFS in a separate repository. To get the test data, you need to clone the repository. +To do this, you need to have GIT LFS installed. You can find instructions on how to install it here: https://git-lfs.github.com/ +Once you have GIT LFS installed, you can clone the repository like any normal repo using: + +.. code-block:: bash + + git clone https://gitea.psi.ch/detectors/aare-test-data.git diff --git a/docs/src/algorithm.rst b/docs/src/algorithm.rst new file mode 100644 index 0000000..9b11857 --- /dev/null +++ b/docs/src/algorithm.rst @@ -0,0 +1,5 @@ +algorithm +============= + +.. doxygenfile:: algorithm.hpp + diff --git a/docs/src/index.rst b/docs/src/index.rst index c9dce62..4fdf760 100644 --- a/docs/src/index.rst +++ b/docs/src/index.rst @@ -20,9 +20,6 @@ AARE Requirements Consume - - - .. toctree:: :caption: Python API :maxdepth: 1 @@ -30,24 +27,32 @@ AARE pyFile pyCtbRawFile pyClusterFile + pyClusterVector + pyJungfrauDataFile pyRawFile pyRawMasterFile pyHdf5File pyHdf5MasterFile pyVarClusterFinder + pyFit + .. toctree:: :caption: C++ API :maxdepth: 1 + algorithm NDArray NDView Frame File Dtype ClusterFinder + ClusterFinderMT ClusterFile + ClusterVector + JungfrauDataFile Pedestal RawFile RawSubFile @@ -58,4 +63,8 @@ AARE - +.. toctree:: + :caption: Developer + :maxdepth: 3 + + Tests \ No newline at end of file diff --git a/docs/src/pyClusterVector.rst b/docs/src/pyClusterVector.rst new file mode 100644 index 0000000..4277920 --- /dev/null +++ b/docs/src/pyClusterVector.rst @@ -0,0 +1,33 @@ +ClusterVector +================ + +The ClusterVector, holds clusters from the ClusterFinder. Since it is templated +in C++ we use a suffix indicating the data type in python. The suffix is +``_i`` for integer, ``_f`` for float, and ``_d`` for double. 
+ +At the moment the functionality from python is limited and it is not supported +to push_back clusters to the vector. The intended use case is to pass it to +C++ functions that support the ClusterVector or to view it as a numpy array. + +**View ClusterVector as numpy array** + +.. code:: python + + from aare import ClusterFile + with ClusterFile("path/to/file") as f: + cluster_vector = f.read_frame() + + # Create a copy of the cluster data in a numpy array + clusters = np.array(cluster_vector) + + # Avoid copying the data by passing copy=False + clusters = np.array(cluster_vector, copy = False) + + +.. py:currentmodule:: aare + +.. autoclass:: ClusterVector_i + :members: + :undoc-members: + :show-inheritance: + :inherited-members: \ No newline at end of file diff --git a/docs/src/pyFit.rst b/docs/src/pyFit.rst new file mode 100644 index 0000000..abaa3cf --- /dev/null +++ b/docs/src/pyFit.rst @@ -0,0 +1,19 @@ + +Fit +======== + +.. py:currentmodule:: aare + + +**Functions** + +.. autofunction:: gaus + +.. autofunction:: pol1 + + +**Fitting** + +.. autofunction:: fit_gaus + +.. autofunction:: fit_pol1 \ No newline at end of file diff --git a/docs/src/pyJungfrauDataFile.rst b/docs/src/pyJungfrauDataFile.rst new file mode 100644 index 0000000..2173adf --- /dev/null +++ b/docs/src/pyJungfrauDataFile.rst @@ -0,0 +1,10 @@ +JungfrauDataFile +=================== + +.. py:currentmodule:: aare + +.. 
autoclass:: JungfrauDataFile + :members: + :undoc-members: + :show-inheritance: + :inherited-members: \ No newline at end of file diff --git a/etc/dev-env.yml b/etc/dev-env.yml new file mode 100644 index 0000000..e580c81 --- /dev/null +++ b/etc/dev-env.yml @@ -0,0 +1,13 @@ +name: dev-environment +channels: + - conda-forge +dependencies: + - anaconda-client + - conda-build + - doxygen + - sphinx=7.1.2 + - breathe + - sphinx_rtd_theme + - furo + - zeromq + diff --git a/include/aare/ArrayExpr.hpp b/include/aare/ArrayExpr.hpp index 7f8015c..d326601 100644 --- a/include/aare/ArrayExpr.hpp +++ b/include/aare/ArrayExpr.hpp @@ -1,22 +1,24 @@ #pragma once -#include //int64_t -#include //size_t +#include +#include #include - #include +#include "aare/defs.hpp" + + namespace aare { -template class ArrayExpr { +template class ArrayExpr { public: static constexpr bool is_leaf = false; auto operator[](size_t i) const { return static_cast(*this)[i]; } auto operator()(size_t i) const { return static_cast(*this)[i]; } auto size() const { return static_cast(*this).size(); } - std::array shape() const { return static_cast(*this).shape(); } + std::array shape() const { return static_cast(*this).shape(); } }; -template +template class ArrayAdd : public ArrayExpr, Ndim> { const A &arr1_; const B &arr2_; @@ -27,10 +29,10 @@ class ArrayAdd : public ArrayExpr, Ndim> { } auto operator[](int i) const { return arr1_[i] + arr2_[i]; } size_t size() const { return arr1_.size(); } - std::array shape() const { return arr1_.shape(); } + std::array shape() const { return arr1_.shape(); } }; -template +template class ArraySub : public ArrayExpr, Ndim> { const A &arr1_; const B &arr2_; @@ -41,10 +43,10 @@ class ArraySub : public ArrayExpr, Ndim> { } auto operator[](int i) const { return arr1_[i] - arr2_[i]; } size_t size() const { return arr1_.size(); } - std::array shape() const { return arr1_.shape(); } + std::array shape() const { return arr1_.shape(); } }; -template +template class ArrayMul : 
public ArrayExpr,Ndim> { const A &arr1_; const B &arr2_; @@ -55,10 +57,10 @@ class ArrayMul : public ArrayExpr,Ndim> { } auto operator[](int i) const { return arr1_[i] * arr2_[i]; } size_t size() const { return arr1_.size(); } - std::array shape() const { return arr1_.shape(); } + std::array shape() const { return arr1_.shape(); } }; -template +template class ArrayDiv : public ArrayExpr, Ndim> { const A &arr1_; const B &arr2_; @@ -69,27 +71,27 @@ class ArrayDiv : public ArrayExpr, Ndim> { } auto operator[](int i) const { return arr1_[i] / arr2_[i]; } size_t size() const { return arr1_.size(); } - std::array shape() const { return arr1_.shape(); } + std::array shape() const { return arr1_.shape(); } }; -template +template auto operator+(const ArrayExpr &arr1, const ArrayExpr &arr2) { return ArrayAdd, ArrayExpr, Ndim>(arr1, arr2); } -template +template auto operator-(const ArrayExpr &arr1, const ArrayExpr &arr2) { return ArraySub, ArrayExpr, Ndim>(arr1, arr2); } -template +template auto operator*(const ArrayExpr &arr1, const ArrayExpr &arr2) { return ArrayMul, ArrayExpr, Ndim>(arr1, arr2); } -template +template auto operator/(const ArrayExpr &arr1, const ArrayExpr &arr2) { return ArrayDiv, ArrayExpr, Ndim>(arr1, arr2); } diff --git a/include/aare/CalculateEta.hpp b/include/aare/CalculateEta.hpp new file mode 100644 index 0000000..db17dad --- /dev/null +++ b/include/aare/CalculateEta.hpp @@ -0,0 +1,170 @@ +#pragma once + +#include "aare/Cluster.hpp" +#include "aare/ClusterVector.hpp" +#include "aare/NDArray.hpp" + +namespace aare { + +enum class corner : int { + cBottomLeft = 0, + cBottomRight = 1, + cTopLeft = 2, + cTopRight = 3 +}; + +enum class pixel : int { + pBottomLeft = 0, + pBottom = 1, + pBottomRight = 2, + pLeft = 3, + pCenter = 4, + pRight = 5, + pTopLeft = 6, + pTop = 7, + pTopRight = 8 +}; + +template struct Eta2 { + double x; + double y; + int c; + T sum; +}; + +/** + * @brief Calculate the eta2 values for all clusters in a Clustervector + */ +template 
>> +NDArray calculate_eta2(const ClusterVector &clusters) { + NDArray eta2({static_cast(clusters.size()), 2}); + + for (size_t i = 0; i < clusters.size(); i++) { + auto e = calculate_eta2(clusters[i]); + eta2(i, 0) = e.x; + eta2(i, 1) = e.y; + } + + return eta2; +} + +/** + * @brief Calculate the eta2 values for a generic sized cluster and return them + * in a Eta2 struct containing etay, etax and the index of the respective 2x2 + * subcluster. + */ +template +Eta2 +calculate_eta2(const Cluster &cl) { + Eta2 eta{}; + + auto max_sum = cl.max_sum_2x2(); + eta.sum = max_sum.first; + auto c = max_sum.second; + + size_t cluster_center_index = + (ClusterSizeX / 2) + (ClusterSizeY / 2) * ClusterSizeX; + + size_t index_bottom_left_max_2x2_subcluster = + (int(c / (ClusterSizeX - 1))) * ClusterSizeX + c % (ClusterSizeX - 1); + + // check that cluster center is in max subcluster + if (cluster_center_index != index_bottom_left_max_2x2_subcluster && + cluster_center_index != index_bottom_left_max_2x2_subcluster + 1 && + cluster_center_index != + index_bottom_left_max_2x2_subcluster + ClusterSizeX && + cluster_center_index != + index_bottom_left_max_2x2_subcluster + ClusterSizeX + 1) + throw std::runtime_error("Photon center is not in max 2x2_subcluster"); + + if ((cluster_center_index - index_bottom_left_max_2x2_subcluster) % + ClusterSizeX == + 0) { + if ((cl.data[cluster_center_index + 1] + + cl.data[cluster_center_index]) != 0) + + eta.x = static_cast(cl.data[cluster_center_index + 1]) / + static_cast((cl.data[cluster_center_index + 1] + + cl.data[cluster_center_index])); + } else { + if ((cl.data[cluster_center_index] + + cl.data[cluster_center_index - 1]) != 0) + + eta.x = static_cast(cl.data[cluster_center_index]) / + static_cast((cl.data[cluster_center_index - 1] + + cl.data[cluster_center_index])); + } + if ((cluster_center_index - index_bottom_left_max_2x2_subcluster) / + ClusterSizeX < + 1) { + assert(cluster_center_index + ClusterSizeX < + ClusterSizeX * 
ClusterSizeY); // suppress warning + if ((cl.data[cluster_center_index] + + cl.data[cluster_center_index + ClusterSizeX]) != 0) + eta.y = static_cast( + cl.data[cluster_center_index + ClusterSizeX]) / + static_cast( + (cl.data[cluster_center_index] + + cl.data[cluster_center_index + ClusterSizeX])); + } else { + if ((cl.data[cluster_center_index] + + cl.data[cluster_center_index - ClusterSizeX]) != 0) + eta.y = static_cast(cl.data[cluster_center_index]) / + static_cast( + (cl.data[cluster_center_index] + + cl.data[cluster_center_index - ClusterSizeX])); + } + + eta.c = c; // TODO only supported for 2x2 and 3x3 clusters -> at least no + // underyling enum class + return eta; +} + +// TODO! Look up eta2 calculation - photon center should be top right corner +template +Eta2 calculate_eta2(const Cluster &cl) { + Eta2 eta{}; + + if ((cl.data[0] + cl.data[1]) != 0) + eta.x = static_cast(cl.data[1]) / (cl.data[0] + cl.data[1]); + if ((cl.data[0] + cl.data[2]) != 0) + eta.y = static_cast(cl.data[2]) / (cl.data[0] + cl.data[2]); + eta.sum = cl.sum(); + eta.c = static_cast(corner::cBottomLeft); // TODO! 
This is not correct, + // but need to put something + return eta; +} + +// calculates Eta3 for 3x3 cluster based on code from analyze_cluster +// TODO only supported for 3x3 Clusters +template Eta2 calculate_eta3(const Cluster &cl) { + + Eta2 eta{}; + + T sum = 0; + + std::for_each(std::begin(cl.data), std::end(cl.data), + [&sum](T x) { sum += x; }); + + eta.sum = sum; + + eta.c = corner::cBottomLeft; + + if ((cl.data[3] + cl.data[4] + cl.data[5]) != 0) + + eta.x = static_cast(-cl.data[3] + cl.data[3 + 2]) / + + (cl.data[3] + cl.data[4] + cl.data[5]); + + if ((cl.data[1] + cl.data[4] + cl.data[7]) != 0) + + eta.y = static_cast(-cl.data[1] + cl.data[2 * 3 + 1]) / + + (cl.data[1] + cl.data[4] + cl.data[7]); + + return eta; +} + +} // namespace aare \ No newline at end of file diff --git a/include/aare/CircularFifo.hpp b/include/aare/CircularFifo.hpp new file mode 100644 index 0000000..8098082 --- /dev/null +++ b/include/aare/CircularFifo.hpp @@ -0,0 +1,97 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include "aare/ProducerConsumerQueue.hpp" + +namespace aare { + +template class CircularFifo { + uint32_t fifo_size; + aare::ProducerConsumerQueue free_slots; + aare::ProducerConsumerQueue filled_slots; + + public: + CircularFifo() : CircularFifo(100){}; + CircularFifo(uint32_t size) : fifo_size(size), free_slots(size + 1), filled_slots(size + 1) { + + // TODO! how do we deal with alignment for writing? alignas??? + // Do we give the user a chance to provide memory locations? + // Templated allocator? + for (size_t i = 0; i < fifo_size; ++i) { + free_slots.write(ItemType{}); + } + } + + bool next() { + // TODO! 
avoid default constructing ItemType + ItemType it; + if (!filled_slots.read(it)) + return false; + if (!free_slots.write(std::move(it))) + return false; + return true; + } + + ~CircularFifo() {} + + using value_type = ItemType; + + auto numFilledSlots() const noexcept { return filled_slots.sizeGuess(); } + auto numFreeSlots() const noexcept { return free_slots.sizeGuess(); } + auto isFull() const noexcept { return filled_slots.isFull(); } + + ItemType pop_free() { + ItemType v; + while (!free_slots.read(v)) + ; + return std::move(v); + // return v; + } + + bool try_pop_free(ItemType &v) { return free_slots.read(v); } + + ItemType pop_value(std::chrono::nanoseconds wait, std::atomic &stopped) { + ItemType v; + while (!filled_slots.read(v) && !stopped) { + std::this_thread::sleep_for(wait); + } + return std::move(v); + } + + ItemType pop_value() { + ItemType v; + while (!filled_slots.read(v)) + ; + return std::move(v); + } + + ItemType *frontPtr() { return filled_slots.frontPtr(); } + + // TODO! 
Add function to move item from filled to free to be used + // with the frontPtr function + + template void push_value(Args &&...recordArgs) { + while (!filled_slots.write(std::forward(recordArgs)...)) + ; + } + + template bool try_push_value(Args &&...recordArgs) { + return filled_slots.write(std::forward(recordArgs)...); + } + + template void push_free(Args &&...recordArgs) { + while (!free_slots.write(std::forward(recordArgs)...)) + ; + } + + template bool try_push_free(Args &&...recordArgs) { + return free_slots.write(std::forward(recordArgs)...); + } +}; + +} // namespace aare \ No newline at end of file diff --git a/include/aare/Cluster.hpp b/include/aare/Cluster.hpp new file mode 100644 index 0000000..889593b --- /dev/null +++ b/include/aare/Cluster.hpp @@ -0,0 +1,86 @@ + +/************************************************ + * @file Cluster.hpp + * @short definition of cluster, where CoordType (x,y) give + * the cluster center coordinates and data the actual cluster data + * cluster size is given as template parameters + ***********************************************/ + +#pragma once + +#include +#include +#include +#include +#include + +namespace aare { + +// requires clause c++20 maybe update +template +struct Cluster { + + static_assert(std::is_arithmetic_v, "T needs to be an arithmetic type"); + static_assert(std::is_integral_v, + "CoordType needs to be an integral type"); + static_assert(ClusterSizeX > 0 && ClusterSizeY > 0, + "Cluster sizes must be bigger than zero"); + + CoordType x; + CoordType y; + std::array data; + + static constexpr uint8_t cluster_size_x = ClusterSizeX; + static constexpr uint8_t cluster_size_y = ClusterSizeY; + using value_type = T; + using coord_type = CoordType; + + T sum() const { return std::accumulate(data.begin(), data.end(), T{}); } + + std::pair max_sum_2x2() const { + + if constexpr (cluster_size_x == 3 && cluster_size_y == 3) { + std::array sum_2x2_subclusters; + sum_2x2_subclusters[0] = data[0] + data[1] + data[3] + 
data[4]; + sum_2x2_subclusters[1] = data[1] + data[2] + data[4] + data[5]; + sum_2x2_subclusters[2] = data[3] + data[4] + data[6] + data[7]; + sum_2x2_subclusters[3] = data[4] + data[5] + data[7] + data[8]; + int index = std::max_element(sum_2x2_subclusters.begin(), + sum_2x2_subclusters.end()) - + sum_2x2_subclusters.begin(); + return std::make_pair(sum_2x2_subclusters[index], index); + } else if constexpr (cluster_size_x == 2 && cluster_size_y == 2) { + return std::make_pair(data[0] + data[1] + data[2] + data[3], 0); + } else { + constexpr size_t num_2x2_subclusters = + (ClusterSizeX - 1) * (ClusterSizeY - 1); + + std::array sum_2x2_subcluster; + for (size_t i = 0; i < ClusterSizeY - 1; ++i) { + for (size_t j = 0; j < ClusterSizeX - 1; ++j) + sum_2x2_subcluster[i * (ClusterSizeX - 1) + j] = + data[i * ClusterSizeX + j] + + data[i * ClusterSizeX + j + 1] + + data[(i + 1) * ClusterSizeX + j] + + data[(i + 1) * ClusterSizeX + j + 1]; + } + + int index = std::max_element(sum_2x2_subcluster.begin(), + sum_2x2_subcluster.end()) - + sum_2x2_subcluster.begin(); + return std::make_pair(sum_2x2_subcluster[index], index); + } + } +}; + +// Type Traits for is_cluster_type +template +struct is_cluster : std::false_type {}; // Default case: Not a Cluster + +template +struct is_cluster> : std::true_type {}; // Cluster + +template constexpr bool is_cluster_v = is_cluster::value; + +} // namespace aare diff --git a/include/aare/ClusterCollector.hpp b/include/aare/ClusterCollector.hpp new file mode 100644 index 0000000..ae78a8e --- /dev/null +++ b/include/aare/ClusterCollector.hpp @@ -0,0 +1,58 @@ +#pragma once +#include +#include + +#include "aare/ClusterFinderMT.hpp" +#include "aare/ClusterVector.hpp" +#include "aare/ProducerConsumerQueue.hpp" + +namespace aare { + +template >> +class ClusterCollector { + ProducerConsumerQueue> *m_source; + std::atomic m_stop_requested{false}; + std::atomic m_stopped{true}; + std::chrono::milliseconds m_default_wait{1}; + std::thread m_thread; + 
std::vector> m_clusters; + + void process() { + m_stopped = false; + fmt::print("ClusterCollector started\n"); + while (!m_stop_requested || !m_source->isEmpty()) { + if (ClusterVector *clusters = m_source->frontPtr(); + clusters != nullptr) { + m_clusters.push_back(std::move(*clusters)); + m_source->popFront(); + } else { + std::this_thread::sleep_for(m_default_wait); + } + } + fmt::print("ClusterCollector stopped\n"); + m_stopped = true; + } + + public: + ClusterCollector(ClusterFinderMT *source) { + m_source = source->sink(); + m_thread = + std::thread(&ClusterCollector::process, + this); // only one process does that so why isnt it + // automatically written to m_cluster in collect + // - instead of writing first to m_sink? + } + void stop() { + m_stop_requested = true; + m_thread.join(); + } + std::vector> steal_clusters() { + if (!m_stopped) { + throw std::runtime_error("ClusterCollector is still running"); + } + return std::move(m_clusters); + } +}; + +} // namespace aare \ No newline at end of file diff --git a/include/aare/ClusterFile.hpp b/include/aare/ClusterFile.hpp index 2baf0f4..ef78874 100644 --- a/include/aare/ClusterFile.hpp +++ b/include/aare/ClusterFile.hpp @@ -1,67 +1,449 @@ +#pragma once - +#include "aare/Cluster.hpp" +#include "aare/ClusterVector.hpp" +#include "aare/GainMap.hpp" +#include "aare/NDArray.hpp" #include "aare/defs.hpp" #include #include +#include namespace aare { -struct Cluster { - int16_t x; - int16_t y; - int32_t data[9]; -}; - -typedef enum { - cBottomLeft = 0, - cBottomRight = 1, - cTopLeft = 2, - cTopRight = 3 -} corner; - -typedef enum { - pBottomLeft = 0, - pBottom = 1, - pBottomRight = 2, - pLeft = 3, - pCenter = 4, - pRight = 5, - pTopLeft = 6, - pTop = 7, - pTopRight = 8 -} pixel; - -struct ClusterAnalysis { - uint32_t c; - int32_t tot; - double etax; - double etay; -}; - - +/* +Binary cluster file. 
Expects data to be layed out as: +int32_t frame_number +uint32_t number_of_clusters +int16_t x, int16_t y, int32_t data[9] x number_of_clusters +int32_t frame_number +uint32_t number_of_clusters +.... +*/ +// TODO: change to support any type of clusters, e.g. header line with +// clsuter_size_x, cluster_size_y, +/** + * @brief Class to read and write cluster files + * Expects data to be laid out as: + * + * + * int32_t frame_number + * uint32_t number_of_clusters + * int16_t x, int16_t y, int32_t data[9] * number_of_clusters + * int32_t frame_number + * uint32_t number_of_clusters + * etc. + */ +template >> class ClusterFile { FILE *fp{}; - uint32_t m_num_left{}; - size_t m_chunk_size{}; + const std::string m_filename{}; + uint32_t m_num_left{}; /*Number of photons left in frame*/ + size_t m_chunk_size{}; /*Number of clusters to read at a time*/ + std::string m_mode; /*Mode to open the file in*/ + std::optional m_roi; /*Region of interest, will be applied if set*/ + std::optional> + m_noise_map; /*Noise map to cut photons, will be applied if set*/ + std::optional m_gain_map; /*Gain map to apply to the + clusters, will be applied if set*/ public: - ClusterFile(const std::filesystem::path &fname, size_t chunk_size = 1000); - ~ClusterFile(); - std::vector read_clusters(size_t n_clusters); - std::vector read_frame(int32_t &out_fnum); - std::vector - read_cluster_with_cut(size_t n_clusters, double *noise_map, int nx, int ny); + /** + * @brief Construct a new Cluster File object + * @param fname path to the file + * @param chunk_size number of clusters to read at a time when iterating + * over the file + * @param mode mode to open the file in. 
"r" for reading, "w" for writing, + * "a" for appending + * @throws std::runtime_error if the file could not be opened + */ + ClusterFile(const std::filesystem::path &fname, size_t chunk_size = 1000, + const std::string &mode = "r") - int analyze_data(int32_t *data, int32_t *t2, int32_t *t3, char *quad, - double *eta2x, double *eta2y, double *eta3x, double *eta3y); - int analyze_cluster(Cluster cl, int32_t *t2, int32_t *t3, char *quad, - double *eta2x, double *eta2y, double *eta3x, - double *eta3y); + : m_filename(fname.string()), m_chunk_size(chunk_size), m_mode(mode) { + if (mode == "r") { + fp = fopen(m_filename.c_str(), "rb"); + if (!fp) { + throw std::runtime_error("Could not open file for reading: " + + m_filename); + } + } else if (mode == "w") { + fp = fopen(m_filename.c_str(), "wb"); + if (!fp) { + throw std::runtime_error("Could not open file for writing: " + + m_filename); + } + } else if (mode == "a") { + fp = fopen(m_filename.c_str(), "ab"); + if (!fp) { + throw std::runtime_error("Could not open file for appending: " + + m_filename); + } + } else { + throw std::runtime_error("Unsupported mode: " + mode); + } + } + + ~ClusterFile() { close(); } + + /** + * @brief Read n_clusters clusters from the file discarding + * frame numbers. If EOF is reached the returned vector will + * have less than n_clusters clusters + */ + ClusterVector read_clusters(size_t n_clusters) { + if (m_mode != "r") { + throw std::runtime_error("File not opened for reading"); + } + if (m_noise_map || m_roi) { + return read_clusters_with_cut(n_clusters); + } else { + return read_clusters_without_cut(n_clusters); + } + } + + /** + * @brief Read a single frame from the file and return the + * clusters. The cluster vector will have the frame number + * set. 
+ * @throws std::runtime_error if the file is not opened for + * reading or the file pointer not at the beginning of a + * frame + */ + ClusterVector read_frame() { + if (m_mode != "r") { + throw std::runtime_error(LOCATION + "File not opened for reading"); + } + if (m_noise_map || m_roi) { + return read_frame_with_cut(); + } else { + return read_frame_without_cut(); + } + } + + void write_frame(const ClusterVector &clusters) { + if (m_mode != "w" && m_mode != "a") { + throw std::runtime_error("File not opened for writing"); + } + + int32_t frame_number = clusters.frame_number(); + fwrite(&frame_number, sizeof(frame_number), 1, fp); + uint32_t n_clusters = clusters.size(); + fwrite(&n_clusters, sizeof(n_clusters), 1, fp); + fwrite(clusters.data(), clusters.item_size(), clusters.size(), fp); + } + + /** + * @brief Return the chunk size + */ size_t chunk_size() const { return m_chunk_size; } - void close(); + /** + * @brief Set the region of interest to use when reading + * clusters. If set only clusters within the ROI will be + * read. + */ + void set_roi(ROI roi) { m_roi = roi; } + + /** + * @brief Set the noise map to use when reading clusters. If + * set clusters below the noise level will be discarded. + * Selection criteria one of: Central pixel above noise, + * highest 2x2 sum above 2 * noise, total sum above 3 * + * noise. + */ + void set_noise_map(const NDView noise_map) { + m_noise_map = NDArray(noise_map); + } + + /** + * @brief Set the gain map to use when reading clusters. If set the gain map + * will be applied to the clusters that pass ROI and noise_map selection. + * The gain map is expected to be in ADU/energy. + */ + void set_gain_map(const NDView gain_map) { + m_gain_map = InvertedGainMap(gain_map); + } + + void set_gain_map(const InvertedGainMap &gain_map) { + m_gain_map = gain_map; + } + + void set_gain_map(const InvertedGainMap &&gain_map) { + m_gain_map = gain_map; + } + + /** + * @brief Close the file. 
If not closed the file will be + * closed in the destructor + */ + void close() { + if (fp) { + fclose(fp); + fp = nullptr; + } + } + + /** @brief Open the file in specific mode + * + */ + void open(const std::string &mode) { + if (fp) { + close(); + } + + if (mode == "r") { + fp = fopen(m_filename.c_str(), "rb"); + if (!fp) { + throw std::runtime_error("Could not open file for reading: " + + m_filename); + } + m_mode = "r"; + } else if (mode == "w") { + fp = fopen(m_filename.c_str(), "wb"); + if (!fp) { + throw std::runtime_error("Could not open file for writing: " + + m_filename); + } + m_mode = "w"; + } else if (mode == "a") { + fp = fopen(m_filename.c_str(), "ab"); + if (!fp) { + throw std::runtime_error("Could not open file for appending: " + + m_filename); + } + m_mode = "a"; + } else { + throw std::runtime_error("Unsupported mode: " + mode); + } + } + + private: + ClusterVector read_clusters_with_cut(size_t n_clusters); + ClusterVector read_clusters_without_cut(size_t n_clusters); + ClusterVector read_frame_with_cut(); + ClusterVector read_frame_without_cut(); + bool is_selected(ClusterType &cl); + ClusterType read_one_cluster(); }; +template +ClusterVector +ClusterFile::read_clusters_without_cut(size_t n_clusters) { + if (m_mode != "r") { + throw std::runtime_error("File not opened for reading"); + } + + ClusterVector clusters(n_clusters); + clusters.resize(n_clusters); + + int32_t iframe = 0; // frame number needs to be 4 bytes! 
+ size_t nph_read = 0; + uint32_t nn = m_num_left; + uint32_t nph = m_num_left; // number of clusters in frame needs to be 4 + + auto buf = clusters.data(); + // if there are photons left from previous frame read them first + if (nph) { + if (nph > n_clusters) { + // if we have more photons left in the frame then photons to + // read we read directly the requested number + nn = n_clusters; + } else { + nn = nph; + } + nph_read += fread((buf + nph_read), clusters.item_size(), nn, fp); + m_num_left = nph - nn; // write back the number of photons left + } + + if (nph_read < n_clusters) { + // keep on reading frames and photons until reaching n_clusters + while (fread(&iframe, sizeof(iframe), 1, fp)) { + clusters.set_frame_number(iframe); + // read number of clusters in frame + if (fread(&nph, sizeof(nph), 1, fp)) { + if (nph > (n_clusters - nph_read)) + nn = n_clusters - nph_read; + else + nn = nph; + + nph_read += + fread((buf + nph_read), clusters.item_size(), nn, fp); + m_num_left = nph - nn; + } + if (nph_read >= n_clusters) + break; + } + } + + // Resize the vector to the number o f clusters. + // No new allocation, only change bounds. 
+ clusters.resize(nph_read); + if (m_gain_map) + m_gain_map->apply_gain_map(clusters); + return clusters; +} + +template +ClusterVector +ClusterFile::read_clusters_with_cut(size_t n_clusters) { + ClusterVector clusters; + clusters.reserve(n_clusters); + + // if there are photons left from previous frame read them first + if (m_num_left) { + while (m_num_left && clusters.size() < n_clusters) { + ClusterType c = read_one_cluster(); + if (is_selected(c)) { + clusters.push_back(c); + } + } + } + + // we did not have enough clusters left in the previous frame + // keep on reading frames until reaching n_clusters + if (clusters.size() < n_clusters) { + // sanity check + if (m_num_left) { + throw std::runtime_error( + LOCATION + "Entered second loop with clusters left\n"); + } + + int32_t frame_number = 0; // frame number needs to be 4 bytes! + while (fread(&frame_number, sizeof(frame_number), 1, fp)) { + if (fread(&m_num_left, sizeof(m_num_left), 1, fp)) { + clusters.set_frame_number( + frame_number); // cluster vector will hold the last + // frame number + while (m_num_left && clusters.size() < n_clusters) { + ClusterType c = read_one_cluster(); + if (is_selected(c)) { + clusters.push_back(c); + } + } + } + + // we have enough clusters, break out of the outer while loop + if (clusters.size() >= n_clusters) + break; + } + } + if (m_gain_map) + m_gain_map->apply_gain_map(clusters); + + return clusters; +} + +template +ClusterType ClusterFile::read_one_cluster() { + ClusterType c; + auto rc = fread(&c, sizeof(c), 1, fp); + if (rc != 1) { + throw std::runtime_error(LOCATION + "Could not read cluster"); + } + --m_num_left; + return c; +} + +template +ClusterVector +ClusterFile::read_frame_without_cut() { + if (m_mode != "r") { + throw std::runtime_error("File not opened for reading"); + } + if (m_num_left) { + throw std::runtime_error( + "There are still photons left in the last frame"); + } + int32_t frame_number; + if (fread(&frame_number, sizeof(frame_number), 1, fp) != 
1) { + throw std::runtime_error(LOCATION + "Could not read frame number"); + } + + int32_t n_clusters; // Saved as 32bit integer in the cluster file + if (fread(&n_clusters, sizeof(n_clusters), 1, fp) != 1) { + throw std::runtime_error(LOCATION + + "Could not read number of clusters"); + } + + ClusterVector clusters(n_clusters); + clusters.set_frame_number(frame_number); + + clusters.resize(n_clusters); + + if (fread(clusters.data(), clusters.item_size(), n_clusters, fp) != + static_cast(n_clusters)) { + throw std::runtime_error(LOCATION + "Could not read clusters"); + } + + if (m_gain_map) + m_gain_map->apply_gain_map(clusters); + return clusters; +} + +template +ClusterVector +ClusterFile::read_frame_with_cut() { + if (m_mode != "r") { + throw std::runtime_error("File not opened for reading"); + } + if (m_num_left) { + throw std::runtime_error( + "There are still photons left in the last frame"); + } + int32_t frame_number; + if (fread(&frame_number, sizeof(frame_number), 1, fp) != 1) { + throw std::runtime_error("Could not read frame number"); + } + + if (fread(&m_num_left, sizeof(m_num_left), 1, fp) != 1) { + throw std::runtime_error("Could not read number of clusters"); + } + + ClusterVector clusters; + clusters.reserve(m_num_left); + clusters.set_frame_number(frame_number); + while (m_num_left) { + ClusterType c = read_one_cluster(); + if (is_selected(c)) { + clusters.push_back(c); + } + } + if (m_gain_map) + m_gain_map->apply_gain_map(clusters); + return clusters; +} + +template +bool ClusterFile::is_selected(ClusterType &cl) { + // Should fail fast + if (m_roi) { + if (!(m_roi->contains(cl.x, cl.y))) { + return false; + } + } + + size_t cluster_center_index = + (ClusterType::cluster_size_x / 2) + + (ClusterType::cluster_size_y / 2) * ClusterType::cluster_size_x; + + if (m_noise_map) { + auto sum_1x1 = cl.data[cluster_center_index]; // central pixel + auto sum_2x2 = cl.max_sum_2x2().first; // highest sum of 2x2 subclusters + auto total_sum = cl.sum(); // sum 
of all pixels + + auto noise = + (*m_noise_map)(cl.y, cl.x); // TODO! check if this is correct + if (sum_1x1 <= noise || sum_2x2 <= 2 * noise || + total_sum <= 3 * noise) { + return false; + } + } + // we passed all checks + return true; +} + } // namespace aare diff --git a/include/aare/ClusterFileSink.hpp b/include/aare/ClusterFileSink.hpp new file mode 100644 index 0000000..810e63c --- /dev/null +++ b/include/aare/ClusterFileSink.hpp @@ -0,0 +1,62 @@ +#pragma once +#include +#include +#include + +#include "aare/ClusterFinderMT.hpp" +#include "aare/ClusterVector.hpp" +#include "aare/ProducerConsumerQueue.hpp" + +namespace aare { + +template >> +class ClusterFileSink { + ProducerConsumerQueue> *m_source; + std::atomic m_stop_requested{false}; + std::atomic m_stopped{true}; + std::chrono::milliseconds m_default_wait{1}; + std::thread m_thread; + std::ofstream m_file; + + void process() { + m_stopped = false; + fmt::print("ClusterFileSink started\n"); + while (!m_stop_requested || !m_source->isEmpty()) { + if (ClusterVector *clusters = m_source->frontPtr(); + clusters != nullptr) { + // Write clusters to file + int32_t frame_number = + clusters->frame_number(); // TODO! Should we store frame + // number already as int? 
+ uint32_t num_clusters = clusters->size(); + m_file.write(reinterpret_cast(&frame_number), + sizeof(frame_number)); + m_file.write(reinterpret_cast(&num_clusters), + sizeof(num_clusters)); + m_file.write(reinterpret_cast(clusters->data()), + clusters->size() * clusters->item_size()); + m_source->popFront(); + } else { + std::this_thread::sleep_for(m_default_wait); + } + } + fmt::print("ClusterFileSink stopped\n"); + m_stopped = true; + } + + public: + ClusterFileSink(ClusterFinderMT *source, + const std::filesystem::path &fname) { + m_source = source->sink(); + m_thread = std::thread(&ClusterFileSink::process, this); + m_file.open(fname, std::ios::binary); + } + void stop() { + m_stop_requested = true; + m_thread.join(); + m_file.close(); + } +}; + +} // namespace aare \ No newline at end of file diff --git a/include/aare/ClusterFileV2.hpp b/include/aare/ClusterFileV2.hpp deleted file mode 100644 index 99f5976..0000000 --- a/include/aare/ClusterFileV2.hpp +++ /dev/null @@ -1,148 +0,0 @@ -#pragma once -#include "aare/core/defs.hpp" -#include -#include -#include - -namespace aare { -struct ClusterHeader { - int32_t frame_number; - int32_t n_clusters; - std::string to_string() const { - return "frame_number: " + std::to_string(frame_number) + ", n_clusters: " + std::to_string(n_clusters); - } -}; - -struct ClusterV2_ { - int16_t x; - int16_t y; - std::array data; - std::string to_string(bool detailed = false) const { - if (detailed) { - std::string data_str = "["; - for (auto &d : data) { - data_str += std::to_string(d) + ", "; - } - data_str += "]"; - return "x: " + std::to_string(x) + ", y: " + std::to_string(y) + ", data: " + data_str; - } - return "x: " + std::to_string(x) + ", y: " + std::to_string(y); - } -}; - -struct ClusterV2 { - ClusterV2_ cluster; - int32_t frame_number; - std::string to_string() const { - return "frame_number: " + std::to_string(frame_number) + ", " + cluster.to_string(); - } -}; - -/** - * @brief - * important not: fp always points to 
the clusters header and does not point to individual clusters - * - */ -class ClusterFileV2 { - std::filesystem::path m_fpath; - std::string m_mode; - FILE *fp{nullptr}; - - void check_open(){ - if (!fp) - throw std::runtime_error(fmt::format("File: {} not open", m_fpath.string())); - } - - public: - ClusterFileV2(std::filesystem::path const &fpath, std::string const &mode): m_fpath(fpath), m_mode(mode) { - if (m_mode != "r" && m_mode != "w") - throw std::invalid_argument("mode must be 'r' or 'w'"); - if (m_mode == "r" && !std::filesystem::exists(m_fpath)) - throw std::invalid_argument("File does not exist"); - if (mode == "r") { - fp = fopen(fpath.string().c_str(), "rb"); - } else if (mode == "w") { - if (std::filesystem::exists(fpath)) { - fp = fopen(fpath.string().c_str(), "r+b"); - } else { - fp = fopen(fpath.string().c_str(), "wb"); - } - } - if (fp == nullptr) { - throw std::runtime_error("Failed to open file"); - } - } - ~ClusterFileV2() { close(); } - std::vector read() { - check_open(); - - ClusterHeader header; - fread(&header, sizeof(ClusterHeader), 1, fp); - std::vector clusters_(header.n_clusters); - fread(clusters_.data(), sizeof(ClusterV2_), header.n_clusters, fp); - std::vector clusters; - for (auto &c : clusters_) { - ClusterV2 cluster; - cluster.cluster = std::move(c); - cluster.frame_number = header.frame_number; - clusters.push_back(cluster); - } - - return clusters; - } - std::vector> read(int n_frames) { - std::vector> clusters; - for (int i = 0; i < n_frames; i++) { - clusters.push_back(read()); - } - return clusters; - } - - size_t write(std::vector const &clusters) { - check_open(); - if (m_mode != "w") - throw std::runtime_error("File not opened in write mode"); - if (clusters.empty()) - return 0; - - ClusterHeader header; - header.frame_number = clusters[0].frame_number; - header.n_clusters = clusters.size(); - fwrite(&header, sizeof(ClusterHeader), 1, fp); - for (auto &c : clusters) { - fwrite(&c.cluster, sizeof(ClusterV2_), 1, fp); - } 
- return clusters.size(); - } - - size_t write(std::vector> const &clusters) { - check_open(); - if (m_mode != "w") - throw std::runtime_error("File not opened in write mode"); - - size_t n_clusters = 0; - for (auto &c : clusters) { - n_clusters += write(c); - } - return n_clusters; - } - - int seek_to_begin() { return fseek(fp, 0, SEEK_SET); } - int seek_to_end() { return fseek(fp, 0, SEEK_END); } - - int32_t frame_number() { - auto pos = ftell(fp); - ClusterHeader header; - fread(&header, sizeof(ClusterHeader), 1, fp); - fseek(fp, pos, SEEK_SET); - return header.frame_number; - } - - void close() { - if (fp) { - fclose(fp); - fp = nullptr; - } - } -}; -} // namespace aare \ No newline at end of file diff --git a/include/aare/ClusterFinder.hpp b/include/aare/ClusterFinder.hpp index addb6db..ea11162 100644 --- a/include/aare/ClusterFinder.hpp +++ b/include/aare/ClusterFinder.hpp @@ -1,4 +1,6 @@ #pragma once +#include "aare/ClusterFile.hpp" +#include "aare/ClusterVector.hpp" #include "aare/Dtype.hpp" #include "aare/NDArray.hpp" #include "aare/NDView.hpp" @@ -8,251 +10,147 @@ namespace aare { -/** enum to define the event types */ -enum eventType { - PEDESTAL, /** pedestal */ - NEIGHBOUR, /** neighbour i.e. below threshold, but in the cluster of a - photon */ - PHOTON, /** photon i.e. 
above threshold */ - PHOTON_MAX, /** maximum of a cluster satisfying the photon conditions */ - NEGATIVE_PEDESTAL, /** negative value, will not be accounted for as pedestal - in order to avoid drift of the pedestal towards - negative values */ - UNDEFINED_EVENT = -1 /** undefined */ -}; - -template +template , + typename FRAME_TYPE = uint16_t, typename PEDESTAL_TYPE = double> class ClusterFinder { Shape<2> m_image_size; - const int m_cluster_sizeX; - const int m_cluster_sizeY; - const double m_threshold; - const double m_nSigma; - const double c2; - const double c3; + const PEDESTAL_TYPE m_nSigma; + const PEDESTAL_TYPE c2; + const PEDESTAL_TYPE c3; Pedestal m_pedestal; + ClusterVector m_clusters; + + static const uint8_t ClusterSizeX = ClusterType::cluster_size_x; + static const uint8_t ClusterSizeY = ClusterType::cluster_size_y; + using CT = typename ClusterType::value_type; public: - ClusterFinder(Shape<2> image_size, Shape<2>cluster_size, double nSigma = 5.0, - double threshold = 0.0) - : m_image_size(image_size), m_cluster_sizeX(cluster_size[0]), m_cluster_sizeY(cluster_size[1]), - m_threshold(threshold), m_nSigma(nSigma), - c2(sqrt((m_cluster_sizeY + 1) / 2 * (m_cluster_sizeX + 1) / 2)), - c3(sqrt(m_cluster_sizeX * m_cluster_sizeY)), - m_pedestal(image_size[0], image_size[1]) { - - // c2 = sqrt((cluster_sizeY + 1) / 2 * (cluster_sizeX + 1) / 2); - // c3 = sqrt(cluster_sizeX * cluster_sizeY); - }; + /** + * @brief Construct a new ClusterFinder object + * @param image_size size of the image + * @param cluster_size size of the cluster (x, y) + * @param nSigma number of sigma above the pedestal to consider a photon + * @param capacity initial capacity of the cluster vector + * + */ + ClusterFinder(Shape<2> image_size, PEDESTAL_TYPE nSigma = 5.0, + size_t capacity = 1000000) + : m_image_size(image_size), m_nSigma(nSigma), + c2(sqrt((ClusterSizeY + 1) / 2 * (ClusterSizeX + 1) / 2)), + c3(sqrt(ClusterSizeX * ClusterSizeY)), + m_pedestal(image_size[0], image_size[1]), 
m_clusters(capacity) {}; void push_pedestal_frame(NDView frame) { m_pedestal.push(frame); } - NDArray pedestal() { - return m_pedestal.mean(); + NDArray pedestal() { return m_pedestal.mean(); } + NDArray noise() { return m_pedestal.std(); } + void clear_pedestal() { m_pedestal.clear(); } + + /** + * @brief Move the clusters from the ClusterVector in the ClusterFinder to a + * new ClusterVector and return it. + * @param realloc_same_capacity if true the new ClusterVector will have the + * same capacity as the old one + * + */ + ClusterVector + steal_clusters(bool realloc_same_capacity = false) { + ClusterVector tmp = std::move(m_clusters); + if (realloc_same_capacity) + m_clusters = ClusterVector(tmp.capacity()); + else + m_clusters = ClusterVector{}; + return tmp; } + void find_clusters(NDView frame, uint64_t frame_number = 0) { + // // TODO! deal with even size clusters + // // currently 3,3 -> +/- 1 + // // 4,4 -> +/- 2 + int dy = ClusterSizeY / 2; + int dx = ClusterSizeX / 2; + int has_center_pixel_x = + ClusterSizeX % + 2; // for even sized clusters there is no proper cluster center and + // even amount of pixels around the center + int has_center_pixel_y = ClusterSizeY % 2; - std::vector - find_clusters_without_threshold(NDView frame, - // Pedestal &pedestal, - bool late_update = false) { - struct pedestal_update { - int x; - int y; - FRAME_TYPE value; - }; - std::vector pedestal_updates; - - std::vector clusters; - std::vector> eventMask; - for (int i = 0; i < frame.shape(0); i++) { - eventMask.push_back(std::vector(frame.shape(1))); - } - long double val; - long double max; - + m_clusters.set_frame_number(frame_number); for (int iy = 0; iy < frame.shape(0); iy++) { for (int ix = 0; ix < frame.shape(1); ix++) { - // initialize max and total - max = std::numeric_limits::min(); - long double total = 0; - eventMask[iy][ix] = PEDESTAL; - for (short ir = -(m_cluster_sizeY / 2); - ir < (m_cluster_sizeY / 2) + 1; ir++) { - for (short ic = -(m_cluster_sizeX / 2); - 
ic < (m_cluster_sizeX / 2) + 1; ic++) { + PEDESTAL_TYPE max = std::numeric_limits::min(); + PEDESTAL_TYPE total = 0; + + // What can we short circuit here? + PEDESTAL_TYPE rms = m_pedestal.std(iy, ix); + PEDESTAL_TYPE value = (frame(iy, ix) - m_pedestal.mean(iy, ix)); + + if (value < -m_nSigma * rms) + continue; // NEGATIVE_PEDESTAL go to next pixel + // TODO! No pedestal update??? + + for (int ir = -dy; ir < dy + has_center_pixel_y; ir++) { + for (int ic = -dx; ic < dx + has_center_pixel_x; ic++) { if (ix + ic >= 0 && ix + ic < frame.shape(1) && iy + ir >= 0 && iy + ir < frame.shape(0)) { - val = frame(iy + ir, ix + ic) - - m_pedestal.mean(iy + ir, ix + ic); + PEDESTAL_TYPE val = + frame(iy + ir, ix + ic) - + m_pedestal.mean(iy + ir, ix + ic); + total += val; - if (val > max) { - max = val; - } + max = std::max(max, val); } } } - auto rms = m_pedestal.std(iy, ix); - - if (frame(iy, ix) - m_pedestal.mean(iy, ix) < -m_nSigma * rms) { - eventMask[iy][ix] = NEGATIVE_PEDESTAL; - continue; - } else if (max > m_nSigma * rms) { - eventMask[iy][ix] = PHOTON; + if ((max > m_nSigma * rms)) { + if (value < max) + continue; // Not max go to the next pixel + // but also no pedestal update } else if (total > c3 * m_nSigma * rms) { - eventMask[iy][ix] = PHOTON; + // pass } else { - if (late_update) { - pedestal_updates.push_back({ix, iy, frame(iy, ix)}); - } else { - m_pedestal.push(iy, ix, frame(iy, ix)); - } - continue; + // m_pedestal.push(iy, ix, frame(iy, ix)); // Safe option + m_pedestal.push_fast( + iy, ix, + frame(iy, + ix)); // Assume we have reached n_samples in the + // pedestal, slight performance improvement + continue; // It was a pedestal value nothing to store } - if (eventMask[iy][ix] == PHOTON && - (frame(iy, ix) - m_pedestal.mean(iy, ix)) >= max) { - eventMask[iy][ix] = PHOTON_MAX; - DynamicCluster cluster(m_cluster_sizeX, m_cluster_sizeY, - Dtype(typeid(PEDESTAL_TYPE))); + + // Store cluster + if (value == max) { + ClusterType cluster{}; cluster.x = ix; 
cluster.y = iy; - short i = 0; - for (short ir = -(m_cluster_sizeY / 2); - ir < (m_cluster_sizeY / 2) + 1; ir++) { - for (short ic = -(m_cluster_sizeX / 2); - ic < (m_cluster_sizeX / 2) + 1; ic++) { + // Fill the cluster data since we have a photon to store + // It's worth redoing the look since most of the time we + // don't have a photon + int i = 0; + for (int ir = -dy; ir < dy + has_center_pixel_y; ir++) { + for (int ic = -dx; ic < dx + has_center_pixel_y; ic++) { if (ix + ic >= 0 && ix + ic < frame.shape(1) && iy + ir >= 0 && iy + ir < frame.shape(0)) { - PEDESTAL_TYPE tmp = - static_cast( - frame(iy + ir, ix + ic)) - - m_pedestal.mean(iy + ir, ix + ic); - cluster.set(i, tmp); + CT tmp = + static_cast(frame(iy + ir, ix + ic)) - + static_cast( + m_pedestal.mean(iy + ir, ix + ic)); + cluster.data[i] = + tmp; // Watch for out of bounds access i++; } } } - clusters.push_back(cluster); + + // Add the cluster to the output ClusterVector + m_clusters.push_back(cluster); } } } - if (late_update) { - for (auto &update : pedestal_updates) { - m_pedestal.push(update.y, update.x, update.value); - } - } - return clusters; - } - - // template - std::vector - find_clusters_with_threshold(NDView frame, - Pedestal &pedestal) { - assert(m_threshold > 0); - std::vector clusters; - std::vector> eventMask; - for (int i = 0; i < frame.shape(0); i++) { - eventMask.push_back(std::vector(frame.shape(1))); - } - double tthr, tthr1, tthr2; - - NDArray rest({frame.shape(0), frame.shape(1)}); - NDArray nph({frame.shape(0), frame.shape(1)}); - // convert to n photons - // nph = (frame-pedestal.mean()+0.5*m_threshold)/m_threshold; // can be - // optimized with expression templates? - for (int iy = 0; iy < frame.shape(0); iy++) { - for (int ix = 0; ix < frame.shape(1); ix++) { - auto val = frame(iy, ix) - pedestal.mean(iy, ix); - nph(iy, ix) = (val + 0.5 * m_threshold) / m_threshold; - nph(iy, ix) = nph(iy, ix) < 0 ? 
0 : nph(iy, ix); - rest(iy, ix) = val - nph(iy, ix) * m_threshold; - } - } - // iterate over frame pixels - for (int iy = 0; iy < frame.shape(0); iy++) { - for (int ix = 0; ix < frame.shape(1); ix++) { - eventMask[iy][ix] = PEDESTAL; - // initialize max and total - FRAME_TYPE max = std::numeric_limits::min(); - long double total = 0; - if (rest(iy, ix) <= 0.25 * m_threshold) { - pedestal.push(iy, ix, frame(iy, ix)); - continue; - } - eventMask[iy][ix] = NEIGHBOUR; - // iterate over cluster pixels around the current pixel (ix,iy) - for (short ir = -(m_cluster_sizeY / 2); - ir < (m_cluster_sizeY / 2) + 1; ir++) { - for (short ic = -(m_cluster_sizeX / 2); - ic < (m_cluster_sizeX / 2) + 1; ic++) { - if (ix + ic >= 0 && ix + ic < frame.shape(1) && - iy + ir >= 0 && iy + ir < frame.shape(0)) { - auto val = frame(iy + ir, ix + ic) - - pedestal.mean(iy + ir, ix + ic); - total += val; - if (val > max) { - max = val; - } - } - } - } - - auto rms = pedestal.std(iy, ix); - if (m_nSigma == 0) { - tthr = m_threshold; - tthr1 = m_threshold; - tthr2 = m_threshold; - } else { - tthr = m_nSigma * rms; - tthr1 = m_nSigma * rms * c3; - tthr2 = m_nSigma * rms * c2; - - if (m_threshold > 2 * tthr) - tthr = m_threshold - tthr; - if (m_threshold > 2 * tthr1) - tthr1 = tthr - tthr1; - if (m_threshold > 2 * tthr2) - tthr2 = tthr - tthr2; - } - if (total > tthr1 || max > tthr) { - eventMask[iy][ix] = PHOTON; - nph(iy, ix) += 1; - rest(iy, ix) -= m_threshold; - } else { - pedestal.push(iy, ix, frame(iy, ix)); - continue; - } - if (eventMask[iy][ix] == PHOTON && - frame(iy, ix) - pedestal.mean(iy, ix) >= max) { - eventMask[iy][ix] = PHOTON_MAX; - DynamicCluster cluster(m_cluster_sizeX, m_cluster_sizeY, - Dtype(typeid(FRAME_TYPE))); - cluster.x = ix; - cluster.y = iy; - short i = 0; - for (short ir = -(m_cluster_sizeY / 2); - ir < (m_cluster_sizeY / 2) + 1; ir++) { - for (short ic = -(m_cluster_sizeX / 2); - ic < (m_cluster_sizeX / 2) + 1; ic++) { - if (ix + ic >= 0 && ix + ic < frame.shape(1) 
&& - iy + ir >= 0 && iy + ir < frame.shape(0)) { - auto tmp = frame(iy + ir, ix + ic) - - pedestal.mean(iy + ir, ix + ic); - cluster.set(i, tmp); - i++; - } - } - } - clusters.push_back(cluster); - } - } - } - return clusters; } }; diff --git a/include/aare/ClusterFinderMT.hpp b/include/aare/ClusterFinderMT.hpp new file mode 100644 index 0000000..2dfb279 --- /dev/null +++ b/include/aare/ClusterFinderMT.hpp @@ -0,0 +1,277 @@ +#pragma once +#include +#include +#include +#include +#include + +#include "aare/ClusterFinder.hpp" +#include "aare/NDArray.hpp" +#include "aare/ProducerConsumerQueue.hpp" + +namespace aare { + +enum class FrameType { + DATA, + PEDESTAL, +}; + +struct FrameWrapper { + FrameType type; + uint64_t frame_number; + NDArray data; +}; + +/** + * @brief ClusterFinderMT is a multi-threaded version of ClusterFinder. It uses + * a producer-consumer queue to distribute the frames to the threads. The + * clusters are collected in a single output queue. + * @tparam FRAME_TYPE type of the frame data + * @tparam PEDESTAL_TYPE type of the pedestal data + * @tparam CT type of the cluster data + */ +template , + typename FRAME_TYPE = uint16_t, typename PEDESTAL_TYPE = double> +class ClusterFinderMT { + + protected: + using CT = typename ClusterType::value_type; + size_t m_current_thread{0}; + size_t m_n_threads{0}; + using Finder = ClusterFinder; + using InputQueue = ProducerConsumerQueue; + using OutputQueue = ProducerConsumerQueue>; + std::vector> m_input_queues; + std::vector> m_output_queues; + + OutputQueue m_sink{1000}; // All clusters go into this queue + + std::vector> m_cluster_finders; + std::vector m_threads; + std::thread m_collect_thread; + std::chrono::milliseconds m_default_wait{1}; + + private: + std::atomic m_stop_requested{false}; + std::atomic m_processing_threads_stopped{true}; + + /** + * @brief Function called by the processing threads. It reads the frames + * from the input queue and processes them. 
+ */ + void process(int thread_id) { + auto cf = m_cluster_finders[thread_id].get(); + auto q = m_input_queues[thread_id].get(); + bool realloc_same_capacity = true; + + while (!m_stop_requested || !q->isEmpty()) { + if (FrameWrapper *frame = q->frontPtr(); frame != nullptr) { + + switch (frame->type) { + case FrameType::DATA: + cf->find_clusters(frame->data.view(), frame->frame_number); + m_output_queues[thread_id]->write( + cf->steal_clusters(realloc_same_capacity)); + break; + + case FrameType::PEDESTAL: + m_cluster_finders[thread_id]->push_pedestal_frame( + frame->data.view()); + break; + } + + // frame is processed now discard it + m_input_queues[thread_id]->popFront(); + } else { + std::this_thread::sleep_for(m_default_wait); + } + } + } + + /** + * @brief Collect all the clusters from the output queues and write them to + * the sink + */ + void collect() { + bool empty = true; + while (!m_stop_requested || !empty || !m_processing_threads_stopped) { + empty = true; + for (auto &queue : m_output_queues) { + if (!queue->isEmpty()) { + + while (!m_sink.write(std::move(*queue->frontPtr()))) { + std::this_thread::sleep_for(m_default_wait); + } + queue->popFront(); + empty = false; + } + } + } + } + + public: + /** + * @brief Construct a new ClusterFinderMT object + * @param image_size size of the image + * @param cluster_size size of the cluster + * @param nSigma number of sigma above the pedestal to consider a photon + * @param capacity initial capacity of the cluster vector. Should match + * expected number of clusters in a frame per frame. 
+ * @param n_threads number of threads to use + */ + ClusterFinderMT(Shape<2> image_size, PEDESTAL_TYPE nSigma = 5.0, + size_t capacity = 2000, size_t n_threads = 3) + : m_n_threads(n_threads) { + + for (size_t i = 0; i < n_threads; i++) { + m_cluster_finders.push_back( + std::make_unique< + ClusterFinder>( + image_size, nSigma, capacity)); + } + for (size_t i = 0; i < n_threads; i++) { + m_input_queues.emplace_back(std::make_unique(200)); + m_output_queues.emplace_back(std::make_unique(200)); + } + // TODO! Should we start automatically? + start(); + } + + /** + * @brief Return the sink queue where all the clusters are collected + * @warning You need to empty this queue otherwise the cluster finder will + * wait forever + */ + ProducerConsumerQueue> *sink() { + return &m_sink; + } + + /** + * @brief Start all processing threads + */ + void start() { + m_processing_threads_stopped = false; + m_stop_requested = false; + + for (size_t i = 0; i < m_n_threads; i++) { + m_threads.push_back( + std::thread(&ClusterFinderMT::process, this, i)); + } + + m_collect_thread = std::thread(&ClusterFinderMT::collect, this); + } + + /** + * @brief Stop all processing threads + */ + void stop() { + m_stop_requested = true; + + for (auto &thread : m_threads) { + thread.join(); + } + m_threads.clear(); + + m_processing_threads_stopped = true; + m_collect_thread.join(); + } + + /** + * @brief Wait for all the queues to be empty. Mostly used for timing tests. + */ + void sync() { + for (auto &q : m_input_queues) { + while (!q->isEmpty()) { + std::this_thread::sleep_for(m_default_wait); + } + } + for (auto &q : m_output_queues) { + while (!q->isEmpty()) { + std::this_thread::sleep_for(m_default_wait); + } + } + while (!m_sink.isEmpty()) { + std::this_thread::sleep_for(m_default_wait); + } + } + + /** + * @brief Push a pedestal frame to all the cluster finders. The frames is + * expected to be dark. No photon finding is done. Just pedestal update. 
+ */ + void push_pedestal_frame(NDView frame) { + FrameWrapper fw{FrameType::PEDESTAL, 0, + NDArray(frame)}; // TODO! copies the data! + + for (auto &queue : m_input_queues) { + while (!queue->write(fw)) { + std::this_thread::sleep_for(m_default_wait); + } + } + } + + /** + * @brief Push the frame to the queue of the next available thread. Function + * returns once the frame is in a queue. + * @note Spin locks with a default wait if the queue is full. + */ + void find_clusters(NDView frame, uint64_t frame_number = 0) { + FrameWrapper fw{FrameType::DATA, frame_number, + NDArray(frame)}; // TODO! copies the data! + while (!m_input_queues[m_current_thread % m_n_threads]->write(fw)) { + std::this_thread::sleep_for(m_default_wait); + } + m_current_thread++; + } + + void clear_pedestal() { + if (!m_processing_threads_stopped) { + throw std::runtime_error("ClusterFinderMT is still running"); + } + for (auto &cf : m_cluster_finders) { + cf->clear_pedestal(); + } + } + + /** + * @brief Return the pedestal currently used by the cluster finder + * @param thread_index index of the thread + */ + auto pedestal(size_t thread_index = 0) { + if (m_cluster_finders.empty()) { + throw std::runtime_error("No cluster finders available"); + } + if (!m_processing_threads_stopped) { + throw std::runtime_error("ClusterFinderMT is still running"); + } + if (thread_index >= m_cluster_finders.size()) { + throw std::runtime_error("Thread index out of range"); + } + return m_cluster_finders[thread_index]->pedestal(); + } + + /** + * @brief Return the noise currently used by the cluster finder + * @param thread_index index of the thread + */ + auto noise(size_t thread_index = 0) { + if (m_cluster_finders.empty()) { + throw std::runtime_error("No cluster finders available"); + } + if (!m_processing_threads_stopped) { + throw std::runtime_error("ClusterFinderMT is still running"); + } + if (thread_index >= m_cluster_finders.size()) { + throw std::runtime_error("Thread index out of range"); + } + 
return m_cluster_finders[thread_index]->noise(); + } + + // void push(FrameWrapper&& frame) { + // //TODO! need to loop until we are successful + // auto rc = m_input_queue.write(std::move(frame)); + // fmt::print("pushed frame {}\n", rc); + // } +}; + +} // namespace aare \ No newline at end of file diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp new file mode 100644 index 0000000..9d575d9 --- /dev/null +++ b/include/aare/ClusterVector.hpp @@ -0,0 +1,170 @@ +#pragma once +#include "aare/Cluster.hpp" //TODO maybe store in seperate file !!! +#include +#include +#include +#include +#include +#include + +#include + +#include "aare/Cluster.hpp" +#include "aare/NDView.hpp" + +namespace aare { + +template >> +class ClusterVector; // Forward declaration + +/** + * @brief ClusterVector is a container for clusters of various sizes. It + * uses a contiguous memory buffer to store the clusters. It is templated on + * the data type and the coordinate type of the clusters. + * @note push_back can invalidate pointers to elements in the container + * @warning ClusterVector is currently move only to catch unintended copies, + * but this might change since there are probably use cases where copying is + * needed. + * @tparam T data type of the pixels in the cluster + * @tparam CoordType data type of the x and y coordinates of the cluster + * (normally int16_t) + */ +template +class ClusterVector> { + + std::vector> m_data{}; + int32_t m_frame_number{0}; // TODO! Check frame number size and type + + public: + using value_type = T; + using ClusterType = Cluster; + + /** + * @brief Construct a new ClusterVector object + * @param capacity initial capacity of the buffer in number of clusters + * @param frame_number frame number of the clusters. 
Default is 0, which is + * also used to indicate that the clusters come from many frames + */ + ClusterVector(size_t capacity = 1024, uint64_t frame_number = 0) + : m_frame_number(frame_number) { + m_data.reserve(capacity); + } + + // Move constructor + ClusterVector(ClusterVector &&other) noexcept + : m_data(other.m_data), m_frame_number(other.m_frame_number) { + other.m_data.clear(); + } + + // Move assignment operator + ClusterVector &operator=(ClusterVector &&other) noexcept { + if (this != &other) { + m_data = other.m_data; + m_frame_number = other.m_frame_number; + other.m_data.clear(); + other.m_frame_number = 0; + } + return *this; + } + + /** + * @brief Sum the pixels in each cluster + * @return std::vector vector of sums for each cluster + */ + std::vector sum() { + std::vector sums(m_data.size()); + + std::transform( + m_data.begin(), m_data.end(), sums.begin(), + [](const ClusterType &cluster) { return cluster.sum(); }); + + return sums; + } + + /** + * @brief Sum the pixels in the 2x2 subcluster with the biggest pixel sum in + * each cluster + * @return std::vector vector of sums for each cluster + */ + std::vector sum_2x2() { + std::vector sums_2x2(m_data.size()); + + std::transform(m_data.begin(), m_data.end(), sums_2x2.begin(), + [](const ClusterType &cluster) { + return cluster.max_sum_2x2().first; + }); + + return sums_2x2; + } + + /** + * @brief Reserve space for at least capacity clusters + * @param capacity number of clusters to reserve space for + * @note If capacity is less than the current capacity, the function does + * nothing. 
+ */ + void reserve(size_t capacity) { m_data.reserve(capacity); } + + void resize(size_t size) { m_data.resize(size); } + + void push_back(const ClusterType &cluster) { m_data.push_back(cluster); } + + ClusterVector &operator+=(const ClusterVector &other) { + m_data.insert(m_data.end(), other.begin(), other.end()); + + return *this; + } + + /** + * @brief Return the number of clusters in the vector + */ + size_t size() const { return m_data.size(); } + + uint8_t cluster_size_x() const { return ClusterSizeX; } + + uint8_t cluster_size_y() const { return ClusterSizeY; } + + /** + * @brief Return the capacity of the buffer in number of clusters. This is + * the number of clusters that can be stored in the current buffer without + * reallocation. + */ + size_t capacity() const { return m_data.capacity(); } + + auto begin() const { return m_data.begin(); } + + auto end() const { return m_data.end(); } + + /** + * @brief Return the size in bytes of a single cluster + */ + size_t item_size() const { + return sizeof(ClusterType); // 2 * sizeof(CoordType) + ClusterSizeX * + // ClusterSizeY * sizeof(T); + } + + ClusterType *data() { return m_data.data(); } + ClusterType const *data() const { return m_data.data(); } + + /** + * @brief Return a reference to the i-th cluster casted to type V + * @tparam V type of the cluster + */ + ClusterType &operator[](size_t i) { return m_data[i]; } + + const ClusterType &operator[](size_t i) const { return m_data[i]; } + + /** + * @brief Return the frame number of the clusters. 
0 is used to indicate + * that the clusters come from many frames + */ + int32_t frame_number() const { return m_frame_number; } + + void set_frame_number(int32_t frame_number) { + m_frame_number = frame_number; + } +}; + +} // namespace aare \ No newline at end of file diff --git a/include/aare/File.hpp b/include/aare/File.hpp index d368eb8..61a26a8 100644 --- a/include/aare/File.hpp +++ b/include/aare/File.hpp @@ -36,6 +36,8 @@ class File { File(File &&other) noexcept; File& operator=(File &&other) noexcept; ~File() = default; + + // void close(); //!< close the file Frame read_frame(); //!< read one frame from the file at the current position Frame read_frame(size_t frame_index); //!< read one frame at the position given by frame number @@ -44,6 +46,7 @@ class File { void read_into(std::byte *image_buf); void read_into(std::byte *image_buf, size_t n_frames); + size_t frame_number(); //!< get the frame number at the current position size_t frame_number(size_t frame_index); //!< get the frame number at the given frame index size_t bytes_per_frame() const; size_t pixels_per_frame() const; diff --git a/include/aare/FilePtr.hpp b/include/aare/FilePtr.hpp new file mode 100644 index 0000000..4ddc76e --- /dev/null +++ b/include/aare/FilePtr.hpp @@ -0,0 +1,30 @@ +#pragma once +#include +#include + +namespace aare { + +/** + * \brief RAII wrapper for FILE pointer + */ +class FilePtr { + FILE *fp_{nullptr}; + + public: + FilePtr() = default; + FilePtr(const std::filesystem::path& fname, const std::string& mode); + FilePtr(const FilePtr &) = delete; // we don't want a copy + FilePtr &operator=(const FilePtr &) = delete; // since we handle a resource + FilePtr(FilePtr &&other); + FilePtr &operator=(FilePtr &&other); + FILE *get(); + ssize_t tell(); + void seek(ssize_t offset, int whence = SEEK_SET) { + if (fseek(fp_, offset, whence) != 0) + throw std::runtime_error("Error seeking in file"); + } + std::string error_msg(); + ~FilePtr(); +}; + +} // namespace aare \ No newline 
at end of file diff --git a/include/aare/Fit.hpp b/include/aare/Fit.hpp new file mode 100644 index 0000000..eb9ac22 --- /dev/null +++ b/include/aare/Fit.hpp @@ -0,0 +1,115 @@ +#pragma once + +#include +#include +#include + +#include "aare/NDArray.hpp" + +namespace aare { + +namespace func { +double gaus(const double x, const double *par); +NDArray gaus(NDView x, NDView par); + +double pol1(const double x, const double *par); +NDArray pol1(NDView x, NDView par); + +double scurve(const double x, const double *par); +NDArray scurve(NDView x, NDView par); + +double scurve2(const double x, const double *par); +NDArray scurve2(NDView x, NDView par); + +} // namespace func + + +/** + * @brief Estimate the initial parameters for a Gaussian fit + */ +std::array gaus_init_par(const NDView x, const NDView y); + +std::array pol1_init_par(const NDView x, const NDView y); + +std::array scurve_init_par(const NDView x, const NDView y); +std::array scurve2_init_par(const NDView x, const NDView y); + +static constexpr int DEFAULT_NUM_THREADS = 4; + +/** + * @brief Fit a 1D Gaussian to data. + * @param data data to fit + * @param x x values + */ +NDArray fit_gaus(NDView x, NDView y); + + +/** + * @brief Fit a 1D Gaussian to each pixel. Data layout [row, col, values] + * @param x x values + * @param y y values, layout [row, col, values] + * @param n_threads number of threads to use + */ + +NDArray fit_gaus(NDView x, NDView y, + int n_threads = DEFAULT_NUM_THREADS); + + + + +/** + * @brief Fit a 1D Gaussian with error estimates + * @param x x values + * @param y y values, layout [row, col, values] + * @param y_err error in y, layout [row, col, values] + * @param par_out output parameters + * @param par_err_out output error parameters + */ +void fit_gaus(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, + double& chi2); + +/** + * @brief Fit a 1D Gaussian to each pixel with error estimates. 
Data layout + * [row, col, values] + * @param x x values + * @param y y values, layout [row, col, values] + * @param y_err error in y, layout [row, col, values] + * @param par_out output parameters, layout [row, col, values] + * @param par_err_out output parameter errors, layout [row, col, values] + * @param n_threads number of threads to use + */ +void fit_gaus(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, NDView chi2_out, + int n_threads = DEFAULT_NUM_THREADS + ); + +NDArray fit_pol1(NDView x, NDView y); + +NDArray fit_pol1(NDView x, NDView y, + int n_threads = DEFAULT_NUM_THREADS); + +void fit_pol1(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, double& chi2); + +// TODO! not sure we need to offer the different version in C++ +void fit_pol1(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out,NDView chi2_out, + int n_threads = DEFAULT_NUM_THREADS); + +NDArray fit_scurve(NDView x, NDView y); +NDArray fit_scurve(NDView x, NDView y, int n_threads); +void fit_scurve(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, double& chi2); +void fit_scurve(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, NDView chi2_out, + int n_threads); + +NDArray fit_scurve2(NDView x, NDView y); +NDArray fit_scurve2(NDView x, NDView y, int n_threads); +void fit_scurve2(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, double& chi2); +void fit_scurve2(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, NDView chi2_out, + int n_threads); +} // namespace aare \ No newline at end of file diff --git a/include/aare/Frame.hpp b/include/aare/Frame.hpp index 5ce63ac..02ea82f 100644 --- a/include/aare/Frame.hpp +++ b/include/aare/Frame.hpp @@ -107,8 +107,8 @@ class Frame { * @return NDView */ template NDView view() { - std::array shape = {static_cast(m_rows), - static_cast(m_cols)}; + std::array shape = {static_cast(m_rows), + 
static_cast(m_cols)}; T *data = reinterpret_cast(m_data); return NDView(data, shape); } diff --git a/include/aare/GainMap.hpp b/include/aare/GainMap.hpp new file mode 100644 index 0000000..ac558d0 --- /dev/null +++ b/include/aare/GainMap.hpp @@ -0,0 +1,68 @@ +/************************************************ + * @file GainMap.hpp + * @short function to apply gain map of image size to a vector of clusters - + *note stored gainmap is inverted for efficient aaplication to images + ***********************************************/ + +#pragma once +#include "aare/Cluster.hpp" +#include "aare/ClusterVector.hpp" +#include "aare/NDArray.hpp" +#include "aare/NDView.hpp" +#include + +namespace aare { + +class InvertedGainMap { + + public: + explicit InvertedGainMap(const NDArray &gain_map) + : m_gain_map(gain_map) { + for (auto &item : m_gain_map) { + item = 1.0 / item; + } + }; + + explicit InvertedGainMap(const NDView gain_map) { + m_gain_map = NDArray(gain_map); + for (auto &item : m_gain_map) { + item = 1.0 / item; + } + } + + template >> + void apply_gain_map(ClusterVector &clustervec) { + // in principle we need to know the size of the image for this lookup + size_t ClusterSizeX = clustervec.cluster_size_x(); + size_t ClusterSizeY = clustervec.cluster_size_y(); + + using T = typename ClusterVector::value_type; + + int64_t index_cluster_center_x = ClusterSizeX / 2; + int64_t index_cluster_center_y = ClusterSizeY / 2; + for (size_t i = 0; i < clustervec.size(); i++) { + auto &cl = clustervec[i]; + + if (cl.x > 0 && cl.y > 0 && cl.x < m_gain_map.shape(1) - 1 && + cl.y < m_gain_map.shape(0) - 1) { + for (size_t j = 0; j < ClusterSizeX * ClusterSizeY; j++) { + size_t x = cl.x + j % ClusterSizeX - index_cluster_center_x; + size_t y = cl.y + j / ClusterSizeX - index_cluster_center_y; + cl.data[j] = static_cast( + static_cast(cl.data[j]) * + m_gain_map( + y, x)); // cast after conversion to keep precision + } + } else { + // clear edge clusters + cl.data.fill(0); + } + } + } + 
+ private: + NDArray m_gain_map{}; +}; + +} // end of namespace aare \ No newline at end of file diff --git a/include/aare/Interpolator.hpp b/include/aare/Interpolator.hpp new file mode 100644 index 0000000..8e65f38 --- /dev/null +++ b/include/aare/Interpolator.hpp @@ -0,0 +1,130 @@ +#pragma once + +#include "aare/CalculateEta.hpp" +#include "aare/Cluster.hpp" +#include "aare/ClusterFile.hpp" //Cluster_3x3 +#include "aare/ClusterVector.hpp" +#include "aare/NDArray.hpp" +#include "aare/NDView.hpp" +#include "aare/algorithm.hpp" + +namespace aare { + +struct Photon { + double x; + double y; + double energy; +}; + +class Interpolator { + NDArray m_ietax; + NDArray m_ietay; + + NDArray m_etabinsx; + NDArray m_etabinsy; + NDArray m_energy_bins; + + public: + Interpolator(NDView etacube, NDView xbins, + NDView ybins, NDView ebins); + NDArray get_ietax() { return m_ietax; } + NDArray get_ietay() { return m_ietay; } + + template >> + std::vector interpolate(const ClusterVector &clusters); +}; + +// TODO: generalize to support any clustertype!!! 
otherwise add std::enable_if_t +// to only take Cluster2x2 and Cluster3x3 +template +std::vector +Interpolator::interpolate(const ClusterVector &clusters) { + std::vector photons; + photons.reserve(clusters.size()); + + if (clusters.cluster_size_x() == 3 || clusters.cluster_size_y() == 3) { + for (const ClusterType &cluster : clusters) { + + auto eta = calculate_eta2(cluster); + + Photon photon; + photon.x = cluster.x; + photon.y = cluster.y; + photon.energy = static_cast(eta.sum); + + // auto ie = nearest_index(m_energy_bins, photon.energy)-1; + // auto ix = nearest_index(m_etabinsx, eta.x)-1; + // auto iy = nearest_index(m_etabinsy, eta.y)-1; + // Finding the index of the last element that is smaller + // should work fine as long as we have many bins + auto ie = last_smaller(m_energy_bins, photon.energy); + auto ix = last_smaller(m_etabinsx, eta.x); + auto iy = last_smaller(m_etabinsy, eta.y); + + // fmt::print("ex: {}, ix: {}, iy: {}\n", ie, ix, iy); + + double dX, dY; + // cBottomLeft = 0, + // cBottomRight = 1, + // cTopLeft = 2, + // cTopRight = 3 + switch (static_cast(eta.c)) { + case corner::cTopLeft: + dX = -1.; + dY = 0; + break; + case corner::cTopRight:; + dX = 0; + dY = 0; + break; + case corner::cBottomLeft: + dX = -1.; + dY = -1.; + break; + case corner::cBottomRight: + dX = 0.; + dY = -1.; + break; + } + photon.x += m_ietax(ix, iy, ie) * 2 + dX; + photon.y += m_ietay(ix, iy, ie) * 2 + dY; + photons.push_back(photon); + } + } else if (clusters.cluster_size_x() == 2 || + clusters.cluster_size_y() == 2) { + for (const ClusterType &cluster : clusters) { + auto eta = calculate_eta2(cluster); + + Photon photon; + photon.x = cluster.x; + photon.y = cluster.y; + photon.energy = static_cast(eta.sum); + + // Now do some actual interpolation. 
+ // Find which energy bin the cluster is in + // auto ie = nearest_index(m_energy_bins, photon.energy)-1; + // auto ix = nearest_index(m_etabinsx, eta.x)-1; + // auto iy = nearest_index(m_etabinsy, eta.y)-1; + // Finding the index of the last element that is smaller + // should work fine as long as we have many bins + auto ie = last_smaller(m_energy_bins, photon.energy); + auto ix = last_smaller(m_etabinsx, eta.x); + auto iy = last_smaller(m_etabinsy, eta.y); + + photon.x += m_ietax(ix, iy, ie) * + 2; // eta goes between 0 and 1 but we could move the hit + // anywhere in the 2x2 + photon.y += m_ietay(ix, iy, ie) * 2; + photons.push_back(photon); + } + + } else { + throw std::runtime_error( + "Only 3x3 and 2x2 clusters are supported for interpolation"); + } + + return photons; +} + +} // namespace aare \ No newline at end of file diff --git a/include/aare/JungfrauDataFile.hpp b/include/aare/JungfrauDataFile.hpp new file mode 100644 index 0000000..9b1bc48 --- /dev/null +++ b/include/aare/JungfrauDataFile.hpp @@ -0,0 +1,106 @@ +#pragma once +#include +#include +#include + +#include "aare/FilePtr.hpp" +#include "aare/defs.hpp" +#include "aare/NDArray.hpp" +#include "aare/FileInterface.hpp" +namespace aare { + + +struct JungfrauDataHeader{ + uint64_t framenum; + uint64_t bunchid; +}; + +class JungfrauDataFile : public FileInterface { + + size_t m_rows{}; //!< number of rows in the image, from find_frame_size(); + size_t m_cols{}; //!< number of columns in the image, from find_frame_size(); + size_t m_bytes_per_frame{}; //!< number of bytes per frame excluding header + size_t m_total_frames{}; //!< total number of frames in the series of files + size_t m_offset{}; //!< file index of the first file, allow starting at non zero file + size_t m_current_file_index{}; //!< The index of the open file + size_t m_current_frame_index{}; //!< The index of the current frame (with reference to all files) + + std::vector m_last_frame_in_file{}; //!< Used for seeking to the correct 
file + std::filesystem::path m_path; //!< path to the files + std::string m_base_name; //!< base name used for formatting file names + + FilePtr m_fp; //!< RAII wrapper for a FILE* + + + using pixel_type = uint16_t; + static constexpr size_t header_size = sizeof(JungfrauDataHeader); + static constexpr size_t n_digits_in_file_index = 6; //!< to format file names + + public: + JungfrauDataFile(const std::filesystem::path &fname); + + std::string base_name() const; //!< get the base name of the file (without path and extension) + size_t bytes_per_frame() override; + size_t pixels_per_frame() override; + size_t bytes_per_pixel() const; + size_t bitdepth() const override; + void seek(size_t frame_index) override; //!< seek to the given frame index (note not byte offset) + size_t tell() override; //!< get the frame index of the file pointer + size_t total_frames() const override; + size_t rows() const override; + size_t cols() const override; + std::array shape() const; + size_t n_files() const; //!< get the number of files in the series. + + // Extra functions needed for FileInterface + Frame read_frame() override; + Frame read_frame(size_t frame_number) override; + std::vector read_n(size_t n_frames=0) override; + void read_into(std::byte *image_buf) override; + void read_into(std::byte *image_buf, size_t n_frames) override; + size_t frame_number(size_t frame_index) override; + DetectorType detector_type() const override; + + /** + * @brief Read a single frame from the file into the given buffer. + * @param image_buf buffer to read the frame into. (Note the caller is responsible for allocating the buffer) + * @param header pointer to a JungfrauDataHeader or nullptr to skip header) + */ + void read_into(std::byte *image_buf, JungfrauDataHeader *header = nullptr); + + /** + * @brief Read a multiple frames from the file into the given buffer. + * @param image_buf buffer to read the frame into. 
(Note the caller is responsible for allocating the buffer) + * @param n_frames number of frames to read + * @param header pointer to a JungfrauDataHeader or nullptr to skip header) + */ + void read_into(std::byte *image_buf, size_t n_frames, JungfrauDataHeader *header = nullptr); + + /** + * @brief Read a single frame from the file into the given NDArray + * @param image NDArray to read the frame into. + */ + void read_into(NDArray* image, JungfrauDataHeader* header = nullptr); + + JungfrauDataHeader read_header(); + std::filesystem::path current_file() const { return fpath(m_current_file_index+m_offset); } + + + private: + /** + * @brief Find the size of the frame in the file. (256x256, 256x1024, 512x1024) + * @param fname path to the file + * @throws std::runtime_error if the file is empty or the size cannot be determined + */ + void find_frame_size(const std::filesystem::path &fname); + + + void parse_fname(const std::filesystem::path &fname); + void scan_files(); + void open_file(size_t file_index); + std::filesystem::path fpath(size_t frame_index) const; + + + }; + +} // namespace aare \ No newline at end of file diff --git a/include/aare/NDArray.hpp b/include/aare/NDArray.hpp index 346646c..3c08a3c 100644 --- a/include/aare/NDArray.hpp +++ b/include/aare/NDArray.hpp @@ -22,10 +22,10 @@ TODO! 
Add expression templates for operators namespace aare { -template +template class NDArray : public ArrayExpr, Ndim> { - std::array shape_; - std::array strides_; + std::array shape_; + std::array strides_; size_t size_{}; T *data_; @@ -42,7 +42,7 @@ class NDArray : public ArrayExpr, Ndim> { * * @param shape shape of the new NDArray */ - explicit NDArray(std::array shape) + explicit NDArray(std::array shape) : shape_(shape), strides_(c_strides(shape_)), size_(std::accumulate(shape_.begin(), shape_.end(), 1, std::multiplies<>())), @@ -55,7 +55,7 @@ class NDArray : public ArrayExpr, Ndim> { * @param shape shape of the new array * @param value value to initialize the array with */ - NDArray(std::array shape, T value) : NDArray(shape) { + NDArray(std::array shape, T value) : NDArray(shape) { this->operator=(value); } @@ -69,6 +69,11 @@ class NDArray : public ArrayExpr, Ndim> { std::copy(v.begin(), v.end(), begin()); } + template + NDArray(const std::array& arr) : NDArray({Size}) { + std::copy(arr.begin(), arr.end(), begin()); + } + // Move constructor NDArray(NDArray &&other) noexcept : shape_(other.shape_), strides_(c_strides(shape_)), @@ -87,7 +92,7 @@ class NDArray : public ArrayExpr, Ndim> { // Conversion operator from array expression to array template NDArray(ArrayExpr &&expr) : NDArray(expr.shape()) { - for (int i = 0; i < size_; ++i) { + for (size_t i = 0; i < size_; ++i) { data_[i] = expr[i]; } } @@ -97,6 +102,9 @@ class NDArray : public ArrayExpr, Ndim> { auto begin() { return data_; } auto end() { return data_ + size_; } + auto begin() const { return data_; } + auto end() const { return data_ + size_; } + using value_type = T; NDArray &operator=(NDArray &&other) noexcept; // Move assign @@ -105,6 +113,20 @@ class NDArray : public ArrayExpr, Ndim> { NDArray &operator-=(const NDArray &other); NDArray &operator*=(const NDArray &other); + //Write directly to the data array, or create a new one + template + NDArray& operator=(const std::array &other){ + if(Size != 
size_){ + delete[] data_; + size_ = Size; + data_ = new T[size_]; + } + for (size_t i = 0; i < Size; ++i) { + data_[i] = other[i]; + } + return *this; + } + // NDArray& operator/=(const NDArray& other); template NDArray &operator/=(const NDArray &other) { @@ -135,6 +157,11 @@ class NDArray : public ArrayExpr, Ndim> { NDArray &operator&=(const T & /*mask*/); + + + + + void sqrt() { for (int i = 0; i < size_; ++i) { data_[i] = std::sqrt(data_[i]); @@ -159,22 +186,22 @@ class NDArray : public ArrayExpr, Ndim> { } // TODO! is int the right type for index? - T &operator()(int i) { return data_[i]; } - const T &operator()(int i) const { return data_[i]; } + T &operator()(ssize_t i) { return data_[i]; } + const T &operator()(ssize_t i) const { return data_[i]; } - T &operator[](int i) { return data_[i]; } - const T &operator[](int i) const { return data_[i]; } + T &operator[](ssize_t i) { return data_[i]; } + const T &operator[](ssize_t i) const { return data_[i]; } T *data() { return data_; } std::byte *buffer() { return reinterpret_cast(data_); } - size_t size() const { return size_; } + ssize_t size() const { return static_cast(size_); } size_t total_bytes() const { return size_ * sizeof(T); } - std::array shape() const noexcept { return shape_; } - int64_t shape(int64_t i) const noexcept { return shape_[i]; } - std::array strides() const noexcept { return strides_; } + std::array shape() const noexcept { return shape_; } + ssize_t shape(ssize_t i) const noexcept { return shape_[i]; } + std::array strides() const noexcept { return strides_; } size_t bitdepth() const noexcept { return sizeof(T) * 8; } - std::array byte_strides() const noexcept { + std::array byte_strides() const noexcept { auto byte_strides = strides_; for (auto &val : byte_strides) val *= sizeof(T); @@ -201,7 +228,7 @@ class NDArray : public ArrayExpr, Ndim> { }; // Move assign -template +template NDArray & NDArray::operator=(NDArray &&other) noexcept { if (this != &other) { @@ -215,7 +242,7 @@ 
NDArray::operator=(NDArray &&other) noexcept { return *this; } -template +template NDArray &NDArray::operator+=(const NDArray &other) { // check shape if (shape_ == other.shape_) { @@ -227,7 +254,7 @@ NDArray &NDArray::operator+=(const NDArray &other) { throw(std::runtime_error("Shape of ImageDatas must match")); } -template +template NDArray &NDArray::operator-=(const NDArray &other) { // check shape if (shape_ == other.shape_) { @@ -239,7 +266,7 @@ NDArray &NDArray::operator-=(const NDArray &other) { throw(std::runtime_error("Shape of ImageDatas must match")); } -template +template NDArray &NDArray::operator*=(const NDArray &other) { // check shape if (shape_ == other.shape_) { @@ -251,14 +278,14 @@ NDArray &NDArray::operator*=(const NDArray &other) { throw(std::runtime_error("Shape of ImageDatas must match")); } -template +template NDArray &NDArray::operator&=(const T &mask) { for (auto it = begin(); it != end(); ++it) *it &= mask; return *this; } -template +template NDArray NDArray::operator>(const NDArray &other) { if (shape_ == other.shape_) { NDArray result{shape_}; @@ -270,7 +297,7 @@ NDArray NDArray::operator>(const NDArray &other) { throw(std::runtime_error("Shape of ImageDatas must match")); } -template +template NDArray &NDArray::operator=(const NDArray &other) { if (this != &other) { delete[] data_; @@ -283,7 +310,7 @@ NDArray &NDArray::operator=(const NDArray &other) { return *this; } -template +template bool NDArray::operator==(const NDArray &other) const { if (shape_ != other.shape_) return false; @@ -295,80 +322,83 @@ bool NDArray::operator==(const NDArray &other) const { return true; } -template +template bool NDArray::operator!=(const NDArray &other) const { return !((*this) == other); } -template +template NDArray &NDArray::operator++() { for (uint32_t i = 0; i < size_; ++i) data_[i] += 1; return *this; } -template +template NDArray &NDArray::operator=(const T &value) { std::fill_n(data_, size_, value); return *this; } -template +template 
NDArray &NDArray::operator+=(const T &value) { for (uint32_t i = 0; i < size_; ++i) data_[i] += value; return *this; } -template + + + +template NDArray NDArray::operator+(const T &value) { NDArray result = *this; result += value; return result; } -template +template NDArray &NDArray::operator-=(const T &value) { for (uint32_t i = 0; i < size_; ++i) data_[i] -= value; return *this; } -template +template NDArray NDArray::operator-(const T &value) { NDArray result = *this; result -= value; return result; } -template +template NDArray &NDArray::operator/=(const T &value) { for (uint32_t i = 0; i < size_; ++i) data_[i] /= value; return *this; } -template +template NDArray NDArray::operator/(const T &value) { NDArray result = *this; result /= value; return result; } -template +template NDArray &NDArray::operator*=(const T &value) { for (uint32_t i = 0; i < size_; ++i) data_[i] *= value; return *this; } -template +template NDArray NDArray::operator*(const T &value) { NDArray result = *this; result *= value; return result; } -template void NDArray::Print() { - if (shape_[0] < 20 && shape_[1] < 20) - Print_all(); - else - Print_some(); -} +// template void NDArray::Print() { +// if (shape_[0] < 20 && shape_[1] < 20) +// Print_all(); +// else +// Print_some(); +// } -template +template std::ostream &operator<<(std::ostream &os, const NDArray &arr) { for (auto row = 0; row < arr.shape(0); ++row) { for (auto col = 0; col < arr.shape(1); ++col) { @@ -380,7 +410,7 @@ std::ostream &operator<<(std::ostream &os, const NDArray &arr) { return os; } -template void NDArray::Print_all() { +template void NDArray::Print_all() { for (auto row = 0; row < shape_[0]; ++row) { for (auto col = 0; col < shape_[1]; ++col) { std::cout << std::setw(3); @@ -389,7 +419,7 @@ template void NDArray::Print_all() { std::cout << "\n"; } } -template void NDArray::Print_some() { +template void NDArray::Print_some() { for (auto row = 0; row < 5; ++row) { for (auto col = 0; col < 5; ++col) { std::cout << 
std::setw(7); @@ -399,7 +429,7 @@ template void NDArray::Print_some() { } } -template +template void save(NDArray &img, std::string &pathname) { std::ofstream f; f.open(pathname, std::ios::binary); @@ -407,9 +437,9 @@ void save(NDArray &img, std::string &pathname) { f.close(); } -template +template NDArray load(const std::string &pathname, - std::array shape) { + std::array shape) { NDArray img{shape}; std::ifstream f; f.open(pathname, std::ios::binary); @@ -418,4 +448,6 @@ NDArray load(const std::string &pathname, return img; } + + } // namespace aare \ No newline at end of file diff --git a/include/aare/NDView.hpp b/include/aare/NDView.hpp index e3a6d30..56054e2 100644 --- a/include/aare/NDView.hpp +++ b/include/aare/NDView.hpp @@ -1,5 +1,5 @@ #pragma once - +#include "aare/defs.hpp" #include "aare/ArrayExpr.hpp" #include @@ -14,10 +14,10 @@ #include namespace aare { -template using Shape = std::array; +template using Shape = std::array; // TODO! fix mismatch between signed and unsigned -template Shape make_shape(const std::vector &shape) { +template Shape make_shape(const std::vector &shape) { if (shape.size() != Ndim) throw std::runtime_error("Shape size mismatch"); Shape arr; @@ -25,41 +25,41 @@ template Shape make_shape(const std::vector &shape) return arr; } -template int64_t element_offset(const Strides & /*unused*/) { return 0; } +template ssize_t element_offset(const Strides & /*unused*/) { return 0; } -template -int64_t element_offset(const Strides &strides, int64_t i, Ix... index) { +template +ssize_t element_offset(const Strides &strides, ssize_t i, Ix... 
index) { return i * strides[Dim] + element_offset(strides, index...); } -template std::array c_strides(const std::array &shape) { - std::array strides{}; +template std::array c_strides(const std::array &shape) { + std::array strides{}; std::fill(strides.begin(), strides.end(), 1); - for (int64_t i = Ndim - 1; i > 0; --i) { + for (ssize_t i = Ndim - 1; i > 0; --i) { strides[i - 1] = strides[i] * shape[i]; } return strides; } -template std::array make_array(const std::vector &vec) { +template std::array make_array(const std::vector &vec) { assert(vec.size() == Ndim); - std::array arr{}; + std::array arr{}; std::copy_n(vec.begin(), Ndim, arr.begin()); return arr; } -template class NDView : public ArrayExpr, Ndim> { +template class NDView : public ArrayExpr, Ndim> { public: NDView() = default; ~NDView() = default; NDView(const NDView &) = default; NDView(NDView &&) = default; - NDView(T *buffer, std::array shape) + NDView(T *buffer, std::array shape) : buffer_(buffer), strides_(c_strides(shape)), shape_(shape), size_(std::accumulate(std::begin(shape), std::end(shape), 1, std::multiplies<>())) {} - // NDView(T *buffer, const std::vector &shape) + // NDView(T *buffer, const std::vector &shape) // : buffer_(buffer), strides_(c_strides(make_array(shape))), shape_(make_array(shape)), // size_(std::accumulate(std::begin(shape), std::end(shape), 1, std::multiplies<>())) {} @@ -71,16 +71,16 @@ template class NDView : public ArrayExpr(size_); } size_t total_bytes() const { return size_ * sizeof(T); } - std::array strides() const noexcept { return strides_; } + std::array strides() const noexcept { return strides_; } T *begin() { return buffer_; } T *end() { return buffer_ + size_; } T const *begin() const { return buffer_; } T const *end() const { return buffer_ + size_; } - T &operator()(int64_t i) const { return buffer_[i]; } - T &operator[](int64_t i) const { return buffer_[i]; } + T &operator()(ssize_t i) const { return buffer_[i]; } + T &operator[](ssize_t i) const { 
return buffer_[i]; } bool operator==(const NDView &other) const { if (size_ != other.size_) @@ -99,6 +99,15 @@ template class NDView : public ArrayExpr()); } + + template + NDView& operator=(const std::array &arr) { + if(size() != static_cast(arr.size())) + throw std::runtime_error(LOCATION + "Array and NDView size mismatch"); + std::copy(arr.begin(), arr.end(), begin()); + return *this; + } + NDView &operator=(const T val) { for (auto it = begin(); it != end(); ++it) *it = val; @@ -127,15 +136,15 @@ template class NDView : public ArrayExpr strides_{}; - std::array shape_{}; + std::array strides_{}; + std::array shape_{}; uint64_t size_{}; template NDView &elemenwise(T val, BinaryOperation op) { @@ -151,7 +160,7 @@ template class NDView : public ArrayExpr void NDView::print_all() const { +template void NDView::print_all() const { for (auto row = 0; row < shape_[0]; ++row) { for (auto col = 0; col < shape_[1]; ++col) { std::cout << std::setw(3); @@ -162,7 +171,7 @@ template void NDView::print_all() const { } -template +template std::ostream& operator <<(std::ostream& os, const NDView& arr){ for (auto row = 0; row < arr.shape(0); ++row) { for (auto col = 0; col < arr.shape(1); ++col) { @@ -175,4 +184,9 @@ std::ostream& operator <<(std::ostream& os, const NDView& arr){ } +template +NDView make_view(std::vector& vec){ + return NDView(vec.data(), {static_cast(vec.size())}); +} + } // namespace aare \ No newline at end of file diff --git a/include/aare/NumpyFile.hpp b/include/aare/NumpyFile.hpp index 9cd2d61..7381a76 100644 --- a/include/aare/NumpyFile.hpp +++ b/include/aare/NumpyFile.hpp @@ -69,7 +69,7 @@ class NumpyFile : public FileInterface { */ template NDArray load() { NDArray arr(make_shape(m_header.shape)); - if (fseek(fp, static_cast(header_size), SEEK_SET)) { + if (fseek(fp, static_cast(header_size), SEEK_SET)) { throw std::runtime_error(LOCATION + "Error seeking to the start of the data"); } size_t rc = fread(arr.data(), sizeof(T), arr.size(), fp); diff --git 
a/include/aare/Pedestal.hpp b/include/aare/Pedestal.hpp index b5f245b..d6223c1 100644 --- a/include/aare/Pedestal.hpp +++ b/include/aare/Pedestal.hpp @@ -18,34 +18,48 @@ template class Pedestal { uint32_t m_samples; NDArray m_cur_samples; + + //TODO! in case of int needs to be changed to uint64_t NDArray m_sum; NDArray m_sum2; + //Cache mean since it is used over and over in the ClusterFinder + //This optimization is related to the access pattern of the ClusterFinder + //Relies on having more reads than pushes to the pedestal + NDArray m_mean; + public: Pedestal(uint32_t rows, uint32_t cols, uint32_t n_samples = 1000) : m_rows(rows), m_cols(cols), m_samples(n_samples), m_cur_samples(NDArray({rows, cols}, 0)), m_sum(NDArray({rows, cols})), - m_sum2(NDArray({rows, cols})) { + m_sum2(NDArray({rows, cols})), + m_mean(NDArray({rows, cols})) { assert(rows > 0 && cols > 0 && n_samples > 0); m_sum = 0; m_sum2 = 0; + m_mean = 0; } ~Pedestal() = default; NDArray mean() { - NDArray mean_array({m_rows, m_cols}); - for (uint32_t i = 0; i < m_rows * m_cols; i++) { - mean_array(i / m_cols, i % m_cols) = mean(i / m_cols, i % m_cols); - } - return mean_array; + return m_mean; } SUM_TYPE mean(const uint32_t row, const uint32_t col) const { + return m_mean(row, col); + } + + SUM_TYPE std(const uint32_t row, const uint32_t col) const { + return std::sqrt(variance(row, col)); + } + + SUM_TYPE variance(const uint32_t row, const uint32_t col) const { if (m_cur_samples(row, col) == 0) { return 0.0; } - return m_sum(row, col) / m_cur_samples(row, col); + return m_sum2(row, col) / m_cur_samples(row, col) - + mean(row, col) * mean(row, col); } NDArray variance() { @@ -57,13 +71,7 @@ template class Pedestal { return variance_array; } - SUM_TYPE variance(const uint32_t row, const uint32_t col) const { - if (m_cur_samples(row, col) == 0) { - return 0.0; - } - return m_sum2(row, col) / m_cur_samples(row, col) - - mean(row, col) * mean(row, col); - } + NDArray std() { NDArray 
standard_deviation_array({m_rows, m_cols}); @@ -75,14 +83,13 @@ template class Pedestal { return standard_deviation_array; } - SUM_TYPE std(const uint32_t row, const uint32_t col) const { - return std::sqrt(variance(row, col)); - } + void clear() { - for (uint32_t i = 0; i < m_rows * m_cols; i++) { - clear(i / m_cols, i % m_cols); - } + m_sum = 0; + m_sum2 = 0; + m_cur_samples = 0; + m_mean = 0; } @@ -91,28 +98,51 @@ template class Pedestal { m_sum(row, col) = 0; m_sum2(row, col) = 0; m_cur_samples(row, col) = 0; + m_mean(row, col) = 0; } - // frame level operations + + + template void push(NDView frame) { assert(frame.size() == m_rows * m_cols); // TODO! move away from m_rows, m_cols - if (frame.shape() != std::array{m_rows, m_cols}) { + if (frame.shape() != std::array{m_rows, m_cols}) { throw std::runtime_error( "Frame shape does not match pedestal shape"); } - for (uint32_t row = 0; row < m_rows; row++) { - for (uint32_t col = 0; col < m_cols; col++) { + for (size_t row = 0; row < m_rows; row++) { + for (size_t col = 0; col < m_cols; col++) { push(row, col, frame(row, col)); } } - - // // TODO: test the effect of #pragma omp parallel for - // for (uint32_t index = 0; index < m_rows * m_cols; index++) { - // push(index / m_cols, index % m_cols, frame(index)); - // } } + + /** + * Push but don't update the cached mean. Speeds up the process + * when initializing the pedestal. + * + */ + template void push_no_update(NDView frame) { + assert(frame.size() == m_rows * m_cols); + + // TODO! 
move away from m_rows, m_cols + if (frame.shape() != std::array{m_rows, m_cols}) { + throw std::runtime_error( + "Frame shape does not match pedestal shape"); + } + + for (size_t row = 0; row < m_rows; row++) { + for (size_t col = 0; col < m_cols; col++) { + push_no_update(row, col, frame(row, col)); + } + } + } + + + + template void push(Frame &frame) { assert(frame.rows() == static_cast(m_rows) && frame.cols() == static_cast(m_cols)); @@ -132,18 +162,48 @@ template class Pedestal { template void push(const uint32_t row, const uint32_t col, const T val_) { SUM_TYPE val = static_cast(val_); - const uint32_t idx = index(row, col); - if (m_cur_samples(idx) < m_samples) { - m_sum(idx) += val; - m_sum2(idx) += val * val; - m_cur_samples(idx)++; + if (m_cur_samples(row, col) < m_samples) { + m_sum(row, col) += val; + m_sum2(row, col) += val * val; + m_cur_samples(row, col)++; } else { - m_sum(idx) += val - m_sum(idx) / m_cur_samples(idx); - m_sum2(idx) += val * val - m_sum2(idx) / m_cur_samples(idx); + m_sum(row, col) += val - m_sum(row, col) / m_samples; + m_sum2(row, col) += val * val - m_sum2(row, col) / m_samples; + } + //Since we just did a push we know that m_cur_samples(row, col) is at least 1 + m_mean(row, col) = m_sum(row, col) / m_cur_samples(row, col); + } + + template + void push_no_update(const uint32_t row, const uint32_t col, const T val_) { + SUM_TYPE val = static_cast(val_); + if (m_cur_samples(row, col) < m_samples) { + m_sum(row, col) += val; + m_sum2(row, col) += val * val; + m_cur_samples(row, col)++; + } else { + m_sum(row, col) += val - m_sum(row, col) / m_cur_samples(row, col); + m_sum2(row, col) += val * val - m_sum2(row, col) / m_cur_samples(row, col); } } - uint32_t index(const uint32_t row, const uint32_t col) const { - return row * m_cols + col; - }; + + /** + * @brief Update the mean of the pedestal. This is used after having done + * push_no_update. It is not necessary to call this function after push. 
+ */ + void update_mean(){ + m_mean = m_sum / m_cur_samples; + } + + template + void push_fast(const uint32_t row, const uint32_t col, const T val_){ + //Assume we reached the steady state where all pixels have + //m_samples samples + SUM_TYPE val = static_cast(val_); + m_sum(row, col) += val - m_sum(row, col) / m_samples; + m_sum2(row, col) += val * val - m_sum2(row, col) / m_samples; + m_mean(row, col) = m_sum(row, col) / m_samples; + } + }; } // namespace aare \ No newline at end of file diff --git a/include/aare/ProducerConsumerQueue.hpp b/include/aare/ProducerConsumerQueue.hpp new file mode 100644 index 0000000..426b9e2 --- /dev/null +++ b/include/aare/ProducerConsumerQueue.hpp @@ -0,0 +1,203 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// @author Bo Hu (bhu@fb.com) +// @author Jordan DeLong (delong.j@fb.com) + +// Changes made by PSD Detector Group: +// Copied: Line 34 constexpr std::size_t hardware_destructive_interference_size = 128; from folly/lang/Align.h +// Changed extension to .hpp +// Changed namespace to aare + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +constexpr std::size_t hardware_destructive_interference_size = 128; +namespace aare { + +/* + * ProducerConsumerQueue is a one producer and one consumer queue + * without locks. 
+ */ +template struct ProducerConsumerQueue { + typedef T value_type; + + ProducerConsumerQueue(const ProducerConsumerQueue &) = delete; + ProducerConsumerQueue &operator=(const ProducerConsumerQueue &) = delete; + + + ProducerConsumerQueue(ProducerConsumerQueue &&other){ + size_ = other.size_; + records_ = other.records_; + other.records_ = nullptr; + readIndex_ = other.readIndex_.load(std::memory_order_acquire); + writeIndex_ = other.writeIndex_.load(std::memory_order_acquire); + } + ProducerConsumerQueue &operator=(ProducerConsumerQueue &&other){ + size_ = other.size_; + records_ = other.records_; + other.records_ = nullptr; + readIndex_ = other.readIndex_.load(std::memory_order_acquire); + writeIndex_ = other.writeIndex_.load(std::memory_order_acquire); + return *this; + } + + + ProducerConsumerQueue():ProducerConsumerQueue(2){}; + // size must be >= 2. + // + // Also, note that the number of usable slots in the queue at any + // given time is actually (size-1), so if you start with an empty queue, + // isFull() will return true after size-1 insertions. + explicit ProducerConsumerQueue(uint32_t size) + : size_(size), records_(static_cast(std::malloc(sizeof(T) * size))), readIndex_(0), writeIndex_(0) { + assert(size >= 2); + if (!records_) { + throw std::bad_alloc(); + } + } + + ~ProducerConsumerQueue() { + // We need to destruct anything that may still exist in our queue. + // (No real synchronization needed at destructor time: only one + // thread can be doing this.) 
+ if (!std::is_trivially_destructible::value) { + size_t readIndex = readIndex_; + size_t endIndex = writeIndex_; + while (readIndex != endIndex) { + records_[readIndex].~T(); + if (++readIndex == size_) { + readIndex = 0; + } + } + } + + std::free(records_); + } + + template bool write(Args &&...recordArgs) { + auto const currentWrite = writeIndex_.load(std::memory_order_relaxed); + auto nextRecord = currentWrite + 1; + if (nextRecord == size_) { + nextRecord = 0; + } + if (nextRecord != readIndex_.load(std::memory_order_acquire)) { + new (&records_[currentWrite]) T(std::forward(recordArgs)...); + writeIndex_.store(nextRecord, std::memory_order_release); + return true; + } + + // queue is full + return false; + } + + // move (or copy) the value at the front of the queue to given variable + bool read(T &record) { + auto const currentRead = readIndex_.load(std::memory_order_relaxed); + if (currentRead == writeIndex_.load(std::memory_order_acquire)) { + // queue is empty + return false; + } + + auto nextRecord = currentRead + 1; + if (nextRecord == size_) { + nextRecord = 0; + } + record = std::move(records_[currentRead]); + records_[currentRead].~T(); + readIndex_.store(nextRecord, std::memory_order_release); + return true; + } + + // pointer to the value at the front of the queue (for use in-place) or + // nullptr if empty. 
+ T *frontPtr() { + auto const currentRead = readIndex_.load(std::memory_order_relaxed); + if (currentRead == writeIndex_.load(std::memory_order_acquire)) { + // queue is empty + return nullptr; + } + return &records_[currentRead]; + } + + // queue must not be empty + void popFront() { + auto const currentRead = readIndex_.load(std::memory_order_relaxed); + assert(currentRead != writeIndex_.load(std::memory_order_acquire)); + + auto nextRecord = currentRead + 1; + if (nextRecord == size_) { + nextRecord = 0; + } + records_[currentRead].~T(); + readIndex_.store(nextRecord, std::memory_order_release); + } + + bool isEmpty() const { + return readIndex_.load(std::memory_order_acquire) == writeIndex_.load(std::memory_order_acquire); + } + + bool isFull() const { + auto nextRecord = writeIndex_.load(std::memory_order_acquire) + 1; + if (nextRecord == size_) { + nextRecord = 0; + } + if (nextRecord != readIndex_.load(std::memory_order_acquire)) { + return false; + } + // queue is full + return true; + } + + // * If called by consumer, then true size may be more (because producer may + // be adding items concurrently). + // * If called by producer, then true size may be less (because consumer may + // be removing items concurrently). + // * It is undefined to call this from any other thread. + size_t sizeGuess() const { + int ret = writeIndex_.load(std::memory_order_acquire) - readIndex_.load(std::memory_order_acquire); + if (ret < 0) { + ret += size_; + } + return ret; + } + + // maximum number of items in the queue. 
+ size_t capacity() const { return size_ - 1; } + + private: + using AtomicIndex = std::atomic; + + char pad0_[hardware_destructive_interference_size]; + // const uint32_t size_; + uint32_t size_; + // T *const records_; + T* records_; + + alignas(hardware_destructive_interference_size) AtomicIndex readIndex_; + alignas(hardware_destructive_interference_size) AtomicIndex writeIndex_; + + char pad1_[hardware_destructive_interference_size - sizeof(AtomicIndex)]; +}; + +} // namespace aare \ No newline at end of file diff --git a/include/aare/RawFile.hpp b/include/aare/RawFile.hpp index eb044e3..1cca1fd 100644 --- a/include/aare/RawFile.hpp +++ b/include/aare/RawFile.hpp @@ -30,19 +30,12 @@ struct ModuleConfig { * Consider using that unless you need raw file specific functionality. */ class RawFile : public FileInterface { - size_t n_subfiles{}; //f0,f1...fn - size_t n_subfile_parts{}; // d0,d1...dn - //TODO! move to vector of SubFile instead of pointers - std::vector> subfiles; //subfiles[f0,f1...fn][d0,d1...dn] - std::vector positions; - std::vector m_module_pixel_0; + std::vector> m_subfiles; ModuleConfig cfg{0, 0}; - RawMasterFile m_master; - size_t m_current_frame{}; - size_t m_rows{}; - size_t m_cols{}; + size_t m_current_subfile{}; + DetectorGeometry m_geometry; public: /** @@ -52,7 +45,7 @@ class RawFile : public FileInterface { */ RawFile(const std::filesystem::path &fname, const std::string &mode = "r"); - virtual ~RawFile() override; + virtual ~RawFile() override = default; Frame read_frame() override; Frame read_frame(size_t frame_number) override; @@ -76,7 +69,7 @@ class RawFile : public FileInterface { size_t cols() const override; size_t bitdepth() const override; xy geometry(); - size_t n_mod() const; + size_t n_modules() const; RawMasterFile master() const; @@ -111,11 +104,9 @@ class RawFile : public FileInterface { */ static DetectorHeader read_header(const std::filesystem::path &fname); - void update_geometry_with_roi(); - int 
find_number_of_subfiles(); - void open_subfiles(); void find_geometry(); }; + } // namespace aare \ No newline at end of file diff --git a/include/aare/RawMasterFile.hpp b/include/aare/RawMasterFile.hpp index 42c324e..4d143a6 100644 --- a/include/aare/RawMasterFile.hpp +++ b/include/aare/RawMasterFile.hpp @@ -62,17 +62,6 @@ class ScanParameters { }; -struct ROI{ - int64_t xmin{}; - int64_t xmax{}; - int64_t ymin{}; - int64_t ymax{}; - - int64_t height() const { return ymax - ymin; } - int64_t width() const { return xmax - xmin; } -}; - - /** * @brief Class for parsing a master file either in our .json format or the old * .raw format @@ -132,6 +121,7 @@ class RawMasterFile { size_t total_frames_expected() const; xy geometry() const; + size_t n_modules() const; std::optional analog_samples() const; std::optional digital_samples() const; diff --git a/include/aare/RawSubFile.hpp b/include/aare/RawSubFile.hpp index 4d78670..1059843 100644 --- a/include/aare/RawSubFile.hpp +++ b/include/aare/RawSubFile.hpp @@ -18,11 +18,20 @@ class RawSubFile { std::ifstream m_file; DetectorType m_detector_type; size_t m_bitdepth; - std::filesystem::path m_fname; + std::filesystem::path m_path; //!< path to the subfile + std::string m_base_name; //!< base name used for formatting file names + size_t m_offset{}; //!< file index of the first file, allow starting at non zero file + size_t m_total_frames{}; //!< total number of frames in the series of files size_t m_rows{}; size_t m_cols{}; size_t m_bytes_per_frame{}; - size_t n_frames{}; + + + int m_module_index{}; + size_t m_current_file_index{}; //!< The index of the open file + size_t m_current_frame_index{}; //!< The index of the current frame (with reference to all files) + std::vector m_last_frame_in_file{}; //!< Used for seeking to the correct file + uint32_t m_pos_row{}; uint32_t m_pos_col{}; @@ -53,6 +62,7 @@ class RawSubFile { size_t tell(); void read_into(std::byte *image_buf, DetectorHeader *header = nullptr); + void 
read_into(std::byte *image_buf, size_t n_frames, DetectorHeader *header= nullptr); void get_part(std::byte *buffer, size_t frame_index); void read_header(DetectorHeader *header); @@ -64,8 +74,18 @@ class RawSubFile { size_t bytes_per_frame() const { return m_bytes_per_frame; } size_t pixels_per_frame() const { return m_rows * m_cols; } - size_t bytes_per_pixel() const { return m_bitdepth / 8; } + size_t bytes_per_pixel() const { return m_bitdepth / bits_per_byte; } + size_t frames_in_file() const { return m_total_frames; } + +private: + template + void read_with_map(std::byte *image_buf); + + void parse_fname(const std::filesystem::path &fname); + void scan_files(); + void open_file(size_t file_index); + std::filesystem::path fpath(size_t file_index) const; }; diff --git a/include/aare/VarClusterFinder.hpp b/include/aare/VarClusterFinder.hpp index d4d51cc..596bf06 100644 --- a/include/aare/VarClusterFinder.hpp +++ b/include/aare/VarClusterFinder.hpp @@ -7,7 +7,7 @@ #include "aare/NDArray.hpp" -const int MAX_CLUSTER_SIZE = 200; +const int MAX_CLUSTER_SIZE = 50; namespace aare { template class VarClusterFinder { @@ -28,7 +28,7 @@ template class VarClusterFinder { }; private: - const std::array shape_; + const std::array shape_; NDView original_; NDArray labeled_; NDArray peripheral_labeled_; @@ -226,7 +226,7 @@ template void VarClusterFinder::single_pass(NDView img) { template void VarClusterFinder::first_pass() { - for (size_t i = 0; i < original_.size(); ++i) { + for (ssize_t i = 0; i < original_.size(); ++i) { if (use_noise_map) threshold_ = 5 * noiseMap(i); binary_(i) = (original_(i) > threshold_); @@ -250,7 +250,7 @@ template void VarClusterFinder::first_pass() { template void VarClusterFinder::second_pass() { - for (size_t i = 0; i != labeled_.size(); ++i) { + for (ssize_t i = 0; i != labeled_.size(); ++i) { auto cl = labeled_(i); if (cl != 0) { auto it = child.find(cl); diff --git a/include/aare/algorithm.hpp b/include/aare/algorithm.hpp new file mode 100644 
index 0000000..be2018f
--- /dev/null
+++ b/include/aare/algorithm.hpp
@@ -0,0 +1,122 @@
+
+#pragma once
+#include <algorithm>
+#include <array>
+#include <vector>
+#include <aare/NDArray.hpp>
+
+namespace aare {
+/**
+ * @brief Index of the last element that is smaller than val.
+ * Requires a sorted array. Uses >= for ordering. If all elements
+ * are smaller it returns the last element and if all elements are
+ * larger it returns the first element.
+ * @param first iterator to the first element
+ * @param last iterator to the last element
+ * @param val value to compare
+ * @return index of the last element that is smaller than val
+ *
+ */
+template <typename T>
+size_t last_smaller(const T* first, const T* last, T val) {
+    for (auto iter = first+1; iter != last; ++iter) {
+        if (*iter >= val) {
+            return std::distance(first, iter-1);
+        }
+    }
+    return std::distance(first, last-1);
+}
+
+template <typename T>
+size_t last_smaller(const NDArray<T, 1>& arr, T val) {
+    return last_smaller(arr.begin(), arr.end(), val);
+}
+
+template <typename T>
+size_t last_smaller(const std::vector<T>& vec, T val) {
+    return last_smaller(vec.data(), vec.data()+vec.size(), val);
+}
+
+/**
+ * @brief Index of the first element that is larger than val.
+ * Requires a sorted array. Uses > for ordering. If all elements
+ * are larger it returns the first element and if all elements are
+ * smaller it returns the last element.
+ * @param first iterator to the first element
+ * @param last iterator to the last element
+ * @param val value to compare
+ * @return index of the first element that is larger than val
+ */
+template <typename T>
+size_t first_larger(const T* first, const T* last, T val) {
+    for (auto iter = first; iter != last; ++iter) {
+        if (*iter > val) {
+            return std::distance(first, iter);
+        }
+    }
+    return std::distance(first, last-1);
+}
+
+template <typename T>
+size_t first_larger(const NDArray<T, 1>& arr, T val) {
+    return first_larger(arr.begin(), arr.end(), val);
+}
+
+template <typename T>
+size_t first_larger(const std::vector<T>& vec, T val) {
+    return first_larger(vec.data(), vec.data()+vec.size(), val);
+}
+
+/**
+ * @brief Index of the nearest element to val.
+ * Requires a sorted array. If there is no difference it takes the first element.
+ * @param first iterator to the first element
+ * @param last iterator to the last element
+ * @param val value to compare
+ * @return index of the nearest element
+ */
+template <typename T>
+size_t nearest_index(const T* first, const T* last, T val) {
+    auto iter = std::min_element(first, last,
+        [val](T a, T b) {
+            return std::abs(a - val) < std::abs(b - val);
+        });
+    return std::distance(first, iter);
+}
+
+template <typename T>
+size_t nearest_index(const NDArray<T, 1>& arr, T val) {
+    return nearest_index(arr.begin(), arr.end(), val);
+}
+
+template <typename T>
+size_t nearest_index(const std::vector<T>& vec, T val) {
+    return nearest_index(vec.data(), vec.data()+vec.size(), val);
+}
+
+template <typename T, size_t N>
+size_t nearest_index(const std::array<T,N>& arr, T val) {
+    return nearest_index(arr.data(), arr.data()+arr.size(), val);
+}
+
+template <typename T>
+std::vector<T> cumsum(const std::vector<T>& vec) {
+    std::vector<T> result(vec.size());
+    std::partial_sum(vec.begin(), vec.end(), result.begin());
+    return result;
+}
+
+
+template <typename Container> bool all_equal(const Container &c) {
+    if (!c.empty() &&
+        std::all_of(begin(c), end(c),
+                    [c](const typename Container::value_type &element) {
+                        return element == c.front();
+                    }))
+        return true;
+    return false;
+}
+
+
+ +} // namespace aare \ No newline at end of file diff --git a/include/aare/decode.hpp b/include/aare/decode.hpp new file mode 100644 index 0000000..e784c4a --- /dev/null +++ b/include/aare/decode.hpp @@ -0,0 +1,26 @@ +#pragma once + +#include +#include +#include +namespace aare { + + +uint16_t adc_sar_05_decode64to16(uint64_t input); +uint16_t adc_sar_04_decode64to16(uint64_t input); +void adc_sar_05_decode64to16(NDView input, NDView output); +void adc_sar_04_decode64to16(NDView input, NDView output); + + +/** + * @brief Apply custom weights to a 16-bit input value. Will sum up weights[i]**i + * for each bit i that is set in the input value. + * @throws std::out_of_range if weights.size() < 16 + * @param input 16-bit input value + * @param weights vector of weights, size must be less than or equal to 16 + */ +double apply_custom_weights(uint16_t input, const NDView weights); + +void apply_custom_weights(NDView input, NDView output, const NDView weights); + +} // namespace aare diff --git a/include/aare/defs.hpp b/include/aare/defs.hpp index 13bfa36..8b048d8 100644 --- a/include/aare/defs.hpp +++ b/include/aare/defs.hpp @@ -1,11 +1,9 @@ #pragma once #include "aare/Dtype.hpp" -// #include "aare/utils/logger.hpp" #include #include - #include #include #include @@ -38,16 +36,19 @@ namespace aare { +inline constexpr size_t bits_per_byte = 8; + void assert_failed(const std::string &msg); + class DynamicCluster { public: int cluster_sizeX; int cluster_sizeY; int16_t x; int16_t y; - Dtype dt; + Dtype dt; // 4 bytes private: std::byte *m_data; @@ -179,15 +180,49 @@ template struct t_xy { using xy = t_xy; +/** + * @brief Class to hold the geometry of a module. Where pixel 0 is located and the size of the module + */ struct ModuleGeometry{ - int x{}; - int y{}; + int origin_x{}; + int origin_y{}; int height{}; int width{}; + int row_index{}; + int col_index{}; }; +/** + * @brief Class to hold the geometry of a detector. 
Number of modules, their size and where pixel 0 + * for each module is located + */ +struct DetectorGeometry{ + int modules_x{}; + int modules_y{}; + int pixels_x{}; + int pixels_y{}; + int module_gap_row{}; + int module_gap_col{}; + std::vector module_pixel_0; + + auto size() const { return module_pixel_0.size(); } +}; -using dynamic_shape = std::vector; +struct ROI{ + ssize_t xmin{}; + ssize_t xmax{}; + ssize_t ymin{}; + ssize_t ymax{}; + + ssize_t height() const { return ymax - ymin; } + ssize_t width() const { return xmax - xmin; } + bool contains(ssize_t x, ssize_t y) const { + return x >= xmin && x < xmax && y >= ymin && y < ymax; + } + }; + + +using dynamic_shape = std::vector; //TODO! Can we uniform enums between the libraries? diff --git a/include/aare/geo_helpers.hpp b/include/aare/geo_helpers.hpp new file mode 100644 index 0000000..d0d5d1a --- /dev/null +++ b/include/aare/geo_helpers.hpp @@ -0,0 +1,16 @@ +#pragma once +#include "aare/defs.hpp" +#include "aare/RawMasterFile.hpp" //ROI refactor away +namespace aare{ + +/** + * @brief Update the detector geometry given a region of interest + * + * @param geo + * @param roi + * @return DetectorGeometry + */ +DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, ROI roi); + + +} // namespace aare \ No newline at end of file diff --git a/include/aare/logger.hpp b/include/aare/logger.hpp new file mode 100644 index 0000000..06e6feb --- /dev/null +++ b/include/aare/logger.hpp @@ -0,0 +1,139 @@ +#pragma once +/*Utility to log to console*/ + + +#include +#include +#include + +namespace aare { + +#define RED "\x1b[31m" +#define GREEN "\x1b[32m" +#define YELLOW "\x1b[33m" +#define BLUE "\x1b[34m" +#define MAGENTA "\x1b[35m" +#define CYAN "\x1b[36m" +#define GRAY "\x1b[37m" +#define DARKGRAY "\x1b[30m" + +#define BG_BLACK "\x1b[48;5;232m" +#define BG_RED "\x1b[41m" +#define BG_GREEN "\x1b[42m" +#define BG_YELLOW "\x1b[43m" +#define BG_BLUE "\x1b[44m" +#define BG_MAGENTA "\x1b[45m" +#define BG_CYAN "\x1b[46m" 
+#define RESET "\x1b[0m" +#define BOLD "\x1b[1m" + + +enum TLogLevel { + logERROR, + logWARNING, + logINFOBLUE, + logINFOGREEN, + logINFORED, + logINFOCYAN, + logINFOMAGENTA, + logINFO, + logDEBUG, + logDEBUG1, + logDEBUG2, + logDEBUG3, + logDEBUG4, + logDEBUG5 +}; + +// Compiler should optimize away anything below this value +#ifndef AARE_LOG_LEVEL +#define AARE_LOG_LEVEL "LOG LEVEL NOT SET IN CMAKE" //This is configured in the main CMakeLists.txt +#endif + +#define __AT__ \ + std::string(__FILE__) + std::string("::") + std::string(__func__) + \ + std::string("(): ") +#define __SHORT_FORM_OF_FILE__ \ + (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__) +#define __SHORT_AT__ \ + std::string(__SHORT_FORM_OF_FILE__) + std::string("::") + \ + std::string(__func__) + std::string("(): ") + +class Logger { + std::ostringstream os; + TLogLevel m_level = AARE_LOG_LEVEL; + + public: + Logger() = default; + explicit Logger(TLogLevel level) : m_level(level){}; + ~Logger() { + // output in the destructor to allow for << syntax + os << RESET << '\n'; + std::clog << os.str() << std::flush; // Single write + } + + static TLogLevel &ReportingLevel() { // singelton eeh TODO! Do we need a runtime option? 
+ static TLogLevel reportingLevel = logDEBUG5; + return reportingLevel; + } + + // Danger this buffer need as many elements as TLogLevel + static const char *Color(TLogLevel level) noexcept { + static const char *const colors[] = { + RED BOLD, YELLOW BOLD, BLUE, GREEN, RED, CYAN, MAGENTA, + RESET, RESET, RESET, RESET, RESET, RESET, RESET}; + // out of bounds + if (level < 0 || level >= sizeof(colors) / sizeof(colors[0])) { + return RESET; + } + return colors[level]; + } + + // Danger this buffer need as many elements as TLogLevel + static std::string ToString(TLogLevel level) { + static const char *const buffer[] = { + "ERROR", "WARNING", "INFO", "INFO", "INFO", + "INFO", "INFO", "INFO", "DEBUG", "DEBUG1", + "DEBUG2", "DEBUG3", "DEBUG4", "DEBUG5"}; + // out of bounds + if (level < 0 || level >= sizeof(buffer) / sizeof(buffer[0])) { + return "UNKNOWN"; + } + return buffer[level]; + } + + std::ostringstream &Get() { + os << Color(m_level) << "- " << Timestamp() << " " << ToString(m_level) + << ": "; + return os; + } + + static std::string Timestamp() { + constexpr size_t buffer_len = 12; + char buffer[buffer_len]; + time_t t; + ::time(&t); + tm r; + strftime(buffer, buffer_len, "%X", localtime_r(&t, &r)); + buffer[buffer_len - 1] = '\0'; + struct timeval tv; + gettimeofday(&tv, nullptr); + constexpr size_t result_len = 100; + char result[result_len]; + snprintf(result, result_len, "%s.%03ld", buffer, + static_cast(tv.tv_usec) / 1000); + result[result_len - 1] = '\0'; + return result; + } +}; + +// TODO! Do we need to keep the runtime option? 
+#define LOG(level) \ + if (level > AARE_LOG_LEVEL) \ + ; \ + else if (level > aare::Logger::ReportingLevel()) \ + ; \ + else \ + aare::Logger(level).Get() + +} // namespace aare diff --git a/include/aare/utils/ifstream_helpers.hpp b/include/aare/utils/ifstream_helpers.hpp new file mode 100644 index 0000000..0a842ed --- /dev/null +++ b/include/aare/utils/ifstream_helpers.hpp @@ -0,0 +1,12 @@ +#pragma once + +#include +#include +namespace aare { + +/** + * @brief Get the error message from an ifstream object +*/ +std::string ifstream_error_msg(std::ifstream &ifs); + +} // namespace aare \ No newline at end of file diff --git a/include/aare/utils/par.hpp b/include/aare/utils/par.hpp new file mode 100644 index 0000000..efb1c77 --- /dev/null +++ b/include/aare/utils/par.hpp @@ -0,0 +1,18 @@ +#include +#include +#include + +namespace aare { + + template + void RunInParallel(F func, const std::vector>& tasks) { + // auto tasks = split_task(0, y.shape(0), n_threads); + std::vector threads; + for (auto &task : tasks) { + threads.push_back(std::thread(func, task.first, task.second)); + } + for (auto &thread : threads) { + thread.join(); + } + } +} // namespace aare \ No newline at end of file diff --git a/include/aare/utils/task.hpp b/include/aare/utils/task.hpp new file mode 100644 index 0000000..a6ee142 --- /dev/null +++ b/include/aare/utils/task.hpp @@ -0,0 +1,8 @@ + +#include +#include + +namespace aare { +std::vector> split_task(int first, int last, int n_threads); + +} // namespace aare \ No newline at end of file diff --git a/patches/libzmq_cmake_version.patch b/patches/libzmq_cmake_version.patch new file mode 100644 index 0000000..4e421d3 --- /dev/null +++ b/patches/libzmq_cmake_version.patch @@ -0,0 +1,18 @@ +diff --git a/CMakeLists.txt b/CMakeLists.txt +index dd3d8eb9..c0187747 100644 +--- a/CMakeLists.txt ++++ b/CMakeLists.txt +@@ -1,11 +1,8 @@ + # CMake build script for ZeroMQ + project(ZeroMQ) + +-if(${CMAKE_SYSTEM_NAME} STREQUAL Darwin) +- 
cmake_minimum_required(VERSION 3.0.2) +-else() +- cmake_minimum_required(VERSION 2.8.12) +-endif() ++cmake_minimum_required(VERSION 3.15) ++message(STATUS "Patched cmake version") + + include(CheckIncludeFiles) + include(CheckCCompilerFlag) diff --git a/patches/lmfit.patch b/patches/lmfit.patch new file mode 100644 index 0000000..22063bf --- /dev/null +++ b/patches/lmfit.patch @@ -0,0 +1,13 @@ +diff --git a/lib/CMakeLists.txt b/lib/CMakeLists.txt +index 4efb7ed..6533660 100644 +--- a/lib/CMakeLists.txt ++++ b/lib/CMakeLists.txt +@@ -11,7 +11,7 @@ target_compile_definitions(${lib} PRIVATE "LMFIT_EXPORT") # for Windows DLL expo + + target_include_directories(${lib} + PUBLIC +- $ ++ $ + $ + ) + diff --git a/pyproject.toml b/pyproject.toml index f194c68..db3cb3c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,15 +1,41 @@ +[tool.scikit-build.metadata.version] +provider = "scikit_build_core.metadata.regex" +input = "VERSION" +regex = '^(?P\d+(?:\.\d+)*(?:[\.\+\w]+)?)$' +result = "{version}" + [build-system] requires = ["scikit-build-core>=0.10", "pybind11", "numpy"] build-backend = "scikit_build_core.build" [project] name = "aare" -version = "2024.11.28.dev0" +dynamic = ["version"] +requires-python = ">=3.11" +dependencies = [ + "numpy", + "matplotlib", +] + + +[tool.cibuildwheel] + +build = "cp{311,312,313}-manylinux_x86_64" + + + [tool.scikit-build] -cmake.verbose = true +build.verbose = true +cmake.build-type = "Release" +install.components = ["python"] [tool.scikit-build.cmake.define] AARE_PYTHON_BINDINGS = "ON" -AARE_SYSTEM_LIBRARIES = "ON" -AARE_INSTALL_PYTHONEXT = "ON" \ No newline at end of file +AARE_INSTALL_PYTHONEXT = "ON" + + +[tool.pytest.ini_options] +markers = [ + "files: marks tests that need additional data (deselect with '-m \"not files\"')", +] \ No newline at end of file diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index e2a7bca..ae84baa 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -1,12 +1,13 @@ -find_package 
(Python 3.10 COMPONENTS Interpreter Development REQUIRED) +find_package (Python 3.10 COMPONENTS Interpreter Development.Module REQUIRED) +set(PYBIND11_FINDPYTHON ON) # Needed for RH8 # Download or find pybind11 depending on configuration if(AARE_FETCH_PYBIND11) FetchContent_Declare( pybind11 GIT_REPOSITORY https://github.com/pybind/pybind11 - GIT_TAG v2.13.0 + GIT_TAG v2.13.6 ) FetchContent_MakeAvailable(pybind11) else() @@ -28,6 +29,10 @@ target_link_libraries(_aare PRIVATE aare_core aare_compiler_flags) set( PYTHON_FILES aare/__init__.py aare/CtbRawFile.py + aare/ClusterFinder.py + aare/ClusterVector.py + + aare/func.py aare/RawFile.py aare/transform.py aare/ScanParameters.py @@ -35,22 +40,6 @@ set( PYTHON_FILES ) -#HDF5 -if (AARE_HDF5) - find_package(HDF5 1.10 COMPONENTS CXX REQUIRED) - add_definitions( - ${HDF5_DEFINITIONS} - ) - list(APPEND PYTHON_FILES - aare/Hdf5File.py - ) - if(HDF5_FOUND) - add_definitions(-DHDF5_FOUND) - target_link_libraries(_aare PUBLIC ${HDF5_LIBRARIES}) - target_include_directories(_aare PUBLIC ${HDF5_INCLUDE_DIRS}) - endif() -endif() - # Copy the python files to the build directory foreach(FILE ${PYTHON_FILES}) configure_file(${FILE} ${CMAKE_BINARY_DIR}/${FILE} ) @@ -60,17 +49,30 @@ set_target_properties(_aare PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/aare ) +set(PYTHON_EXAMPLES + examples/play.py + examples/fits.py +) -# Copy the examples/scripts to the build directory -configure_file(examples/play.py ${CMAKE_BINARY_DIR}/play.py) +# Copy the python examples to the build directory +foreach(FILE ${PYTHON_EXAMPLES}) + configure_file(${FILE} ${CMAKE_BINARY_DIR}/${FILE} ) + message(STATUS "Copying ${FILE} to ${CMAKE_BINARY_DIR}/${FILE}") +endforeach(FILE ${PYTHON_EXAMPLES}) if(AARE_INSTALL_PYTHONEXT) - install(TARGETS _aare + install( + TARGETS _aare EXPORT "${TARGETS_EXPORT_NAME}" LIBRARY DESTINATION aare + COMPONENT python ) - install(FILES ${PYTHON_FILES} DESTINATION aare) + install( + FILES ${PYTHON_FILES} + DESTINATION 
aare + COMPONENT python + ) endif() \ No newline at end of file diff --git a/python/aare/ClusterFinder.py b/python/aare/ClusterFinder.py new file mode 100644 index 0000000..6e7c352 --- /dev/null +++ b/python/aare/ClusterFinder.py @@ -0,0 +1,67 @@ + +from ._aare import ClusterFinder_Cluster3x3i, ClusterFinder_Cluster2x2i, ClusterFinderMT_Cluster3x3i, ClusterFinderMT_Cluster2x2i, ClusterCollector_Cluster3x3i, ClusterCollector_Cluster2x2i + + +from ._aare import ClusterFileSink_Cluster3x3i, ClusterFileSink_Cluster2x2i +import numpy as np + +def ClusterFinder(image_size, cluster_size, n_sigma=5, dtype = np.int32, capacity = 1024): + """ + Factory function to create a ClusterFinder object. Provides a cleaner syntax for + the templated ClusterFinder in C++. + """ + if dtype == np.int32 and cluster_size == (3,3): + return ClusterFinder_Cluster3x3i(image_size, n_sigma = n_sigma, capacity=capacity) + elif dtype == np.int32 and cluster_size == (2,2): + return ClusterFinder_Cluster2x2i(image_size, n_sigma = n_sigma, capacity=capacity) + else: + #TODO! add the other formats + raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32 is supported.") + + +def ClusterFinderMT(image_size, cluster_size = (3,3), dtype=np.int32, n_sigma=5, capacity = 1024, n_threads = 3): + """ + Factory function to create a ClusterFinderMT object. Provides a cleaner syntax for + the templated ClusterFinderMT in C++. + """ + + if dtype == np.int32 and cluster_size == (3,3): + return ClusterFinderMT_Cluster3x3i(image_size, n_sigma = n_sigma, + capacity = capacity, n_threads = n_threads) + elif dtype == np.int32 and cluster_size == (2,2): + return ClusterFinderMT_Cluster2x2i(image_size, n_sigma = n_sigma, + capacity = capacity, n_threads = n_threads) + else: + #TODO! add the other formats + raise ValueError(f"Unsupported dtype: {dtype}. 
Only np.int32 is supported.")
+
+
+def ClusterCollector(clusterfindermt, cluster_size = (3,3), dtype=np.int32):
+    """
+    Factory function to create a ClusterCollector object. Provides a cleaner syntax for
+    the templated ClusterCollector in C++.
+    """
+
+    if dtype == np.int32 and cluster_size == (3,3):
+        return ClusterCollector_Cluster3x3i(clusterfindermt)
+    elif dtype == np.int32 and cluster_size == (2,2):
+        return ClusterCollector_Cluster2x2i(clusterfindermt)
+
+    else:
+        #TODO! add the other formats
+        raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32 is supported.")
+
+def ClusterFileSink(clusterfindermt, cluster_file, dtype=np.int32):
+    """
+    Factory function to create a ClusterFileSink object. Provides a cleaner syntax for
+    the templated ClusterFileSink in C++.
+    """
+
+    if dtype == np.int32 and clusterfindermt.cluster_size == (3,3):
+        return ClusterFileSink_Cluster3x3i(clusterfindermt, cluster_file)
+    elif dtype == np.int32 and clusterfindermt.cluster_size == (2,2):
+        return ClusterFileSink_Cluster2x2i(clusterfindermt, cluster_file)
+
+    else:
+        #TODO! add the other formats
+        raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32 is supported.")
\ No newline at end of file
diff --git a/python/aare/ClusterVector.py b/python/aare/ClusterVector.py
new file mode 100644
index 0000000..b0dd453
--- /dev/null
+++ b/python/aare/ClusterVector.py
@@ -0,0 +1,11 @@
+
+
+from ._aare import ClusterVector_Cluster3x3i
+import numpy as np
+
+def ClusterVector(cluster_size, dtype = np.int32):
+
+    if dtype == np.int32 and cluster_size == (3,3):
+        return ClusterVector_Cluster3x3i()
+    else:
+        raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32 is supported.")
diff --git a/python/aare/__init__.py b/python/aare/__init__.py
index 46db110..837da6d 100644
--- a/python/aare/__init__.py
+++ b/python/aare/__init__.py
@@ -2,15 +2,33 @@ from . 
import _aare -from ._aare import File, RawMasterFile, RawSubFile, Hdf5MasterFile -from ._aare import Pedestal, ClusterFinder, VarClusterFinder +from ._aare import File, RawMasterFile, RawSubFile, Hdf5MasterFile, JungfrauDataFile +from ._aare import Pedestal_d, Pedestal_f, ClusterFinder_Cluster3x3i, VarClusterFinder from ._aare import DetectorType -from ._aare import ClusterFile +from ._aare import ClusterFile_Cluster3x3i as ClusterFile +from ._aare import hitmap +from ._aare import ROI + +# from ._aare import ClusterFinderMT, ClusterCollector, ClusterFileSink, ClusterVector_i + +from .ClusterFinder import ClusterFinder, ClusterCollector, ClusterFinderMT, ClusterFileSink +from .ClusterVector import ClusterVector + + +from ._aare import fit_gaus, fit_pol1, fit_scurve, fit_scurve2 +from ._aare import Interpolator +from ._aare import calculate_eta2 + + +from ._aare import apply_custom_weights from .CtbRawFile import CtbRawFile from .RawFile import RawFile from .Hdf5File import Hdf5File from .ScanParameters import ScanParameters -from .utils import random_pixels, random_pixel +from .utils import random_pixels, random_pixel, flat_list, add_colorbar + +#make functions available in the top level API +from .func import * diff --git a/python/aare/func.py b/python/aare/func.py new file mode 100644 index 0000000..e8a7b46 --- /dev/null +++ b/python/aare/func.py @@ -0,0 +1 @@ +from ._aare import gaus, pol1, scurve, scurve2 \ No newline at end of file diff --git a/python/aare/transform.py b/python/aare/transform.py index 414eb27..2f66942 100644 --- a/python/aare/transform.py +++ b/python/aare/transform.py @@ -2,6 +2,14 @@ import numpy as np from . 
import _aare +class AdcSar04Transform64to16: + def __call__(self, data): + return _aare.adc_sar_04_decode64to16(data) + +class AdcSar05Transform64to16: + def __call__(self, data): + return _aare.adc_sar_05_decode64to16(data) + class Moench05Transform: #Could be moved to C++ without changing the interface def __init__(self): @@ -45,4 +53,6 @@ class Matterhorn02Transform: moench05 = Moench05Transform() moench05_1g = Moench05Transform1g() moench05_old = Moench05TransformOld() -matterhorn02 = Matterhorn02Transform() \ No newline at end of file +matterhorn02 = Matterhorn02Transform() +adc_sar_04_64to16 = AdcSar04Transform64to16() +adc_sar_05_64to16 = AdcSar05Transform64to16() \ No newline at end of file diff --git a/python/aare/utils.py b/python/aare/utils.py index d53f844..a10f54c 100644 --- a/python/aare/utils.py +++ b/python/aare/utils.py @@ -1,4 +1,6 @@ import numpy as np +import matplotlib.pyplot as plt +from mpl_toolkits.axes_grid1 import make_axes_locatable def random_pixels(n_pixels, xmin=0, xmax=512, ymin=0, ymax=1024): """Return a list of random pixels. 
@@ -20,4 +22,15 @@ def random_pixel(xmin=0, xmax=512, ymin=0, ymax=1024): Returns: tuple: (row, col) """ - return random_pixels(1, xmin, xmax, ymin, ymax)[0] \ No newline at end of file + return random_pixels(1, xmin, xmax, ymin, ymax)[0] + +def flat_list(xss): + """Flatten a list of lists.""" + return [x for xs in xss for x in xs] + +def add_colorbar(ax, im, size="5%", pad=0.05): + """Add a colorbar with the same height as the image.""" + divider = make_axes_locatable(ax) + cax = divider.append_axes("right", size=size, pad=pad) + plt.colorbar(im, cax=cax) + return ax, im, cax \ No newline at end of file diff --git a/python/examples/fits.py b/python/examples/fits.py new file mode 100644 index 0000000..aa3aef6 --- /dev/null +++ b/python/examples/fits.py @@ -0,0 +1,79 @@ +import matplotlib.pyplot as plt +import numpy as np +from aare import fit_gaus, fit_pol1 +from aare import gaus, pol1 + +textpm = f"±" # +textmu = f"μ" # +textsigma = f"σ" # + + + +# ================================= Gauss fit ================================= +# Parameters +mu = np.random.uniform(1, 100) # Mean of Gaussian +sigma = np.random.uniform(4, 20) # Standard deviation +num_points = 10000 # Number of points for smooth distribution +noise_sigma = 100 + +# Generate Gaussian distribution +data = np.random.normal(mu, sigma, num_points) + +# Generate errors for each point +errors = np.abs(np.random.normal(0, sigma, num_points)) # Errors with mean 0, std 0.5 + +# Create subplot +fig0, ax0 = plt.subplots(1, 1, num=0, figsize=(12, 8)) + +x = np.histogram(data, bins=30)[1][:-1] + 0.05 +y = np.histogram(data, bins=30)[0] +yerr = errors[:30] + + +# Add the errors as error bars in the step plot +ax0.errorbar(x, y, yerr=yerr, fmt=". 
", capsize=5) +ax0.grid() + +par, err = fit_gaus(x, y, yerr) +print(par, err) + +x = np.linspace(x[0], x[-1], 1000) +ax0.plot(x, gaus(x, par), marker="") +ax0.set(xlabel="x", ylabel="Counts", title=f"A0 = {par[0]:0.2f}{textpm}{err[0]:0.2f}\n" + f"{textmu} = {par[1]:0.2f}{textpm}{err[1]:0.2f}\n" + f"{textsigma} = {par[2]:0.2f}{textpm}{err[2]:0.2f}\n" + f"(init: {textmu}: {mu:0.2f}, {textsigma}: {sigma:0.2f})") +fig0.tight_layout() + + + +# ================================= pol1 fit ================================= +# Parameters +n_points = 40 + +# Generate random slope and intercept (origin) +slope = np.random.uniform(-10, 10) # Random slope between 0.5 and 2.0 +intercept = np.random.uniform(-10, 10) # Random intercept between -10 and 10 + +# Generate random x values +x_values = np.random.uniform(-10, 10, n_points) + +# Calculate y values based on the linear function y = mx + b + error +errors = np.abs(np.random.normal(0, np.random.uniform(1, 5), n_points)) +var_points = np.random.normal(0, np.random.uniform(0.1, 2), n_points) +y_values = slope * x_values + intercept + var_points + +fig1, ax1 = plt.subplots(1, 1, num=1, figsize=(12, 8)) +ax1.errorbar(x_values, y_values, yerr=errors, fmt=". 
", capsize=5) +par, err = fit_pol1(x_values, y_values, errors) + + +x = np.linspace(np.min(x_values), np.max(x_values), 1000) +ax1.plot(x, pol1(x, par), marker="") +ax1.set(xlabel="x", ylabel="y", title=f"a = {par[0]:0.2f}{textpm}{err[0]:0.2f}\n" + f"b = {par[1]:0.2f}{textpm}{err[1]:0.2f}\n" + f"(init: {slope:0.2f}, {intercept:0.2f})") +fig1.tight_layout() + +plt.show() + diff --git a/python/examples/play.py b/python/examples/play.py index 633b7e2..0f4feca 100644 --- a/python/examples/play.py +++ b/python/examples/play.py @@ -1,15 +1,89 @@ -import matplotlib.pyplot as plt -import numpy as np -plt.ion() +import sys +sys.path.append('/home/l_msdetect/erik/aare/build') + + +from aare import RawSubFile, DetectorType, RawFile + from pathlib import Path -from aare import ClusterFile +path = Path("/home/l_msdetect/erik/data/aare-test-data/raw/jungfrau/") +f = RawSubFile(path/"jungfrau_single_d0_f0_0.raw", DetectorType.Jungfrau, 512, 1024, 16) -base = Path('~/data/aare_test_data/clusters').expanduser() - -f = ClusterFile(base / 'beam_En700eV_-40deg_300V_10us_d0_f0_100.clust') -# f = ClusterFile(base / 'single_frame_97_clustrers.clust') +# f = RawFile(path/"jungfrau_single_master_0.json") -for i in range(10): - fn, cl = f.read_frame() - print(fn, cl.size) +# from aare._aare import ClusterVector_i, Interpolator + +# import pickle +# import numpy as np +# import matplotlib.pyplot as plt +# import boost_histogram as bh +# import torch +# import math +# import time + + + +# def gaussian_2d(mx, my, sigma = 1, res=100, grid_size = 2): +# """ +# Generate a 2D gaussian as position mx, my, with sigma=sigma. +# The gaussian is placed on a 2x2 pixel matrix with resolution +# res in one dimesion. 
+# """ +# x = torch.linspace(0, pixel_size*grid_size, res) +# x,y = torch.meshgrid(x,x, indexing="ij") +# return 1 / (2*math.pi*sigma**2) * \ +# torch.exp(-((x - my)**2 / (2*sigma**2) + (y - mx)**2 / (2*sigma**2))) + +# scale = 1000 #Scale factor when converting to integer +# pixel_size = 25 #um +# grid = 2 +# resolution = 100 +# sigma_um = 10 +# xa = np.linspace(0,grid*pixel_size,resolution) +# ticks = [0, 25, 50] + +# hit = np.array((20,20)) +# etahist_fname = "/home/l_msdetect/erik/tmp/test_hist.pkl" + +# local_resolution = 99 +# grid_size = 3 +# xaxis = np.linspace(0,grid_size*pixel_size, local_resolution) +# t = gaussian_2d(hit[0],hit[1], grid_size = grid_size, sigma = 10, res = local_resolution) +# pixels = t.reshape(grid_size, t.shape[0] // grid_size, grid_size, t.shape[1] // grid_size).sum(axis = 3).sum(axis = 1) +# pixels = pixels.numpy() +# pixels = (pixels*scale).astype(np.int32) +# v = ClusterVector_i(3,3) +# v.push_back(1,1, pixels) + +# with open(etahist_fname, "rb") as f: +# hist = pickle.load(f) +# eta = hist.view().copy() +# etabinsx = np.array(hist.axes.edges.T[0].flat) +# etabinsy = np.array(hist.axes.edges.T[1].flat) +# ebins = np.array(hist.axes.edges.T[2].flat) +# p = Interpolator(eta, etabinsx[0:-1], etabinsy[0:-1], ebins[0:-1]) + + + + +# #Generate the hit + + + + +# tmp = p.interpolate(v) +# print(f'tmp:{tmp}') +# pos = np.array((tmp['x'], tmp['y']))*25 + + +# print(pixels) +# fig, ax = plt.subplots(figsize = (7,7)) +# ax.pcolormesh(xaxis, xaxis, t) +# ax.plot(*pos, 'o') +# ax.set_xticks([0,25,50,75]) +# ax.set_yticks([0,25,50,75]) +# ax.set_xlim(0,75) +# ax.set_ylim(0,75) +# ax.grid() +# print(f'{hit=}') +# print(f'{pos=}') \ No newline at end of file diff --git a/python/src/bind_Cluster.hpp b/python/src/bind_Cluster.hpp new file mode 100644 index 0000000..daf0946 --- /dev/null +++ b/python/src/bind_Cluster.hpp @@ -0,0 +1,64 @@ +#include "aare/Cluster.hpp" + +#include +#include +#include +#include +#include +#include +#include + +namespace 
py = pybind11; +using pd_type = double; + +using namespace aare; + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" + +template +void define_Cluster(py::module &m, const std::string &typestr) { + auto class_name = fmt::format("Cluster{}", typestr); + + py::class_>( + m, class_name.c_str(), py::buffer_protocol()) + + .def(py::init([](uint8_t x, uint8_t y, py::array_t data) { + py::buffer_info buf_info = data.request(); + Cluster cluster; + cluster.x = x; + cluster.y = y; + auto r = data.template unchecked<1>(); // no bounds checks + for (py::ssize_t i = 0; i < data.size(); ++i) { + cluster.data[i] = r(i); + } + return cluster; + })); + + /* + //TODO! Review if to keep or not + .def_property( + "data", + [](ClusterType &c) -> py::array { + return py::array(py::buffer_info( + c.data, sizeof(Type), + py::format_descriptor::format(), // Type + // format + 1, // Number of dimensions + {static_cast(ClusterSizeX * + ClusterSizeY)}, // Shape (flattened) + {sizeof(Type)} // Stride (step size between elements) + )); + }, + [](ClusterType &c, py::array_t arr) { + py::buffer_info buf_info = arr.request(); + Type *ptr = static_cast(buf_info.ptr); + std::copy(ptr, ptr + ClusterSizeX * ClusterSizeY, + c.data); // TODO dont iterate over centers!!! 
+ + }); + */ +} + +#pragma GCC diagnostic pop \ No newline at end of file diff --git a/python/src/bind_ClusterCollector.hpp b/python/src/bind_ClusterCollector.hpp new file mode 100644 index 0000000..4836e6e --- /dev/null +++ b/python/src/bind_ClusterCollector.hpp @@ -0,0 +1,46 @@ +#include "aare/ClusterCollector.hpp" +#include "aare/ClusterFileSink.hpp" +#include "aare/ClusterFinder.hpp" +#include "aare/ClusterFinderMT.hpp" +#include "aare/ClusterVector.hpp" +#include "aare/NDView.hpp" +#include "aare/Pedestal.hpp" +#include "np_helper.hpp" + +#include +#include +#include +#include +#include + +namespace py = pybind11; +using pd_type = double; + +using namespace aare; + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" + + +template +void define_ClusterCollector(py::module &m, + const std::string &typestr) { + auto class_name = fmt::format("ClusterCollector_{}", typestr); + + using ClusterType = Cluster; + + py::class_>(m, class_name.c_str()) + .def(py::init *>()) + .def("stop", &ClusterCollector::stop) + .def( + "steal_clusters", + [](ClusterCollector &self) { + auto v = new std::vector>( + self.steal_clusters()); + return v; // TODO change!!! 
+ }, + py::return_value_policy::take_ownership); +} + +#pragma GCC diagnostic pop \ No newline at end of file diff --git a/python/src/bind_ClusterFile.hpp b/python/src/bind_ClusterFile.hpp new file mode 100644 index 0000000..8ce5360 --- /dev/null +++ b/python/src/bind_ClusterFile.hpp @@ -0,0 +1,94 @@ +#include "aare/CalculateEta.hpp" +#include "aare/ClusterFile.hpp" +#include "aare/defs.hpp" + +#include +#include +#include +#include +#include +#include +#include +#include + +// Disable warnings for unused parameters, as we ignore some +// in the __exit__ method +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" + +namespace py = pybind11; +using namespace ::aare; + +template +void define_ClusterFile(py::module &m, + const std::string &typestr) { + + using ClusterType = Cluster; + + auto class_name = fmt::format("ClusterFile_{}", typestr); + + py::class_>(m, class_name.c_str()) + .def(py::init(), + py::arg(), py::arg("chunk_size") = 1000, py::arg("mode") = "r") + .def( + "read_clusters", + [](ClusterFile &self, size_t n_clusters) { + auto v = new ClusterVector( + self.read_clusters(n_clusters)); + return v; + }, + py::return_value_policy::take_ownership) + .def("read_frame", + [](ClusterFile &self) { + auto v = new ClusterVector(self.read_frame()); + return v; + }) + .def("set_roi", &ClusterFile::set_roi) + .def( + "set_noise_map", + [](ClusterFile &self, py::array_t noise_map) { + auto view = make_view_2d(noise_map); + self.set_noise_map(view); + }) + + .def("set_gain_map", + [](ClusterFile &self, py::array_t gain_map) { + auto view = make_view_2d(gain_map); + self.set_gain_map(view); + }) + + .def("close", &ClusterFile::close) + .def("write_frame", &ClusterFile::write_frame) + .def("__enter__", [](ClusterFile &self) { return &self; }) + .def("__exit__", + [](ClusterFile &self, + const std::optional &exc_type, + const std::optional &exc_value, + const std::optional &traceback) { + self.close(); + }) + .def("__iter__", [](ClusterFile 
&self) { return &self; }) + .def("__next__", [](ClusterFile &self) { + auto v = new ClusterVector( + self.read_clusters(self.chunk_size())); + if (v->size() == 0) { + throw py::stop_iteration(); + } + return v; + }); +} + +template +void register_calculate_eta(py::module &m) { + using ClusterType = Cluster; + m.def("calculate_eta2", + [](const aare::ClusterVector &clusters) { + auto eta2 = new NDArray(calculate_eta2(clusters)); + return return_image_data(eta2); + }); +} + +#pragma GCC diagnostic pop \ No newline at end of file diff --git a/python/src/bind_ClusterFileSink.hpp b/python/src/bind_ClusterFileSink.hpp new file mode 100644 index 0000000..9b3a74d --- /dev/null +++ b/python/src/bind_ClusterFileSink.hpp @@ -0,0 +1,44 @@ +#include "aare/ClusterCollector.hpp" +#include "aare/ClusterFileSink.hpp" +#include "aare/ClusterFinder.hpp" +#include "aare/ClusterFinderMT.hpp" +#include "aare/ClusterVector.hpp" +#include "aare/NDView.hpp" +#include "aare/Pedestal.hpp" +#include "np_helper.hpp" + +#include +#include +#include +#include +#include + +namespace py = pybind11; +using pd_type = double; + +using namespace aare; + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" + + + + + + +template +void define_ClusterFileSink(py::module &m, + const std::string &typestr) { + auto class_name = fmt::format("ClusterFileSink_{}", typestr); + + using ClusterType = Cluster; + + py::class_>(m, class_name.c_str()) + .def(py::init *, + const std::filesystem::path &>()) + .def("stop", &ClusterFileSink::stop); +} + + +#pragma GCC diagnostic pop diff --git a/python/src/bind_ClusterFinder.hpp b/python/src/bind_ClusterFinder.hpp new file mode 100644 index 0000000..5f0fe8d --- /dev/null +++ b/python/src/bind_ClusterFinder.hpp @@ -0,0 +1,77 @@ +#include "aare/ClusterCollector.hpp" +#include "aare/ClusterFileSink.hpp" +#include "aare/ClusterFinder.hpp" +#include "aare/ClusterFinderMT.hpp" +#include "aare/ClusterVector.hpp" +#include "aare/NDView.hpp" +#include 
"aare/Pedestal.hpp" +#include "np_helper.hpp" + +#include +#include +#include +#include +#include + +namespace py = pybind11; +using pd_type = double; + +using namespace aare; + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" + +template +void define_ClusterFinder(py::module &m, const std::string &typestr) { + auto class_name = fmt::format("ClusterFinder_{}", typestr); + + using ClusterType = Cluster; + + py::class_>( + m, class_name.c_str()) + .def(py::init, pd_type, size_t>(), py::arg("image_size"), + py::arg("n_sigma") = 5.0, py::arg("capacity") = 1'000'000) + .def("push_pedestal_frame", + [](ClusterFinder &self, + py::array_t frame) { + auto view = make_view_2d(frame); + self.push_pedestal_frame(view); + }) + .def("clear_pedestal", + &ClusterFinder::clear_pedestal) + .def_property_readonly( + "pedestal", + [](ClusterFinder &self) { + auto pd = new NDArray{}; + *pd = self.pedestal(); + return return_image_data(pd); + }) + .def_property_readonly( + "noise", + [](ClusterFinder &self) { + auto arr = new NDArray{}; + *arr = self.noise(); + return return_image_data(arr); + }) + .def( + "steal_clusters", + [](ClusterFinder &self, + bool realloc_same_capacity) { + ClusterVector clusters = + self.steal_clusters(realloc_same_capacity); + return clusters; + }, + py::arg("realloc_same_capacity") = false) + .def( + "find_clusters", + [](ClusterFinder &self, + py::array_t frame, uint64_t frame_number) { + auto view = make_view_2d(frame); + self.find_clusters(view, frame_number); + return; + }, + py::arg(), py::arg("frame_number") = 0); +} + +#pragma GCC diagnostic pop \ No newline at end of file diff --git a/python/src/bind_ClusterFinderMT.hpp b/python/src/bind_ClusterFinderMT.hpp new file mode 100644 index 0000000..d1769db --- /dev/null +++ b/python/src/bind_ClusterFinderMT.hpp @@ -0,0 +1,81 @@ +#include "aare/ClusterCollector.hpp" +#include "aare/ClusterFileSink.hpp" +#include "aare/ClusterFinder.hpp" +#include "aare/ClusterFinderMT.hpp" 
+#include "aare/ClusterVector.hpp" +#include "aare/NDView.hpp" +#include "aare/Pedestal.hpp" +#include "np_helper.hpp" + +#include +#include +#include +#include +#include + +namespace py = pybind11; +using pd_type = double; + +using namespace aare; + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" + +template +void define_ClusterFinderMT(py::module &m, + const std::string &typestr) { + auto class_name = fmt::format("ClusterFinderMT_{}", typestr); + + using ClusterType = Cluster; + + py::class_>( + m, class_name.c_str()) + .def(py::init, pd_type, size_t, size_t>(), + py::arg("image_size"), py::arg("n_sigma") = 5.0, + py::arg("capacity") = 2048, py::arg("n_threads") = 3) + .def("push_pedestal_frame", + [](ClusterFinderMT &self, + py::array_t frame) { + auto view = make_view_2d(frame); + self.push_pedestal_frame(view); + }) + .def( + "find_clusters", + [](ClusterFinderMT &self, + py::array_t frame, uint64_t frame_number) { + auto view = make_view_2d(frame); + self.find_clusters(view, frame_number); + return; + }, + py::arg(), py::arg("frame_number") = 0) + .def_property_readonly("cluster_size", [](ClusterFinderMT &self){ + return py::make_tuple(ClusterSizeX, ClusterSizeY); + }) + .def("clear_pedestal", + &ClusterFinderMT::clear_pedestal) + .def("sync", &ClusterFinderMT::sync) + .def("stop", &ClusterFinderMT::stop) + .def("start", &ClusterFinderMT::start) + .def( + "pedestal", + [](ClusterFinderMT &self, + size_t thread_index) { + auto pd = new NDArray{}; + *pd = self.pedestal(thread_index); + return return_image_data(pd); + }, + py::arg("thread_index") = 0) + .def( + "noise", + [](ClusterFinderMT &self, + size_t thread_index) { + auto arr = new NDArray{}; + *arr = self.noise(thread_index); + return return_image_data(arr); + }, + py::arg("thread_index") = 0); +} + + +#pragma GCC diagnostic pop \ No newline at end of file diff --git a/python/src/bind_ClusterVector.hpp b/python/src/bind_ClusterVector.hpp new file mode 100644 index 
0000000..550db9a --- /dev/null +++ b/python/src/bind_ClusterVector.hpp @@ -0,0 +1,106 @@ +#include "aare/ClusterCollector.hpp" +#include "aare/ClusterFileSink.hpp" +#include "aare/ClusterFinder.hpp" +#include "aare/ClusterFinderMT.hpp" +#include "aare/ClusterVector.hpp" +#include "aare/NDView.hpp" +#include "aare/Pedestal.hpp" +#include "np_helper.hpp" + +#include +#include +#include +#include +#include + +namespace py = pybind11; +using pd_type = double; + +using namespace aare; + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" + +template +void define_ClusterVector(py::module &m, const std::string &typestr) { + using ClusterType = Cluster; + auto class_name = fmt::format("ClusterVector_{}", typestr); + + py::class_, void>>( + m, class_name.c_str(), + py::buffer_protocol()) + + .def(py::init()) // TODO change!!! + + .def("push_back", + [](ClusterVector &self, const ClusterType &cluster) { + self.push_back(cluster); + }) + + .def("sum", + [](ClusterVector &self) { + auto *vec = new std::vector(self.sum()); + return return_vector(vec); + }) + .def("sum_2x2", [](ClusterVector &self){ + auto *vec = new std::vector(self.sum_2x2()); + return return_vector(vec); + }) + .def_property_readonly("size", &ClusterVector::size) + .def("item_size", &ClusterVector::item_size) + .def_property_readonly("fmt", + [typestr](ClusterVector &self) { + return fmt_format; + }) + + .def_property_readonly("cluster_size_x", + &ClusterVector::cluster_size_x) + .def_property_readonly("cluster_size_y", + &ClusterVector::cluster_size_y) + .def_property_readonly("capacity", + &ClusterVector::capacity) + .def_property("frame_number", &ClusterVector::frame_number, + &ClusterVector::set_frame_number) + .def_buffer( + [typestr](ClusterVector &self) -> py::buffer_info { + return py::buffer_info( + self.data(), /* Pointer to buffer */ + self.item_size(), /* Size of one scalar */ + fmt_format, /* Format descriptor */ + 1, /* Number of dimensions */ + {self.size()}, /* 
Buffer dimensions */ + {self.item_size()} /* Strides (in bytes) for each index */ + ); + }); + + // Free functions using ClusterVector + m.def("hitmap", + [](std::array image_size, ClusterVector &cv) { + // Create a numpy array to hold the hitmap + // The shape of the array is (image_size[0], image_size[1]) + // note that the python array is passed as [row, col] which + // is the opposite of the clusters [x,y] + py::array_t hitmap(image_size); + auto r = hitmap.mutable_unchecked<2>(); + + // Initialize hitmap to 0 + for (py::ssize_t i = 0; i < r.shape(0); i++) + for (py::ssize_t j = 0; j < r.shape(1); j++) + r(i, j) = 0; + + // Loop over the clusters and increment the hitmap + // Skip out of bound clusters + for (const auto &cluster : cv) { + auto x = cluster.x; + auto y = cluster.y; + if (x < image_size[1] && y < image_size[0]) + r(cluster.y, cluster.x) += 1; + } + + return hitmap; + }); +} + +#pragma GCC diagnostic pop \ No newline at end of file diff --git a/python/src/cluster.hpp b/python/src/cluster.hpp deleted file mode 100644 index 6932281..0000000 --- a/python/src/cluster.hpp +++ /dev/null @@ -1,52 +0,0 @@ -#include "aare/ClusterFinder.hpp" -#include "aare/NDView.hpp" -#include "aare/Pedestal.hpp" -#include "np_helper.hpp" - -#include -#include -#include -#include - -namespace py = pybind11; - -void define_cluster_finder_bindings(py::module &m) { - py::class_>(m, "ClusterFinder") - .def(py::init, Shape<2>>()) - .def("push_pedestal_frame", - [](ClusterFinder &self, - py::array_t frame) { - auto view = make_view_2d(frame); - self.push_pedestal_frame(view); - }) - .def("pedestal", - [](ClusterFinder &self) { - auto pd = new NDArray{}; - *pd = self.pedestal(); - return return_image_data(pd); - }) - .def("find_clusters_without_threshold", - [](ClusterFinder &self, - py::array_t frame) { - auto view = make_view_2d(frame); - auto clusters = self.find_clusters_without_threshold(view); - return clusters; - }); - - py::class_(m, "DynamicCluster", 
py::buffer_protocol()) - .def(py::init()) - .def("size", &DynamicCluster::size) - .def("begin", &DynamicCluster::begin) - .def("end", &DynamicCluster::end) - .def_readwrite("x", &DynamicCluster::x) - .def_readwrite("y", &DynamicCluster::y) - .def_buffer([](DynamicCluster &c) -> py::buffer_info { - return py::buffer_info(c.data(), c.dt.bytes(), c.dt.format_descr(), - 1, {c.size()}, {c.dt.bytes()}); - }) - - .def("__repr__", [](const DynamicCluster &a) { - return ""; - }); -} \ No newline at end of file diff --git a/python/src/cluster_file.hpp b/python/src/cluster_file.hpp deleted file mode 100644 index 6f37c3d..0000000 --- a/python/src/cluster_file.hpp +++ /dev/null @@ -1,50 +0,0 @@ -#include "aare/ClusterFile.hpp" -#include "aare/defs.hpp" - - -#include -#include -#include -#include -#include -#include -#include -#include - -namespace py = pybind11; -using namespace ::aare; - -void define_cluster_file_io_bindings(py::module &m) { - PYBIND11_NUMPY_DTYPE(Cluster, x, y, data); - - py::class_(m, "ClusterFile") - .def(py::init(), py::arg(), py::arg("chunk_size") = 1000) - .def("read_clusters", - [](ClusterFile &self, size_t n_clusters) { - auto* vec = new std::vector(self.read_clusters(n_clusters)); - return return_vector(vec); - }) - .def("read_frame", - [](ClusterFile &self) { - int32_t frame_number; - auto* vec = new std::vector(self.read_frame(frame_number)); - return py::make_tuple(frame_number, return_vector(vec)); - }) - .def("read_cluster_with_cut", - [](ClusterFile &self, size_t n_clusters, py::array_t noise_map, int nx, int ny) { - auto view = make_view_2d(noise_map); - auto* vec = new std::vector(self.read_cluster_with_cut(n_clusters, view.data(), nx, ny)); - return return_vector(vec); - }) - .def("__enter__", [](ClusterFile &self) { return &self; }) - .def("__exit__", [](ClusterFile &self) { self.close();}) - .def("__iter__", [](ClusterFile &self) { return &self; }) - .def("__next__", [](ClusterFile &self) { - auto vec = new 
std::vector(self.read_clusters(self.chunk_size())); - if(vec->size() == 0) { - throw py::stop_iteration(); - } - return return_vector(vec); - }); - -} \ No newline at end of file diff --git a/python/src/ctb_raw_file.hpp b/python/src/ctb_raw_file.hpp index 39c1001..c9b5310 100644 --- a/python/src/ctb_raw_file.hpp +++ b/python/src/ctb_raw_file.hpp @@ -7,8 +7,11 @@ #include "aare/RawSubFile.hpp" #include "aare/defs.hpp" +#include "aare/decode.hpp" // #include "aare/fClusterFileV2.hpp" +#include "np_helper.hpp" + #include #include #include @@ -23,35 +26,95 @@ using namespace ::aare; void define_ctb_raw_file_io_bindings(py::module &m) { - py::class_(m, "CtbRawFile") - .def(py::init()) - .def("read_frame", - [](CtbRawFile &self) { - size_t image_size = self.image_size_in_bytes(); - py::array image; - std::vector shape; - shape.reserve(2); - shape.push_back(1); - shape.push_back(image_size); +m.def("adc_sar_05_decode64to16", [](py::array_t input) { - py::array_t header(1); + + if(input.ndim() != 2){ + throw std::runtime_error("Only 2D arrays are supported at this moment"); + } - // always read bytes - image = py::array_t(shape); + //Create a 2D output array with the same shape as the input + std::vector shape{input.shape(0), input.shape(1)/static_cast(bits_per_byte)}; + py::array_t output(shape); - self.read_into( - reinterpret_cast(image.mutable_data()), - header.mutable_data()); + //Create a view of the input and output arrays + NDView input_view(reinterpret_cast(input.mutable_data()), {output.shape(0), output.shape(1)}); + NDView output_view(output.mutable_data(), {output.shape(0), output.shape(1)}); - return py::make_tuple(header, image); - }) - .def("seek", &CtbRawFile::seek) - .def("tell", &CtbRawFile::tell) - .def("master", &CtbRawFile::master) + adc_sar_05_decode64to16(input_view, output_view); - .def_property_readonly("image_size_in_bytes", - &CtbRawFile::image_size_in_bytes) + return output; +}); - .def_property_readonly("frames_in_file", 
&CtbRawFile::frames_in_file); -} \ No newline at end of file +m.def("adc_sar_04_decode64to16", [](py::array_t input) { + + + if(input.ndim() != 2){ + throw std::runtime_error("Only 2D arrays are supported at this moment"); + } + + //Create a 2D output array with the same shape as the input + std::vector shape{input.shape(0), input.shape(1)/static_cast(bits_per_byte)}; + py::array_t output(shape); + + //Create a view of the input and output arrays + NDView input_view(reinterpret_cast(input.mutable_data()), {output.shape(0), output.shape(1)}); + NDView output_view(output.mutable_data(), {output.shape(0), output.shape(1)}); + + adc_sar_04_decode64to16(input_view, output_view); + + return output; +}); + +m.def( + "apply_custom_weights", + [](py::array_t &input, + py::array_t + &weights) { + + + // Create new array with same shape as the input array (uninitialized values) + py::buffer_info buf = input.request(); + py::array_t output(buf.shape); + + // Use NDViews to call into the C++ library + auto weights_view = make_view_1d(weights); + NDView input_view(input.mutable_data(), {input.size()}); + NDView output_view(output.mutable_data(), {output.size()}); + + apply_custom_weights(input_view, output_view, weights_view); + return output; + }); + +py::class_(m, "CtbRawFile") + .def(py::init()) + .def("read_frame", + [](CtbRawFile &self) { + size_t image_size = self.image_size_in_bytes(); + py::array image; + std::vector shape; + shape.reserve(2); + shape.push_back(1); + shape.push_back(image_size); + + py::array_t header(1); + + // always read bytes + image = py::array_t(shape); + + self.read_into(reinterpret_cast(image.mutable_data()), + header.mutable_data()); + + return py::make_tuple(header, image); + }) + .def("seek", &CtbRawFile::seek) + .def("tell", &CtbRawFile::tell) + .def("master", &CtbRawFile::master) + + .def_property_readonly("image_size_in_bytes", + &CtbRawFile::image_size_in_bytes) + + .def_property_readonly("frames_in_file", &CtbRawFile::frames_in_file); + 
+} diff --git a/python/src/file.hpp b/python/src/file.hpp index 46ef5cd..5849f29 100644 --- a/python/src/file.hpp +++ b/python/src/file.hpp @@ -25,6 +25,14 @@ namespace py = pybind11; using namespace ::aare; + + + +//Disable warnings for unused parameters, as we ignore some +//in the __exit__ method +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" + void define_file_io_bindings(py::module &m) { @@ -56,7 +64,8 @@ void define_file_io_bindings(py::module &m) { .def(py::init()) - .def("frame_number", &File::frame_number) + .def("frame_number", py::overload_cast<>(&File::frame_number)) + .def("frame_number", py::overload_cast(&File::frame_number)) .def_property_readonly("bytes_per_frame", &File::bytes_per_frame) .def_property_readonly("pixels_per_frame", &File::pixels_per_frame) .def("seek", &File::seek) @@ -128,8 +137,41 @@ void define_file_io_bindings(py::module &m) { self.read_into(reinterpret_cast(image.mutable_data()), n_frames); return image; + }) + .def("__enter__", [](File &self) { return &self; }) + .def("__exit__", + [](File &self, + const std::optional &exc_type, + const std::optional &exc_value, + const std::optional &traceback) { + // self.close(); + }) + .def("__iter__", [](File &self) { return &self; }) + .def("__next__", [](File &self) { + + try{ + const uint8_t item_size = self.bytes_per_pixel(); + py::array image; + std::vector shape; + shape.reserve(2); + shape.push_back(self.rows()); + shape.push_back(self.cols()); + if (item_size == 1) { + image = py::array_t(shape); + } else if (item_size == 2) { + image = py::array_t(shape); + } else if (item_size == 4) { + image = py::array_t(shape); + } + self.read_into( + reinterpret_cast(image.mutable_data())); + return image; + }catch(std::runtime_error &e){ + throw py::stop_iteration(); + } }); + py::class_(m, "FileConfig") .def(py::init<>()) .def_readwrite("rows", &FileConfig::rows) @@ -161,6 +203,8 @@ void define_file_io_bindings(py::module &m) { py::class_(m, "ROI") 
.def(py::init<>()) + .def(py::init(), py::arg("xmin"), + py::arg("xmax"), py::arg("ymin"), py::arg("ymax")) .def_readwrite("xmin", &ROI::xmin) .def_readwrite("xmax", &ROI::xmax) .def_readwrite("ymin", &ROI::ymin) @@ -178,38 +222,11 @@ void define_file_io_bindings(py::module &m) { - py::class_(m, "RawSubFile") - .def(py::init()) - .def_property_readonly("bytes_per_frame", &RawSubFile::bytes_per_frame) - .def_property_readonly("pixels_per_frame", - &RawSubFile::pixels_per_frame) - .def("seek", &RawSubFile::seek) - .def("tell", &RawSubFile::tell) - .def_property_readonly("rows", &RawSubFile::rows) - .def_property_readonly("cols", &RawSubFile::cols) - .def("read_frame", - [](RawSubFile &self) { - const uint8_t item_size = self.bytes_per_pixel(); - py::array image; - std::vector shape; - shape.reserve(2); - shape.push_back(self.rows()); - shape.push_back(self.cols()); - if (item_size == 1) { - image = py::array_t(shape); - } else if (item_size == 2) { - image = py::array_t(shape); - } else if (item_size == 4) { - image = py::array_t(shape); - } - fmt::print("item_size: {} rows: {} cols: {}\n", item_size, self.rows(), self.cols()); - self.read_into( - reinterpret_cast(image.mutable_data())); - return image; - }); + + +#pragma GCC diagnostic pop // py::class_(m, "ClusterHeader") // .def(py::init<>()) // .def_readwrite("frame_number", &ClusterHeader::frame_number) diff --git a/python/src/fit.hpp b/python/src/fit.hpp new file mode 100644 index 0000000..97dafb5 --- /dev/null +++ b/python/src/fit.hpp @@ -0,0 +1,465 @@ +#include +#include +#include +#include +#include + +#include "aare/Fit.hpp" + +namespace py = pybind11; +using namespace pybind11::literals; + + +void define_fit_bindings(py::module &m) { + + // TODO! 
Evaluate without converting to double + m.def( + "gaus", + [](py::array_t x, + py::array_t par) { + auto x_view = make_view_1d(x); + auto par_view = make_view_1d(par); + auto y = new NDArray{aare::func::gaus(x_view, par_view)}; + return return_image_data(y); + }, + R"( + Evaluate a 1D Gaussian function for all points in x using parameters par. + + Parameters + ---------- + x : array_like + The points at which to evaluate the Gaussian function. + par : array_like + The parameters of the Gaussian function. The first element is the amplitude, the second element is the mean, and the third element is the standard deviation. + )", + py::arg("x"), py::arg("par")); + + m.def( + "pol1", + [](py::array_t x, + py::array_t par) { + auto x_view = make_view_1d(x); + auto par_view = make_view_1d(par); + auto y = new NDArray{aare::func::pol1(x_view, par_view)}; + return return_image_data(y); + }, + R"( + Evaluate a 1D polynomial function for all points in x using parameters par. (p0+p1*x) + + Parameters + ---------- + x : array_like + The points at which to evaluate the polynomial function. + par : array_like + The parameters of the polynomial function. The first element is the intercept, and the second element is the slope. + )", + py::arg("x"), py::arg("par")); + + m.def( + "scurve", + [](py::array_t x, + py::array_t par) { + auto x_view = make_view_1d(x); + auto par_view = make_view_1d(par); + auto y = new NDArray{aare::func::scurve(x_view, par_view)}; + return return_image_data(y); + }, + R"( + Evaluate a 1D scurve function for all points in x using parameters par. + + Parameters + ---------- + x : array_like + The points at which to evaluate the scurve function. + par : array_like + The parameters of the scurve function. The first element is the background slope, the second element is the background intercept, the third element is the mean, the fourth element is the standard deviation, the fifth element is inflexion point count number, and the sixth element is C. 
+ )", + py::arg("x"), py::arg("par")); + + m.def( + "scurve2", + [](py::array_t x, + py::array_t par) { + auto x_view = make_view_1d(x); + auto par_view = make_view_1d(par); + auto y = new NDArray{aare::func::scurve2(x_view, par_view)}; + return return_image_data(y); + }, + R"( + Evaluate a 1D scurve2 function for all points in x using parameters par. + + Parameters + ---------- + x : array_like + The points at which to evaluate the scurve function. + par : array_like + The parameters of the scurve2 function. The first element is the background slope, the second element is the background intercept, the third element is the mean, the fourth element is the standard deviation, the fifth element is inflexion point count number, and the sixth element is C. + )", + py::arg("x"), py::arg("par")); + + m.def( + "fit_gaus", + [](py::array_t x, + py::array_t y, + int n_threads) { + if (y.ndim() == 3) { + auto par = new NDArray{}; + auto y_view = make_view_3d(y); + auto x_view = make_view_1d(x); + *par = aare::fit_gaus(x_view, y_view, n_threads); + return return_image_data(par); + } else if (y.ndim() == 1) { + auto par = new NDArray{}; + auto y_view = make_view_1d(y); + auto x_view = make_view_1d(x); + *par = aare::fit_gaus(x_view, y_view); + return return_image_data(par); + } else { + throw std::runtime_error("Data must be 1D or 3D"); + } + }, + R"( + +Fit a 1D Gaussian to data. + +Parameters +---------- +x : array_like + The x values. +y : array_like + The y values. +n_threads : int, optional + The number of threads to use. Default is 4. 
+)", + py::arg("x"), py::arg("y"), py::arg("n_threads") = 4); + + m.def( + "fit_gaus", + [](py::array_t x, + py::array_t y, + py::array_t y_err, + int n_threads) { + + if (y.ndim() == 3) { + // Allocate memory for the output + // Need to have pointers to allow python to manage + // the memory + auto par = new NDArray({y.shape(0), y.shape(1), 3}); + auto par_err = + new NDArray({y.shape(0), y.shape(1), 3}); + auto chi2 = new NDArray({y.shape(0), y.shape(1)}); + + // Make views of the numpy arrays + auto y_view = make_view_3d(y); + auto y_view_err = make_view_3d(y_err); + auto x_view = make_view_1d(x); + + aare::fit_gaus(x_view, y_view, y_view_err, par->view(), + par_err->view(), chi2->view(), n_threads); + + return py::dict("par"_a = return_image_data(par), + "par_err"_a = return_image_data(par_err), + "chi2"_a = return_image_data(chi2), + "Ndf"_a = y.shape(2) - 3); + } else if (y.ndim() == 1) { + // Allocate memory for the output + // Need to have pointers to allow python to manage + // the memory + auto par = new NDArray({3}); + auto par_err = new NDArray({3}); + + // Decode the numpy arrays + auto y_view = make_view_1d(y); + auto y_view_err = make_view_1d(y_err); + auto x_view = make_view_1d(x); + + + double chi2 = 0; + aare::fit_gaus(x_view, y_view, y_view_err, par->view(), + par_err->view(), chi2); + + return py::dict("par"_a = return_image_data(par), + "par_err"_a = return_image_data(par_err), + "chi2"_a = chi2, "Ndf"_a = y.size() - 3); + + } else { + throw std::runtime_error("Data must be 1D or 3D"); + } + }, + R"( + +Fit a 1D Gaussian to data with error estimates. + +Parameters +---------- +x : array_like + The x values. +y : array_like + The y values. +y_err : array_like + The error in the y values. +n_threads : int, optional + The number of threads to use. Default is 4. 
+)", + py::arg("x"), py::arg("y"), py::arg("y_err"), py::arg("n_threads") = 4); + + m.def( + "fit_pol1", + [](py::array_t x, + py::array_t y, + int n_threads) { + if (y.ndim() == 3) { + auto par = new NDArray{}; + + auto x_view = make_view_1d(x); + auto y_view = make_view_3d(y); + *par = aare::fit_pol1(x_view, y_view, n_threads); + return return_image_data(par); + } else if (y.ndim() == 1) { + auto par = new NDArray{}; + auto x_view = make_view_1d(x); + auto y_view = make_view_1d(y); + *par = aare::fit_pol1(x_view, y_view); + return return_image_data(par); + } else { + throw std::runtime_error("Data must be 1D or 3D"); + } + }, + py::arg("x"), py::arg("y"), py::arg("n_threads") = 4); + + m.def( + "fit_pol1", + [](py::array_t x, + py::array_t y, + py::array_t y_err, + int n_threads) { + if (y.ndim() == 3) { + auto par = new NDArray({y.shape(0), y.shape(1), 2}); + + auto par_err = + new NDArray({y.shape(0), y.shape(1), 2}); + + auto y_view = make_view_3d(y); + auto y_view_err = make_view_3d(y_err); + auto x_view = make_view_1d(x); + + auto chi2 = new NDArray({y.shape(0), y.shape(1)}); + + aare::fit_pol1(x_view, y_view, y_view_err, par->view(), + par_err->view(), chi2->view(), n_threads); + return py::dict("par"_a = return_image_data(par), + "par_err"_a = return_image_data(par_err), + "chi2"_a = return_image_data(chi2), + "Ndf"_a = y.shape(2) - 2); + + + } else if (y.ndim() == 1) { + auto par = new NDArray({2}); + auto par_err = new NDArray({2}); + + auto y_view = make_view_1d(y); + auto y_view_err = make_view_1d(y_err); + auto x_view = make_view_1d(x); + + double chi2 = 0; + + aare::fit_pol1(x_view, y_view, y_view_err, par->view(), + par_err->view(), chi2); + return py::dict("par"_a = return_image_data(par), + "par_err"_a = return_image_data(par_err), + "chi2"_a = chi2, "Ndf"_a = y.size() - 2); + + } else { + throw std::runtime_error("Data must be 1D or 3D"); + } + }, + R"( +Fit a 1D polynomial to data with error estimates. 
+ +Parameters +---------- +x : array_like + The x values. +y : array_like + The y values. +y_err : array_like + The error in the y values. +n_threads : int, optional + The number of threads to use. Default is 4. +)", + py::arg("x"), py::arg("y"), py::arg("y_err"), py::arg("n_threads") = 4); + +//========= + m.def( + "fit_scurve", + [](py::array_t x, + py::array_t y, + int n_threads) { + if (y.ndim() == 3) { + auto par = new NDArray{}; + + auto x_view = make_view_1d(x); + auto y_view = make_view_3d(y); + *par = aare::fit_scurve(x_view, y_view, n_threads); + return return_image_data(par); + } else if (y.ndim() == 1) { + auto par = new NDArray{}; + auto x_view = make_view_1d(x); + auto y_view = make_view_1d(y); + *par = aare::fit_scurve(x_view, y_view); + return return_image_data(par); + } else { + throw std::runtime_error("Data must be 1D or 3D"); + } + }, + py::arg("x"), py::arg("y"), py::arg("n_threads") = 4); + + m.def( + "fit_scurve", + [](py::array_t x, + py::array_t y, + py::array_t y_err, + int n_threads) { + if (y.ndim() == 3) { + auto par = new NDArray({y.shape(0), y.shape(1), 6}); + + auto par_err = + new NDArray({y.shape(0), y.shape(1), 6}); + + auto y_view = make_view_3d(y); + auto y_view_err = make_view_3d(y_err); + auto x_view = make_view_1d(x); + + auto chi2 = new NDArray({y.shape(0), y.shape(1)}); + + aare::fit_scurve(x_view, y_view, y_view_err, par->view(), + par_err->view(), chi2->view(), n_threads); + return py::dict("par"_a = return_image_data(par), + "par_err"_a = return_image_data(par_err), + "chi2"_a = return_image_data(chi2), + "Ndf"_a = y.shape(2) - 2); + + + } else if (y.ndim() == 1) { + auto par = new NDArray({2}); + auto par_err = new NDArray({2}); + + auto y_view = make_view_1d(y); + auto y_view_err = make_view_1d(y_err); + auto x_view = make_view_1d(x); + + double chi2 = 0; + + aare::fit_scurve(x_view, y_view, y_view_err, par->view(), + par_err->view(), chi2); + return py::dict("par"_a = return_image_data(par), + "par_err"_a = 
return_image_data(par_err), + "chi2"_a = chi2, "Ndf"_a = y.size() - 2); + + } else { + throw std::runtime_error("Data must be 1D or 3D"); + } + }, + R"( +Fit a 1D polynomial to data with error estimates. + +Parameters +---------- +x : array_like + The x values. +y : array_like + The y values. +y_err : array_like + The error in the y values. +n_threads : int, optional + The number of threads to use. Default is 4. +)", + py::arg("x"), py::arg("y"), py::arg("y_err"), py::arg("n_threads") = 4); + + + m.def( + "fit_scurve2", + [](py::array_t x, + py::array_t y, + int n_threads) { + if (y.ndim() == 3) { + auto par = new NDArray{}; + + auto x_view = make_view_1d(x); + auto y_view = make_view_3d(y); + *par = aare::fit_scurve2(x_view, y_view, n_threads); + return return_image_data(par); + } else if (y.ndim() == 1) { + auto par = new NDArray{}; + auto x_view = make_view_1d(x); + auto y_view = make_view_1d(y); + *par = aare::fit_scurve2(x_view, y_view); + return return_image_data(par); + } else { + throw std::runtime_error("Data must be 1D or 3D"); + } + }, + py::arg("x"), py::arg("y"), py::arg("n_threads") = 4); + + m.def( + "fit_scurve2", + [](py::array_t x, + py::array_t y, + py::array_t y_err, + int n_threads) { + if (y.ndim() == 3) { + auto par = new NDArray({y.shape(0), y.shape(1), 6}); + + auto par_err = + new NDArray({y.shape(0), y.shape(1), 6}); + + auto y_view = make_view_3d(y); + auto y_view_err = make_view_3d(y_err); + auto x_view = make_view_1d(x); + + auto chi2 = new NDArray({y.shape(0), y.shape(1)}); + + aare::fit_scurve2(x_view, y_view, y_view_err, par->view(), + par_err->view(), chi2->view(), n_threads); + return py::dict("par"_a = return_image_data(par), + "par_err"_a = return_image_data(par_err), + "chi2"_a = return_image_data(chi2), + "Ndf"_a = y.shape(2) - 2); + + + } else if (y.ndim() == 1) { + auto par = new NDArray({6}); + auto par_err = new NDArray({6}); + + auto y_view = make_view_1d(y); + auto y_view_err = make_view_1d(y_err); + auto x_view = 
make_view_1d(x); + + double chi2 = 0; + + aare::fit_scurve2(x_view, y_view, y_view_err, par->view(), + par_err->view(), chi2); + return py::dict("par"_a = return_image_data(par), + "par_err"_a = return_image_data(par_err), + "chi2"_a = chi2, "Ndf"_a = y.size() - 2); + + } else { + throw std::runtime_error("Data must be 1D or 3D"); + } + }, + R"( +Fit a 1D polynomial to data with error estimates. + +Parameters +---------- +x : array_like + The x values. +y : array_like + The y values. +y_err : array_like + The error in the y values. +n_threads : int, optional + The number of threads to use. Default is 4. +)", + py::arg("x"), py::arg("y"), py::arg("y_err"), py::arg("n_threads") = 4); +} \ No newline at end of file diff --git a/python/src/interpolation.hpp b/python/src/interpolation.hpp new file mode 100644 index 0000000..e667015 --- /dev/null +++ b/python/src/interpolation.hpp @@ -0,0 +1,82 @@ +#include "aare/Interpolator.hpp" +#include "aare/NDArray.hpp" +#include "aare/NDView.hpp" +#include "np_helper.hpp" +#include +#include +#include +#include + +namespace py = pybind11; + +template +void register_interpolate(py::class_ &interpolator) { + + using ClusterType = Cluster; + + interpolator.def("interpolate", + [](aare::Interpolator &self, + const ClusterVector &clusters) { + auto photons = self.interpolate(clusters); + auto *ptr = new std::vector{photons}; + return return_vector(ptr); + }); +} + +void define_interpolation_bindings(py::module &m) { + + PYBIND11_NUMPY_DTYPE(aare::Photon, x, y, energy); + + auto interpolator = + py::class_(m, "Interpolator") + .def(py::init([](py::array_t + etacube, + py::array_t xbins, + py::array_t ybins, + py::array_t ebins) { + return Interpolator(make_view_3d(etacube), make_view_1d(xbins), + make_view_1d(ybins), make_view_1d(ebins)); + })) + .def("get_ietax", + [](Interpolator &self) { + auto *ptr = new NDArray{}; + *ptr = self.get_ietax(); + return return_image_data(ptr); + }) + .def("get_ietay", [](Interpolator &self) { + auto 
*ptr = new NDArray{}; + *ptr = self.get_ietay(); + return return_image_data(ptr); + }); + + register_interpolate(interpolator); + register_interpolate(interpolator); + register_interpolate(interpolator); + register_interpolate(interpolator); + register_interpolate(interpolator); + register_interpolate(interpolator); + + // TODO! Evaluate without converting to double + m.def( + "hej", + []() { + // auto boost_histogram = py::module_::import("boost_histogram"); + // py::object axis = + // boost_histogram.attr("axis").attr("Regular")(10, 0.0, 10.0); + // py::object histogram = boost_histogram.attr("Histogram")(axis); + // return histogram; + // return h; + }, + R"( + Placeholder binding (TODO): intended to construct and return a + boost_histogram histogram, but the implementation is commented out and + this function currently does nothing and returns None. + + Do not rely on this binding; it exists only as a stub for future + histogram-based evaluation without converting cluster data to double.
+ )"); +} \ No newline at end of file diff --git a/python/src/jungfrau_data_file.hpp b/python/src/jungfrau_data_file.hpp new file mode 100644 index 0000000..942f6a6 --- /dev/null +++ b/python/src/jungfrau_data_file.hpp @@ -0,0 +1,116 @@ + +#include "aare/JungfrauDataFile.hpp" +#include "aare/defs.hpp" + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace py = pybind11; +using namespace ::aare; + +// Disable warnings for unused parameters, as we ignore some +// in the __exit__ method +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" + +auto read_dat_frame(JungfrauDataFile &self) { + py::array_t header(1); + py::array_t image({ + self.rows(), + self.cols() + }); + + self.read_into(reinterpret_cast(image.mutable_data()), + header.mutable_data()); + + return py::make_tuple(header, image); +} + +auto read_n_dat_frames(JungfrauDataFile &self, size_t n_frames) { + // adjust for actual frames left in the file + n_frames = std::min(n_frames, self.total_frames() - self.tell()); + if (n_frames == 0) { + throw std::runtime_error("No frames left in file"); + } + + py::array_t header(n_frames); + py::array_t image({ + n_frames, self.rows(), + self.cols()}); + + self.read_into(reinterpret_cast(image.mutable_data()), + n_frames, header.mutable_data()); + + return py::make_tuple(header, image); +} + +void define_jungfrau_data_file_io_bindings(py::module &m) { + // Make the JungfrauDataHeader usable from numpy + PYBIND11_NUMPY_DTYPE(JungfrauDataHeader, framenum, bunchid); + + py::class_(m, "JungfrauDataFile") + .def(py::init()) + .def("seek", &JungfrauDataFile::seek, + R"( + Seek to the given frame index. + )") + .def("tell", &JungfrauDataFile::tell, + R"( + Get the current frame index. 
+ )") + .def_property_readonly("rows", &JungfrauDataFile::rows) + .def_property_readonly("cols", &JungfrauDataFile::cols) + .def_property_readonly("base_name", &JungfrauDataFile::base_name) + .def_property_readonly("bytes_per_frame", + &JungfrauDataFile::bytes_per_frame) + .def_property_readonly("pixels_per_frame", + &JungfrauDataFile::pixels_per_frame) + .def_property_readonly("bytes_per_pixel", + &JungfrauDataFile::bytes_per_pixel) + .def_property_readonly("bitdepth", &JungfrauDataFile::bitdepth) + .def_property_readonly("current_file", &JungfrauDataFile::current_file) + .def_property_readonly("total_frames", &JungfrauDataFile::total_frames) + .def_property_readonly("n_files", &JungfrauDataFile::n_files) + .def("read_frame", &read_dat_frame, + R"( + Read a single frame from the file. + )") + .def("read_n", &read_n_dat_frames, + R"( + Read maximum n_frames frames from the file. + )") + .def( + "read", + [](JungfrauDataFile &self) { + self.seek(0); + auto n_frames = self.total_frames(); + return read_n_dat_frames(self, n_frames); + }, + R"( + Read all frames from the file. Seeks to the beginning before reading. 
+ )") + .def("__enter__", [](JungfrauDataFile &self) { return &self; }) + .def("__exit__", + [](JungfrauDataFile &self, + const std::optional &exc_type, + const std::optional &exc_value, + const std::optional &traceback) { + // self.close(); + }) + .def("__iter__", [](JungfrauDataFile &self) { return &self; }) + .def("__next__", [](JungfrauDataFile &self) { + try { + return read_dat_frame(self); + } catch (std::runtime_error &e) { + throw py::stop_iteration(); + } + }); +} + +#pragma GCC diagnostic pop \ No newline at end of file diff --git a/python/src/module.cpp b/python/src/module.cpp index cc55f81..5a5dc99 100644 --- a/python/src/module.cpp +++ b/python/src/module.cpp @@ -1,19 +1,32 @@ -//Files with bindings to the different classes -#include "file.hpp" -#include "raw_file.hpp" +// Files with bindings to the different classes + +//New style file naming +#include "bind_Cluster.hpp" +#include "bind_ClusterCollector.hpp" +#include "bind_ClusterFinder.hpp" +#include "bind_ClusterFinderMT.hpp" +#include "bind_ClusterFile.hpp" +#include "bind_ClusterFileSink.hpp" +#include "bind_ClusterVector.hpp" + +//TODO! 
migrate the other names #include "ctb_raw_file.hpp" +#include "file.hpp" +#include "fit.hpp" +#include "interpolation.hpp" +#include "raw_sub_file.hpp" #include "raw_master_file.hpp" +#include "raw_file.hpp" #ifdef HDF5_FOUND #include "hdf5_file.hpp" #include "hdf5_master_file.hpp" #endif -#include "var_cluster.hpp" #include "pixel_map.hpp" +#include "var_cluster.hpp" #include "pedestal.hpp" -#include "cluster.hpp" -#include "cluster_file.hpp" +#include "jungfrau_data_file.hpp" -//Pybind stuff +// Pybind stuff #include #include @@ -22,6 +35,7 @@ namespace py = pybind11; PYBIND11_MODULE(_aare, m) { define_file_io_bindings(m); define_raw_file_io_bindings(m); + define_raw_sub_file_io_bindings(m); define_ctb_raw_file_io_bindings(m); define_raw_master_file_bindings(m); #ifdef HDF5_FOUND @@ -30,8 +44,65 @@ PYBIND11_MODULE(_aare, m) { #endif define_var_cluster_finder_bindings(m); define_pixel_map_bindings(m); - define_pedestal_bindings(m, "Pedestal"); - define_pedestal_bindings(m, "Pedestal_float32"); - define_cluster_finder_bindings(m); - define_cluster_file_io_bindings(m); -} \ No newline at end of file + define_pedestal_bindings(m, "Pedestal_d"); + define_pedestal_bindings(m, "Pedestal_f"); + define_fit_bindings(m); + define_interpolation_bindings(m); + define_jungfrau_data_file_io_bindings(m); + + define_ClusterFile(m, "Cluster3x3i"); + define_ClusterFile(m, "Cluster3x3d"); + define_ClusterFile(m, "Cluster3x3f"); + define_ClusterFile(m, "Cluster2x2i"); + define_ClusterFile(m, "Cluster2x2f"); + define_ClusterFile(m, "Cluster2x2d"); + + define_ClusterVector(m, "Cluster3x3i"); + define_ClusterVector(m, "Cluster3x3d"); + define_ClusterVector(m, "Cluster3x3f"); + define_ClusterVector(m, "Cluster2x2i"); + define_ClusterVector(m, "Cluster2x2d"); + define_ClusterVector(m, "Cluster2x2f"); + + define_ClusterFinder(m, "Cluster3x3i"); + define_ClusterFinder(m, "Cluster3x3d"); + define_ClusterFinder(m, "Cluster3x3f"); + define_ClusterFinder(m, "Cluster2x2i"); + 
define_ClusterFinder(m, "Cluster2x2d"); + define_ClusterFinder(m, "Cluster2x2f"); + + define_ClusterFinderMT(m, "Cluster3x3i"); + define_ClusterFinderMT(m, "Cluster3x3d"); + define_ClusterFinderMT(m, "Cluster3x3f"); + define_ClusterFinderMT(m, "Cluster2x2i"); + define_ClusterFinderMT(m, "Cluster2x2d"); + define_ClusterFinderMT(m, "Cluster2x2f"); + + define_ClusterFileSink(m, "Cluster3x3i"); + define_ClusterFileSink(m, "Cluster3x3d"); + define_ClusterFileSink(m, "Cluster3x3f"); + define_ClusterFileSink(m, "Cluster2x2i"); + define_ClusterFileSink(m, "Cluster2x2d"); + define_ClusterFileSink(m, "Cluster2x2f"); + + define_ClusterCollector(m, "Cluster3x3i"); + define_ClusterCollector(m, "Cluster3x3d"); + define_ClusterCollector(m, "Cluster3x3f"); + define_ClusterCollector(m, "Cluster2x2i"); + define_ClusterCollector(m, "Cluster2x2d"); + define_ClusterCollector(m, "Cluster2x2f"); + + define_Cluster(m, "3x3i"); + define_Cluster(m, "3x3f"); + define_Cluster(m, "3x3d"); + define_Cluster(m, "2x2i"); + define_Cluster(m, "2x2f"); + define_Cluster(m, "2x2d"); + + register_calculate_eta(m); + register_calculate_eta(m); + register_calculate_eta(m); + register_calculate_eta(m); + register_calculate_eta(m); + register_calculate_eta(m); +} diff --git a/python/src/np_helper.hpp b/python/src/np_helper.hpp index e0c145b..78166aa 100644 --- a/python/src/np_helper.hpp +++ b/python/src/np_helper.hpp @@ -10,9 +10,10 @@ #include "aare/NDView.hpp" namespace py = pybind11; +using namespace aare; // Pass image data back to python as a numpy array -template +template py::array return_image_data(aare::NDArray *image) { py::capsule free_when_done(image, [](void *f) { @@ -39,78 +40,47 @@ template py::array return_vector(std::vector *vec) { free_when_done); // numpy array references this parent } -// template py::array do_read(Reader &r, size_t n_frames) { -// py::array image; -// if (n_frames == 0) -// n_frames = r.total_frames(); - -// std::array shape{static_cast(n_frames), r.rows(), -// 
r.cols()}; -// const uint8_t item_size = r.bytes_per_pixel(); -// if (item_size == 1) { -// image = py::array_t( -// shape); -// } else if (item_size == 2) { -// image = -// py::array_t( -// shape); -// } else if (item_size == 4) { -// image = -// py::array_t( -// shape); -// } -// r.read_into(reinterpret_cast(image.mutable_data()), n_frames); -// return image; -// } - -// py::array return_frame(pl::Frame *ptr) { -// py::capsule free_when_done(ptr, [](void *f) { -// pl::Frame *foo = reinterpret_cast(f); -// delete foo; -// }); - -// const uint8_t item_size = ptr->bytes_per_pixel(); -// std::vector shape; -// for (auto val : ptr->shape()) -// if (val > 1) -// shape.push_back(val); - -// std::vector strides; -// if (shape.size() == 1) -// strides.push_back(item_size); -// else if (shape.size() == 2) { -// strides.push_back(item_size * shape[1]); -// strides.push_back(item_size); -// } - -// if (item_size == 1) -// return py::array_t( -// shape, strides, -// reinterpret_cast(ptr->data()), free_when_done); -// else if (item_size == 2) -// return py::array_t(shape, strides, -// reinterpret_cast(ptr->data()), -// free_when_done); -// else if (item_size == 4) -// return py::array_t(shape, strides, -// reinterpret_cast(ptr->data()), -// free_when_done); -// return {}; -// } - // todo rewrite generic -template auto get_shape_3d(py::array_t arr) { +template +auto get_shape_3d(const py::array_t &arr) { return aare::Shape<3>{arr.shape(0), arr.shape(1), arr.shape(2)}; } -template auto make_view_3d(py::array_t arr) { +template auto make_view_3d(py::array_t &arr) { return aare::NDView(arr.mutable_data(), get_shape_3d(arr)); } -template auto get_shape_2d(py::array_t arr) { +template +auto get_shape_2d(const py::array_t &arr) { return aare::Shape<2>{arr.shape(0), arr.shape(1)}; } -template auto make_view_2d(py::array_t arr) { +template +auto get_shape_1d(const py::array_t &arr) { + return aare::Shape<1>{arr.shape(0)}; +} + +template auto make_view_2d(py::array_t &arr) { return 
aare::NDView(arr.mutable_data(), get_shape_2d(arr)); -} \ No newline at end of file +} +template auto make_view_1d(py::array_t &arr) { + return aare::NDView(arr.mutable_data(), get_shape_1d(arr)); +} + +template struct fmt_format_trait; // forward declaration + +template +struct fmt_format_trait> { + + static std::string value() { + return fmt::format("T{{{}:x:{}:y:{}:data:}}", + py::format_descriptor::format(), + py::format_descriptor::format(), + fmt::format("({},{}){}", ClusterSizeX, ClusterSizeY, + py::format_descriptor::format())); + } +}; + +template +auto fmt_format = fmt_format_trait::value(); \ No newline at end of file diff --git a/python/src/pedestal.hpp b/python/src/pedestal.hpp index 4d5d043..77148dc 100644 --- a/python/src/pedestal.hpp +++ b/python/src/pedestal.hpp @@ -43,5 +43,10 @@ template void define_pedestal_bindings(py::module &m, const .def("push", [](Pedestal &pedestal, py::array_t &f) { auto v = make_view_2d(f); pedestal.push(v); - }); + }) + .def("push_no_update", [](Pedestal &pedestal, py::array_t &f) { + auto v = make_view_2d(f); + pedestal.push_no_update(v); + }, py::arg().noconvert()) + .def("update_mean", &Pedestal::update_mean); } \ No newline at end of file diff --git a/python/src/raw_file.hpp b/python/src/raw_file.hpp index 38b4896..8d72220 100644 --- a/python/src/raw_file.hpp +++ b/python/src/raw_file.hpp @@ -32,7 +32,7 @@ void define_raw_file_io_bindings(py::module &m) { shape.push_back(self.cols()); // return headers from all subfiles - py::array_t header(self.n_mod()); + py::array_t header(self.n_modules()); const uint8_t item_size = self.bytes_per_pixel(); if (item_size == 1) { @@ -61,10 +61,10 @@ void define_raw_file_io_bindings(py::module &m) { // return headers from all subfiles py::array_t header; - if (self.n_mod() == 1) { + if (self.n_modules() == 1) { header = py::array_t(n_frames); } else { - header = py::array_t({self.n_mod(), n_frames}); + header = py::array_t({self.n_modules(), n_frames}); } // py::array_t 
header({self.n_mod(), n_frames}); @@ -100,7 +100,7 @@ void define_raw_file_io_bindings(py::module &m) { .def_property_readonly("cols", &RawFile::cols) .def_property_readonly("bitdepth", &RawFile::bitdepth) .def_property_readonly("geometry", &RawFile::geometry) - .def_property_readonly("n_mod", &RawFile::n_mod) + .def_property_readonly("n_modules", &RawFile::n_modules) .def_property_readonly("detector_type", &RawFile::detector_type) .def_property_readonly("master", &RawFile::master); } \ No newline at end of file diff --git a/python/src/raw_sub_file.hpp b/python/src/raw_sub_file.hpp new file mode 100644 index 0000000..2cb83fc --- /dev/null +++ b/python/src/raw_sub_file.hpp @@ -0,0 +1,110 @@ +#include "aare/CtbRawFile.hpp" +#include "aare/File.hpp" +#include "aare/Frame.hpp" +#include "aare/RawFile.hpp" +#include "aare/RawMasterFile.hpp" +#include "aare/RawSubFile.hpp" + +#include "aare/defs.hpp" +// #include "aare/fClusterFileV2.hpp" + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace py = pybind11; +using namespace ::aare; + +auto read_frame_from_RawSubFile(RawSubFile &self) { + py::array_t header(1); + const uint8_t item_size = self.bytes_per_pixel(); + std::vector shape{static_cast(self.rows()), + static_cast(self.cols())}; + + py::array image; + if (item_size == 1) { + image = py::array_t(shape); + } else if (item_size == 2) { + image = py::array_t(shape); + } else if (item_size == 4) { + image = py::array_t(shape); + } + self.read_into(reinterpret_cast(image.mutable_data()), + header.mutable_data()); + + return py::make_tuple(header, image); +} + +auto read_n_frames_from_RawSubFile(RawSubFile &self, size_t n_frames) { + py::array_t header(n_frames); + const uint8_t item_size = self.bytes_per_pixel(); + std::vector shape{ + static_cast(n_frames), + static_cast(self.rows()), + static_cast(self.cols()) + }; + + py::array image; + if (item_size == 1) { + image = py::array_t(shape); + } else if (item_size == 2) { + image = 
py::array_t(shape); + } else if (item_size == 4) { + image = py::array_t(shape); + } + self.read_into(reinterpret_cast(image.mutable_data()), n_frames, + header.mutable_data()); + + return py::make_tuple(header, image); +} + + +//Disable warnings for unused parameters, as we ignore some +//in the __exit__ method +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" + +void define_raw_sub_file_io_bindings(py::module &m) { + py::class_(m, "RawSubFile") + .def(py::init()) + .def_property_readonly("bytes_per_frame", &RawSubFile::bytes_per_frame) + .def_property_readonly("pixels_per_frame", + &RawSubFile::pixels_per_frame) + .def_property_readonly("bytes_per_pixel", &RawSubFile::bytes_per_pixel) + .def("seek", &RawSubFile::seek) + .def("tell", &RawSubFile::tell) + .def_property_readonly("rows", &RawSubFile::rows) + .def_property_readonly("cols", &RawSubFile::cols) + .def_property_readonly("frames_in_file", &RawSubFile::frames_in_file) + .def("read_frame", &read_frame_from_RawSubFile) + .def("read_n", &read_n_frames_from_RawSubFile) + .def("read", [](RawSubFile &self){ + self.seek(0); + auto n_frames = self.frames_in_file(); + return read_n_frames_from_RawSubFile(self, n_frames); + }) + .def("__enter__", [](RawSubFile &self) { return &self; }) + .def("__exit__", + [](RawSubFile &self, + const std::optional &exc_type, + const std::optional &exc_value, + const std::optional &traceback) { + }) + .def("__iter__", [](RawSubFile &self) { return &self; }) + .def("__next__", [](RawSubFile &self) { + try { + return read_frame_from_RawSubFile(self); + } catch (std::runtime_error &e) { + throw py::stop_iteration(); + } + }); + +} + +#pragma GCC diagnostic pop \ No newline at end of file diff --git a/python/src/var_cluster.hpp b/python/src/var_cluster.hpp index f3a5741..f7b373f 100644 --- a/python/src/var_cluster.hpp +++ b/python/src/var_cluster.hpp @@ -19,15 +19,24 @@ using namespace::aare; void define_var_cluster_finder_bindings(py::module &m) { 
PYBIND11_NUMPY_DTYPE(VarClusterFinder::Hit, size, row, col, - reserved, energy, max); + reserved, energy, max, rows, cols, enes); py::class_>(m, "VarClusterFinder") .def(py::init, double>()) .def("labeled", [](VarClusterFinder &self) { - auto ptr = new NDArray(self.labeled()); + auto *ptr = new NDArray(self.labeled()); return return_image_data(ptr); }) + .def("set_noiseMap", + [](VarClusterFinder &self, + py::array_t + noise_map) { + auto noise_map_span = make_view_2d(noise_map); + self.set_noiseMap(noise_map_span); + }) + .def("set_peripheralThresholdFactor", + &VarClusterFinder::set_peripheralThresholdFactor) .def("find_clusters", [](VarClusterFinder &self, py::array_t @@ -35,6 +44,30 @@ void define_var_cluster_finder_bindings(py::module &m) { auto view = make_view_2d(img); self.find_clusters(view); }) + .def("find_clusters_X", + [](VarClusterFinder &self, + py::array_t + img) { + auto img_span = make_view_2d(img); + self.find_clusters_X(img_span); + }) + .def("single_pass", + [](VarClusterFinder &self, + py::array_t + img) { + auto img_span = make_view_2d(img); + self.single_pass(img_span); + }) + .def("hits", + [](VarClusterFinder &self) { + auto ptr = new std::vector::Hit>( + self.steal_hits()); + return return_vector(ptr); + }) + .def("clear_hits", + [](VarClusterFinder &self) { + self.clear_hits(); + }) .def("steal_hits", [](VarClusterFinder &self) { auto ptr = new std::vector::Hit>( diff --git a/python/tests/conftest.py b/python/tests/conftest.py new file mode 100644 index 0000000..fbcfeb3 --- /dev/null +++ b/python/tests/conftest.py @@ -0,0 +1,34 @@ +import os +from pathlib import Path +import pytest + + + +def pytest_addoption(parser): + parser.addoption( + "--files", action="store_true", default=False, help="run slow tests" + ) + + +def pytest_configure(config): + config.addinivalue_line("markers", "files: mark test as needing image files to run") + + +def pytest_collection_modifyitems(config, items): + if config.getoption("--files"): + return + skip = 
pytest.mark.skip(reason="need --files option to run") + for item in items: + if "files" in item.keywords: + item.add_marker(skip) + + +@pytest.fixture +def test_data_path(): + env_value = os.environ.get("AARE_TEST_DATA") + if not env_value: + raise RuntimeError("Environment variable AARE_TEST_DATA is not set or is empty") + + return Path(env_value) + + diff --git a/python/tests/test_Cluster.py b/python/tests/test_Cluster.py new file mode 100644 index 0000000..ddaa6f3 --- /dev/null +++ b/python/tests/test_Cluster.py @@ -0,0 +1,110 @@ +import pytest +import numpy as np + +from aare import _aare #import the C++ module +from conftest import test_data_path + + +def test_cluster_vector_can_be_converted_to_numpy(): + cv = _aare.ClusterVector_Cluster3x3i() + arr = np.array(cv, copy=False) + assert arr.shape == (0,) # 4 for x, y, size, energy and 9 for the cluster data + + +def test_ClusterVector(): + """Test ClusterVector""" + + clustervector = _aare.ClusterVector_Cluster3x3i() + assert clustervector.cluster_size_x == 3 + assert clustervector.cluster_size_y == 3 + assert clustervector.item_size() == 4+9*4 + assert clustervector.frame_number == 0 + assert clustervector.size == 0 + + cluster = _aare.Cluster3x3i(0,0,np.ones(9, dtype=np.int32)) + + clustervector.push_back(cluster) + assert clustervector.size == 1 + + with pytest.raises(TypeError): # Or use the appropriate exception type + clustervector.push_back(_aare.Cluster2x2i(0,0,np.ones(4, dtype=np.int32))) + + with pytest.raises(TypeError): + clustervector.push_back(_aare.Cluster3x3f(0,0,np.ones(9, dtype=np.float32))) + +def test_Interpolator(): + """Test Interpolator""" + + ebins = np.linspace(0,10, 20, dtype=np.float64) + xbins = np.linspace(0, 5, 30, dtype=np.float64) + ybins = np.linspace(0, 5, 30, dtype=np.float64) + + etacube = np.zeros(shape=[30, 30, 20], dtype=np.float64) + interpolator = _aare.Interpolator(etacube, xbins, ybins, ebins) + + assert interpolator.get_ietax().shape == (30,30,20) + assert 
interpolator.get_ietay().shape == (30,30,20) + clustervector = _aare.ClusterVector_Cluster3x3i() + + cluster = _aare.Cluster3x3i(0,0, np.ones(9, dtype=np.int32)) + clustervector.push_back(cluster) + + interpolated_photons = interpolator.interpolate(clustervector) + + assert interpolated_photons.size == 1 + + assert interpolated_photons[0]["x"] == -1 + assert interpolated_photons[0]["y"] == -1 + assert interpolated_photons[0]["energy"] == 4 #eta_sum = 4, dx, dy = -1,-1 m_ietax = 0, m_ietay = 0 + + clustervector = _aare.ClusterVector_Cluster2x2i() + + cluster = _aare.Cluster2x2i(0,0, np.ones(4, dtype=np.int32)) + clustervector.push_back(cluster) + + interpolated_photons = interpolator.interpolate(clustervector) + + assert interpolated_photons.size == 1 + + assert interpolated_photons[0]["x"] == 0 + assert interpolated_photons[0]["y"] == 0 + assert interpolated_photons[0]["energy"] == 4 + + + +def test_calculate_eta(): + """Calculate Eta""" + clusters = _aare.ClusterVector_Cluster3x3i() + clusters.push_back(_aare.Cluster3x3i(0,0, np.ones(9, dtype=np.int32))) + clusters.push_back(_aare.Cluster3x3i(0,0, np.array([1,1,1,2,2,2,3,3,3]))) + + eta2 = _aare.calculate_eta2(clusters) + + assert eta2.shape == (2,2) + assert eta2[0,0] == 0.5 + assert eta2[0,1] == 0.5 + assert eta2[1,0] == 0.5 + assert eta2[1,1] == 0.6 #1/5 + +def test_cluster_finder(): + """Test ClusterFinder""" + + clusterfinder = _aare.ClusterFinder_Cluster3x3i([100,100]) + + #frame = np.random.rand(100,100) + frame = np.zeros(shape=[100,100]) + + clusterfinder.find_clusters(frame) + + clusters = clusterfinder.steal_clusters(False) #conversion does not work + + assert clusters.size == 0 + + + + + + + + + diff --git a/python/tests/test_ClusterFile.py b/python/tests/test_ClusterFile.py new file mode 100644 index 0000000..4126a6c --- /dev/null +++ b/python/tests/test_ClusterFile.py @@ -0,0 +1,64 @@ + +import pytest +import numpy as np +import boost_histogram as bh +import time +from pathlib import Path +import 
pickle + +from aare import ClusterFile +from conftest import test_data_path + +@pytest.mark.files +def test_cluster_file(test_data_path): + """Test ClusterFile""" + f = ClusterFile(test_data_path / "clust/single_frame_97_clustrers.clust") + cv = f.read_clusters(10) #conversion does not work + + + assert cv.frame_number == 135 + assert cv.size == 10 + + #Known data + #frame_number, num_clusters [135] 97 + #[ 1 200] [0 1 2 3 4 5 6 7 8] + #[ 2 201] [ 9 10 11 12 13 14 15 16 17] + #[ 3 202] [18 19 20 21 22 23 24 25 26] + #[ 4 203] [27 28 29 30 31 32 33 34 35] + #[ 5 204] [36 37 38 39 40 41 42 43 44] + #[ 6 205] [45 46 47 48 49 50 51 52 53] + #[ 7 206] [54 55 56 57 58 59 60 61 62] + #[ 8 207] [63 64 65 66 67 68 69 70 71] + #[ 9 208] [72 73 74 75 76 77 78 79 80] + #[ 10 209] [81 82 83 84 85 86 87 88 89] + + #conversion to numpy array + arr = np.array(cv, copy = False) + + assert arr.size == 10 + for i in range(10): + assert arr[i]['x'] == i+1 + +@pytest.mark.files +def test_read_clusters_and_fill_histogram(test_data_path): + # Create the histogram + n_bins = 100 + xmin = -100 + xmax = 1e4 + hist_aare = bh.Histogram(bh.axis.Regular(n_bins, xmin, xmax)) + + fname = test_data_path / "clust/beam_En700eV_-40deg_300V_10us_d0_f0_100.clust" + + #Read clusters and fill the histogram with pixel values + with ClusterFile(fname, chunk_size = 10000) as f: + for clusters in f: + arr = np.array(clusters, copy = False) + hist_aare.fill(arr['data'].flat) + + + #Load the histogram from the pickle file + with open(fname.with_suffix('.pkl'), 'rb') as f: + hist_py = pickle.load(f) + + #Compare the two histograms + assert hist_aare == hist_py \ No newline at end of file diff --git a/python/tests/test_ClusterVector.py b/python/tests/test_ClusterVector.py new file mode 100644 index 0000000..b64aeef --- /dev/null +++ b/python/tests/test_ClusterVector.py @@ -0,0 +1,54 @@ +import pytest +import numpy as np +import boost_histogram as bh +import time +from pathlib import Path +import pickle + +from 
aare import ClusterFile +from aare import _aare +from conftest import test_data_path + + +def test_create_cluster_vector(): + cv = _aare.ClusterVector_Cluster3x3i() + assert cv.cluster_size_x == 3 + assert cv.cluster_size_y == 3 + assert cv.size == 0 + + +def test_push_back_on_cluster_vector(): + cv = _aare.ClusterVector_Cluster2x2i() + assert cv.cluster_size_x == 2 + assert cv.cluster_size_y == 2 + assert cv.size == 0 + + cluster = _aare.Cluster2x2i(19, 22, np.ones(4, dtype=np.int32)) + cv.push_back(cluster) + assert cv.size == 1 + + arr = np.array(cv, copy=False) + assert arr[0]['x'] == 19 + assert arr[0]['y'] == 22 + + +def test_make_a_hitmap_from_cluster_vector(): + cv = _aare.ClusterVector_Cluster3x3i() + + # Push back 4 clusters with different positions + cv.push_back(_aare.Cluster3x3i(0, 0, np.ones(9, dtype=np.int32))) + cv.push_back(_aare.Cluster3x3i(1, 1, np.ones(9, dtype=np.int32))) + cv.push_back(_aare.Cluster3x3i(1, 1, np.ones(9, dtype=np.int32))) + cv.push_back(_aare.Cluster3x3i(2, 2, np.ones(9, dtype=np.int32))) + + ref = np.zeros((5, 5), dtype=np.int32) + ref[0,0] = 1 + ref[1,1] = 2 + ref[2,2] = 1 + + + img = _aare.hitmap((5,5), cv) + # print(img) + # print(ref) + assert (img == ref).all() + \ No newline at end of file diff --git a/python/tests/test_RawSubFile.py b/python/tests/test_RawSubFile.py new file mode 100644 index 0000000..aa4721a --- /dev/null +++ b/python/tests/test_RawSubFile.py @@ -0,0 +1,39 @@ +import pytest +import numpy as np +from aare import RawSubFile, DetectorType + + +@pytest.mark.files +def test_read_a_jungfrau_RawSubFile(test_data_path): + + # Starting with f1 there is now 7 frames left in the series of files + with RawSubFile(test_data_path / "raw/jungfrau/jungfrau_single_d0_f1_0.raw", DetectorType.Jungfrau, 512, 1024, 16) as f: + assert f.frames_in_file == 7 + + headers, frames = f.read() + + assert headers.size == 7 + assert frames.shape == (7, 512, 1024) + + + for i,h in zip(range(4,11,1), headers): + assert 
h["frameNumber"] == i + + # Compare to canned data using numpy + data = np.load(test_data_path / "raw/jungfrau/jungfrau_single_0.npy") + assert np.all(data[3:] == frames) + +@pytest.mark.files +def test_iterate_over_a_jungfrau_RawSubFile(test_data_path): + + data = np.load(test_data_path / "raw/jungfrau/jungfrau_single_0.npy") + + # Given the first subfile in a series we can read all frames from f0, f1, f2...fN + with RawSubFile(test_data_path / "raw/jungfrau/jungfrau_single_d0_f0_0.raw", DetectorType.Jungfrau, 512, 1024, 16) as f: + i = 0 + for header, frame in f: + assert header["frameNumber"] == i+1 + assert np.all(frame == data[i]) + i += 1 + assert i == 10 + assert header["frameNumber"] == 10 diff --git a/python/tests/test_jungfrau_dat_files.py b/python/tests/test_jungfrau_dat_files.py new file mode 100644 index 0000000..5d3fdf8 --- /dev/null +++ b/python/tests/test_jungfrau_dat_files.py @@ -0,0 +1,92 @@ +import pytest +import numpy as np +from aare import JungfrauDataFile + +@pytest.mark.files +def test_jfungfrau_dat_read_number_of_frames(test_data_path): + with JungfrauDataFile(test_data_path / "dat/AldoJF500k_000000.dat") as dat_file: + assert dat_file.total_frames == 24 + + with JungfrauDataFile(test_data_path / "dat/AldoJF250k_000000.dat") as dat_file: + assert dat_file.total_frames == 53 + + with JungfrauDataFile(test_data_path / "dat/AldoJF65k_000000.dat") as dat_file: + assert dat_file.total_frames == 113 + + +@pytest.mark.files +def test_jfungfrau_dat_read_number_of_file(test_data_path): + with JungfrauDataFile(test_data_path / "dat/AldoJF500k_000000.dat") as dat_file: + assert dat_file.n_files == 4 + + with JungfrauDataFile(test_data_path / "dat/AldoJF250k_000000.dat") as dat_file: + assert dat_file.n_files == 7 + + with JungfrauDataFile(test_data_path / "dat/AldoJF65k_000000.dat") as dat_file: + assert dat_file.n_files == 7 + + +@pytest.mark.files +def test_read_module(test_data_path): + """ + Read all frames from the series of .dat files. 
Compare to canned data in npz format. + """ + + # Read all frames from the .dat file + with JungfrauDataFile(test_data_path / "dat/AldoJF500k_000000.dat") as f: + header, data = f.read() + + #Sanity check + n_frames = 24 + assert header.size == n_frames + assert data.shape == (n_frames, 512, 1024) + + # Read reference data using numpy + with np.load(test_data_path / "dat/AldoJF500k.npz") as f: + ref_header = f["headers"] + ref_data = f["frames"] + + # Check that the data is the same + assert np.all(ref_header == header) + assert np.all(ref_data == data) + +@pytest.mark.files +def test_read_half_module(test_data_path): + + # Read all frames from the .dat file + with JungfrauDataFile(test_data_path / "dat/AldoJF250k_000000.dat") as f: + header, data = f.read() + + n_frames = 53 + assert header.size == n_frames + assert data.shape == (n_frames, 256, 1024) + + # Read reference data using numpy + with np.load(test_data_path / "dat/AldoJF250k.npz") as f: + ref_header = f["headers"] + ref_data = f["frames"] + + # Check that the data is the same + assert np.all(ref_header == header) + assert np.all(ref_data == data) + + +@pytest.mark.files +def test_read_single_chip(test_data_path): + + # Read all frames from the .dat file + with JungfrauDataFile(test_data_path / "dat/AldoJF65k_000000.dat") as f: + header, data = f.read() + + n_frames = 113 + assert header.size == n_frames + assert data.shape == (n_frames, 256, 256) + + # Read reference data using numpy + with np.load(test_data_path / "dat/AldoJF65k.npz") as f: + ref_header = f["headers"] + ref_data = f["frames"] + + # Check that the data is the same + assert np.all(ref_header == header) + assert np.all(ref_data == data) \ No newline at end of file diff --git a/src/CalculateEta.test.cpp b/src/CalculateEta.test.cpp new file mode 100644 index 0000000..820ab44 --- /dev/null +++ b/src/CalculateEta.test.cpp @@ -0,0 +1,127 @@ +/************************************************ + * @file CalculateEta.test.cpp + * @short test case 
to calculate_eta2 + ***********************************************/ + +#include "aare/CalculateEta.hpp" +#include "aare/Cluster.hpp" +#include "aare/ClusterFile.hpp" + +// #include "catch.hpp" +#include +#include +#include + +using namespace aare; + +using ClusterTypes = + std::variant, Cluster, Cluster, + Cluster, Cluster>; + +auto get_test_parameters() { + return GENERATE( + std::make_tuple(ClusterTypes{Cluster{0, 0, {1, 2, 3, 1}}}, + Eta2{2. / 3, 3. / 4, + static_cast(corner::cBottomLeft), 7}), + std::make_tuple( + ClusterTypes{Cluster{0, 0, {1, 2, 3, 4, 5, 6, 1, 2, 7}}}, + Eta2{6. / 11, 2. / 7, static_cast(corner::cTopRight), + 20}), + std::make_tuple(ClusterTypes{Cluster{ + 0, 0, {1, 6, 7, 6, 5, 4, 3, 2, 1, 2, 8, 9, 8, + 1, 4, 5, 6, 7, 8, 4, 1, 1, 1, 1, 1}}}, + Eta2{8. / 17, 7. / 15, 9, 30}), + std::make_tuple( + ClusterTypes{Cluster{0, 0, {1, 4, 7, 2, 5, 6, 4, 3}}}, + Eta2{4. / 10, 4. / 11, 1, 21}), + std::make_tuple( + ClusterTypes{Cluster{0, 0, {1, 3, 2, 3, 4, 2}}}, + Eta2{3. / 5, 2. 
/ 5, 1, 11})); +} + +TEST_CASE("compute_largest_2x2_subcluster", "[eta_calculation]") { + auto [cluster, expected_eta] = get_test_parameters(); + + auto [sum, index] = std::visit( + [](const auto &clustertype) { return clustertype.max_sum_2x2(); }, + cluster); + CHECK(expected_eta.c == index); + CHECK(expected_eta.sum == sum); +} + +TEST_CASE("calculate_eta2", "[eta_calculation]") { + + auto [cluster, expected_eta] = get_test_parameters(); + + auto eta = std::visit( + [](const auto &clustertype) { return calculate_eta2(clustertype); }, + cluster); + + CHECK(eta.x == expected_eta.x); + CHECK(eta.y == expected_eta.y); + CHECK(eta.c == expected_eta.c); + CHECK(eta.sum == expected_eta.sum); +} + +// 3x3 cluster layout (rotated to match the cBottomLeft enum): +// 6, 7, 8 +// 3, 4, 5 +// 0, 1, 2 + +TEST_CASE("Calculate eta2 for a 3x3 int32 cluster with the largest 2x2 sum in " + "the bottom left", + "[eta_calculation]") { + + // Create a 3x3 cluster + Cluster cl; + cl.x = 0; + cl.y = 0; + cl.data[0] = 30; + cl.data[1] = 23; + cl.data[2] = 5; + cl.data[3] = 20; + cl.data[4] = 50; + cl.data[5] = 3; + cl.data[6] = 8; + cl.data[7] = 2; + cl.data[8] = 3; + + // 8, 2, 3 + // 20, 50, 3 + // 30, 23, 5 + + auto eta = calculate_eta2(cl); + CHECK(eta.c == static_cast(corner::cBottomLeft)); + CHECK(eta.x == 50.0 / (20 + 50)); // 4/(3+4) + CHECK(eta.y == 50.0 / (23 + 50)); // 4/(1+4) + CHECK(eta.sum == 30 + 23 + 20 + 50); +} + +TEST_CASE("Calculate eta2 for a 3x3 int32 cluster with the largest 2x2 sum in " + "the top left", + "[eta_calculation]") { + + // Create a 3x3 cluster + Cluster cl; + cl.x = 0; + cl.y = 0; + cl.data[0] = 8; + cl.data[1] = 12; + cl.data[2] = 5; + cl.data[3] = 77; + cl.data[4] = 80; + cl.data[5] = 3; + cl.data[6] = 82; + cl.data[7] = 91; + cl.data[8] = 3; + + // 82, 91, 3 + // 77, 80, 3 + // 8, 12, 5 + + auto eta = calculate_eta2(cl); + CHECK(eta.c == static_cast(corner::cTopLeft)); + CHECK(eta.x == 80. 
/ (77 + 80)); // 4/(3+4) + CHECK(eta.y == 91.0 / (91 + 80)); // 7/(7+4) + CHECK(eta.sum == 77 + 80 + 82 + 91); +} diff --git a/src/Cluster.test.cpp b/src/Cluster.test.cpp new file mode 100644 index 0000000..ba9cda1 --- /dev/null +++ b/src/Cluster.test.cpp @@ -0,0 +1,21 @@ +/************************************************ + * @file test-Cluster.cpp + * @short test case for generic Cluster, ClusterVector, and calculate_eta2 + ***********************************************/ + +#include "aare/Cluster.hpp" +#include "aare/CalculateEta.hpp" +#include "aare/ClusterFile.hpp" + +// #include "catch.hpp" +#include +#include +#include + +using namespace aare; + +TEST_CASE("Test sum of Cluster", "[.cluster]") { + Cluster cluster{0, 0, {1, 2, 3, 4}}; + + CHECK(cluster.sum() == 10); +} \ No newline at end of file diff --git a/src/ClusterFile.cpp b/src/ClusterFile.cpp index 3daa9d6..d24e803 100644 --- a/src/ClusterFile.cpp +++ b/src/ClusterFile.cpp @@ -1,34 +1,115 @@ #include "aare/ClusterFile.hpp" +#include + namespace aare { -ClusterFile::ClusterFile(const std::filesystem::path &fname, size_t chunk_size): m_chunk_size(chunk_size) { - fp = fopen(fname.c_str(), "rb"); - if (!fp) { - throw std::runtime_error("Could not open file: " + fname.string()); +ClusterFile::ClusterFile(const std::filesystem::path &fname, size_t chunk_size, + const std::string &mode) + : m_chunk_size(chunk_size), m_mode(mode) { + + if (mode == "r") { + fp = fopen(fname.c_str(), "rb"); + if (!fp) { + throw std::runtime_error("Could not open file for reading: " + + fname.string()); + } + } else if (mode == "w") { + fp = fopen(fname.c_str(), "wb"); + if (!fp) { + throw std::runtime_error("Could not open file for writing: " + + fname.string()); + } + } else if (mode == "a") { + fp = fopen(fname.c_str(), "ab"); + if (!fp) { + throw std::runtime_error("Could not open file for appending: " + + fname.string()); + } + } else { + throw std::runtime_error("Unsupported mode: " + mode); } } -ClusterFile::~ClusterFile() 
{ - close(); +void ClusterFile::set_roi(ROI roi){ + m_roi = roi; } -void ClusterFile::close(){ - if (fp){ +void ClusterFile::set_noise_map(const NDView noise_map){ + m_noise_map = NDArray(noise_map); +} + +void ClusterFile::set_gain_map(const NDView gain_map){ + m_gain_map = NDArray(gain_map); + + // Gain map is passed as ADU/keV to avoid dividing in when applying the gain + // map we invert it here + for (auto &item : m_gain_map->view()) { + item = 1.0 / item; + } +} + +ClusterFile::~ClusterFile() { close(); } + +void ClusterFile::close() { + if (fp) { fclose(fp); fp = nullptr; - } + } } -std::vector ClusterFile::read_clusters(size_t n_clusters) { - std::vector clusters(n_clusters); +void ClusterFile::write_frame(const ClusterVector &clusters) { + if (m_mode != "w" && m_mode != "a") { + throw std::runtime_error("File not opened for writing"); + } + if (!(clusters.cluster_size_x() == 3) && + !(clusters.cluster_size_y() == 3)) { + throw std::runtime_error("Only 3x3 clusters are supported"); + } + //First write the frame number - 4 bytes + int32_t frame_number = clusters.frame_number(); + if(fwrite(&frame_number, sizeof(frame_number), 1, fp)!=1){ + throw std::runtime_error(LOCATION + "Could not write frame number"); + } + + //Then write the number of clusters - 4 bytes + uint32_t n_clusters = clusters.size(); + if(fwrite(&n_clusters, sizeof(n_clusters), 1, fp)!=1){ + throw std::runtime_error(LOCATION + "Could not write number of clusters"); + } + + //Now write the clusters in the frame + if(fwrite(clusters.data(), clusters.item_size(), clusters.size(), fp)!=clusters.size()){ + throw std::runtime_error(LOCATION + "Could not write clusters"); + } +} + + +ClusterVector ClusterFile::read_clusters(size_t n_clusters){ + if (m_mode != "r") { + throw std::runtime_error("File not opened for reading"); + } + if (m_noise_map || m_roi){ + return read_clusters_with_cut(n_clusters); + }else{ + return read_clusters_without_cut(n_clusters); + } +} + +ClusterVector 
ClusterFile::read_clusters_without_cut(size_t n_clusters) { + if (m_mode != "r") { + throw std::runtime_error("File not opened for reading"); + } + + ClusterVector clusters(3,3, n_clusters); int32_t iframe = 0; // frame number needs to be 4 bytes! size_t nph_read = 0; uint32_t nn = m_num_left; uint32_t nph = m_num_left; // number of clusters in frame needs to be 4 - auto buf = reinterpret_cast(clusters.data()); + // auto buf = reinterpret_cast(clusters.data()); + auto buf = clusters.data(); // if there are photons left from previous frame read them first if (nph) { if (nph > n_clusters) { @@ -38,13 +119,15 @@ std::vector ClusterFile::read_clusters(size_t n_clusters) { } else { nn = nph; } - nph_read += fread(reinterpret_cast(buf + nph_read), sizeof(Cluster), nn, fp); + nph_read += fread((buf + nph_read*clusters.item_size()), + clusters.item_size(), nn, fp); m_num_left = nph - nn; // write back the number of photons left } if (nph_read < n_clusters) { // keep on reading frames and photons until reaching n_clusters while (fread(&iframe, sizeof(iframe), 1, fp)) { + clusters.set_frame_number(iframe); // read number of clusters in frame if (fread(&nph, sizeof(nph), 1, fp)) { if (nph > (n_clusters - nph_read)) @@ -52,8 +135,8 @@ std::vector ClusterFile::read_clusters(size_t n_clusters) { else nn = nph; - nph_read += - fread(reinterpret_cast(buf + nph_read), sizeof(Cluster), nn, fp); + nph_read += fread((buf + nph_read*clusters.item_size()), + clusters.item_size(), nn, fp); m_num_left = nph - nn; } if (nph_read >= n_clusters) @@ -64,260 +147,256 @@ std::vector ClusterFile::read_clusters(size_t n_clusters) { // Resize the vector to the number of clusters. // No new allocation, only change bounds. 
clusters.resize(nph_read); + if(m_gain_map) + clusters.apply_gain_map(m_gain_map->view()); return clusters; } -std::vector ClusterFile::read_frame(int32_t &out_fnum) { + +ClusterVector ClusterFile::read_clusters_with_cut(size_t n_clusters) { + ClusterVector clusters(3,3); + clusters.reserve(n_clusters); + + // if there are photons left from previous frame read them first if (m_num_left) { - throw std::runtime_error("There are still photons left in the last frame"); + while(m_num_left && clusters.size() < n_clusters){ + Cluster3x3 c = read_one_cluster(); + if(is_selected(c)){ + clusters.push_back(c.x, c.y, reinterpret_cast(c.data)); + } + } } - if (fread(&out_fnum, sizeof(out_fnum), 1, fp) != 1) { - throw std::runtime_error("Could not read frame number"); + // we did not have enough clusters left in the previous frame + // keep on reading frames until reaching n_clusters + if (clusters.size() < n_clusters) { + // sanity check + if (m_num_left) { + throw std::runtime_error(LOCATION + "Entered second loop with clusters left\n"); + } + + int32_t frame_number = 0; // frame number needs to be 4 bytes! 
+ while (fread(&frame_number, sizeof(frame_number), 1, fp)) { + if (fread(&m_num_left, sizeof(m_num_left), 1, fp)) { + clusters.set_frame_number(frame_number); //cluster vector will hold the last frame number + while(m_num_left && clusters.size() < n_clusters){ + Cluster3x3 c = read_one_cluster(); + if(is_selected(c)){ + clusters.push_back(c.x, c.y, reinterpret_cast(c.data)); + } + } + } + + // we have enough clusters, break out of the outer while loop + if (clusters.size() >= n_clusters) + break; + } + + } + if(m_gain_map) + clusters.apply_gain_map(m_gain_map->view()); + + return clusters; +} + +Cluster3x3 ClusterFile::read_one_cluster(){ + Cluster3x3 c; + auto rc = fread(&c, sizeof(c), 1, fp); + if (rc != 1) { + throw std::runtime_error(LOCATION + "Could not read cluster"); + } + --m_num_left; + return c; +} + +ClusterVector ClusterFile::read_frame(){ + if (m_mode != "r") { + throw std::runtime_error(LOCATION + "File not opened for reading"); + } + if (m_noise_map || m_roi){ + return read_frame_with_cut(); + }else{ + return read_frame_without_cut(); + } +} + +ClusterVector ClusterFile::read_frame_without_cut() { + if (m_mode != "r") { + throw std::runtime_error("File not opened for reading"); + } + if (m_num_left) { + throw std::runtime_error( + "There are still photons left in the last frame"); + } + int32_t frame_number; + if (fread(&frame_number, sizeof(frame_number), 1, fp) != 1) { + throw std::runtime_error(LOCATION + "Could not read frame number"); } int32_t n_clusters; // Saved as 32bit integer in the cluster file if (fread(&n_clusters, sizeof(n_clusters), 1, fp) != 1) { + throw std::runtime_error(LOCATION + "Could not read number of clusters"); + } + + ClusterVector clusters(3, 3, n_clusters); + clusters.set_frame_number(frame_number); + + if (fread(clusters.data(), clusters.item_size(), n_clusters, fp) != + static_cast(n_clusters)) { + throw std::runtime_error(LOCATION + "Could not read clusters"); + } + clusters.resize(n_clusters); + if (m_gain_map) + 
clusters.apply_gain_map(m_gain_map->view()); + return clusters; +} + +ClusterVector ClusterFile::read_frame_with_cut() { + if (m_mode != "r") { + throw std::runtime_error("File not opened for reading"); + } + if (m_num_left) { + throw std::runtime_error( + "There are still photons left in the last frame"); + } + int32_t frame_number; + if (fread(&frame_number, sizeof(frame_number), 1, fp) != 1) { + throw std::runtime_error("Could not read frame number"); + } + + + if (fread(&m_num_left, sizeof(m_num_left), 1, fp) != 1) { throw std::runtime_error("Could not read number of clusters"); } - std::vector clusters(n_clusters); - - if (fread(clusters.data(), sizeof(Cluster), n_clusters, fp) != static_cast(n_clusters)) { - throw std::runtime_error("Could not read clusters"); + + ClusterVector clusters(3, 3); + clusters.reserve(m_num_left); + clusters.set_frame_number(frame_number); + while(m_num_left){ + Cluster3x3 c = read_one_cluster(); + if(is_selected(c)){ + clusters.push_back(c.x, c.y, reinterpret_cast(c.data)); + } } + if (m_gain_map) + clusters.apply_gain_map(m_gain_map->view()); return clusters; - } -std::vector ClusterFile::read_cluster_with_cut(size_t n_clusters, - double *noise_map, - int nx, int ny) { - std::vector clusters(n_clusters); - // size_t read_clusters_with_cut(FILE *fp, size_t n_clusters, Cluster *buf, - // uint32_t *n_left, double *noise_map, int - // nx, int ny) { - int iframe = 0; - // uint32_t nph = *n_left; - uint32_t nph = m_num_left; - // uint32_t nn = *n_left; - uint32_t nn = m_num_left; - size_t nph_read = 0; - int32_t t2max, tot1; - int32_t tot3; - // Cluster *ptr = buf; - Cluster *ptr = clusters.data(); - int good = 1; - double noise; - // read photons left from previous frame - if (noise_map) - printf("Using noise map\n"); - - if (nph) { - if (nph > n_clusters) { - // if we have more photons left in the frame then photons to - // read we read directly the requested number - nn = n_clusters; - } else { - nn = nph; - } - for (size_t iph = 0; 
iph < nn; iph++) { - // read photons 1 by 1 - size_t n_read = fread(reinterpret_cast(ptr), sizeof(Cluster), 1, fp); - if (n_read != 1) { - clusters.resize(nph_read); - return clusters; - } - // TODO! error handling on read - good = 1; - if (noise_map) { - if (ptr->x >= 0 && ptr->x < nx && ptr->y >= 0 && ptr->y < ny) { - tot1 = ptr->data[4]; - analyze_cluster(*ptr, &t2max, &tot3, NULL, NULL, NULL, NULL, - NULL); - noise = noise_map[ptr->y * nx + ptr->x]; - if (tot1 > noise || t2max > 2 * noise || tot3 > 3 * noise) { - ; - } else { - good = 0; - printf("%d %d %f %d %d %d\n", ptr->x, ptr->y, noise, - tot1, t2max, tot3); - } - } else { - printf("Bad pixel number %d %d\n", ptr->x, ptr->y); - good = 0; - } - } - if (good) { - ptr++; - nph_read++; - } - (m_num_left)--; - if (nph_read >= n_clusters) - break; +bool ClusterFile::is_selected(Cluster3x3 &cl) { + //Should fail fast + if (m_roi) { + if (!(m_roi->contains(cl.x, cl.y))) { + return false; } } - if (nph_read < n_clusters) { - // // keep on reading frames and photons until reaching n_clusters - while (fread(&iframe, sizeof(iframe), 1, fp)) { - // // printf("%d\n",nph_read); + if (m_noise_map){ + int32_t sum_1x1 = cl.data[4]; // central pixel + int32_t sum_2x2 = cl.sum_2x2(); // highest sum of 2x2 subclusters + int32_t sum_3x3 = cl.sum(); // sum of all pixels - if (fread(&nph, sizeof(nph), 1, fp)) { - // // printf("** %d\n",nph); - m_num_left = nph; - for (size_t iph = 0; iph < nph; iph++) { - // // read photons 1 by 1 - size_t n_read = - fread(reinterpret_cast(ptr), sizeof(Cluster), 1, fp); - if (n_read != 1) { - clusters.resize(nph_read); - return clusters; - // return nph_read; - } - good = 1; - if (noise_map) { - if (ptr->x >= 0 && ptr->x < nx && ptr->y >= 0 && - ptr->y < ny) { - tot1 = ptr->data[4]; - analyze_cluster(*ptr, &t2max, &tot3, NULL, - NULL, - NULL, NULL, NULL); - // noise = noise_map[ptr->y * nx + ptr->x]; - noise = noise_map[ptr->y + ny * ptr->x]; - if (tot1 > noise || t2max > 2 * noise || - tot3 > 3 
* noise) { - ; - } else - good = 0; - } else { - printf("Bad pixel number %d %d\n", ptr->x, - ptr->y); good = 0; - } - } - if (good) { - ptr++; - nph_read++; - } - (m_num_left)--; - if (nph_read >= n_clusters) - break; - } - } - if (nph_read >= n_clusters) - break; - } + auto noise = (*m_noise_map)(cl.y, cl.x); //TODO! check if this is correct + if (sum_1x1 <= noise || sum_2x2 <= 2 * noise || sum_3x3 <= 3 * noise) { + return false; } - // printf("%d\n",nph_read); - clusters.resize(nph_read); - return clusters; - + } + //we passed all checks + return true; } -int ClusterFile::analyze_cluster(Cluster cl, int32_t *t2, int32_t *t3, char *quad, - double *eta2x, double *eta2y, double *eta3x, - double *eta3y) { - - return analyze_data(cl.data, t2, t3, quad, eta2x, eta2y, eta3x, eta3y); +NDArray calculate_eta2(ClusterVector &clusters) { + //TOTO! make work with 2x2 clusters + NDArray eta2({static_cast(clusters.size()), 2}); + + if (clusters.cluster_size_x() == 3 || clusters.cluster_size_y() == 3) { + for (size_t i = 0; i < clusters.size(); i++) { + auto e = calculate_eta2(clusters.at(i)); + eta2(i, 0) = e.x; + eta2(i, 1) = e.y; + } + }else if(clusters.cluster_size_x() == 2 || clusters.cluster_size_y() == 2){ + for (size_t i = 0; i < clusters.size(); i++) { + auto e = calculate_eta2(clusters.at(i)); + eta2(i, 0) = e.x; + eta2(i, 1) = e.y; + } + }else{ + throw std::runtime_error("Only 3x3 and 2x2 clusters are supported"); + } + + return eta2; } -int ClusterFile::analyze_data(int32_t *data, int32_t *t2, int32_t *t3, char *quad, - double *eta2x, double *eta2y, double *eta3x, double *eta3y) { +/** + * @brief Calculate the eta2 values for a 3x3 cluster and return them in a Eta2 struct + * containing etay, etax and the corner of the cluster. 
+*/ +Eta2 calculate_eta2(Cluster3x3 &cl) { + Eta2 eta{}; - int ok = 1; + std::array tot2; + tot2[0] = cl.data[0] + cl.data[1] + cl.data[3] + cl.data[4]; + tot2[1] = cl.data[1] + cl.data[2] + cl.data[4] + cl.data[5]; + tot2[2] = cl.data[3] + cl.data[4] + cl.data[6] + cl.data[7]; + tot2[3] = cl.data[4] + cl.data[5] + cl.data[7] + cl.data[8]; - int32_t tot2[4]; - int32_t t2max = 0; - char c = 0; - int32_t val, tot3; - - tot3 = 0; - for (int i = 0; i < 4; i++) - tot2[i] = 0; - - for (int ix = 0; ix < 3; ix++) { - for (int iy = 0; iy < 3; iy++) { - val = data[iy * 3 + ix]; - // printf ("%d ",data[iy * 3 + ix]); - tot3 += val; - if (ix <= 1 && iy <= 1) - tot2[cBottomLeft] += val; - if (ix >= 1 && iy <= 1) - tot2[cBottomRight] += val; - if (ix <= 1 && iy >= 1) - tot2[cTopLeft] += val; - if (ix >= 1 && iy >= 1) - tot2[cTopRight] += val; - } - // printf ("\n"); + auto c = std::max_element(tot2.begin(), tot2.end()) - tot2.begin(); + eta.sum = tot2[c]; + switch (c) { + case cBottomLeft: + if ((cl.data[3] + cl.data[4]) != 0) + eta.x = + static_cast(cl.data[4]) / (cl.data[3] + cl.data[4]); + if ((cl.data[1] + cl.data[4]) != 0) + eta.y = + static_cast(cl.data[4]) / (cl.data[1] + cl.data[4]); + eta.c = cBottomLeft; + break; + case cBottomRight: + if ((cl.data[2] + cl.data[5]) != 0) + eta.x = + static_cast(cl.data[5]) / (cl.data[4] + cl.data[5]); + if ((cl.data[1] + cl.data[4]) != 0) + eta.y = + static_cast(cl.data[4]) / (cl.data[1] + cl.data[4]); + eta.c = cBottomRight; + break; + case cTopLeft: + if ((cl.data[7] + cl.data[4]) != 0) + eta.x = + static_cast(cl.data[4]) / (cl.data[3] + cl.data[4]); + if ((cl.data[7] + cl.data[4]) != 0) + eta.y = + static_cast(cl.data[7]) / (cl.data[7] + cl.data[4]); + eta.c = cTopLeft; + break; + case cTopRight: + if ((cl.data[5] + cl.data[4]) != 0) + eta.x = + static_cast(cl.data[5]) / (cl.data[5] + cl.data[4]); + if ((cl.data[7] + cl.data[4]) != 0) + eta.y = + static_cast(cl.data[7]) / (cl.data[7] + cl.data[4]); + eta.c = cTopRight; + break; + // 
no default to allow compiler to warn about missing cases } - // printf ("\n"); - - if (t2 || quad) { - - t2max = tot2[0]; - c = cBottomLeft; - for (int i = 1; i < 4; i++) { - if (tot2[i] > t2max) { - t2max = tot2[i]; - c = i; - } - } - //printf("*** %d %d %d %d -- %d\n",tot2[0],tot2[1],tot2[2],tot2[3],t2max); - if (quad) - *quad = c; - if (t2) - *t2 = t2max; - } - if (t3) - *t3 = tot3; - - if (eta2x || eta2y) { - if (eta2x) - *eta2x = 0; - if (eta2y) - *eta2y = 0; - switch (c) { - case cBottomLeft: - if (eta2x && (data[3] + data[4]) != 0) - *eta2x = static_cast(data[4]) / (data[3] + data[4]); - if (eta2y && (data[1] + data[4]) != 0) - *eta2y = static_cast(data[4]) / (data[1] + data[4]); - break; - case cBottomRight: - if (eta2x && (data[2] + data[5]) != 0) - *eta2x = static_cast(data[5]) / (data[4] + data[5]); - if (eta2y && (data[1] + data[4]) != 0) - *eta2y = static_cast(data[4]) / (data[1] + data[4]); - break; - case cTopLeft: - if (eta2x && (data[7] + data[4]) != 0) - *eta2x = static_cast(data[4]) / (data[3] + data[4]); - if (eta2y && (data[7] + data[4]) != 0) - *eta2y = static_cast(data[7]) / (data[7] + data[4]); - break; - case cTopRight: - if (eta2x && t2max != 0) - *eta2x = static_cast(data[5]) / (data[5] + data[4]); - if (eta2y && t2max != 0) - *eta2y = static_cast(data[7]) / (data[7] + data[4]); - break; - default:; - } - } - - if (eta3x || eta3y) { - if (eta3x && (data[3] + data[4] + data[5]) != 0) - *eta3x = static_cast(-data[3] + data[3 + 2]) / - (data[3] + data[4] + data[5]); - if (eta3y && (data[1] + data[4] + data[7]) != 0) - *eta3y = static_cast(-data[1] + data[2 * 3 + 1]) / - (data[1] + data[4] + data[7]); - } - - return ok; + return eta; } +Eta2 calculate_eta2(Cluster2x2 &cl) { + Eta2 eta{}; + if ((cl.data[0] + cl.data[1]) != 0) + eta.x = static_cast(cl.data[1]) / (cl.data[0] + cl.data[1]); + if ((cl.data[0] + cl.data[2]) != 0) + eta.y = static_cast(cl.data[2]) / (cl.data[0] + cl.data[2]); + eta.sum = cl.data[0] + cl.data[1] + cl.data[2]+ 
cl.data[3]; + eta.c = cBottomLeft; //TODO! This is not correct, but need to put something + return eta; +} + } // namespace aare \ No newline at end of file diff --git a/src/ClusterFile.test.cpp b/src/ClusterFile.test.cpp new file mode 100644 index 0000000..6254b5d --- /dev/null +++ b/src/ClusterFile.test.cpp @@ -0,0 +1,351 @@ +#include "aare/ClusterFile.hpp" +#include "test_config.hpp" + +#include "aare/defs.hpp" +#include +#include +#include + +using aare::Cluster; +using aare::ClusterFile; +using aare::ClusterVector; + + +TEST_CASE("Read one frame from a cluster file", "[.files]") { + //We know that the frame has 97 clusters + auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust"; + REQUIRE(std::filesystem::exists(fpath)); + + ClusterFile> f(fpath); + auto clusters = f.read_frame(); + CHECK(clusters.size() == 97); + CHECK(clusters.frame_number() == 135); + CHECK(clusters[0].x == 1); + CHECK(clusters[0].y == 200); + int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + CHECK(std::equal(std::begin(clusters[0].data), std::end(clusters[0].data), + std::begin(expected_cluster_data))); +} + + +TEST_CASE("Read one frame using ROI", "[.files]") { + // We know that the frame has 97 clusters + auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust"; + REQUIRE(std::filesystem::exists(fpath)); + + ClusterFile> f(fpath); + aare::ROI roi; + roi.xmin = 0; + roi.xmax = 50; + roi.ymin = 200; + roi.ymax = 249; + f.set_roi(roi); + auto clusters = f.read_frame(); + REQUIRE(clusters.size() == 49); + REQUIRE(clusters.frame_number() == 135); + + // Check that all clusters are within the ROI + for (size_t i = 0; i < clusters.size(); i++) { + auto c = clusters[i]; + REQUIRE(c.x >= roi.xmin); + REQUIRE(c.x <= roi.xmax); + REQUIRE(c.y >= roi.ymin); + REQUIRE(c.y <= roi.ymax); + } + + CHECK(clusters[0].x == 1); + CHECK(clusters[0].y == 200); + int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + 
CHECK(std::equal(std::begin(clusters[0].data), std::end(clusters[0].data), + std::begin(expected_cluster_data))); +} + + + +TEST_CASE("Read clusters from single frame file", "[.files]") { + + // frame_number, num_clusters [135] 97 + // [ 1 200] [0 1 2 3 4 5 6 7 8] + // [ 2 201] [ 9 10 11 12 13 14 15 16 17] + // [ 3 202] [18 19 20 21 22 23 24 25 26] + // [ 4 203] [27 28 29 30 31 32 33 34 35] + // [ 5 204] [36 37 38 39 40 41 42 43 44] + // [ 6 205] [45 46 47 48 49 50 51 52 53] + // [ 7 206] [54 55 56 57 58 59 60 61 62] + // [ 8 207] [63 64 65 66 67 68 69 70 71] + // [ 9 208] [72 73 74 75 76 77 78 79 80] + // [ 10 209] [81 82 83 84 85 86 87 88 89] + // [ 11 210] [90 91 92 93 94 95 96 97 98] + // [ 12 211] [ 99 100 101 102 103 104 105 106 107] + // [ 13 212] [108 109 110 111 112 113 114 115 116] + // [ 14 213] [117 118 119 120 121 122 123 124 125] + // [ 15 214] [126 127 128 129 130 131 132 133 134] + // [ 16 215] [135 136 137 138 139 140 141 142 143] + // [ 17 216] [144 145 146 147 148 149 150 151 152] + // [ 18 217] [153 154 155 156 157 158 159 160 161] + // [ 19 218] [162 163 164 165 166 167 168 169 170] + // [ 20 219] [171 172 173 174 175 176 177 178 179] + // [ 21 220] [180 181 182 183 184 185 186 187 188] + // [ 22 221] [189 190 191 192 193 194 195 196 197] + // [ 23 222] [198 199 200 201 202 203 204 205 206] + // [ 24 223] [207 208 209 210 211 212 213 214 215] + // [ 25 224] [216 217 218 219 220 221 222 223 224] + // [ 26 225] [225 226 227 228 229 230 231 232 233] + // [ 27 226] [234 235 236 237 238 239 240 241 242] + // [ 28 227] [243 244 245 246 247 248 249 250 251] + // [ 29 228] [252 253 254 255 256 257 258 259 260] + // [ 30 229] [261 262 263 264 265 266 267 268 269] + // [ 31 230] [270 271 272 273 274 275 276 277 278] + // [ 32 231] [279 280 281 282 283 284 285 286 287] + // [ 33 232] [288 289 290 291 292 293 294 295 296] + // [ 34 233] [297 298 299 300 301 302 303 304 305] + // [ 35 234] [306 307 308 309 310 311 312 313 314] + // [ 36 235] [315 316 317 
318 319 320 321 322 323] + // [ 37 236] [324 325 326 327 328 329 330 331 332] + // [ 38 237] [333 334 335 336 337 338 339 340 341] + // [ 39 238] [342 343 344 345 346 347 348 349 350] + // [ 40 239] [351 352 353 354 355 356 357 358 359] + // [ 41 240] [360 361 362 363 364 365 366 367 368] + // [ 42 241] [369 370 371 372 373 374 375 376 377] + // [ 43 242] [378 379 380 381 382 383 384 385 386] + // [ 44 243] [387 388 389 390 391 392 393 394 395] + // [ 45 244] [396 397 398 399 400 401 402 403 404] + // [ 46 245] [405 406 407 408 409 410 411 412 413] + // [ 47 246] [414 415 416 417 418 419 420 421 422] + // [ 48 247] [423 424 425 426 427 428 429 430 431] + // [ 49 248] [432 433 434 435 436 437 438 439 440] + // [ 50 249] [441 442 443 444 445 446 447 448 449] + // [ 51 250] [450 451 452 453 454 455 456 457 458] + // [ 52 251] [459 460 461 462 463 464 465 466 467] + // [ 53 252] [468 469 470 471 472 473 474 475 476] + // [ 54 253] [477 478 479 480 481 482 483 484 485] + // [ 55 254] [486 487 488 489 490 491 492 493 494] + // [ 56 255] [495 496 497 498 499 500 501 502 503] + // [ 57 256] [504 505 506 507 508 509 510 511 512] + // [ 58 257] [513 514 515 516 517 518 519 520 521] + // [ 59 258] [522 523 524 525 526 527 528 529 530] + // [ 60 259] [531 532 533 534 535 536 537 538 539] + // [ 61 260] [540 541 542 543 544 545 546 547 548] + // [ 62 261] [549 550 551 552 553 554 555 556 557] + // [ 63 262] [558 559 560 561 562 563 564 565 566] + // [ 64 263] [567 568 569 570 571 572 573 574 575] + // [ 65 264] [576 577 578 579 580 581 582 583 584] + // [ 66 265] [585 586 587 588 589 590 591 592 593] + // [ 67 266] [594 595 596 597 598 599 600 601 602] + // [ 68 267] [603 604 605 606 607 608 609 610 611] + // [ 69 268] [612 613 614 615 616 617 618 619 620] + // [ 70 269] [621 622 623 624 625 626 627 628 629] + // [ 71 270] [630 631 632 633 634 635 636 637 638] + // [ 72 271] [639 640 641 642 643 644 645 646 647] + // [ 73 272] [648 649 650 651 652 653 654 655 656] + // [ 74 
273] [657 658 659 660 661 662 663 664 665] + // [ 75 274] [666 667 668 669 670 671 672 673 674] + // [ 76 275] [675 676 677 678 679 680 681 682 683] + // [ 77 276] [684 685 686 687 688 689 690 691 692] + // [ 78 277] [693 694 695 696 697 698 699 700 701] + // [ 79 278] [702 703 704 705 706 707 708 709 710] + // [ 80 279] [711 712 713 714 715 716 717 718 719] + // [ 81 280] [720 721 722 723 724 725 726 727 728] + // [ 82 281] [729 730 731 732 733 734 735 736 737] + // [ 83 282] [738 739 740 741 742 743 744 745 746] + // [ 84 283] [747 748 749 750 751 752 753 754 755] + // [ 85 284] [756 757 758 759 760 761 762 763 764] + // [ 86 285] [765 766 767 768 769 770 771 772 773] + // [ 87 286] [774 775 776 777 778 779 780 781 782] + // [ 88 287] [783 784 785 786 787 788 789 790 791] + // [ 89 288] [792 793 794 795 796 797 798 799 800] + // [ 90 289] [801 802 803 804 805 806 807 808 809] + // [ 91 290] [810 811 812 813 814 815 816 817 818] + // [ 92 291] [819 820 821 822 823 824 825 826 827] + // [ 93 292] [828 829 830 831 832 833 834 835 836] + // [ 94 293] [837 838 839 840 841 842 843 844 845] + // [ 95 294] [846 847 848 849 850 851 852 853 854] + // [ 96 295] [855 856 857 858 859 860 861 862 863] + // [ 97 296] [864 865 866 867 868 869 870 871 872] + + auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust"; + + REQUIRE(std::filesystem::exists(fpath)); + + SECTION("Read fewer clusters than available") { + ClusterFile> f(fpath); + auto clusters = f.read_clusters(50); + REQUIRE(clusters.size() == 50); + REQUIRE(clusters.frame_number() == 135); + int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + REQUIRE(clusters[0].x == 1); + REQUIRE(clusters[0].y == 200); + CHECK(std::equal(std::begin(clusters[0].data), + std::end(clusters[0].data), + std::begin(expected_cluster_data))); + } + SECTION("Read more clusters than available") { + ClusterFile> f(fpath); + // 100 is the maximum number of clusters read + auto clusters = f.read_clusters(100); + 
REQUIRE(clusters.size() == 97); + REQUIRE(clusters.frame_number() == 135); + int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + REQUIRE(clusters[0].x == 1); + REQUIRE(clusters[0].y == 200); + CHECK(std::equal(std::begin(clusters[0].data), + std::end(clusters[0].data), + std::begin(expected_cluster_data))); + } + SECTION("Read all clusters") { + ClusterFile> f(fpath); + auto clusters = f.read_clusters(97); + REQUIRE(clusters.size() == 97); + REQUIRE(clusters.frame_number() == 135); + REQUIRE(clusters[0].x == 1); + REQUIRE(clusters[0].y == 200); + int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + CHECK(std::equal(std::begin(clusters[0].data), + std::end(clusters[0].data), + std::begin(expected_cluster_data))); + } +} + +TEST_CASE("Read clusters from single frame file with ROI", "[.files]") { + auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust"; + REQUIRE(std::filesystem::exists(fpath)); + + ClusterFile> f(fpath); + + aare::ROI roi; + roi.xmin = 0; + roi.xmax = 50; + roi.ymin = 200; + roi.ymax = 249; + f.set_roi(roi); + + auto clusters = f.read_clusters(10); + + CHECK(clusters.size() == 10); + CHECK(clusters.frame_number() == 135); + CHECK(clusters[0].x == 1); + CHECK(clusters[0].y == 200); + int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + CHECK(std::equal(std::begin(clusters[0].data), std::end(clusters[0].data), + std::begin(expected_cluster_data))); +} + +TEST_CASE("Read cluster from multiple frame file", "[.files]") { + + using ClusterType = Cluster; + + auto fpath = + test_data_path() / "clust" / "Two_frames_2x2double_test_clusters.clust"; + + REQUIRE(std::filesystem::exists(fpath)); + + // Two_frames_2x2double_test_clusters.clust + // frame number, num_clusters 0, 4 + //[10, 20], {0. 
,0., 0., 0.} + //[11, 30], {1., 1., 1., 1.} + //[12, 40], {2., 2., 2., 2.} + //[13, 50], {3., 3., 3., 3.} + // 1,4 + //[10, 20], {4., 4., 4., 4.} + //[11, 30], {5., 5., 5., 5.} + //[12, 40], {6., 6., 6., 6.} + //[13, 50], {7., 7., 7., 7.} + + SECTION("Read clusters from both frames") { + ClusterFile f(fpath); + auto clusters = f.read_clusters(2); + REQUIRE(clusters.size() == 2); + REQUIRE(clusters.frame_number() == 0); + + auto clusters1 = f.read_clusters(3); + + REQUIRE(clusters1.size() == 3); + REQUIRE(clusters1.frame_number() == 1); + } + + SECTION("Read all clusters") { + ClusterFile f(fpath); + auto clusters = f.read_clusters(8); + REQUIRE(clusters.size() == 8); + REQUIRE(clusters.frame_number() == 1); + } + + SECTION("Read clusters from one frame") { + ClusterFile f(fpath); + auto clusters = f.read_clusters(2); + REQUIRE(clusters.size() == 2); + REQUIRE(clusters.frame_number() == 0); + + auto clusters1 = f.read_clusters(1); + + REQUIRE(clusters1.size() == 1); + REQUIRE(clusters1.frame_number() == 0); + } +} + +TEST_CASE("Write cluster with potential padding", "[.files][.ClusterFile]") { + + using ClusterType = Cluster; + + REQUIRE(std::filesystem::exists(test_data_path() / "clust")); + + auto fpath = test_data_path() / "clust" / "single_frame_2_clusters.clust"; + + ClusterFile file(fpath, 1000, "w"); + + ClusterVector clustervec(2); + int16_t coordinate = 5; + clustervec.push_back(ClusterType{ + coordinate, coordinate, {0., 0., 0., 0., 0., 0., 0., 0., 0.}}); + clustervec.push_back(ClusterType{ + coordinate, coordinate, {0., 0., 0., 0., 0., 0., 0., 0., 0.}}); + + file.write_frame(clustervec); + + file.close(); + + file.open("r"); + + auto read_cluster_vector = file.read_frame(); + + CHECK(read_cluster_vector.size() == 2); + CHECK(read_cluster_vector.frame_number() == 0); + + CHECK(read_cluster_vector[0].x == clustervec[0].x); + CHECK(read_cluster_vector[0].y == clustervec[0].y); + CHECK(std::equal( + clustervec[0].data.begin(), clustervec[0].data.end(), + 
read_cluster_vector[0].data.begin(), [](double a, double b) { + return std::abs(a - b) < std::numeric_limits::epsilon(); + })); + + CHECK(read_cluster_vector[1].x == clustervec[1].x); + CHECK(read_cluster_vector[1].y == clustervec[1].y); + CHECK(std::equal( + clustervec[1].data.begin(), clustervec[1].data.end(), + read_cluster_vector[1].data.begin(), [](double a, double b) { + return std::abs(a - b) < std::numeric_limits::epsilon(); + })); +} + +TEST_CASE("Read frame and modify cluster data", "[.files][.ClusterFile]") { + auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust"; + REQUIRE(std::filesystem::exists(fpath)); + + ClusterFile> f(fpath); + + auto clusters = f.read_frame(); + CHECK(clusters.size() == 97); + CHECK(clusters.frame_number() == 135); + + int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + clusters.push_back( + Cluster{0, 0, {0, 1, 2, 3, 4, 5, 6, 7, 8}}); + + CHECK(clusters.size() == 98); + CHECK(clusters[0].x == 1); + CHECK(clusters[0].y == 200); + + CHECK(std::equal(std::begin(clusters[0].data), std::end(clusters[0].data), + std::begin(expected_cluster_data))); +} diff --git a/src/ClusterFinder.test.cpp b/src/ClusterFinder.test.cpp index 768e632..8989581 100644 --- a/src/ClusterFinder.test.cpp +++ b/src/ClusterFinder.test.cpp @@ -1,19 +1,18 @@ #include "aare/ClusterFinder.hpp" #include "aare/Pedestal.hpp" -#include #include +#include #include #include using namespace aare; -//TODO! Find a way to test the cluster finder - - +// TODO! 
Find a way to test the cluster finder // class ClusterFinderUnitTest : public ClusterFinder { // public: -// ClusterFinderUnitTest(int cluster_sizeX, int cluster_sizeY, double nSigma = 5.0, double threshold = 0.0) +// ClusterFinderUnitTest(int cluster_sizeX, int cluster_sizeY, double nSigma +// = 5.0, double threshold = 0.0) // : ClusterFinder(cluster_sizeX, cluster_sizeY, nSigma, threshold) {} // double get_c2() { return c2; } // double get_c3() { return c3; } @@ -37,8 +36,8 @@ using namespace aare; // REQUIRE_THAT(cf.get_c3(), Catch::Matchers::WithinRel(c3, 1e-9)); // } -TEST_CASE("Construct a cluster finder"){ - ClusterFinder clusterFinder({400,400}, {3,3}); +TEST_CASE("Construct a cluster finder") { + ClusterFinder clusterFinder({400, 400}); // REQUIRE(clusterFinder.get_cluster_sizeX() == 3); // REQUIRE(clusterFinder.get_cluster_sizeY() == 3); // REQUIRE(clusterFinder.get_threshold() == 1); @@ -49,16 +48,17 @@ TEST_CASE("Construct a cluster finder"){ // aare::Pedestal pedestal(10, 10, 5); // NDArray frame({10, 10}); // frame = 0; -// ClusterFinder clusterFinder(3, 3, 1, 1); // 3x3 cluster, 1 nSigma, 1 threshold +// ClusterFinder clusterFinder(3, 3, 1, 1); // 3x3 cluster, 1 nSigma, 1 +// threshold -// auto clusters = clusterFinder.find_clusters_without_threshold(frame.span(), pedestal); +// auto clusters = +// clusterFinder.find_clusters_without_threshold(frame.span(), pedestal); // REQUIRE(clusters.size() == 0); // frame(5, 5) = 10; -// clusters = clusterFinder.find_clusters_without_threshold(frame.span(), pedestal); -// REQUIRE(clusters.size() == 1); -// REQUIRE(clusters[0].x == 5); +// clusters = clusterFinder.find_clusters_without_threshold(frame.span(), +// pedestal); REQUIRE(clusters.size() == 1); REQUIRE(clusters[0].x == 5); // REQUIRE(clusters[0].y == 5); // for (int i = 0; i < 3; i++) { // for (int j = 0; j < 3; j++) { diff --git a/src/ClusterFinderMT.test.cpp b/src/ClusterFinderMT.test.cpp new file mode 100644 index 0000000..9289592 --- /dev/null +++ 
b/src/ClusterFinderMT.test.cpp @@ -0,0 +1,99 @@ + +#include "aare/ClusterFinderMT.hpp" +#include "aare/Cluster.hpp" +#include "aare/ClusterCollector.hpp" +#include "aare/File.hpp" + +#include "test_config.hpp" + +#include +#include +#include + +using namespace aare; + +// wrapper function to access private member variables for testing +template +class ClusterFinderMTWrapper + : public ClusterFinderMT { + + public: + ClusterFinderMTWrapper(Shape<2> image_size, PEDESTAL_TYPE nSigma = 5.0, + size_t capacity = 2000, size_t n_threads = 3) + : ClusterFinderMT( + image_size, nSigma, capacity, n_threads) {} + + size_t get_m_input_queues_size() const { + return this->m_input_queues.size(); + } + + size_t get_m_output_queues_size() const { + return this->m_output_queues.size(); + } + + size_t get_m_cluster_finders_size() const { + return this->m_cluster_finders.size(); + } + + bool m_output_queues_are_empty() const { + for (auto &queue : this->m_output_queues) { + if (!queue->isEmpty()) + return false; + } + return true; + } + + bool m_input_queues_are_empty() const { + for (auto &queue : this->m_input_queues) { + if (!queue->isEmpty()) + return false; + } + return true; + } + + bool m_sink_is_empty() const { return this->m_sink.isEmpty(); } + + size_t m_sink_size() const { return this->m_sink.sizeGuess(); } +}; + +TEST_CASE("multithreaded cluster finder", "[.files][.ClusterFinder]") { + auto fpath = "/mnt/sls_det_storage/matterhorn_data/aare_test_data/" + "Moench03new/cu_half_speed_master_4.json"; + + File file(fpath); + + size_t n_threads = 2; + size_t n_frames_pd = 10; + + using ClusterType = Cluster; + + ClusterFinderMTWrapper cf( + {static_cast(file.rows()), static_cast(file.cols())}, + 5, 2000, n_threads); // no idea what frame type is!!! 
default uint16_t + + CHECK(cf.get_m_input_queues_size() == n_threads); + CHECK(cf.get_m_output_queues_size() == n_threads); + CHECK(cf.get_m_cluster_finders_size() == n_threads); + CHECK(cf.m_output_queues_are_empty() == true); + CHECK(cf.m_input_queues_are_empty() == true); + + for (size_t i = 0; i < n_frames_pd; ++i) { + cf.find_clusters(file.read_frame().view()); + } + + cf.stop(); + + CHECK(cf.m_output_queues_are_empty() == true); + CHECK(cf.m_input_queues_are_empty() == true); + + CHECK(cf.m_sink_size() == n_frames_pd); + ClusterCollector clustercollector(&cf); + + clustercollector.stop(); + + CHECK(cf.m_sink_size() == 0); + + auto clustervec = clustercollector.steal_clusters(); + // CHECK(clustervec.size() == ) //dont know how many clusters to expect +} diff --git a/src/ClusterVector.test.cpp b/src/ClusterVector.test.cpp new file mode 100644 index 0000000..1214b6b --- /dev/null +++ b/src/ClusterVector.test.cpp @@ -0,0 +1,268 @@ +#include "aare/ClusterVector.hpp" +#include + +#include +#include +#include + +using aare::Cluster; +using aare::ClusterVector; + +TEST_CASE("item_size return the size of the cluster stored") { + using C1 = Cluster; + ClusterVector cv(4); + CHECK(cv.item_size() == sizeof(C1)); + + // Sanity check + // 2*2*4 = 16 bytes of data for the cluster + // 2*2 = 4 bytes for the x and y coordinates + REQUIRE(cv.item_size() == 20); + + using C2 = Cluster; + ClusterVector cv2(4); + CHECK(cv2.item_size() == sizeof(C2)); + + using C3 = Cluster; + ClusterVector cv3(4); + CHECK(cv3.item_size() == sizeof(C3)); + + using C4 = Cluster; + ClusterVector cv4(4); + CHECK(cv4.item_size() == sizeof(C4)); + + using C5 = Cluster; + ClusterVector cv5(4); + CHECK(cv5.item_size() == sizeof(C5)); + + using C6 = Cluster; + ClusterVector cv6(4); + CHECK(cv6.item_size() == sizeof(C6)); + + using C7 = Cluster; + ClusterVector cv7(4); + CHECK(cv7.item_size() == sizeof(C7)); +} + +TEST_CASE("ClusterVector 2x2 int32_t capacity 4, push back then read", + "[.ClusterVector]") 
{ + + ClusterVector> cv(4); + REQUIRE(cv.capacity() == 4); + REQUIRE(cv.size() == 0); + REQUIRE(cv.cluster_size_x() == 2); + REQUIRE(cv.cluster_size_y() == 2); + // int16_t, int16_t, 2x2 int32_t = 20 bytes + REQUIRE(cv.item_size() == 20); + + // Create a cluster and push back into the vector + Cluster c1 = {1, 2, {3, 4, 5, 6}}; + cv.push_back(c1); + REQUIRE(cv.size() == 1); + REQUIRE(cv.capacity() == 4); + + auto c2 = cv[0]; + + // Check that the data is the same + REQUIRE(c1.x == c2.x); + REQUIRE(c1.y == c2.y); + for (size_t i = 0; i < 4; i++) { + REQUIRE(c1.data[i] == c2.data[i]); + } +} + +TEST_CASE("Summing 3x1 clusters of int64", "[.ClusterVector]") { + ClusterVector> cv(2); + REQUIRE(cv.capacity() == 2); + REQUIRE(cv.size() == 0); + REQUIRE(cv.cluster_size_x() == 3); + REQUIRE(cv.cluster_size_y() == 1); + + // Create a cluster and push back into the vector + Cluster c1 = {1, 2, {3, 4, 5}}; + cv.push_back(c1); + REQUIRE(cv.capacity() == 2); + REQUIRE(cv.size() == 1); + + Cluster c2 = {6, 7, {8, 9, 10}}; + cv.push_back(c2); + REQUIRE(cv.capacity() == 2); + REQUIRE(cv.size() == 2); + + Cluster c3 = {11, 12, {13, 14, 15}}; + cv.push_back(c3); + REQUIRE(cv.capacity() == 4); + REQUIRE(cv.size() == 3); + + /* + auto sums = cv.sum(); + REQUIRE(sums.size() == 3); + REQUIRE(sums[0] == 12); + REQUIRE(sums[1] == 27); + REQUIRE(sums[2] == 42); + */ +} + +TEST_CASE("Storing floats", "[.ClusterVector]") { + ClusterVector> cv(10); + REQUIRE(cv.capacity() == 10); + REQUIRE(cv.size() == 0); + REQUIRE(cv.cluster_size_x() == 2); + REQUIRE(cv.cluster_size_y() == 4); + + // Create a cluster and push back into the vector + Cluster c1 = {1, 2, {3.0, 4.0, 5.0, 6.0, 3.0, 4.0, 5.0, 6.0}}; + cv.push_back(c1); + REQUIRE(cv.capacity() == 10); + REQUIRE(cv.size() == 1); + + Cluster c2 = { + 6, 7, {8.0, 9.0, 10.0, 11.0, 8.0, 9.0, 10.0, 11.0}}; + cv.push_back(c2); + REQUIRE(cv.capacity() == 10); + REQUIRE(cv.size() == 2); + + /* + auto sums = cv.sum(); + REQUIRE(sums.size() == 2); + 
REQUIRE_THAT(sums[0], Catch::Matchers::WithinAbs(36.0, 1e-6)); + REQUIRE_THAT(sums[1], Catch::Matchers::WithinAbs(76.0, 1e-6)); + */ +} + +TEST_CASE("Push back more than initial capacity", "[.ClusterVector]") { + + ClusterVector> cv(2); + auto initial_data = cv.data(); + Cluster c1 = {1, 2, {3, 4, 5, 6}}; + cv.push_back(c1); + REQUIRE(cv.size() == 1); + REQUIRE(cv.capacity() == 2); + + Cluster c2 = {6, 7, {8, 9, 10, 11}}; + cv.push_back(c2); + REQUIRE(cv.size() == 2); + REQUIRE(cv.capacity() == 2); + + Cluster c3 = {11, 12, {13, 14, 15, 16}}; + cv.push_back(c3); + REQUIRE(cv.size() == 3); + REQUIRE(cv.capacity() == 4); + + Cluster *ptr = + reinterpret_cast *>(cv.data()); + REQUIRE(ptr[0].x == 1); + REQUIRE(ptr[0].y == 2); + REQUIRE(ptr[1].x == 6); + REQUIRE(ptr[1].y == 7); + REQUIRE(ptr[2].x == 11); + REQUIRE(ptr[2].y == 12); + + // We should have allocated a new buffer, since we outgrew the initial + // capacity + REQUIRE(initial_data != cv.data()); +} + +TEST_CASE("Concatenate two cluster vectors where the first has enough capacity", + "[.ClusterVector]") { + ClusterVector> cv1(12); + Cluster c1 = {1, 2, {3, 4, 5, 6}}; + cv1.push_back(c1); + Cluster c2 = {6, 7, {8, 9, 10, 11}}; + cv1.push_back(c2); + + ClusterVector> cv2(2); + Cluster c3 = {11, 12, {13, 14, 15, 16}}; + cv2.push_back(c3); + Cluster c4 = {16, 17, {18, 19, 20, 21}}; + cv2.push_back(c4); + + cv1 += cv2; + REQUIRE(cv1.size() == 4); + REQUIRE(cv1.capacity() == 12); + + Cluster *ptr = + reinterpret_cast *>(cv1.data()); + REQUIRE(ptr[0].x == 1); + REQUIRE(ptr[0].y == 2); + REQUIRE(ptr[1].x == 6); + REQUIRE(ptr[1].y == 7); + REQUIRE(ptr[2].x == 11); + REQUIRE(ptr[2].y == 12); + REQUIRE(ptr[3].x == 16); + REQUIRE(ptr[3].y == 17); +} + +TEST_CASE("Concatenate two cluster vectors where we need to allocate", + "[.ClusterVector]") { + ClusterVector> cv1(2); + Cluster c1 = {1, 2, {3, 4, 5, 6}}; + cv1.push_back(c1); + Cluster c2 = {6, 7, {8, 9, 10, 11}}; + cv1.push_back(c2); + + ClusterVector> cv2(2); + Cluster 
c3 = {11, 12, {13, 14, 15, 16}}; + cv2.push_back(c3); + Cluster c4 = {16, 17, {18, 19, 20, 21}}; + cv2.push_back(c4); + + cv1 += cv2; + REQUIRE(cv1.size() == 4); + REQUIRE(cv1.capacity() == 4); + + Cluster *ptr = + reinterpret_cast *>(cv1.data()); + REQUIRE(ptr[0].x == 1); + REQUIRE(ptr[0].y == 2); + REQUIRE(ptr[1].x == 6); + REQUIRE(ptr[1].y == 7); + REQUIRE(ptr[2].x == 11); + REQUIRE(ptr[2].y == 12); + REQUIRE(ptr[3].x == 16); + REQUIRE(ptr[3].y == 17); +} + +struct ClusterTestData { + uint8_t ClusterSizeX; + uint8_t ClusterSizeY; + std::vector index_map_x; + std::vector index_map_y; +}; + +TEST_CASE("Gain Map Calculation Index Map", "[.ClusterVector][.gain_map]") { + + auto clustertestdata = GENERATE( + ClusterTestData{3, + 3, + {-1, 0, 1, -1, 0, 1, -1, 0, 1}, + {-1, -1, -1, 0, 0, 0, 1, 1, 1}}, + ClusterTestData{ + 4, + 4, + {-2, -1, 0, 1, -2, -1, 0, 1, -2, -1, 0, 1, -2, -1, 0, 1}, + {-2, -2, -2, -2, -1, -1, -1, -1, 0, 0, 0, 0, 1, 1, 1, 1}}, + ClusterTestData{2, 2, {-1, 0, -1, 0}, {-1, -1, 0, 0}}, + ClusterTestData{5, + 5, + {-2, -1, 0, 1, 2, -2, -1, 0, 1, 2, -2, -1, 0, + 1, 2, -2, -1, 0, 1, 2, -2, -1, 0, 1, 2}, + {-2, -2, -2, -2, -2, -1, -1, -1, -1, -1, 0, 0, 0, + 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2}}); + + uint8_t ClusterSizeX = clustertestdata.ClusterSizeX; + uint8_t ClusterSizeY = clustertestdata.ClusterSizeY; + + std::vector index_map_x(ClusterSizeX * ClusterSizeY); + std::vector index_map_y(ClusterSizeX * ClusterSizeY); + + int64_t index_cluster_center_x = ClusterSizeX / 2; + int64_t index_cluster_center_y = ClusterSizeY / 2; + + for (size_t j = 0; j < ClusterSizeX * ClusterSizeY; j++) { + index_map_x[j] = j % ClusterSizeX - index_cluster_center_x; + index_map_y[j] = j / ClusterSizeX - index_cluster_center_y; + } + + CHECK(index_map_x == clustertestdata.index_map_x); + CHECK(index_map_y == clustertestdata.index_map_y); +} \ No newline at end of file diff --git a/src/Dtype.cpp b/src/Dtype.cpp index 565d509..b818ea3 100644 --- a/src/Dtype.cpp +++ 
b/src/Dtype.cpp @@ -70,7 +70,7 @@ uint8_t Dtype::bitdepth() const { /** * @brief Get the number of bytes of the data type */ -size_t Dtype::bytes() const { return bitdepth() / 8; } +size_t Dtype::bytes() const { return bitdepth() / bits_per_byte; } /** * @brief Construct a DType object from a TypeIndex diff --git a/src/File.cpp b/src/File.cpp index 4573610..42813bc 100644 --- a/src/File.cpp +++ b/src/File.cpp @@ -2,6 +2,7 @@ #ifdef HDF5_FOUND #include "aare/Hdf5File.hpp" #endif +#include "aare/JungfrauDataFile.hpp" #include "aare/NumpyFile.hpp" #include "aare/RawFile.hpp" @@ -40,7 +41,9 @@ File::File(const std::filesystem::path &fname, const std::string &mode, throw std::runtime_error("Enable HDF5 compile option: AARE_HDF5=ON"); } #endif - else { + else if(fname.extension() == ".dat"){ + file_impl = std::make_unique(fname); + } else { throw std::runtime_error("Unsupported file type"); } } @@ -58,6 +61,8 @@ File& File::operator=(File &&other) noexcept { return *this; } +// void File::close() { file_impl->close(); } + Frame File::read_frame() { return file_impl->read_frame(); } Frame File::read_frame(size_t frame_index) { return file_impl->read_frame(frame_index); @@ -71,6 +76,8 @@ void File::read_into(std::byte *image_buf) { file_impl->read_into(image_buf); } void File::read_into(std::byte *image_buf, size_t n_frames) { file_impl->read_into(image_buf, n_frames); } + +size_t File::frame_number() { return file_impl->frame_number(tell()); } size_t File::frame_number(size_t frame_index) { return file_impl->frame_number(frame_index); } @@ -82,7 +89,7 @@ size_t File::tell() const { return file_impl->tell(); } size_t File::rows() const { return file_impl->rows(); } size_t File::cols() const { return file_impl->cols(); } size_t File::bitdepth() const { return file_impl->bitdepth(); } -size_t File::bytes_per_pixel() const { return file_impl->bitdepth() / 8; } +size_t File::bytes_per_pixel() const { return file_impl->bitdepth() / bits_per_byte; } DetectorType 
File::detector_type() const { return file_impl->detector_type(); } diff --git a/src/FilePtr.cpp b/src/FilePtr.cpp new file mode 100644 index 0000000..e3cdb4b --- /dev/null +++ b/src/FilePtr.cpp @@ -0,0 +1,44 @@ + +#include "aare/FilePtr.hpp" +#include +#include +#include + +namespace aare { + +FilePtr::FilePtr(const std::filesystem::path& fname, const std::string& mode = "rb") { + fp_ = fopen(fname.c_str(), mode.c_str()); + if (!fp_) + throw std::runtime_error(fmt::format("Could not open: {}", fname.c_str())); +} + +FilePtr::FilePtr(FilePtr &&other) { std::swap(fp_, other.fp_); } + +FilePtr &FilePtr::operator=(FilePtr &&other) { + std::swap(fp_, other.fp_); + return *this; +} + +FILE *FilePtr::get() { return fp_; } + +ssize_t FilePtr::tell() { + auto pos = ftell(fp_); + if (pos == -1) + throw std::runtime_error(fmt::format("Error getting file position: {}", error_msg())); + return pos; +} +FilePtr::~FilePtr() { + if (fp_) + fclose(fp_); // check? +} + +std::string FilePtr::error_msg(){ + if (feof(fp_)) { + return "End of file reached"; + } + if (ferror(fp_)) { + return fmt::format("Error reading file: {}", std::strerror(errno)); + } + return ""; +} +} // namespace aare diff --git a/src/Fit.cpp b/src/Fit.cpp new file mode 100644 index 0000000..25000de --- /dev/null +++ b/src/Fit.cpp @@ -0,0 +1,525 @@ +#include "aare/Fit.hpp" +#include "aare/utils/task.hpp" +#include "aare/utils/par.hpp" +#include +#include +#include + +#include + + +namespace aare { + +namespace func { + +double gaus(const double x, const double *par) { + return par[0] * exp(-pow(x - par[1], 2) / (2 * pow(par[2], 2))); +} + +NDArray gaus(NDView x, NDView par) { + NDArray y({x.shape(0)}, 0); + for (ssize_t i = 0; i < x.size(); i++) { + y(i) = gaus(x(i), par.data()); + } + return y; +} + +double pol1(const double x, const double *par) { return par[0] * x + par[1]; } + +NDArray pol1(NDView x, NDView par) { + NDArray y({x.shape()}, 0); + for (ssize_t i = 0; i < x.size(); i++) { + y(i) = pol1(x(i), 
par.data()); + } + return y; +} + +double scurve(const double x, const double * par) { + return (par[0] + par[1] * x) + 0.5 * (1 + erf((x - par[2]) / (sqrt(2) * par[3]))) * (par[4] + par[5] * (x - par[2])); +} + +NDArray scurve(NDView x, NDView par) { + NDArray y({x.shape()}, 0); + for (ssize_t i = 0; i < x.size(); i++) { + y(i) = scurve(x(i), par.data()); + } + return y; +} + +double scurve2(const double x, const double * par) { + return (par[0] + par[1] * x) + 0.5 * (1 - erf((x - par[2]) / (sqrt(2) * par[3]))) * (par[4] + par[5] * (x - par[2])); +} + +NDArray scurve2(NDView x, NDView par) { + NDArray y({x.shape()}, 0); + for (ssize_t i = 0; i < x.size(); i++) { + y(i) = scurve2(x(i), par.data()); + } + return y; +} + +} // namespace func + +NDArray fit_gaus(NDView x, NDView y) { + NDArray result = gaus_init_par(x, y); + lm_status_struct status; + + lmcurve(result.size(), result.data(), x.size(), x.data(), y.data(), + aare::func::gaus, &lm_control_double, &status); + + return result; +} + +NDArray fit_gaus(NDView x, NDView y, + int n_threads) { + NDArray result({y.shape(0), y.shape(1), 3}, 0); + + auto process = [&x, &y, &result](ssize_t first_row, ssize_t last_row) { + for (ssize_t row = first_row; row < last_row; row++) { + for (ssize_t col = 0; col < y.shape(1); col++) { + NDView values(&y(row, col, 0), {y.shape(2)}); + auto res = fit_gaus(x, values); + result(row, col, 0) = res(0); + result(row, col, 1) = res(1); + result(row, col, 2) = res(2); + } + } + }; + + auto tasks = split_task(0, y.shape(0), n_threads); + RunInParallel(process, tasks); + return result; +} + +std::array gaus_init_par(const NDView x, const NDView y) { + std::array start_par{0, 0, 0}; + auto e = std::max_element(y.begin(), y.end()); + auto idx = std::distance(y.begin(), e); + + start_par[0] = *e; // For amplitude we use the maximum value + start_par[1] = + x[idx]; // For the mean we use the x value of the maximum value + + // For sigma we estimate the fwhm and divide by 2.35 + // assuming 
equally spaced x values + auto delta = x[1] - x[0]; + start_par[2] = + std::count_if(y.begin(), y.end(), + [e](double val) { return val > *e / 2; }) * + delta / 2.35; + + return start_par; +} + + +std::array pol1_init_par(const NDView x, const NDView y){ + // Estimate the initial parameters for the fit + std::array start_par{0, 0}; + + + auto y2 = std::max_element(y.begin(), y.end()); + auto x2 = x[std::distance(y.begin(), y2)]; + auto y1 = std::min_element(y.begin(), y.end()); + auto x1 = x[std::distance(y.begin(), y1)]; + + start_par[0] = + (*y2 - *y1) / (x2 - x1); // For amplitude we use the maximum value + start_par[1] = + *y1 - ((*y2 - *y1) / (x2 - x1)) * + x1; // For the mean we use the x value of the maximum value + return start_par; +} + +void fit_gaus(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, + double &chi2) { + + // Check that we have the correct sizes + if (y.size() != x.size() || y.size() != y_err.size() || + par_out.size() != 3 || par_err_out.size() != 3) { + throw std::runtime_error("Data, x, data_err must have the same size " + "and par_out, par_err_out must have size 3"); + } + + + // /* Collection of output parameters for status info. */ + // typedef struct { + // double fnorm; /* norm of the residue vector fvec. */ + // int nfev; /* actual number of iterations. */ + // int outcome; /* Status indicator. Nonnegative values are used as + // index + // for the message text lm_infmsg, set in lmmin.c. */ + // int userbreak; /* Set when function evaluation requests termination. + // */ + // } lm_status_struct; + + + lm_status_struct status; + par_out = gaus_init_par(x, y); + std::array cov{0, 0, 0, 0, 0, 0, 0 , 0 , 0}; + + // void lmcurve2( const int n_par, double *par, double *parerr, double *covar, const int m_dat, const double *t, const double *y, const double *dy, double (*f)( const double ti, const double *par ), const lm_control_struct *control, lm_status_struct *status); + // n_par - Number of free variables. 
Length of parameter vector par. + // par - Parameter vector. On input, it must contain a reasonable guess. On output, it contains the solution found to minimize ||r||. + // parerr - Parameter uncertainties vector. Array of length n_par or NULL. On output, unless it or covar is NULL, it contains the weighted parameter uncertainties for the found parameters. + // covar - Covariance matrix. Array of length n_par * n_par or NULL. On output, unless it is NULL, it contains the covariance matrix. + // m_dat - Number of data points. Length of vectors t, y, dy. Must statisfy n_par <= m_dat. + // t - Array of length m_dat. Contains the abcissae (time, or "x") for which function f will be evaluated. + // y - Array of length m_dat. Contains the ordinate values that shall be fitted. + // dy - Array of length m_dat. Contains the standard deviations of the values y. + // f - A user-supplied parametric function f(ti;par). + // control - Parameter collection for tuning the fit procedure. In most cases, the default &lm_control_double is adequate. If f is only computed with single-precision accuracy, &lm_control_float should be used. Parameters are explained in lmmin2(3). + // status - A record used to return information about the minimization process: For details, see lmmin2(3). 
+ + lmcurve2(par_out.size(), par_out.data(), par_err_out.data(), cov.data(), + x.size(), x.data(), y.data(), y_err.data(), aare::func::gaus, + &lm_control_double, &status); + + // Calculate chi2 + chi2 = 0; + for (ssize_t i = 0; i < y.size(); i++) { + chi2 += std::pow((y(i) - func::gaus(x(i), par_out.data())) / y_err(i), 2); + } +} + +void fit_gaus(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, NDView chi2_out, + + int n_threads) { + + auto process = [&](ssize_t first_row, ssize_t last_row) { + for (ssize_t row = first_row; row < last_row; row++) { + for (ssize_t col = 0; col < y.shape(1); col++) { + NDView y_view(&y(row, col, 0), {y.shape(2)}); + NDView y_err_view(&y_err(row, col, 0), + {y_err.shape(2)}); + NDView par_out_view(&par_out(row, col, 0), + {par_out.shape(2)}); + NDView par_err_out_view(&par_err_out(row, col, 0), + {par_err_out.shape(2)}); + + fit_gaus(x, y_view, y_err_view, par_out_view, par_err_out_view, + chi2_out(row, col)); + + } + } + }; + + auto tasks = split_task(0, y.shape(0), n_threads); + RunInParallel(process, tasks); +} + +void fit_pol1(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, double& chi2) { + + // Check that we have the correct sizes + if (y.size() != x.size() || y.size() != y_err.size() || + par_out.size() != 2 || par_err_out.size() != 2) { + throw std::runtime_error("Data, x, data_err must have the same size " + "and par_out, par_err_out must have size 2"); + } + + lm_status_struct status; + par_out = pol1_init_par(x, y); + std::array cov{0, 0, 0, 0}; + + lmcurve2(par_out.size(), par_out.data(), par_err_out.data(), cov.data(), + x.size(), x.data(), y.data(), y_err.data(), aare::func::pol1, + &lm_control_double, &status); + + // Calculate chi2 + chi2 = 0; + for (ssize_t i = 0; i < y.size(); i++) { + chi2 += std::pow((y(i) - func::pol1(x(i), par_out.data())) / y_err(i), 2); + } +} + +void fit_pol1(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, NDView 
chi2_out, + int n_threads) { + + auto process = [&](ssize_t first_row, ssize_t last_row) { + for (ssize_t row = first_row; row < last_row; row++) { + for (ssize_t col = 0; col < y.shape(1); col++) { + NDView y_view(&y(row, col, 0), {y.shape(2)}); + NDView y_err_view(&y_err(row, col, 0), + {y_err.shape(2)}); + NDView par_out_view(&par_out(row, col, 0), + {par_out.shape(2)}); + NDView par_err_out_view(&par_err_out(row, col, 0), + {par_err_out.shape(2)}); + + fit_pol1(x, y_view, y_err_view, par_out_view, par_err_out_view, chi2_out(row, col)); + + } + } + }; + + auto tasks = split_task(0, y.shape(0), n_threads); + RunInParallel(process, tasks); + +} + +NDArray fit_pol1(NDView x, NDView y) { + // // Check that we have the correct sizes + // if (y.size() != x.size() || y.size() != y_err.size() || + // par_out.size() != 2 || par_err_out.size() != 2) { + // throw std::runtime_error("Data, x, data_err must have the same size " + // "and par_out, par_err_out must have size 2"); + // } + NDArray par = pol1_init_par(x, y); + + lm_status_struct status; + lmcurve(par.size(), par.data(), x.size(), x.data(), y.data(), + aare::func::pol1, &lm_control_double, &status); + + return par; +} + +NDArray fit_pol1(NDView x, NDView y, + int n_threads) { + NDArray result({y.shape(0), y.shape(1), 2}, 0); + + auto process = [&](ssize_t first_row, ssize_t last_row) { + for (ssize_t row = first_row; row < last_row; row++) { + for (ssize_t col = 0; col < y.shape(1); col++) { + NDView values(&y(row, col, 0), {y.shape(2)}); + auto res = fit_pol1(x, values); + result(row, col, 0) = res(0); + result(row, col, 1) = res(1); + } + } + }; + + auto tasks = split_task(0, y.shape(0), n_threads); + + RunInParallel(process, tasks); + return result; +} + +// ~~ S-CURVES ~~ + +// SCURVE -- +std::array scurve_init_par(const NDView x, const NDView y){ + // Estimate the initial parameters for the fit + std::array start_par{0, 0, 0, 0, 0, 0}; + + auto ymax = std::max_element(y.begin(), y.end()); + auto ymin = 
std::min_element(y.begin(), y.end()); + start_par[4] = *ymin + (*ymax - *ymin) / 2; + + // Find the first x where the corresponding y value is above the threshold (start_par[4]) + for (ssize_t i = 0; i < y.size(); ++i) { + if (y[i] >= start_par[4]) { + start_par[2] = x[i]; + break; // Exit the loop after finding the first valid x + } + } + + start_par[3] = 2 * sqrt(start_par[2]); + start_par[0] = 100; + start_par[1] = 0.25; + start_par[5] = 1; + return start_par; +} + +// - No error +NDArray fit_scurve(NDView x, NDView y) { + NDArray result = scurve_init_par(x, y); + lm_status_struct status; + + lmcurve(result.size(), result.data(), x.size(), x.data(), y.data(), + aare::func::scurve, &lm_control_double, &status); + + return result; +} + +NDArray fit_scurve(NDView x, NDView y, int n_threads) { + NDArray result({y.shape(0), y.shape(1), 6}, 0); + + auto process = [&x, &y, &result](ssize_t first_row, ssize_t last_row) { + for (ssize_t row = first_row; row < last_row; row++) { + for (ssize_t col = 0; col < y.shape(1); col++) { + NDView values(&y(row, col, 0), {y.shape(2)}); + auto res = fit_scurve(x, values); + result(row, col, 0) = res(0); + result(row, col, 1) = res(1); + result(row, col, 2) = res(2); + result(row, col, 3) = res(3); + result(row, col, 4) = res(4); + result(row, col, 5) = res(5); + } + } + }; + + auto tasks = split_task(0, y.shape(0), n_threads); + RunInParallel(process, tasks); + return result; +} + +// - Error +void fit_scurve(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, double& chi2) { + + // Check that we have the correct sizes + if (y.size() != x.size() || y.size() != y_err.size() || + par_out.size() != 6 || par_err_out.size() != 6) { + throw std::runtime_error("Data, x, data_err must have the same size " + "and par_out, par_err_out must have size 6"); + } + + lm_status_struct status; + par_out = scurve_init_par(x, y); + std::array cov = {0}; // size 6x6 + // std::array cov{0, 0, 0, 0}; + + lmcurve2(par_out.size(), 
par_out.data(), par_err_out.data(), cov.data(), + x.size(), x.data(), y.data(), y_err.data(), aare::func::scurve, + &lm_control_double, &status); + + // Calculate chi2 + chi2 = 0; + for (ssize_t i = 0; i < y.size(); i++) { + chi2 += std::pow((y(i) - func::pol1(x(i), par_out.data())) / y_err(i), 2); + } +} + +void fit_scurve(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, NDView chi2_out, + int n_threads) { + + auto process = [&](ssize_t first_row, ssize_t last_row) { + for (ssize_t row = first_row; row < last_row; row++) { + for (ssize_t col = 0; col < y.shape(1); col++) { + NDView y_view(&y(row, col, 0), {y.shape(2)}); + NDView y_err_view(&y_err(row, col, 0), + {y_err.shape(2)}); + NDView par_out_view(&par_out(row, col, 0), + {par_out.shape(2)}); + NDView par_err_out_view(&par_err_out(row, col, 0), + {par_err_out.shape(2)}); + + fit_scurve(x, y_view, y_err_view, par_out_view, par_err_out_view, chi2_out(row, col)); + + } + } + }; + + auto tasks = split_task(0, y.shape(0), n_threads); + RunInParallel(process, tasks); + +} + +// SCURVE2 --- + +std::array scurve2_init_par(const NDView x, const NDView y){ + // Estimate the initial parameters for the fit + std::array start_par{0, 0, 0, 0, 0, 0}; + + auto ymax = std::max_element(y.begin(), y.end()); + auto ymin = std::min_element(y.begin(), y.end()); + start_par[4] = *ymin + (*ymax - *ymin) / 2; + + // Find the first x where the corresponding y value is above the threshold (start_par[4]) + for (ssize_t i = 0; i < y.size(); ++i) { + if (y[i] <= start_par[4]) { + start_par[2] = x[i]; + break; // Exit the loop after finding the first valid x + } + } + + start_par[3] = 2 * sqrt(start_par[2]); + start_par[0] = 100; + start_par[1] = 0.25; + start_par[5] = -1; + return start_par; +} + +// - No error +NDArray fit_scurve2(NDView x, NDView y) { + NDArray result = scurve2_init_par(x, y); + lm_status_struct status; + + lmcurve(result.size(), result.data(), x.size(), x.data(), y.data(), + aare::func::scurve2, 
&lm_control_double, &status); + + return result; +} + +NDArray fit_scurve2(NDView x, NDView y, int n_threads) { + NDArray result({y.shape(0), y.shape(1), 6}, 0); + + auto process = [&x, &y, &result](ssize_t first_row, ssize_t last_row) { + for (ssize_t row = first_row; row < last_row; row++) { + for (ssize_t col = 0; col < y.shape(1); col++) { + NDView values(&y(row, col, 0), {y.shape(2)}); + auto res = fit_scurve2(x, values); + result(row, col, 0) = res(0); + result(row, col, 1) = res(1); + result(row, col, 2) = res(2); + result(row, col, 3) = res(3); + result(row, col, 4) = res(4); + result(row, col, 5) = res(5); + } + } + }; + + auto tasks = split_task(0, y.shape(0), n_threads); + RunInParallel(process, tasks); + return result; +} + +// - Error +void fit_scurve2(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, double& chi2) { + + // Check that we have the correct sizes + if (y.size() != x.size() || y.size() != y_err.size() || + par_out.size() != 6 || par_err_out.size() != 6) { + throw std::runtime_error("Data, x, data_err must have the same size " + "and par_out, par_err_out must have size 6"); + } + + lm_status_struct status; + par_out = scurve2_init_par(x, y); + std::array cov = {0}; // size 6x6 + // std::array cov{0, 0, 0, 0}; + + lmcurve2(par_out.size(), par_out.data(), par_err_out.data(), cov.data(), + x.size(), x.data(), y.data(), y_err.data(), aare::func::scurve2, + &lm_control_double, &status); + + // Calculate chi2 against the fitted model (scurve2, not pol1) + chi2 = 0; + for (ssize_t i = 0; i < y.size(); i++) { + chi2 += std::pow((y(i) - func::scurve2(x(i), par_out.data())) / y_err(i), 2); + } +} + +void fit_scurve2(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, NDView chi2_out, + int n_threads) { + + auto process = [&](ssize_t first_row, ssize_t last_row) { + for (ssize_t row = first_row; row < last_row; row++) { + for (ssize_t col = 0; col < y.shape(1); col++) { + NDView y_view(&y(row, col, 0), {y.shape(2)}); + NDView y_err_view(&y_err(row, col, 0),
+ {y_err.shape(2)}); + NDView par_out_view(&par_out(row, col, 0), + {par_out.shape(2)}); + NDView par_err_out_view(&par_err_out(row, col, 0), + {par_err_out.shape(2)}); + + fit_scurve2(x, y_view, y_err_view, par_out_view, par_err_out_view, chi2_out(row, col)); + + } + } + }; + + auto tasks = split_task(0, y.shape(0), n_threads); + RunInParallel(process, tasks); + +} + +} // namespace aare \ No newline at end of file diff --git a/src/Frame.test.cpp b/src/Frame.test.cpp index 33bbbb6..4063701 100644 --- a/src/Frame.test.cpp +++ b/src/Frame.test.cpp @@ -19,7 +19,7 @@ TEST_CASE("Construct a frame") { // data should be initialized to 0 for (size_t i = 0; i < rows; i++) { for (size_t j = 0; j < cols; j++) { - uint8_t *data = (uint8_t *)frame.pixel_ptr(i, j); + uint8_t *data = reinterpret_cast(frame.pixel_ptr(i, j)); REQUIRE(data != nullptr); REQUIRE(*data == 0); } @@ -40,7 +40,7 @@ TEST_CASE("Set a value in a 8 bit frame") { // only the value we did set should be non-zero for (size_t i = 0; i < rows; i++) { for (size_t j = 0; j < cols; j++) { - uint8_t *data = (uint8_t *)frame.pixel_ptr(i, j); + uint8_t *data = reinterpret_cast(frame.pixel_ptr(i, j)); REQUIRE(data != nullptr); if (i == 5 && j == 7) { REQUIRE(*data == value); @@ -65,7 +65,7 @@ TEST_CASE("Set a value in a 64 bit frame") { // only the value we did set should be non-zero for (size_t i = 0; i < rows; i++) { for (size_t j = 0; j < cols; j++) { - uint64_t *data = (uint64_t *)frame.pixel_ptr(i, j); + uint64_t *data = reinterpret_cast(frame.pixel_ptr(i, j)); REQUIRE(data != nullptr); if (i == 5 && j == 7) { REQUIRE(*data == value); @@ -149,4 +149,5 @@ TEST_CASE("test explicit copy constructor") { REQUIRE(frame2.bitdepth() == bitdepth); REQUIRE(frame2.bytes() == rows * cols * bitdepth / 8); REQUIRE(frame2.data() != data); -} \ No newline at end of file +} + diff --git a/src/Interpolator.cpp b/src/Interpolator.cpp new file mode 100644 index 0000000..4bc2b34 --- /dev/null +++ b/src/Interpolator.cpp @@ -0,0 +1,56 @@ 
+#include "aare/Interpolator.hpp" + +namespace aare { + +Interpolator::Interpolator(NDView etacube, NDView xbins, + NDView ybins, NDView ebins) + : m_ietax(etacube), m_ietay(etacube), m_etabinsx(xbins), m_etabinsy(ybins), + m_energy_bins(ebins) { + if (etacube.shape(0) != xbins.size() || etacube.shape(1) != ybins.size() || + etacube.shape(2) != ebins.size()) { + throw std::invalid_argument( + "The shape of the etacube does not match the shape of the bins"); + } + + // Cumulative sum in the x direction + for (ssize_t i = 1; i < m_ietax.shape(0); i++) { + for (ssize_t j = 0; j < m_ietax.shape(1); j++) { + for (ssize_t k = 0; k < m_ietax.shape(2); k++) { + m_ietax(i, j, k) += m_ietax(i - 1, j, k); + } + } + } + + // Normalize by the highest row, if norm less than 1 don't do anything + for (ssize_t i = 0; i < m_ietax.shape(0); i++) { + for (ssize_t j = 0; j < m_ietax.shape(1); j++) { + for (ssize_t k = 0; k < m_ietax.shape(2); k++) { + auto val = m_ietax(m_ietax.shape(0) - 1, j, k); + double norm = val < 1 ? 1 : val; + m_ietax(i, j, k) /= norm; + } + } + } + + // Cumulative sum in the y direction + for (ssize_t i = 0; i < m_ietay.shape(0); i++) { + for (ssize_t j = 1; j < m_ietay.shape(1); j++) { + for (ssize_t k = 0; k < m_ietay.shape(2); k++) { + m_ietay(i, j, k) += m_ietay(i, j - 1, k); + } + } + } + + // Normalize by the highest column, if norm less than 1 don't do anything + for (ssize_t i = 0; i < m_ietay.shape(0); i++) { + for (ssize_t j = 0; j < m_ietay.shape(1); j++) { + for (ssize_t k = 0; k < m_ietay.shape(2); k++) { + auto val = m_ietay(i, m_ietay.shape(1) - 1, k); + double norm = val < 1 ? 
1 : val; + m_ietay(i, j, k) /= norm; + } + } + } +} + +} // namespace aare \ No newline at end of file diff --git a/src/JungfrauDataFile.cpp b/src/JungfrauDataFile.cpp new file mode 100644 index 0000000..59a1a0a --- /dev/null +++ b/src/JungfrauDataFile.cpp @@ -0,0 +1,238 @@ +#include "aare/JungfrauDataFile.hpp" +#include "aare/algorithm.hpp" +#include "aare/defs.hpp" + +#include +#include + +namespace aare { + +JungfrauDataFile::JungfrauDataFile(const std::filesystem::path &fname) { + + if (!std::filesystem::exists(fname)) { + throw std::runtime_error(LOCATION + + "File does not exist: " + fname.string()); + } + find_frame_size(fname); + parse_fname(fname); + scan_files(); + open_file(m_current_file_index); +} + + +// FileInterface + +Frame JungfrauDataFile::read_frame(){ + Frame f(rows(), cols(), Dtype::UINT16); + read_into(reinterpret_cast(f.data()), nullptr); + return f; +} + +Frame JungfrauDataFile::read_frame(size_t frame_number){ + seek(frame_number); + Frame f(rows(), cols(), Dtype::UINT16); + read_into(reinterpret_cast(f.data()), nullptr); + return f; +} + +std::vector JungfrauDataFile::read_n(size_t n_frames) { + std::vector frames; + for(size_t i = 0; i < n_frames; ++i){ + frames.push_back(read_frame()); + } + return frames; +} + +void JungfrauDataFile::read_into(std::byte *image_buf) { + read_into(image_buf, nullptr); +} +void JungfrauDataFile::read_into(std::byte *image_buf, size_t n_frames) { + read_into(image_buf, n_frames, nullptr); +} + +size_t JungfrauDataFile::frame_number(size_t frame_index) { + seek(frame_index); + return read_header().framenum; +} + +std::array JungfrauDataFile::shape() const { + return {static_cast(rows()), static_cast(cols())}; +} + +DetectorType JungfrauDataFile::detector_type() const { return DetectorType::Jungfrau; } + +std::string JungfrauDataFile::base_name() const { return m_base_name; } + +size_t JungfrauDataFile::bytes_per_frame() { return m_bytes_per_frame; } + +size_t JungfrauDataFile::pixels_per_frame() { return 
m_rows * m_cols; } + +size_t JungfrauDataFile::bytes_per_pixel() const { return sizeof(pixel_type); } + +size_t JungfrauDataFile::bitdepth() const { + return bytes_per_pixel() * bits_per_byte; +} + +void JungfrauDataFile::seek(size_t frame_index) { + if (frame_index >= m_total_frames) { + throw std::runtime_error(LOCATION + "Frame index out of range: " + + std::to_string(frame_index)); + } + m_current_frame_index = frame_index; + auto file_index = first_larger(m_last_frame_in_file, frame_index); + + if (file_index != m_current_file_index) + open_file(file_index); + + auto frame_offset = (file_index) + ? frame_index - m_last_frame_in_file[file_index - 1] + : frame_index; + auto byte_offset = frame_offset * (m_bytes_per_frame + header_size); + m_fp.seek(byte_offset); +} + +size_t JungfrauDataFile::tell() { return m_current_frame_index; } +size_t JungfrauDataFile::total_frames() const { return m_total_frames; } +size_t JungfrauDataFile::rows() const { return m_rows; } +size_t JungfrauDataFile::cols() const { return m_cols; } + +size_t JungfrauDataFile::n_files() const { return m_last_frame_in_file.size(); } + +void JungfrauDataFile::find_frame_size(const std::filesystem::path &fname) { + + static constexpr size_t module_data_size = + header_size + sizeof(pixel_type) * 512 * 1024; + static constexpr size_t half_data_size = + header_size + sizeof(pixel_type) * 256 * 1024; + static constexpr size_t chip_data_size = + header_size + sizeof(pixel_type) * 256 * 256; + + auto file_size = std::filesystem::file_size(fname); + if (file_size == 0) { + throw std::runtime_error(LOCATION + + "Cannot guess frame size: file is empty"); + } + + if (file_size % module_data_size == 0) { + m_rows = 512; + m_cols = 1024; + m_bytes_per_frame = module_data_size - header_size; + } else if (file_size % half_data_size == 0) { + m_rows = 256; + m_cols = 1024; + m_bytes_per_frame = half_data_size - header_size; + } else if (file_size % chip_data_size == 0) { + m_rows = 256; + m_cols = 256; + 
m_bytes_per_frame = chip_data_size - header_size; + } else { + throw std::runtime_error(LOCATION + + "Cannot find frame size: file size is not a " + "multiple of any known frame size"); + } +} + +void JungfrauDataFile::parse_fname(const std::filesystem::path &fname) { + m_path = fname.parent_path(); + m_base_name = fname.stem(); + + // find file index, then remove if from the base name + if (auto pos = m_base_name.find_last_of('_'); pos != std::string::npos) { + m_offset = std::stoul(m_base_name.substr(pos + 1)); + m_base_name.erase(pos); + } +} + +void JungfrauDataFile::scan_files() { + // find how many files we have and the number of frames in each file + m_last_frame_in_file.clear(); + size_t file_index = m_offset; + while (std::filesystem::exists(fpath(file_index))) { + auto n_frames = std::filesystem::file_size(fpath(file_index)) / + (m_bytes_per_frame + header_size); + m_last_frame_in_file.push_back(n_frames); + ++file_index; + } + + // find where we need to open the next file and total number of frames + m_last_frame_in_file = cumsum(m_last_frame_in_file); + m_total_frames = m_last_frame_in_file.back(); +} + +void JungfrauDataFile::read_into(std::byte *image_buf, + JungfrauDataHeader *header) { + + // read header if not passed nullptr + if (header) { + if (auto rc = fread(header, sizeof(JungfrauDataHeader), 1, m_fp.get()); + rc != 1) { + throw std::runtime_error( + LOCATION + + "Could not read header from file:" + m_fp.error_msg()); + } + } else { + m_fp.seek(header_size, SEEK_CUR); + } + + // read data + if (auto rc = fread(image_buf, 1, m_bytes_per_frame, m_fp.get()); + rc != m_bytes_per_frame) { + throw std::runtime_error(LOCATION + "Could not read image from file" + + m_fp.error_msg()); + } + + // prepare for next read + // if we are at the end of the file, open the next file + ++m_current_frame_index; + if (m_current_frame_index >= m_last_frame_in_file[m_current_file_index] && + (m_current_frame_index < m_total_frames)) { + ++m_current_file_index; + 
open_file(m_current_file_index); + } +} + +void JungfrauDataFile::read_into(std::byte *image_buf, size_t n_frames, + JungfrauDataHeader *header) { + if (header) { + for (size_t i = 0; i < n_frames; ++i) + read_into(image_buf + i * m_bytes_per_frame, header + i); + }else{ + for (size_t i = 0; i < n_frames; ++i) + read_into(image_buf + i * m_bytes_per_frame, nullptr); + } +} + +void JungfrauDataFile::read_into(NDArray* image, JungfrauDataHeader* header) { + if(image->shape()!=shape()){ + throw std::runtime_error(LOCATION + + "Image shape does not match file size: " + std::to_string(rows()) + "x" + std::to_string(cols())); + } + read_into(reinterpret_cast(image->data()), header); +} + + +JungfrauDataHeader JungfrauDataFile::read_header() { + JungfrauDataHeader header; + if (auto rc = fread(&header, 1, sizeof(header), m_fp.get()); + rc != sizeof(header)) { + throw std::runtime_error(LOCATION + "Could not read header from file" + + m_fp.error_msg()); + } + m_fp.seek(-header_size, SEEK_CUR); + return header; +} + +void JungfrauDataFile::open_file(size_t file_index) { + // fmt::print(stderr, "Opening file: {}\n", + // fpath(file_index+m_offset).string()); + m_fp = FilePtr(fpath(file_index + m_offset), "rb"); + m_current_file_index = file_index; +} + +std::filesystem::path JungfrauDataFile::fpath(size_t file_index) const { + auto fname = fmt::format("{}_{:0{}}.dat", m_base_name, file_index, + n_digits_in_file_index); + return m_path / fname; +} + +} // namespace aare diff --git a/src/JungfrauDataFile.test.cpp b/src/JungfrauDataFile.test.cpp new file mode 100644 index 0000000..ce51168 --- /dev/null +++ b/src/JungfrauDataFile.test.cpp @@ -0,0 +1,114 @@ +#include "aare/JungfrauDataFile.hpp" + +#include +#include "test_config.hpp" + +using aare::JungfrauDataFile; +using aare::JungfrauDataHeader; +TEST_CASE("Open a Jungfrau data file", "[.files]") { + //we know we have 4 files with 7, 7, 7, and 3 frames + //firs frame number if 1 and the bunch id is frame_number**2 + //so we 
can check the header + auto fpath = test_data_path() / "dat" / "AldoJF500k_000000.dat"; + REQUIRE(std::filesystem::exists(fpath)); + + JungfrauDataFile f(fpath); + REQUIRE(f.rows() == 512); + REQUIRE(f.cols() == 1024); + REQUIRE(f.bytes_per_frame() == 1048576); + REQUIRE(f.pixels_per_frame() == 524288); + REQUIRE(f.bytes_per_pixel() == 2); + REQUIRE(f.bitdepth() == 16); + REQUIRE(f.base_name() == "AldoJF500k"); + REQUIRE(f.n_files() == 4); + REQUIRE(f.tell() == 0); + REQUIRE(f.total_frames() == 24); + REQUIRE(f.current_file() == fpath); + + //Check that the frame number and buch id is read correctly + for (size_t i = 0; i < 24; ++i) { + JungfrauDataHeader header; + aare::NDArray image(f.shape()); + f.read_into(&image, &header); + REQUIRE(header.framenum == i + 1); + REQUIRE(header.bunchid == (i + 1) * (i + 1)); + REQUIRE(image.shape(0) == 512); + REQUIRE(image.shape(1) == 1024); + } +} + +TEST_CASE("Seek in a JungfrauDataFile", "[.files]"){ + auto fpath = test_data_path() / "dat" / "AldoJF65k_000000.dat"; + REQUIRE(std::filesystem::exists(fpath)); + + JungfrauDataFile f(fpath); + + //The file should have 113 frames + f.seek(19); + REQUIRE(f.tell() == 19); + auto h = f.read_header(); + REQUIRE(h.framenum == 19+1); + + //Reading again does not change the file pointer + auto h2 = f.read_header(); + REQUIRE(h2.framenum == 19+1); + + f.seek(59); + REQUIRE(f.tell() == 59); + auto h3 = f.read_header(); + REQUIRE(h3.framenum == 59+1); + + JungfrauDataHeader h4; + aare::NDArray image(f.shape()); + f.read_into(&image, &h4); + REQUIRE(h4.framenum == 59+1); + + //now we should be on the next frame + REQUIRE(f.tell() == 60); + REQUIRE(f.read_header().framenum == 60+1); + + REQUIRE_THROWS(f.seek(86356)); //out of range +} + +TEST_CASE("Open a Jungfrau data file with non zero file index", "[.files]"){ + + auto fpath = test_data_path() / "dat" / "AldoJF65k_000003.dat"; + REQUIRE(std::filesystem::exists(fpath)); + + JungfrauDataFile f(fpath); + + //18 files per data file, opening 
the 3rd file we ignore the first 3 + REQUIRE(f.total_frames() == 113-18*3); + REQUIRE(f.tell() == 0); + + //Frame numbers start at 1 in the first file + REQUIRE(f.read_header().framenum == 18*3+1); + + // moving relative to the third file + f.seek(5); + REQUIRE(f.read_header().framenum == 18*3+1+5); + + // ignoring the first 3 files + REQUIRE(f.n_files() == 4); + + REQUIRE(f.current_file().stem() == "AldoJF65k_000003"); + +} + +TEST_CASE("Read into throws if size doesn't match", "[.files]"){ + auto fpath = test_data_path() / "dat" / "AldoJF65k_000000.dat"; + REQUIRE(std::filesystem::exists(fpath)); + + JungfrauDataFile f(fpath); + + aare::NDArray image({39, 85}); + JungfrauDataHeader header; + + REQUIRE_THROWS(f.read_into(&image, &header)); + REQUIRE_THROWS(f.read_into(&image, nullptr)); + REQUIRE_THROWS(f.read_into(&image)); + + REQUIRE(f.tell() == 0); + + +} \ No newline at end of file diff --git a/src/NDArray.test.cpp b/src/NDArray.test.cpp index 54099fd..819a1a9 100644 --- a/src/NDArray.test.cpp +++ b/src/NDArray.test.cpp @@ -2,6 +2,7 @@ #include #include #include +#include using aare::NDArray; using aare::NDView; @@ -34,8 +35,26 @@ TEST_CASE("Construct from an NDView") { } } +TEST_CASE("3D NDArray from NDView"){ + std::vector data(27); + std::iota(data.begin(), data.end(), 0); + NDView view(data.data(), Shape<3>{3, 3, 3}); + NDArray image(view); + REQUIRE(image.shape() == view.shape()); + REQUIRE(image.size() == view.size()); + REQUIRE(image.data() != view.data()); + + for(ssize_t i=0; i shape{{20}}; + std::array shape{{20}}; NDArray img(shape, 3); REQUIRE(img.size() == 20); REQUIRE(img(5) == 3); @@ -52,7 +71,7 @@ TEST_CASE("Accessing a const object") { } TEST_CASE("Indexing of a 2D image") { - std::array shape{{3, 7}}; + std::array shape{{3, 7}}; NDArray img(shape, 5); for (uint32_t i = 0; i != img.size(); ++i) { REQUIRE(img(i) == 5); @@ -95,7 +114,7 @@ TEST_CASE("Divide double by int") { } TEST_CASE("Elementwise multiplication of 3D image") { - std::array 
shape{3, 4, 2}; + std::array shape{3, 4, 2}; NDArray a{shape}; NDArray b{shape}; for (uint32_t i = 0; i != a.size(); ++i) { @@ -160,18 +179,18 @@ TEST_CASE("Compare two images") { } TEST_CASE("Size and shape matches") { - int64_t w = 15; - int64_t h = 75; - std::array shape{w, h}; + ssize_t w = 15; + ssize_t h = 75; + std::array shape{w, h}; NDArray a{shape}; - REQUIRE(a.size() == static_cast(w * h)); + REQUIRE(a.size() == w * h); REQUIRE(a.shape() == shape); } TEST_CASE("Initial value matches for all elements") { double v = 4.35; NDArray a{{5, 5}, v}; - for (uint32_t i = 0; i < a.size(); ++i) { + for (int i = 0; i < a.size(); ++i) { REQUIRE(a(i) == v); } } @@ -205,7 +224,7 @@ TEST_CASE("Bitwise and on data") { TEST_CASE("Elementwise operations on images") { - std::array shape{5, 5}; + std::array shape{5, 5}; double a_val = 3.0; double b_val = 8.0; @@ -379,4 +398,32 @@ TEST_CASE("Elementwise operations on images") { REQUIRE(A(i) == a_val); } } +} + +TEST_CASE("Assign an std::array to a 1D NDArray") { + NDArray a{{5}, 0}; + std::array b{1, 2, 3, 4, 5}; + a = b; + for (uint32_t i = 0; i < a.size(); ++i) { + REQUIRE(a(i) == b[i]); + } +} + +TEST_CASE("Assign an std::array to a 1D NDArray of a different size") { + NDArray a{{3}, 0}; + std::array b{1, 2, 3, 4, 5}; + a = b; + + REQUIRE(a.size() == 5); + for (uint32_t i = 0; i < a.size(); ++i) { + REQUIRE(a(i) == b[i]); + } +} + +TEST_CASE("Construct an NDArray from an std::array") { + std::array b{1, 2, 3, 4, 5}; + NDArray a(b); + for (uint32_t i = 0; i < a.size(); ++i) { + REQUIRE(a(i) == b[i]); + } } \ No newline at end of file diff --git a/src/NDView.test.cpp b/src/NDView.test.cpp index 3070de6..89e76e9 100644 --- a/src/NDView.test.cpp +++ b/src/NDView.test.cpp @@ -3,6 +3,7 @@ #include #include +#include using aare::NDView; using aare::Shape; @@ -21,10 +22,8 @@ TEST_CASE("Element reference 1D") { } TEST_CASE("Element reference 2D") { - std::vector vec; - for (int i = 0; i != 12; ++i) { - vec.push_back(i); - } + 
std::vector vec(12); + std::iota(vec.begin(), vec.end(), 0); NDView data(vec.data(), Shape<2>{3, 4}); REQUIRE(vec.size() == static_cast(data.size())); @@ -58,10 +57,8 @@ TEST_CASE("Element reference 3D") { } TEST_CASE("Plus and miuns with single value") { - std::vector vec; - for (int i = 0; i != 12; ++i) { - vec.push_back(i); - } + std::vector vec(12); + std::iota(vec.begin(), vec.end(), 0); NDView data(vec.data(), Shape<2>{3, 4}); data += 5; int i = 0; @@ -116,10 +113,8 @@ TEST_CASE("elementwise assign") { } TEST_CASE("iterators") { - std::vector vec; - for (int i = 0; i != 12; ++i) { - vec.push_back(i); - } + std::vector vec(12); + std::iota(vec.begin(), vec.end(), 0); NDView data(vec.data(), Shape<1>{12}); int i = 0; for (const auto item : data) { @@ -147,7 +142,7 @@ TEST_CASE("iterators") { // for (int i = 0; i != 12; ++i) { // vec.push_back(i); // } -// std::vector shape{3, 4}; +// std::vector shape{3, 4}; // NDView data(vec.data(), shape); // } @@ -156,8 +151,8 @@ TEST_CASE("divide with another span") { std::vector vec1{3, 2, 1}; std::vector result{3, 6, 3}; - NDView data0(vec0.data(), Shape<1>{static_cast(vec0.size())}); - NDView data1(vec1.data(), Shape<1>{static_cast(vec1.size())}); + NDView data0(vec0.data(), Shape<1>{static_cast(vec0.size())}); + NDView data1(vec1.data(), Shape<1>{static_cast(vec1.size())}); data0 /= data1; @@ -167,27 +162,31 @@ TEST_CASE("divide with another span") { } TEST_CASE("Retrieve shape") { - std::vector vec; - for (int i = 0; i != 12; ++i) { - vec.push_back(i); - } + std::vector vec(12); + std::iota(vec.begin(), vec.end(), 0); NDView data(vec.data(), Shape<2>{3, 4}); REQUIRE(data.shape()[0] == 3); REQUIRE(data.shape()[1] == 4); } TEST_CASE("compare two views") { - std::vector vec1; - for (int i = 0; i != 12; ++i) { - vec1.push_back(i); - } + std::vector vec1(12); + std::iota(vec1.begin(), vec1.end(), 0); NDView view1(vec1.data(), Shape<2>{3, 4}); - std::vector vec2; - for (int i = 0; i != 12; ++i) { - vec2.push_back(i); - } + 
std::vector vec2(12); + std::iota(vec2.begin(), vec2.end(), 0); NDView view2(vec2.data(), Shape<2>{3, 4}); REQUIRE((view1 == view2)); +} + + +TEST_CASE("Create a view over a vector"){ + std::vector vec(12); + std::iota(vec.begin(), vec.end(), 0); + auto v = aare::make_view(vec); + REQUIRE(v.shape()[0] == 12); + REQUIRE(v[0] == 0); + REQUIRE(v[11] == 11); } \ No newline at end of file diff --git a/src/NumpyFile.cpp b/src/NumpyFile.cpp index ea58a9a..e375ce3 100644 --- a/src/NumpyFile.cpp +++ b/src/NumpyFile.cpp @@ -197,4 +197,4 @@ void NumpyFile::load_metadata() { m_header = {dtype, fortran_order, shape}; } -} // namespace aare \ No newline at end of file +} // namespace aare diff --git a/src/RawFile.cpp b/src/RawFile.cpp index bf3b6df..122cf96 100644 --- a/src/RawFile.cpp +++ b/src/RawFile.cpp @@ -1,6 +1,9 @@ #include "aare/RawFile.hpp" +#include "aare/algorithm.hpp" #include "aare/PixelMap.hpp" #include "aare/defs.hpp" +#include "aare/logger.hpp" +#include "aare/geo_helpers.hpp" #include #include @@ -13,20 +16,14 @@ RawFile::RawFile(const std::filesystem::path &fname, const std::string &mode) : m_master(fname) { m_mode = mode; if (mode == "r") { - - n_subfiles = find_number_of_subfiles(); // f0,f1...fn - n_subfile_parts = - m_master.geometry().col * m_master.geometry().row; // d0,d1...dn - - - find_geometry(); - update_geometry_with_roi(); - + if (m_master.roi()){ + m_geometry = update_geometry_with_roi(m_geometry, m_master.roi().value()); + } open_subfiles(); } else { throw std::runtime_error(LOCATION + - "Unsupported mode. Can only read RawFiles."); + " Unsupported mode. 
Can only read RawFiles."); } } @@ -63,18 +60,21 @@ void RawFile::read_into(std::byte *image_buf, size_t n_frames, DetectorHeader *h this->get_frame_into(m_current_frame++, image_buf, header); image_buf += bytes_per_frame(); if(header) - header+=n_mod(); + header+=n_modules(); } } -size_t RawFile::n_mod() const { return n_subfile_parts; } +size_t RawFile::n_modules() const { return m_master.n_modules(); } size_t RawFile::bytes_per_frame() { - return m_rows * m_cols * m_master.bitdepth() / 8; + return m_geometry.pixels_x * m_geometry.pixels_y * m_master.bitdepth() / bits_per_byte; +} +size_t RawFile::pixels_per_frame() { + // return m_rows * m_cols; + return m_geometry.pixels_x * m_geometry.pixels_y; } -size_t RawFile::pixels_per_frame() { return m_rows * m_cols; } DetectorType RawFile::detector_type() const { return m_master.detector_type(); } @@ -92,24 +92,18 @@ void RawFile::seek(size_t frame_index) { size_t RawFile::tell() { return m_current_frame; } size_t RawFile::total_frames() const { return m_master.frames_in_file(); } -size_t RawFile::rows() const { return m_rows; } -size_t RawFile::cols() const { return m_cols; } +size_t RawFile::rows() const { return m_geometry.pixels_y; } +size_t RawFile::cols() const { return m_geometry.pixels_x; } size_t RawFile::bitdepth() const { return m_master.bitdepth(); } xy RawFile::geometry() { return m_master.geometry(); } void RawFile::open_subfiles() { if (m_mode == "r") - for (size_t i = 0; i != n_subfiles; ++i) { - auto v = std::vector(n_subfile_parts); - for (size_t j = 0; j != n_subfile_parts; ++j) { - auto pos = m_module_pixel_0[j]; - v[j] = new RawSubFile(m_master.data_fname(j, i), - m_master.detector_type(), pos.height, - pos.width, m_master.bitdepth(), - positions[j].row, positions[j].col); - - } - subfiles.push_back(v); + for (size_t i = 0; i != n_modules(); ++i) { + auto pos = m_geometry.module_pixel_0[i]; + m_subfiles.emplace_back(std::make_unique( + m_master.data_fname(i, 0), m_master.detector_type(), pos.height, 
+ pos.width, m_master.bitdepth(), pos.row_index, pos.col_index)); } else { throw std::runtime_error(LOCATION + @@ -134,127 +128,52 @@ DetectorHeader RawFile::read_header(const std::filesystem::path &fname) { return h; } -int RawFile::find_number_of_subfiles() { - int n_files = 0; - // f0,f1...fn How many files is the data split into? - while (std::filesystem::exists(m_master.data_fname(0, n_files))) - n_files++; // increment after test - -#ifdef AARE_VERBOSE - fmt::print("Found: {} subfiles\n", n_files); -#endif - return n_files; - -} RawMasterFile RawFile::master() const { return m_master; } +/** + * @brief Find the geometry of the detector by opening all the subfiles and + * reading the headers. + */ void RawFile::find_geometry() { + + //Hold the maximal row and column number found + //Later used for calculating the total number of rows and columns uint16_t r{}; uint16_t c{}; - for (size_t i = 0; i < n_subfile_parts; i++) { - auto h = this->read_header(m_master.data_fname(i, 0)); + for (size_t i = 0; i < n_modules(); i++) { + auto h = read_header(m_master.data_fname(i, 0)); r = std::max(r, h.row); c = std::max(c, h.column); - positions.push_back({h.row, h.column}); + // positions.push_back({h.row, h.column}); + ModuleGeometry g; - g.x = h.column * m_master.pixels_x(); - g.y = h.row * m_master.pixels_y(); + g.origin_x = h.column * m_master.pixels_x(); + g.origin_y = h.row * m_master.pixels_y(); + g.row_index = h.row; + g.col_index = h.column; g.width = m_master.pixels_x(); g.height = m_master.pixels_y(); - m_module_pixel_0.push_back(g); + m_geometry.module_pixel_0.push_back(g); } r++; c++; - m_rows = (r * m_master.pixels_y()); - m_cols = (c * m_master.pixels_x()); - - m_rows += static_cast((r - 1) * cfg.module_gap_row); - -#ifdef AARE_VERBOSE - fmt::print("\nRawFile::find_geometry()\n"); - for (size_t i = 0; i < m_module_pixel_0.size(); i++) { - fmt::print("Module {} at position: (r:{},c:{})\n", i, - m_module_pixel_0[i].y, m_module_pixel_0[i].x); - } - 
fmt::print("Image size: {}x{}\n\n", m_rows, m_cols); -#endif -} - -void RawFile::update_geometry_with_roi() { - // TODO! implement this - if (m_master.roi()) { - auto roi = m_master.roi().value(); - - // TODO! can we do this cleaner? - int pos_y = 0; - int pos_y_increment = 0; - for (size_t row = 0; row < m_master.geometry().row; row++) { - int pos_x = 0; - for (size_t col = 0; col < m_master.geometry().col; col++) { - auto &m = m_module_pixel_0[row * m_master.geometry().col + col]; - auto original_height = m.height; - auto original_width = m.width; - - // module is to the left of the roi - if (m.x + m.width < roi.xmin) { - m.width = 0; - - // roi is in module - } else { - // here we only arrive when the roi is in or to the left of - // the module - if (roi.xmin > m.x) { - m.width -= roi.xmin - m.x; - } - if (roi.xmax < m.x + m.width) { - m.width -= m.x + original_width - roi.xmax; - } - m.x = pos_x; - pos_x += m.width; - } - - if (m.y + m.height < roi.ymin) { - m.height = 0; - } else { - if ((roi.ymin > m.y) && (roi.ymin < m.y + m.height)) { - m.height -= roi.ymin - m.y; - - } - if (roi.ymax < m.y + m.height) { - m.height -= m.y + original_height - roi.ymax; - } - m.y = pos_y; - pos_y_increment = m.height; - } - } - // increment pos_y - pos_y += pos_y_increment; - } - - m_rows = roi.height(); - m_cols = roi.width(); - } - -#ifdef AARE_VERBOSE - fmt::print("RawFile::update_geometry_with_roi()\n"); - for (const auto &m : m_module_pixel_0) { - fmt::print("Module at position: (r:{}, c:{}, h:{}, w:{})\n", m.y, m.x, - m.height, m.width); - } - fmt::print("Updated image size: {}x{}\n\n", m_rows, m_cols); - fmt::print("\n"); -#endif + m_geometry.pixels_y = (r * m_master.pixels_y()); + m_geometry.pixels_x = (c * m_master.pixels_x()); + m_geometry.modules_x = c; + m_geometry.modules_y = r; + m_geometry.pixels_y += static_cast((r - 1) * cfg.module_gap_row); } + Frame RawFile::get_frame(size_t frame_index) { - auto f = Frame(m_rows, m_cols, 
Dtype::from_bitdepth(m_master.bitdepth())); + auto f = Frame(m_geometry.pixels_y, m_geometry.pixels_x, Dtype::from_bitdepth(m_master.bitdepth())); std::byte *frame_buffer = f.data(); get_frame_into(frame_index, frame_buffer); return f; @@ -266,62 +185,58 @@ size_t RawFile::bytes_per_pixel() const { } void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, DetectorHeader *header) { + LOG(logDEBUG) << "RawFile::get_frame_into(" << frame_index << ")"; if (frame_index >= total_frames()) { throw std::runtime_error(LOCATION + "Frame number out of range"); } - std::vector frame_numbers(n_subfile_parts); - std::vector frame_indices(n_subfile_parts, frame_index); + std::vector frame_numbers(n_modules()); + std::vector frame_indices(n_modules(), frame_index); // sync the frame numbers - if (n_subfile_parts != 1) { - for (size_t part_idx = 0; part_idx != n_subfile_parts; ++part_idx) { - auto subfile_id = frame_index / m_master.max_frames_per_file(); - frame_numbers[part_idx] = - subfiles[subfile_id][part_idx]->frame_number( - frame_index % m_master.max_frames_per_file()); + if (n_modules() != 1) { //if we have more than one module + for (size_t part_idx = 0; part_idx != n_modules(); ++part_idx) { + frame_numbers[part_idx] = m_subfiles[part_idx]->frame_number(frame_index); } + // 1. if frame number vector is the same break - while (std::adjacent_find(frame_numbers.begin(), frame_numbers.end(), - std::not_equal_to<>()) != - frame_numbers.end()) { + while (!all_equal(frame_numbers)) { + // 2. find the index of the minimum frame number, auto min_frame_idx = std::distance( frame_numbers.begin(), std::min_element(frame_numbers.begin(), frame_numbers.end())); + // 3. increase its index and update its respective frame number frame_indices[min_frame_idx]++; + // 4. 
if we can't increase its index => throw error if (frame_indices[min_frame_idx] >= total_frames()) { throw std::runtime_error(LOCATION + "Frame number out of range"); } - auto subfile_id = - frame_indices[min_frame_idx] / m_master.max_frames_per_file(); + frame_numbers[min_frame_idx] = - subfiles[subfile_id][min_frame_idx]->frame_number( - frame_indices[min_frame_idx] % - m_master.max_frames_per_file()); + m_subfiles[min_frame_idx]->frame_number(frame_indices[min_frame_idx]); } } if (m_master.geometry().col == 1) { // get the part from each subfile and copy it to the frame - for (size_t part_idx = 0; part_idx != n_subfile_parts; ++part_idx) { + for (size_t part_idx = 0; part_idx != n_modules(); ++part_idx) { auto corrected_idx = frame_indices[part_idx]; - auto subfile_id = corrected_idx / m_master.max_frames_per_file(); - + // This is where we start writing - auto offset = (m_module_pixel_0[part_idx].y * m_cols + - m_module_pixel_0[part_idx].x)*m_master.bitdepth()/8; + auto offset = (m_geometry.module_pixel_0[part_idx].origin_y * m_geometry.pixels_x + + m_geometry.module_pixel_0[part_idx].origin_x)*m_master.bitdepth()/8; - if (m_module_pixel_0[part_idx].x!=0) - throw std::runtime_error(LOCATION + "Implementation error. x pos not 0."); + if (m_geometry.module_pixel_0[part_idx].origin_x!=0) + throw std::runtime_error(LOCATION + " Implementation error. x pos not 0."); - //TODO! Risk for out of range access - subfiles[subfile_id][part_idx]->seek(corrected_idx % m_master.max_frames_per_file()); - subfiles[subfile_id][part_idx]->read_into(frame_buffer + offset, header); + //TODO! What if the files don't match? + m_subfiles[part_idx]->seek(corrected_idx); + m_subfiles[part_idx]->read_into(frame_buffer + offset, header); if (header) ++header; } @@ -330,31 +245,30 @@ void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, Detect //TODO! should we read row by row? 
// create a buffer large enough to hold a full module - auto bytes_per_part = m_master.pixels_y() * m_master.pixels_x() * m_master.bitdepth() / 8; // TODO! replace with image_size_in_bytes + auto *part_buffer = new std::byte[bytes_per_part]; // TODO! if we have many submodules we should reorder them on the module // level - for (size_t part_idx = 0; part_idx != n_subfile_parts; ++part_idx) { - auto pos = m_module_pixel_0[part_idx]; + for (size_t part_idx = 0; part_idx != n_modules(); ++part_idx) { + auto pos = m_geometry.module_pixel_0[part_idx]; auto corrected_idx = frame_indices[part_idx]; - auto subfile_id = corrected_idx / m_master.max_frames_per_file(); - subfiles[subfile_id][part_idx]->seek(corrected_idx % m_master.max_frames_per_file()); - subfiles[subfile_id][part_idx]->read_into(part_buffer, header); + m_subfiles[part_idx]->seek(corrected_idx); + m_subfiles[part_idx]->read_into(part_buffer, header); if(header) ++header; for (size_t cur_row = 0; cur_row < static_cast(pos.height); cur_row++) { - auto irow = (pos.y + cur_row); - auto icol = pos.x; - auto dest = (irow * this->m_cols + icol); + auto irow = (pos.origin_y + cur_row); + auto icol = pos.origin_x; + auto dest = (irow * this->m_geometry.pixels_x + icol); dest = dest * m_master.bitdepth() / 8; memcpy(frame_buffer + dest, part_buffer + cur_row * pos.width * @@ -365,6 +279,7 @@ void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, Detect } delete[] part_buffer; } + } std::vector RawFile::read_n(size_t n_frames) { @@ -381,23 +296,8 @@ size_t RawFile::frame_number(size_t frame_index) { if (frame_index >= m_master.frames_in_file()) { throw std::runtime_error(LOCATION + " Frame number out of range"); } - size_t subfile_id = frame_index / m_master.max_frames_per_file(); - if (subfile_id >= subfiles.size()) { - throw std::runtime_error( - LOCATION + " Subfile out of range. 
Possible missing data."); - } - return subfiles[subfile_id][0]->frame_number( - frame_index % m_master.max_frames_per_file()); + return m_subfiles[0]->frame_number(frame_index); } -RawFile::~RawFile() { - // TODO! Fix this, for file closing - for (auto &vec : subfiles) { - for (auto *subfile : vec) { - delete subfile; - } - } -} - -} // namespace aare \ No newline at end of file +} // namespace aare diff --git a/src/RawFile.test.cpp b/src/RawFile.test.cpp index faefd28..9109985 100644 --- a/src/RawFile.test.cpp +++ b/src/RawFile.test.cpp @@ -1,10 +1,13 @@ #include "aare/File.hpp" +#include "aare/RawMasterFile.hpp" //needed for ROI +#include "aare/RawFile.hpp" #include #include #include "test_config.hpp" + using aare::File; TEST_CASE("Read number of frames from a jungfrau raw file", "[.integration]") { @@ -96,11 +99,11 @@ TEST_CASE("Read frame numbers from a raw file", "[.integration]") { } } -TEST_CASE("Compare reading from a numpy file with a raw file", "[.integration]") { - auto fpath_raw = test_data_path() / "jungfrau" / "jungfrau_single_master_0.json"; +TEST_CASE("Compare reading from a numpy file with a raw file", "[.files]") { + auto fpath_raw = test_data_path() / "raw/jungfrau" / "jungfrau_single_master_0.json"; REQUIRE(std::filesystem::exists(fpath_raw)); - auto fpath_npy = test_data_path() / "jungfrau" / "jungfrau_single_0.npy"; + auto fpath_npy = test_data_path() / "raw/jungfrau" / "jungfrau_single_0.npy"; REQUIRE(std::filesystem::exists(fpath_npy)); File raw(fpath_raw, "r"); @@ -110,6 +113,7 @@ TEST_CASE("Compare reading from a numpy file with a raw file", "[.integration]") CHECK(npy.total_frames() == 10); for (size_t i = 0; i < 10; ++i) { + CHECK(raw.tell() == i); auto raw_frame = raw.read_frame(); auto npy_frame = npy.read_frame(); CHECK((raw_frame.view() == npy_frame.view())); @@ -148,3 +152,5 @@ TEST_CASE("Read file with unordered frames", "[.integration]") { File f(fpath); REQUIRE_THROWS((f.read_frame())); } + + diff --git a/src/RawMasterFile.cpp 
b/src/RawMasterFile.cpp index 30d93a4..8a2db87 100644 --- a/src/RawMasterFile.cpp +++ b/src/RawMasterFile.cpp @@ -140,6 +140,10 @@ std::optional RawMasterFile::number_of_rows() const { xy RawMasterFile::geometry() const { return m_geometry; } +size_t RawMasterFile::n_modules() const { + return m_geometry.row * m_geometry.col; +} + std::optional RawMasterFile::quad() const { return m_quad; } // optional values, these may or may not be present in the master file @@ -417,4 +421,4 @@ void RawMasterFile::parse_raw(const std::filesystem::path &fpath) { if(m_frames_in_file==0) m_frames_in_file = m_total_frames_expected; } -} // namespace aare \ No newline at end of file +} // namespace aare diff --git a/src/RawSubFile.cpp b/src/RawSubFile.cpp index 6fae7ce..a8d29ce 100644 --- a/src/RawSubFile.cpp +++ b/src/RawSubFile.cpp @@ -1,87 +1,136 @@ #include "aare/RawSubFile.hpp" #include "aare/PixelMap.hpp" +#include "aare/algorithm.hpp" +#include "aare/utils/ifstream_helpers.hpp" +#include "aare/logger.hpp" + + #include // memcpy #include #include +#include + + + namespace aare { RawSubFile::RawSubFile(const std::filesystem::path &fname, DetectorType detector, size_t rows, size_t cols, size_t bitdepth, uint32_t pos_row, uint32_t pos_col) - : m_detector_type(detector), m_bitdepth(bitdepth), m_fname(fname), m_rows(rows), m_cols(cols), - m_bytes_per_frame((m_bitdepth / 8) * m_rows * m_cols), m_pos_row(pos_row), m_pos_col(pos_col) { + : m_detector_type(detector), m_bitdepth(bitdepth), + m_rows(rows), m_cols(cols), + m_bytes_per_frame((m_bitdepth / 8) * m_rows * m_cols), m_pos_row(pos_row), + m_pos_col(pos_col) { + + LOG(logDEBUG) << "RawSubFile::RawSubFile()"; if (m_detector_type == DetectorType::Moench03_old) { m_pixel_map = GenerateMoench03PixelMap(); - }else if(m_detector_type == DetectorType::Eiger && m_pos_row % 2 == 0){ + } else if (m_detector_type == DetectorType::Eiger && m_pos_row % 2 == 0) { m_pixel_map = GenerateEigerFlipRowsPixelMap(); } - if 
(std::filesystem::exists(fname)) { - n_frames = std::filesystem::file_size(fname) / - (sizeof(DetectorHeader) + rows * cols * bitdepth / 8); - } else { - throw std::runtime_error( - LOCATION + fmt::format("File {} does not exist", m_fname.string())); - } - // fp = fopen(m_fname.string().c_str(), "rb"); - m_file.open(m_fname, std::ios::binary); - if (!m_file.is_open()) { - throw std::runtime_error( - LOCATION + fmt::format("Could not open file {}", m_fname.string())); - } - -#ifdef AARE_VERBOSE - fmt::print("Opened file: {} with {} frames\n", m_fname.string(), n_frames); - fmt::print("m_rows: {}, m_cols: {}, m_bitdepth: {}\n", m_rows, m_cols, - m_bitdepth); - fmt::print("file size: {}\n", std::filesystem::file_size(fname)); -#endif + parse_fname(fname); + scan_files(); + open_file(m_current_file_index); // open the first file } void RawSubFile::seek(size_t frame_index) { - if (frame_index >= n_frames) { - throw std::runtime_error("Frame number out of range"); + LOG(logDEBUG) << "RawSubFile::seek(" << frame_index << ")"; + if (frame_index >= m_total_frames) { + throw std::runtime_error(LOCATION + " Frame index out of range: " + + std::to_string(frame_index)); } - m_file.seekg((sizeof(DetectorHeader) + bytes_per_frame()) * frame_index); + m_current_frame_index = frame_index; + auto file_index = first_larger(m_last_frame_in_file, frame_index); + + if (file_index != m_current_file_index) + open_file(file_index); + + auto frame_offset = (file_index) + ? 
frame_index - m_last_frame_in_file[file_index - 1] + : frame_index; + auto byte_offset = frame_offset * (m_bytes_per_frame + sizeof(DetectorHeader)); + m_file.seekg(byte_offset); } size_t RawSubFile::tell() { - return m_file.tellg() / (sizeof(DetectorHeader) + bytes_per_frame()); + LOG(logDEBUG) << "RawSubFile::tell():" << m_current_frame_index; + return m_current_frame_index; } - void RawSubFile::read_into(std::byte *image_buf, DetectorHeader *header) { - if(header){ - m_file.read(reinterpret_cast(header), sizeof(DetectorHeader)); + LOG(logDEBUG) << "RawSubFile::read_into()"; + + if (header) { + m_file.read(reinterpret_cast(header), sizeof(DetectorHeader)); } else { m_file.seekg(sizeof(DetectorHeader), std::ios::cur); } - //TODO! expand support for different bitdepths - if(m_pixel_map){ + if (m_file.fail()){ + throw std::runtime_error(LOCATION + ifstream_error_msg(m_file)); + } + + // TODO! expand support for different bitdepths + if (m_pixel_map) { // read into a temporary buffer and then copy the data to the buffer // in the correct order - // currently this only supports 16 bit data! - auto part_buffer = new std::byte[bytes_per_frame()]; - m_file.read(reinterpret_cast(part_buffer), bytes_per_frame()); - auto *data = reinterpret_cast(image_buf); - auto *part_data = reinterpret_cast(part_buffer); - for (size_t i = 0; i < pixels_per_frame(); i++) { - data[i] = part_data[(*m_pixel_map)(i)]; + // TODO! 
add 4 bit support + if(m_bitdepth == 8){ + read_with_map(image_buf); + }else if (m_bitdepth == 16) { + read_with_map(image_buf); + } else if (m_bitdepth == 32) { + read_with_map(image_buf); + }else{ + throw std::runtime_error("Unsupported bitdepth for read with pixel map"); } - delete[] part_buffer; + } else { // read directly into the buffer m_file.read(reinterpret_cast(image_buf), bytes_per_frame()); } + + if (m_file.fail()){ + throw std::runtime_error(LOCATION + ifstream_error_msg(m_file)); + } + + ++ m_current_frame_index; + if (m_current_frame_index >= m_last_frame_in_file[m_current_file_index] && + (m_current_frame_index < m_total_frames)) { + ++m_current_file_index; + open_file(m_current_file_index); + } } +void RawSubFile::read_into(std::byte *image_buf, size_t n_frames, DetectorHeader *header) { + for (size_t i = 0; i < n_frames; i++) { + read_into(image_buf, header); + image_buf += bytes_per_frame(); + if (header) { + ++header; + } + } +} + + + +template +void RawSubFile::read_with_map(std::byte *image_buf) { + auto part_buffer = new std::byte[bytes_per_frame()]; + m_file.read(reinterpret_cast(part_buffer), bytes_per_frame()); + auto *data = reinterpret_cast(image_buf); + auto *part_data = reinterpret_cast(part_buffer); + for (size_t i = 0; i < pixels_per_frame(); i++) { + data[i] = part_data[(*m_pixel_map)(i)]; + } + delete[] part_buffer; +} size_t RawSubFile::rows() const { return m_rows; } size_t RawSubFile::cols() const { return m_cols; } - void RawSubFile::get_part(std::byte *buffer, size_t frame_index) { seek(frame_index); read_into(buffer, nullptr); @@ -94,5 +143,69 @@ size_t RawSubFile::frame_number(size_t frame_index) { return h.frameNumber; } +void RawSubFile::parse_fname(const std::filesystem::path &fname) { + LOG(logDEBUG) << "RawSubFile::parse_fname()"; + // data has the format: /path/too/data/jungfrau_single_d0_f1_0.raw + // d0 is the module index, will not change for this file + // f1 is the file index - thi is the one we need + // 0 is the 
measurement index, will not change + m_path = fname.parent_path(); + m_base_name = fname.filename(); + + // Regex to extract numbers after 'd' and 'f' + std::regex pattern(R"(^(.*_d)(\d+)(_f)(\d+)(_\d+\.raw)$)"); + std::smatch match; + + if (std::regex_match(m_base_name, match, pattern)) { + m_offset = std::stoi(match[4].str()); // find the first file index in case of a truncated series + m_base_name = match[1].str() + match[2].str() + match[3].str() + "{}" + match[5].str(); + LOG(logDEBUG) << "Base name: " << m_base_name; + LOG(logDEBUG) << "Offset: " << m_offset; + LOG(logDEBUG) << "Path: " << m_path.string(); + } else { + throw std::runtime_error( + LOCATION + fmt::format("Could not parse file name {}", fname.string())); + } +} + +std::filesystem::path RawSubFile::fpath(size_t file_index) const { + auto fname = fmt::format(m_base_name, file_index); + return m_path / fname; +} + +void RawSubFile::open_file(size_t file_index) { + m_file.close(); + auto fname = fpath(file_index+m_offset); + LOG(logDEBUG) << "RawSubFile::open_file(): " << fname.string(); + m_file.open(fname, std::ios::binary); + if (!m_file.is_open()) { + throw std::runtime_error( + LOCATION + fmt::format("Could not open file {}", fpath(file_index).string())); + } + m_current_file_index = file_index; +} + +void RawSubFile::scan_files() { + LOG(logDEBUG) << "RawSubFile::scan_files()"; + // find how many files we have and the number of frames in each file + m_last_frame_in_file.clear(); + size_t file_index = m_offset; + + while (std::filesystem::exists(fpath(file_index))) { + auto n_frames = std::filesystem::file_size(fpath(file_index)) / + (m_bytes_per_frame + sizeof(DetectorHeader)); + m_last_frame_in_file.push_back(n_frames); + LOG(logDEBUG) << "Found: " << n_frames << " frames in file: " << fpath(file_index).string(); + ++file_index; + } + + // find where we need to open the next file and total number of frames + m_last_frame_in_file = cumsum(m_last_frame_in_file); + 
if(m_last_frame_in_file.empty()){ + m_total_frames = 0; + }else{ + m_total_frames = m_last_frame_in_file.back(); + } +} } // namespace aare \ No newline at end of file diff --git a/src/RawSubFile.test.cpp b/src/RawSubFile.test.cpp new file mode 100644 index 0000000..89cf858 --- /dev/null +++ b/src/RawSubFile.test.cpp @@ -0,0 +1,76 @@ +#include "aare/RawSubFile.hpp" +#include "aare/File.hpp" +#include "aare/NDArray.hpp" +#include +#include "test_config.hpp" + +using namespace aare; + +TEST_CASE("Read frames directly from a RawSubFile", "[.files]"){ + auto fpath_raw = test_data_path() / "raw/jungfrau" / "jungfrau_single_d0_f0_0.raw"; + REQUIRE(std::filesystem::exists(fpath_raw)); + + RawSubFile f(fpath_raw, DetectorType::Jungfrau, 512, 1024, 16); + REQUIRE(f.rows() == 512); + REQUIRE(f.cols() == 1024); + REQUIRE(f.pixels_per_frame() == 512 * 1024); + REQUIRE(f.bytes_per_frame() == 512 * 1024 * 2); + REQUIRE(f.bytes_per_pixel() == 2); + + + auto fpath_npy = test_data_path() / "raw/jungfrau" / "jungfrau_single_0.npy"; + REQUIRE(std::filesystem::exists(fpath_npy)); + + //Numpy file with the same data to use as reference + File npy(fpath_npy, "r"); + + CHECK(f.frames_in_file() == 10); + CHECK(npy.total_frames() == 10); + + + DetectorHeader header{}; + NDArray image({static_cast(f.rows()), static_cast(f.cols())}); + for (size_t i = 0; i < 10; ++i) { + CHECK(f.tell() == i); + f.read_into(image.buffer(), &header); + auto npy_frame = npy.read_frame(); + CHECK((image.view() == npy_frame.view())); + } +} + +TEST_CASE("Read frames directly from a RawSubFile starting at the second file", "[.files]"){ + // we know this file has 10 frames with frame numbers 1 to 10 + // f0 1,2,3 + // f1 4,5,6 <-- starting here + // f2 7,8,9 + // f3 10 + + auto fpath_raw = test_data_path() / "raw/jungfrau" / "jungfrau_single_d0_f1_0.raw"; + REQUIRE(std::filesystem::exists(fpath_raw)); + + RawSubFile f(fpath_raw, DetectorType::Jungfrau, 512, 1024, 16); + + + auto fpath_npy = test_data_path() / 
"raw/jungfrau" / "jungfrau_single_0.npy"; + REQUIRE(std::filesystem::exists(fpath_npy)); + + //Numpy file with the same data to use as reference + File npy(fpath_npy, "r"); + npy.seek(3); + + CHECK(f.frames_in_file() == 7); + CHECK(npy.total_frames() == 10); + + + DetectorHeader header{}; + NDArray image({static_cast(f.rows()), static_cast(f.cols())}); + for (size_t i = 0; i < 7; ++i) { + CHECK(f.tell() == i); + f.read_into(image.buffer(), &header); + // frame numbers start at 1 frame index at 0 + // adding 3 + 1 to verify the frame number + CHECK(header.frameNumber == i + 4); + auto npy_frame = npy.read_frame(); + CHECK((image.view() == npy_frame.view())); + } +} \ No newline at end of file diff --git a/src/algorithm.test.cpp b/src/algorithm.test.cpp new file mode 100644 index 0000000..bf49c52 --- /dev/null +++ b/src/algorithm.test.cpp @@ -0,0 +1,195 @@ + + +#include +#include + +TEST_CASE("Find the closed index in a 1D array", "[algorithm]") { + aare::NDArray arr({5}); + for (ssize_t i = 0; i < arr.size(); i++) { + arr[i] = i; + } + // arr 0, 1, 2, 3, 4 + REQUIRE(aare::nearest_index(arr, 2.3) == 2); + REQUIRE(aare::nearest_index(arr, 2.6) == 3); + REQUIRE(aare::nearest_index(arr, 45.0) == 4); + REQUIRE(aare::nearest_index(arr, 0.0) == 0); + REQUIRE(aare::nearest_index(arr, -1.0) == 0); +} + +TEST_CASE("Passing integers to nearest_index works", "[algorithm]") { + aare::NDArray arr({5}); + for (ssize_t i = 0; i < arr.size(); i++) { + arr[i] = i; + } + // arr 0, 1, 2, 3, 4 + REQUIRE(aare::nearest_index(arr, 2) == 2); + REQUIRE(aare::nearest_index(arr, 3) == 3); + REQUIRE(aare::nearest_index(arr, 45) == 4); + REQUIRE(aare::nearest_index(arr, 0) == 0); + REQUIRE(aare::nearest_index(arr, -1) == 0); +} + +TEST_CASE("nearest_index works with std::vector", "[algorithm]") { + std::vector vec = {0, 1, 2, 3, 4}; + REQUIRE(aare::nearest_index(vec, 2.123) == 2); + REQUIRE(aare::nearest_index(vec, 2.66) == 3); + REQUIRE(aare::nearest_index(vec, 4555555.0) == 4); + 
REQUIRE(aare::nearest_index(vec, 0.0) == 0); + REQUIRE(aare::nearest_index(vec, -10.0) == 0); +} + +TEST_CASE("nearest index works with std::array", "[algorithm]") { + std::array arr = {0, 1, 2, 3, 4}; + REQUIRE(aare::nearest_index(arr, 2.123) == 2); + REQUIRE(aare::nearest_index(arr, 2.501) == 3); + REQUIRE(aare::nearest_index(arr, 4555555.0) == 4); + REQUIRE(aare::nearest_index(arr, 0.0) == 0); + REQUIRE(aare::nearest_index(arr, -10.0) == 0); +} + +TEST_CASE("nearest index when there is no different uses the first element", + "[algorithm]") { + std::vector vec = {5, 5, 5, 5, 5}; + REQUIRE(aare::nearest_index(vec, 5) == 0); +} + +TEST_CASE("nearest index when there is no different uses the first element " + "also when all smaller", + "[algorithm]") { + std::vector vec = {5, 5, 5, 5, 5}; + REQUIRE(aare::nearest_index(vec, 10) == 0); +} + +TEST_CASE("last smaller", "[algorithm]") { + aare::NDArray arr({5}); + for (ssize_t i = 0; i < arr.size(); i++) { + arr[i] = i; + } + // arr 0, 1, 2, 3, 4 + REQUIRE(aare::last_smaller(arr, -10.0) == 0); + REQUIRE(aare::last_smaller(arr, 0.0) == 0); + REQUIRE(aare::last_smaller(arr, 2.3) == 2); + REQUIRE(aare::last_smaller(arr, 253.) == 4); +} + +TEST_CASE("returns last bin strictly smaller", "[algorithm]") { + aare::NDArray arr({5}); + for (ssize_t i = 0; i < arr.size(); i++) { + arr[i] = i; + } + // arr 0, 1, 2, 3, 4 + REQUIRE(aare::last_smaller(arr, 2.0) == 1); +} + +TEST_CASE("last_smaller with all elements smaller returns last element", + "[algorithm]") { + aare::NDArray arr({5}); + for (ssize_t i = 0; i < arr.size(); i++) { + arr[i] = i; + } + // arr 0, 1, 2, 3, 4 + REQUIRE(aare::last_smaller(arr, 50.) == 4); +} + +TEST_CASE("last_smaller with all elements bigger returns first element", + "[algorithm]") { + aare::NDArray arr({5}); + for (ssize_t i = 0; i < arr.size(); i++) { + arr[i] = i; + } + // arr 0, 1, 2, 3, 4 + REQUIRE(aare::last_smaller(arr, -50.) 
== 0); +} + +TEST_CASE("last smaller with all elements equal returns the first element", + "[algorithm]") { + std::vector vec = {5, 5, 5, 5, 5, 5, 5}; + REQUIRE(aare::last_smaller(vec, 5) == 0); +} + +TEST_CASE("first_lager with vector", "[algorithm]") { + std::vector vec = {0, 1, 2, 3, 4}; + REQUIRE(aare::first_larger(vec, 2.5) == 3); +} + +TEST_CASE("first_lager with all elements smaller returns last element", + "[algorithm]") { + std::vector vec = {0, 1, 2, 3, 4}; + REQUIRE(aare::first_larger(vec, 50.) == 4); +} + +TEST_CASE("first_lager with all elements bigger returns first element", + "[algorithm]") { + std::vector vec = {0, 1, 2, 3, 4}; + REQUIRE(aare::first_larger(vec, -50.) == 0); +} + +TEST_CASE("first_lager with all elements the same as the check returns last", + "[algorithm]") { + std::vector vec = {14, 14, 14, 14, 14}; + REQUIRE(aare::first_larger(vec, 14) == 4); +} + +TEST_CASE("first larger with the same element", "[algorithm]") { + std::vector vec = {7, 8, 9, 10, 11}; + REQUIRE(aare::first_larger(vec, 9) == 3); +} + +TEST_CASE("cumsum works", "[algorithm]") { + std::vector vec = {0, 1, 2, 3, 4}; + auto result = aare::cumsum(vec); + REQUIRE(result.size() == vec.size()); + REQUIRE(result[0] == 0); + REQUIRE(result[1] == 1); + REQUIRE(result[2] == 3); + REQUIRE(result[3] == 6); + REQUIRE(result[4] == 10); +} +TEST_CASE("cumsum works with empty vector", "[algorithm]") { + std::vector vec = {}; + auto result = aare::cumsum(vec); + REQUIRE(result.size() == 0); +} +TEST_CASE("cumsum works with negative numbers", "[algorithm]") { + std::vector vec = {0, -1, -2, -3, -4}; + auto result = aare::cumsum(vec); + REQUIRE(result.size() == vec.size()); + REQUIRE(result[0] == 0); + REQUIRE(result[1] == -1); + REQUIRE(result[2] == -3); + REQUIRE(result[3] == -6); + REQUIRE(result[4] == -10); +} + + +TEST_CASE("cumsum on an empty vector", "[algorithm]") { + std::vector vec = {}; + auto result = aare::cumsum(vec); + REQUIRE(result.size() == 0); + +} + +TEST_CASE("All 
equal on an empty vector is false", "[algorithm]") { + std::vector vec = {}; + REQUIRE(aare::all_equal(vec) == false); +} + +TEST_CASE("All equal on a vector with 1 element is true", "[algorithm]") { + std::vector vec = {1}; + REQUIRE(aare::all_equal(vec) == true); +} + +TEST_CASE("All equal on a vector with 2 elements is true", "[algorithm]") { + std::vector vec = {1, 1}; + REQUIRE(aare::all_equal(vec) == true); +} + +TEST_CASE("All equal on a vector with two different elements is false", "[algorithm]") { + std::vector vec = {1, 2}; + REQUIRE(aare::all_equal(vec) == false); +} + +TEST_CASE("Last element is different", "[algorithm]") { + std::vector vec = {1, 1, 1, 1, 2}; + REQUIRE(aare::all_equal(vec) == false); +} diff --git a/src/decode.cpp b/src/decode.cpp new file mode 100644 index 0000000..436ad7b --- /dev/null +++ b/src/decode.cpp @@ -0,0 +1,102 @@ +#include "aare/decode.hpp" +#include +namespace aare { + +uint16_t adc_sar_05_decode64to16(uint64_t input){ + + //we want bits 29,19,28,18,31,21,27,20,24,23,25,22 and then pad to 16 + uint16_t output = 0; + output |= ((input >> 22) & 1) << 11; + output |= ((input >> 25) & 1) << 10; + output |= ((input >> 23) & 1) << 9; + output |= ((input >> 24) & 1) << 8; + output |= ((input >> 20) & 1) << 7; + output |= ((input >> 27) & 1) << 6; + output |= ((input >> 21) & 1) << 5; + output |= ((input >> 31) & 1) << 4; + output |= ((input >> 18) & 1) << 3; + output |= ((input >> 28) & 1) << 2; + output |= ((input >> 19) & 1) << 1; + output |= ((input >> 29) & 1) << 0; + return output; +} + +void adc_sar_05_decode64to16(NDView input, NDView output){ + if(input.shape() != output.shape()){ + throw std::invalid_argument(LOCATION + " input and output shapes must match"); + } + + for(ssize_t i = 0; i < input.shape(0); i++){ + for(ssize_t j = 0; j < input.shape(1); j++){ + output(i,j) = adc_sar_05_decode64to16(input(i,j)); + } + } +} + +uint16_t adc_sar_04_decode64to16(uint64_t input){ + + // bit_map = 
array([15,17,19,21,23,4,6,8,10,12,14,16] LSB->MSB + uint16_t output = 0; + output |= ((input >> 16) & 1) << 11; + output |= ((input >> 14) & 1) << 10; + output |= ((input >> 12) & 1) << 9; + output |= ((input >> 10) & 1) << 8; + output |= ((input >> 8) & 1) << 7; + output |= ((input >> 6) & 1) << 6; + output |= ((input >> 4) & 1) << 5; + output |= ((input >> 23) & 1) << 4; + output |= ((input >> 21) & 1) << 3; + output |= ((input >> 19) & 1) << 2; + output |= ((input >> 17) & 1) << 1; + output |= ((input >> 15) & 1) << 0; + return output; +} + +void adc_sar_04_decode64to16(NDView input, NDView output){ + if(input.shape() != output.shape()){ + throw std::invalid_argument(LOCATION + " input and output shapes must match"); + } + for(ssize_t i = 0; i < input.shape(0); i++){ + for(ssize_t j = 0; j < input.shape(1); j++){ + output(i,j) = adc_sar_04_decode64to16(input(i,j)); + } + } +} + +double apply_custom_weights(uint16_t input, const NDView weights) { + if(weights.size() > 16){ + throw std::invalid_argument("weights size must be less than or equal to 16"); + } + + double result = 0.0; + for (ssize_t i = 0; i < weights.size(); ++i) { + result += ((input >> i) & 1) * std::pow(weights[i], i); + } + return result; + +} + +void apply_custom_weights(NDView input, NDView output, const NDView weights) { + if(input.shape() != output.shape()){ + throw std::invalid_argument(LOCATION + " input and output shapes must match"); + } + + //Calculate weights to avoid repeatedly calling std::pow + std::vector weights_powers(weights.size()); + for (ssize_t i = 0; i < weights.size(); ++i) { + weights_powers[i] = std::pow(weights[i], i); + } + + // Apply custom weights to each element in the input array + for (ssize_t i = 0; i < input.shape(0); i++) { + double result = 0.0; + for (size_t bit_index = 0; bit_index < weights_powers.size(); ++bit_index) { + result += ((input(i) >> bit_index) & 1) * weights_powers[bit_index]; + } + output(i) = result; + } +} + + + +} // namespace aare diff 
--git a/src/decode.test.cpp b/src/decode.test.cpp new file mode 100644 index 0000000..1e4b2fc --- /dev/null +++ b/src/decode.test.cpp @@ -0,0 +1,80 @@ +#include "aare/decode.hpp" + +#include +#include +#include "aare/NDArray.hpp" +using Catch::Matchers::WithinAbs; +#include + +TEST_CASE("test_adc_sar_05_decode64to16"){ + uint64_t input = 0; + uint16_t output = aare::adc_sar_05_decode64to16(input); + CHECK(output == 0); + + + // bit 29 on th input is bit 0 on the output + input = 1UL << 29; + output = aare::adc_sar_05_decode64to16(input); + CHECK(output == 1); + + // test all bits by iteratting through the bitlist + std::vector bitlist = {29, 19, 28, 18, 31, 21, 27, 20, 24, 23, 25, 22}; + for (size_t i = 0; i < bitlist.size(); i++) { + input = 1UL << bitlist[i]; + output = aare::adc_sar_05_decode64to16(input); + CHECK(output == (1 << i)); + } + + + // test a few "random" values + input = 0; + input |= (1UL << 29); + input |= (1UL << 19); + input |= (1UL << 28); + output = aare::adc_sar_05_decode64to16(input); + CHECK(output == 7UL); + + + input = 0; + input |= (1UL << 18); + input |= (1UL << 27); + input |= (1UL << 25); + output = aare::adc_sar_05_decode64to16(input); + CHECK(output == 1096UL); + + input = 0; + input |= (1UL << 25); + input |= (1UL << 22); + output = aare::adc_sar_05_decode64to16(input); + CHECK(output == 3072UL); + } + + + TEST_CASE("test_apply_custom_weights") { + + uint16_t input = 1; + aare::NDArray weights_data({3}, 0.0); + weights_data(0) = 1.7; + weights_data(1) = 2.1; + weights_data(2) = 1.8; + + auto weights = weights_data.view(); + + + double output = aare::apply_custom_weights(input, weights); + CHECK_THAT(output, WithinAbs(1.0, 0.001)); + + input = 1 << 1; + output = aare::apply_custom_weights(input, weights); + CHECK_THAT(output, WithinAbs(2.1, 0.001)); + + + input = 1 << 2; + output = aare::apply_custom_weights(input, weights); + CHECK_THAT(output, WithinAbs(3.24, 0.001)); + + input = 0b111; + output = aare::apply_custom_weights(input, 
weights); + CHECK_THAT(output, WithinAbs(6.34, 0.001)); + + } \ No newline at end of file diff --git a/src/geo_helpers.cpp b/src/geo_helpers.cpp new file mode 100644 index 0000000..39086ec --- /dev/null +++ b/src/geo_helpers.cpp @@ -0,0 +1,71 @@ + +#include "aare/geo_helpers.hpp" +#include "fmt/core.h" + +namespace aare{ + +DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI roi) { + #ifdef AARE_VERBOSE + fmt::println("update_geometry_with_roi() called with ROI: {} {} {} {}", + roi.xmin, roi.xmax, roi.ymin, roi.ymax); + fmt::println("Geometry: {} {} {} {} {} {}", + geo.modules_x, geo.modules_y, geo.pixels_x, geo.pixels_y, geo.module_gap_row, geo.module_gap_col); + #endif + int pos_y = 0; + int pos_y_increment = 0; + for (int row = 0; row < geo.modules_y; row++) { + int pos_x = 0; + for (int col = 0; col < geo.modules_x; col++) { + auto &m = geo.module_pixel_0[row * geo.modules_x + col]; + auto original_height = m.height; + auto original_width = m.width; + + // module is to the left of the roi + if (m.origin_x + m.width < roi.xmin) { + m.width = 0; + + // roi is in module + } else { + // here we only arrive when the roi is in or to the left of + // the module + if (roi.xmin > m.origin_x) { + m.width -= roi.xmin - m.origin_x; + } + if (roi.xmax < m.origin_x + original_width) { + m.width -= m.origin_x + original_width - roi.xmax; + } + m.origin_x = pos_x; + pos_x += m.width; + } + + if (m.origin_y + m.height < roi.ymin) { + m.height = 0; + } else { + if ((roi.ymin > m.origin_y) && (roi.ymin < m.origin_y + m.height)) { + m.height -= roi.ymin - m.origin_y; + + } + if (roi.ymax < m.origin_y + original_height) { + m.height -= m.origin_y + original_height - roi.ymax; + } + m.origin_y = pos_y; + pos_y_increment = m.height; + } + #ifdef AARE_VERBOSE + fmt::println("Module {} {} {} {}", m.origin_x, m.origin_y, m.width, m.height); + #endif + } + // increment pos_y + pos_y += pos_y_increment; + } + + // m_rows = roi.height(); + // m_cols = roi.width(); + 
geo.pixels_x = roi.width(); + geo.pixels_y = roi.height(); + + return geo; + +} + +} // namespace aare \ No newline at end of file diff --git a/src/geo_helpers.test.cpp b/src/geo_helpers.test.cpp new file mode 100644 index 0000000..08ee96c --- /dev/null +++ b/src/geo_helpers.test.cpp @@ -0,0 +1,230 @@ +#include "aare/File.hpp" +#include "aare/RawMasterFile.hpp" //needed for ROI +#include "aare/RawFile.hpp" + +#include +#include + +#include "aare/geo_helpers.hpp" +#include "test_config.hpp" + +TEST_CASE("Simple ROIs on one module"){ + // DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI roi) + aare::DetectorGeometry geo; + + + aare::ModuleGeometry mod; + mod.origin_x = 0; + mod.origin_y = 0; + mod.width = 1024; + mod.height = 512; + + + + geo.pixels_x = 1024; + geo.pixels_y = 512; + geo.modules_x = 1; + geo.modules_y = 1; + geo.module_pixel_0.push_back(mod); + + SECTION("ROI is the whole module"){ + aare::ROI roi; + roi.xmin = 0; + roi.xmax = 1024; + roi.ymin = 0; + roi.ymax = 512; + auto updated_geo = aare::update_geometry_with_roi(geo, roi); + + REQUIRE(updated_geo.pixels_x == 1024); + REQUIRE(updated_geo.pixels_y == 512); + REQUIRE(updated_geo.modules_x == 1); + REQUIRE(updated_geo.modules_y == 1); + REQUIRE(updated_geo.module_pixel_0[0].height == 512); + REQUIRE(updated_geo.module_pixel_0[0].width == 1024); + } + SECTION("ROI is the top left corner of the module"){ + aare::ROI roi; + roi.xmin = 100; + roi.xmax = 200; + roi.ymin = 150; + roi.ymax = 200; + auto updated_geo = aare::update_geometry_with_roi(geo, roi); + + REQUIRE(updated_geo.pixels_x == 100); + REQUIRE(updated_geo.pixels_y == 50); + REQUIRE(updated_geo.modules_x == 1); + REQUIRE(updated_geo.modules_y == 1); + REQUIRE(updated_geo.module_pixel_0[0].height == 50); + REQUIRE(updated_geo.module_pixel_0[0].width == 100); + } + + SECTION("ROI is a small square"){ + aare::ROI roi; + roi.xmin = 1000; + roi.xmax = 1010; + roi.ymin = 500; + roi.ymax = 510; + auto updated_geo = 
aare::update_geometry_with_roi(geo, roi); + + REQUIRE(updated_geo.pixels_x == 10); + REQUIRE(updated_geo.pixels_y == 10); + REQUIRE(updated_geo.modules_x == 1); + REQUIRE(updated_geo.modules_y == 1); + REQUIRE(updated_geo.module_pixel_0[0].height == 10); + REQUIRE(updated_geo.module_pixel_0[0].width == 10); + } + SECTION("ROI is a few columns"){ + aare::ROI roi; + roi.xmin = 750; + roi.xmax = 800; + roi.ymin = 0; + roi.ymax = 512; + auto updated_geo = aare::update_geometry_with_roi(geo, roi); + + REQUIRE(updated_geo.pixels_x == 50); + REQUIRE(updated_geo.pixels_y == 512); + REQUIRE(updated_geo.modules_x == 1); + REQUIRE(updated_geo.modules_y == 1); + REQUIRE(updated_geo.module_pixel_0[0].height == 512); + REQUIRE(updated_geo.module_pixel_0[0].width == 50); + } +} + + + +TEST_CASE("Two modules side by side"){ + // DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI roi) + aare::DetectorGeometry geo; + + + aare::ModuleGeometry mod; + mod.origin_x = 0; + mod.origin_y = 0; + mod.width = 1024; + mod.height = 512; + + geo.pixels_x = 2048; + geo.pixels_y = 512; + geo.modules_x = 2; + geo.modules_y = 1; + + geo.module_pixel_0.push_back(mod); + mod.origin_x = 1024; + geo.module_pixel_0.push_back(mod); + + SECTION("ROI is the whole image"){ + aare::ROI roi; + roi.xmin = 0; + roi.xmax = 2048; + roi.ymin = 0; + roi.ymax = 512; + auto updated_geo = aare::update_geometry_with_roi(geo, roi); + + REQUIRE(updated_geo.pixels_x == 2048); + REQUIRE(updated_geo.pixels_y == 512); + REQUIRE(updated_geo.modules_x == 2); + REQUIRE(updated_geo.modules_y == 1); + } + SECTION("rectangle on both modules"){ + aare::ROI roi; + roi.xmin = 800; + roi.xmax = 1300; + roi.ymin = 200; + roi.ymax = 499; + auto updated_geo = aare::update_geometry_with_roi(geo, roi); + + REQUIRE(updated_geo.pixels_x == 500); + REQUIRE(updated_geo.pixels_y == 299); + REQUIRE(updated_geo.modules_x == 2); + REQUIRE(updated_geo.modules_y == 1); + REQUIRE(updated_geo.module_pixel_0[0].height == 299); + 
REQUIRE(updated_geo.module_pixel_0[0].width == 224); + REQUIRE(updated_geo.module_pixel_0[1].height == 299); + REQUIRE(updated_geo.module_pixel_0[1].width == 276); + } +} + +TEST_CASE("Three modules side by side"){ + // DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI roi) + aare::DetectorGeometry geo; + aare::ROI roi; + roi.xmin = 700; + roi.xmax = 2500; + roi.ymin = 0; + roi.ymax = 123; + + aare::ModuleGeometry mod; + mod.origin_x = 0; + mod.origin_y = 0; + mod.width = 1024; + mod.height = 512; + + geo.pixels_x = 3072; + geo.pixels_y = 512; + geo.modules_x = 3; + geo.modules_y = 1; + + geo.module_pixel_0.push_back(mod); + mod.origin_x = 1024; + geo.module_pixel_0.push_back(mod); + mod.origin_x = 2048; + geo.module_pixel_0.push_back(mod); + + auto updated_geo = aare::update_geometry_with_roi(geo, roi); + + REQUIRE(updated_geo.pixels_x == 1800); + REQUIRE(updated_geo.pixels_y == 123); + REQUIRE(updated_geo.modules_x == 3); + REQUIRE(updated_geo.modules_y == 1); + REQUIRE(updated_geo.module_pixel_0[0].height == 123); + REQUIRE(updated_geo.module_pixel_0[0].width == 324); + REQUIRE(updated_geo.module_pixel_0[1].height == 123); + REQUIRE(updated_geo.module_pixel_0[1].width == 1024); + REQUIRE(updated_geo.module_pixel_0[2].height == 123); + REQUIRE(updated_geo.module_pixel_0[2].width == 452); +} + +TEST_CASE("Four modules as a square"){ + // DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI roi) + aare::DetectorGeometry geo; + aare::ROI roi; + roi.xmin = 500; + roi.xmax = 2000; + roi.ymin = 500; + roi.ymax = 600; + + aare::ModuleGeometry mod; + mod.origin_x = 0; + mod.origin_y = 0; + mod.width = 1024; + mod.height = 512; + + geo.pixels_x = 2048; + geo.pixels_y = 1024; + geo.modules_x = 2; + geo.modules_y = 2; + + geo.module_pixel_0.push_back(mod); + mod.origin_x = 1024; + geo.module_pixel_0.push_back(mod); + mod.origin_x = 0; + mod.origin_y = 512; + geo.module_pixel_0.push_back(mod); + mod.origin_x = 1024; + 
geo.module_pixel_0.push_back(mod); + + auto updated_geo = aare::update_geometry_with_roi(geo, roi); + + REQUIRE(updated_geo.pixels_x == 1500); + REQUIRE(updated_geo.pixels_y == 100); + REQUIRE(updated_geo.modules_x == 2); + REQUIRE(updated_geo.modules_y == 2); + REQUIRE(updated_geo.module_pixel_0[0].height == 12); + REQUIRE(updated_geo.module_pixel_0[0].width == 524); + REQUIRE(updated_geo.module_pixel_0[1].height == 12); + REQUIRE(updated_geo.module_pixel_0[1].width == 976); + REQUIRE(updated_geo.module_pixel_0[2].height == 88); + REQUIRE(updated_geo.module_pixel_0[2].width == 524); + REQUIRE(updated_geo.module_pixel_0[3].height == 88); + REQUIRE(updated_geo.module_pixel_0[3].width == 976); +} \ No newline at end of file diff --git a/src/utils/ifstream_helpers.cpp b/src/utils/ifstream_helpers.cpp new file mode 100644 index 0000000..74c56f3 --- /dev/null +++ b/src/utils/ifstream_helpers.cpp @@ -0,0 +1,18 @@ +#include "aare/utils/ifstream_helpers.hpp" + +namespace aare { + +std::string ifstream_error_msg(std::ifstream &ifs) { + std::ios_base::iostate state = ifs.rdstate(); + if (state & std::ios_base::eofbit) { + return " End of file reached"; + } else if (state & std::ios_base::badbit) { + return " Bad file stream"; + } else if (state & std::ios_base::failbit) { + return " File read failed"; + }else{ + return " Unknown/no error"; + } +} + +} // namespace aare diff --git a/src/utils/task.cpp b/src/utils/task.cpp new file mode 100644 index 0000000..af6756e --- /dev/null +++ b/src/utils/task.cpp @@ -0,0 +1,30 @@ +#include "aare/utils/task.hpp" + +namespace aare { + +std::vector> split_task(int first, int last, + int n_threads) { + std::vector> vec; + vec.reserve(n_threads); + + int n_frames = last - first; + + if (n_threads >= n_frames) { + for (int i = 0; i != n_frames; ++i) { + vec.push_back({i, i + 1}); + } + return vec; + } + + int step = (n_frames) / n_threads; + for (int i = 0; i != n_threads; ++i) { + int start = step * i; + int stop = step * (i + 1); + if (i 
== n_threads - 1) + stop = last; + vec.push_back({start, stop}); + } + return vec; +} + +} // namespace aare \ No newline at end of file diff --git a/src/utils/task.test.cpp b/src/utils/task.test.cpp new file mode 100644 index 0000000..e19994a --- /dev/null +++ b/src/utils/task.test.cpp @@ -0,0 +1,32 @@ +#include "aare/utils/task.hpp" + +#include +#include + + +TEST_CASE("Split a range into multiple tasks"){ + + auto tasks = aare::split_task(0, 10, 3); + REQUIRE(tasks.size() == 3); + REQUIRE(tasks[0].first == 0); + REQUIRE(tasks[0].second == 3); + REQUIRE(tasks[1].first == 3); + REQUIRE(tasks[1].second == 6); + REQUIRE(tasks[2].first == 6); + REQUIRE(tasks[2].second == 10); + + tasks = aare::split_task(0, 10, 1); + REQUIRE(tasks.size() == 1); + REQUIRE(tasks[0].first == 0); + REQUIRE(tasks[0].second == 10); + + tasks = aare::split_task(0, 10, 10); + REQUIRE(tasks.size() == 10); + for (int i = 0; i < 10; i++){ + REQUIRE(tasks[i].first == i); + REQUIRE(tasks[i].second == i+1); + } + + + +} \ No newline at end of file diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 3170f7c..1906508 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -17,8 +17,8 @@ endif() list(APPEND CMAKE_MODULE_PATH ${Catch2_SOURCE_DIR}/extras) add_executable(tests test.cpp) -target_link_libraries(tests PRIVATE Catch2::Catch2WithMain) - +target_link_libraries(tests PRIVATE Catch2::Catch2WithMain aare_core aare_compiler_flags) +# target_compile_options(tests PRIVATE -fno-omit-frame-pointer -fsanitize=address) set_target_properties(tests PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR} OUTPUT_NAME run_tests @@ -34,7 +34,7 @@ set(TestSources target_sources(tests PRIVATE ${TestSources} ) #Work around to remove, this is not the way to do it =) -target_link_libraries(tests PRIVATE aare_core aare_compiler_flags) +# target_link_libraries(tests PRIVATE aare_core aare_compiler_flags) #configure a header to pass test file paths diff --git a/tests/test.cpp b/tests/test.cpp 
index 7c638e4..513f690 100644 --- a/tests/test.cpp +++ b/tests/test.cpp @@ -3,6 +3,7 @@ #include #include #include +#include TEST_CASE("Test suite can find data assets", "[.integration]") { auto fpath = test_data_path() / "numpy" / "test_numpy_file.npy"; @@ -18,4 +19,20 @@ TEST_CASE("Test suite can open data assets", "[.integration]") { TEST_CASE("Test float32 and char8") { REQUIRE(sizeof(float) == 4); REQUIRE(CHAR_BIT == 8); -} \ No newline at end of file +} + +/** + * Uncomment the following tests to verify that asan is working + */ + +// TEST_CASE("trigger asan stack"){ +// int arr[5] = {1,2,3,4,5}; +// int val = arr[7]; +// fmt::print("val: {}\n", val); +// } + +// TEST_CASE("trigger asan heap"){ +// auto *ptr = new int[5]; +// ptr[70] = 5; +// fmt::print("ptr: {}\n", ptr[70]); +// } \ No newline at end of file diff --git a/tests/test_config.hpp.in b/tests/test_config.hpp.in index 62993b7..e314b8f 100644 --- a/tests/test_config.hpp.in +++ b/tests/test_config.hpp.in @@ -7,6 +7,6 @@ inline auto test_data_path(){ if(const char* env_p = std::getenv("AARE_TEST_DATA")){ return std::filesystem::path(env_p); }else{ - throw std::runtime_error("AARE_TEST_DATA_PATH not set"); + throw std::runtime_error("Path to test data: $AARE_TEST_DATA not set"); } } \ No newline at end of file diff --git a/update_version.py b/update_version.py new file mode 100644 index 0000000..476895a --- /dev/null +++ b/update_version.py @@ -0,0 +1,57 @@ +# SPDX-License-Identifier: LGPL-3.0-or-other +# Copyright (C) 2021 Contributors to the Aare Package +""" +Script to update VERSION file with semantic versioning if provided as an argument, or with 0.0.0 if no argument is provided. 
+""" + +import sys +import os +import re + +from packaging.version import Version, InvalidVersion + + +SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) + +def is_integer(value): + try: + int(value) + except ValueError: + return False + else: + return True + + +def get_version(): + + # Check at least one argument is passed + if len(sys.argv) < 2: + return "0.0.0" + + version = sys.argv[1] + + try: + v = Version(version) # normalize check if version follows PEP 440 specification + + version_normalized = version.replace("-", ".") + + version_normalized = re.sub(r'0*(\d+)', lambda m : str(int(m.group(0))), version_normalized) #remove leading zeros + + return version_normalized + + except InvalidVersion as e: + print(f"Invalid version {version}. Version format must follow semantic versioning format of python PEP 440 version identification specification.") + sys.exit(1) + + +def write_version_to_file(version): + version_file_path = os.path.join(SCRIPT_DIR, "VERSION") + with open(version_file_path, "w") as version_file: + version_file.write(version) + print(f"Version {version} written to VERSION file.") + +# Main script +if __name__ == "__main__": + + version = get_version() + write_version_to_file(version) \ No newline at end of file