Mirror of https://github.com/slsdetectorgroup/aare.git (synced 2025-12-14 17:11:25 +01:00)

Compare commits: 38 commits
| Author | SHA1 | Date |
|---|---|---|
| | 119ca96a52 | |
| | 053536d135 | |
| | 286b2888ca | |
| | 52aa1d4d9b | |
| | 8556ab6564 | |
| | 51a87e2a1e | |
| | e6dd1f3ec2 | |
| | 65672d06f3 | |
| | bceefe6d64 | |
| | cc57cc7c27 | |
| | d89530ed22 | |
| | 7917e6f81a | |
| | a26073fb41 | |
| | f3f3e2af6a | |
| | 031d9503d8 | |
| | cba2e46e2f | |
| | b4a9b4caec | |
| | be7f510775 | |
| | 56fa6f6bfb | |
| | ca4d392b2f | |
| | 3b65e92cb7 | |
| | 755a8fb2b7 | |
| | dc7f6d44f2 | |
| | 480e28c927 | |
| | d7242671b2 | |
| | a6a02249bc | |
| | a3f831dc9e | |
| | 76b8872fe6 | |
| | 55236ce6cc | |
| | e7d3e667b0 | |
| | d9cbf0f481 | |
| | 5681e18403 | |
| | 0b252709bd | |
| | e5df929a9a | |
| | b7337fc6c5 | |
| | 09de69c090 | |
| | b23e697e26 | |
| | 4233509615 | |
.github/workflows/build_and_deploy_conda.yml (vendored, 2 changes)
@@ -10,7 +10,7 @@ jobs:
    strategy:
      fail-fast: false
      matrix:
        platform: [ubuntu-latest] # macos-12, windows-2019]
        platform: [ubuntu-latest, ] # macos-12, windows-2019]
        python-version: ["3.12",]

    runs-on: ${{ matrix.platform }}
@@ -2,15 +2,11 @@ name: Build the package using cmake then documentation

on:
  workflow_dispatch:
  push:
  pull_request:
  release:
    types:
      - published

env:
  # Customize the CMake build type here (Release, Debug, RelWithDebInfo, etc.)
  BUILD_TYPE: Debug

permissions:
  contents: read

@@ -22,7 +18,7 @@ jobs:
    strategy:
      fail-fast: false
      matrix:
        platform: [ubuntu-latest, macos-latest]
        platform: [ubuntu-latest, ]
        python-version: ["3.12",]

    runs-on: ${{ matrix.platform }}

@@ -43,20 +39,15 @@ jobs:
        channels: conda-forge
        conda-remove-defaults: "true"

    - name: Build library and docs
    - name: Build library
      run: |
        mkdir build
        cd build
        cmake .. -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} -DAARE_SYSTEM_LIBRARIES=ON -DAARE_PYTHON_BINDINGS=ON -DAARE_DOCS=ON -DAARE_TESTS=ON
        make -j 4
        cmake .. -DAARE_SYSTEM_LIBRARIES=ON -DAARE_DOCS=ON
        make -j 2
        make docs

    - name: C++ unit tests
      working-directory: ${{github.workspace}}/build
      run: ctest -C ${{env.BUILD_TYPE}} -j4

    - name: Upload static files as artifact
      if: matrix.platform == 'ubuntu-latest'
      id: deployment
      uses: actions/upload-pages-artifact@v3
      with:
CMakeLists.txt (100 changes)
@@ -1,4 +1,3 @@
|
||||
# SPDX-License-Identifier: MPL-2.0
|
||||
cmake_minimum_required(VERSION 3.15)
|
||||
|
||||
project(aare
|
||||
@@ -54,6 +53,7 @@ option(AARE_DOCS "Build documentation" OFF)
|
||||
option(AARE_VERBOSE "Verbose output" OFF)
|
||||
option(AARE_CUSTOM_ASSERT "Use custom assert" OFF)
|
||||
option(AARE_INSTALL_PYTHONEXT "Install the python extension in the install tree under CMAKE_INSTALL_PREFIX/aare/" OFF)
|
||||
option(AARE_HDF5 "Hdf5 File Format" OFF)
|
||||
option(AARE_ASAN "Enable AddressSanitizer" OFF)
|
||||
|
||||
# Configure which of the dependencies to use FetchContent for
|
||||
@@ -78,6 +78,17 @@ if(AARE_SYSTEM_LIBRARIES)
|
||||
# on conda-forge
|
||||
endif()
|
||||
|
||||
if(AARE_VERBOSE)
|
||||
add_compile_definitions(AARE_VERBOSE)
|
||||
add_compile_definitions(AARE_LOG_LEVEL=aare::logDEBUG5)
|
||||
else()
|
||||
add_compile_definitions(AARE_LOG_LEVEL=aare::logINFOBLUE)
|
||||
endif()
|
||||
|
||||
if(AARE_CUSTOM_ASSERT)
|
||||
add_compile_definitions(AARE_CUSTOM_ASSERT)
|
||||
endif()
|
||||
|
||||
if(AARE_BENCHMARKS)
|
||||
add_subdirectory(benchmarks)
|
||||
endif()
|
||||
@@ -325,10 +336,13 @@ if(AARE_ASAN)
|
||||
)
|
||||
endif()
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
if(AARE_TESTS)
|
||||
enable_testing()
|
||||
add_subdirectory(tests)
|
||||
target_compile_definitions(tests PRIVATE AARE_TESTS)
|
||||
endif()
|
||||
|
||||
###------------------------------------------------------------------------------MAIN LIBRARY
|
||||
@@ -343,6 +357,10 @@ set(PUBLICHEADERS
|
||||
include/aare/CtbRawFile.hpp
|
||||
include/aare/ClusterVector.hpp
|
||||
include/aare/decode.hpp
|
||||
include/aare/type_traits.hpp
|
||||
include/aare/scan_parameters.hpp
|
||||
include/aare/to_string.hpp
|
||||
include/aare/string_utils.hpp
|
||||
include/aare/defs.hpp
|
||||
include/aare/Dtype.hpp
|
||||
include/aare/File.hpp
|
||||
@@ -351,7 +369,7 @@ set(PUBLICHEADERS
|
||||
include/aare/FilePtr.hpp
|
||||
include/aare/Frame.hpp
|
||||
include/aare/GainMap.hpp
|
||||
include/aare/DetectorGeometry.hpp
|
||||
include/aare/geo_helpers.hpp
|
||||
include/aare/JungfrauDataFile.hpp
|
||||
include/aare/logger.hpp
|
||||
include/aare/NDArray.hpp
|
||||
@@ -369,27 +387,44 @@ set(PUBLICHEADERS
|
||||
|
||||
|
||||
set(SourceFiles
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/calibration.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/CtbRawFile.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/decode.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/defs.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/DetectorGeometry.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/to_string.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/string_utils.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/Dtype.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/decode.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/Frame.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/File.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/FilePtr.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/Fit.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/Frame.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/Interpolator.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/geo_helpers.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/JungfrauDataFile.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyHelpers.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/Interpolator.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/PixelMap.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/RawFile.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/RawMasterFile.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/RawSubFile.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/RawMasterFile.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/utils/task.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/utils/ifstream_helpers.cpp
|
||||
)
|
||||
)
|
||||
|
||||
# HDF5
|
||||
if (AARE_HDF5)
|
||||
find_package(HDF5 1.10 COMPONENTS CXX REQUIRED)
|
||||
add_definitions(
|
||||
${HDF5_DEFINITIONS}
|
||||
)
|
||||
list (APPEND PUBLICHEADERS
|
||||
include/aare/Hdf5File.hpp
|
||||
include/aare/Hdf5MasterFile.hpp
|
||||
)
|
||||
list (APPEND SourceFiles
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/Hdf5File.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/Hdf5MasterFile.cpp
|
||||
)
|
||||
endif (AARE_HDF5)
|
||||
|
||||
add_library(aare_core STATIC ${SourceFiles})
|
||||
target_include_directories(aare_core PUBLIC
|
||||
@@ -413,20 +448,14 @@ target_link_libraries(
|
||||
|
||||
)
|
||||
|
||||
set_property(TARGET aare_core PROPERTY POSITION_INDEPENDENT_CODE ON)
|
||||
|
||||
if(AARE_TESTS)
|
||||
target_compile_definitions(aare_core PRIVATE AARE_TESTS)
|
||||
endif()
|
||||
if(AARE_VERBOSE)
|
||||
target_compile_definitions(aare_core PUBLIC AARE_VERBOSE)
|
||||
target_compile_definitions(aare_core PUBLIC AARE_LOG_LEVEL=aare::logDEBUG5)
|
||||
else()
|
||||
target_compile_definitions(aare_core PUBLIC AARE_LOG_LEVEL=aare::logERROR)
|
||||
endif()
|
||||
|
||||
if(AARE_CUSTOM_ASSERT)
|
||||
target_compile_definitions(aare_core PUBLIC AARE_CUSTOM_ASSERT)
|
||||
if (AARE_HDF5 AND HDF5_FOUND)
|
||||
add_definitions(-DHDF5_FOUND)
|
||||
target_link_libraries(aare_core PUBLIC
|
||||
${HDF5_LIBRARIES}
|
||||
)
|
||||
target_include_directories(aare_core PUBLIC
|
||||
${HDF5_INCLUDE_DIRS}
|
||||
)
|
||||
endif()
|
||||
|
||||
set_target_properties(aare_core PROPERTIES
|
||||
@@ -434,16 +463,20 @@ set_target_properties(aare_core PROPERTIES
|
||||
PUBLIC_HEADER "${PUBLICHEADERS}"
|
||||
)
|
||||
|
||||
if (AARE_PYTHON_BINDINGS)
|
||||
set_property(TARGET aare_core PROPERTY POSITION_INDEPENDENT_CODE ON)
|
||||
endif()
|
||||
|
||||
if(AARE_TESTS)
|
||||
set(TestSources
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/algorithm.test.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/calibration.test.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/defs.test.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/to_string.test.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/scan_parameters.test.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/decode.test.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/Dtype.test.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/Frame.test.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/DetectorGeometry.test.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/Interpolation.test.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/geo_helpers.test.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/RawMasterFile.test.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/NDArray.test.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/NDView.test.cpp
|
||||
@@ -462,10 +495,22 @@ if(AARE_TESTS)
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/utils/task.test.cpp
|
||||
|
||||
)
|
||||
if(HDF5_FOUND)
|
||||
list (APPEND TestSources
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/Hdf5MasterFile.test.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/Hdf5File.test.cpp
|
||||
)
|
||||
endif()
|
||||
target_sources(tests PRIVATE ${TestSources} )
|
||||
endif()
|
||||
|
||||
|
||||
|
||||
|
||||
###------------------------------------------------------------------------------------------
|
||||
###------------------------------------------------------------------------------------------
|
||||
|
||||
|
||||
if(AARE_MASTER_PROJECT)
|
||||
install(TARGETS aare_core aare_compiler_flags
|
||||
EXPORT "${TARGETS_EXPORT_NAME}"
|
||||
@@ -475,6 +520,7 @@ if(AARE_MASTER_PROJECT)
|
||||
)
|
||||
endif()
|
||||
|
||||
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
|
||||
set(CMAKE_INSTALL_RPATH $ORIGIN)
|
||||
set(CMAKE_BUILD_WITH_INSTALL_RPATH FALSE)
|
||||
|
||||
|
||||
LICENSE (373 changes)
@@ -1,373 +0,0 @@
(removed: the complete verbatim text of the Mozilla Public License Version 2.0, 373 lines)
@@ -1,14 +1,6 @@
# aare
Data analysis library for PSI hybrid detectors

## Documentation

Detailed documentation including installation can be found in [Documentation](https://slsdetectorgroup.github.io/aare/)

## License

This project is licensed under the MPL-2.0 license.
See the LICENSE file or https://www.mozilla.org/en-US/MPL/ for details.

## Build and install
RELEASE.md (95 changes)
@@ -1,76 +1,14 @@
# Release notes

## head

### New Features:

- Expanding 24 to 32 bit data
- Decoding digital data from Mythen 302


### 2025.11.21

### New Features:

- Added SPDX-License-Identifier: MPL-2.0 to source files
- Calculate Eta3 supports all cluster types
- interpolation class supports using cross eta3x3 and eta3x3 on full cluster as well as eta2x2 on full cluster
- interpolation class has option to calculate the rosenblatt transform
- reduction operations to reduce Clusters of general size to 2x2 or 3x3 clusters
- `max_sum_2x2` including index of subcluster with highest energy is now available from Python API
- interpolation supports bilinear interpolation of eta values for more fine grained transformed uniform coordinates
- Interpolation is documented

- Added tell to ClusterFile. Returns position in bytes for debugging

### Resolved Features:

- calculate_eta coincides with theoretical definition

### Bugfixes:

- eta calculation assumes correct photon center
- eta transformation to uniform coordinates starts at 0
- Bug in interpolation
- File supports reading new master json file format (multiple ROI's not supported yet)


### API Changes:

- ClusterFinder for 2x2 Cluster disabled
- eta stores corner as enum class cTopLeft, cTopRight, BottomLeft, cBottomRight indicating 2x2 subcluster with largest energy relative to cluster center
- max_sum_2x2 returns corner as index

### 2025.8.22

Features:

- Apply calibration works in G0 if passes a 2D calibration and pedestal
- count pixels that switch
- calculate pedestal (also g0 version)
- NDArray::view() needs an lvalue to reduce issues with the view outliving the array


Bugfixes:

- Now using glibc 2.17 in conda builds (was using the host)
- Fixed shifted pixels in clusters close to the edge of a frame

### 2025.7.18
### head

Features:

- Cluster finder now works with 5x5, 7x7 and 9x9 clusters
- Added ClusterVector::empty() member
- Added apply_calibration function for Jungfrau data

Bugfixes:
- Fixed reading RawFiles with ROI fully excluding some sub files.
- Decoding of MH02 files placed the pixels in wrong position
- Removed unused file: ClusterFile.cpp


### 2025.5.22
### 2025.05.22

Features:

@@ -80,34 +18,5 @@ Bugfixes:

- Fixed crash when opening raw files with large number of data files

## Download, Documentation & Support

### Download

The Source Code:
https://github.com/slsdetectorgroup/aare


### Documentation

Documentation including installation details:
https://github.com/slsdetectorgroup/aare


### Support

erik.frojdh@psi.ch \
alice.mazzoleni@psi.ch \
dhanya.thattil@psi.ch
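The `max_sum_2x2` feature noted above could look roughly like this from Python; an illustrative sketch only, where the method placement and return shape are assumptions based on the release notes, not a documented API.

```python
import aare

cluster = ...  # e.g. an aare._aare.Cluster3x3i instance taken from a ClusterVector

# Per the notes above: the sum of the brightest 2x2 subcluster together with
# the corner index of that subcluster (assumed return values, for illustration only)
total, corner = cluster.max_sum_2x2()
```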
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: MPL-2.0

include(FetchContent)

@@ -16,7 +15,7 @@ FetchContent_MakeAvailable(benchmark)

add_executable(benchmarks)

target_sources(benchmarks PRIVATE ndarray_benchmark.cpp calculateeta_benchmark.cpp reduce_benchmark.cpp)
target_sources(benchmarks PRIVATE ndarray_benchmark.cpp calculateeta_benchmark.cpp)

# Link Google Benchmark and other necessary libraries
target_link_libraries(benchmarks PRIVATE benchmark::benchmark aare_core aare_compiler_flags)
@@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
#include "aare/CalculateEta.hpp"
|
||||
#include "aare/ClusterFile.hpp"
|
||||
#include <benchmark/benchmark.h>
|
||||
@@ -9,7 +8,6 @@ class ClusterFixture : public benchmark::Fixture {
|
||||
public:
|
||||
Cluster<int, 2, 2> cluster_2x2{};
|
||||
Cluster<int, 3, 3> cluster_3x3{};
|
||||
Cluster<int, 4, 4> cluster_4x4{};
|
||||
|
||||
private:
|
||||
using benchmark::Fixture::SetUp;
|
||||
@@ -28,13 +26,6 @@ class ClusterFixture : public benchmark::Fixture {
|
||||
|
||||
cluster_3x3.x = 0;
|
||||
cluster_3x3.y = 0;
|
||||
|
||||
int temp_data3[16] = {1, 2, 3, 4, 5, 6, 7, 8,
|
||||
9, 10, 11, 12, 13, 14, 15, 16};
|
||||
std::copy(std::begin(temp_data3), std::end(temp_data3),
|
||||
std::begin(cluster_4x4.data));
|
||||
cluster_4x4.x = 0;
|
||||
cluster_4x4.y = 0;
|
||||
}
|
||||
|
||||
// void TearDown(::benchmark::State& state) {
|
||||
@@ -76,29 +67,4 @@ BENCHMARK_F(ClusterFixture, CalculateGeneralEtaFor3x3Cluster)
|
||||
benchmark::DoNotOptimize(eta);
|
||||
}
|
||||
}
|
||||
|
||||
BENCHMARK_F(ClusterFixture, Calculate2x2Etawithreduction)
|
||||
(benchmark::State &st) {
|
||||
for (auto _ : st) {
|
||||
// This code gets timed
|
||||
auto reduced_cluster = reduce_to_2x2(cluster_4x4);
|
||||
Eta2 eta = calculate_eta2(reduced_cluster);
|
||||
auto reduced_cluster_from_3x3 = reduce_to_2x2(cluster_3x3);
|
||||
Eta2 eta2 = calculate_eta2(reduced_cluster_from_3x3);
|
||||
benchmark::DoNotOptimize(eta);
|
||||
benchmark::DoNotOptimize(eta2);
|
||||
}
|
||||
}
|
||||
|
||||
BENCHMARK_F(ClusterFixture, Calculate2x2Etawithoutreduction)
|
||||
(benchmark::State &st) {
|
||||
for (auto _ : st) {
|
||||
// This code gets timed
|
||||
Eta2 eta = calculate_eta2(cluster_4x4);
|
||||
Eta2 eta2 = calculate_eta2(cluster_3x3);
|
||||
benchmark::DoNotOptimize(eta);
|
||||
benchmark::DoNotOptimize(eta2);
|
||||
}
|
||||
}
|
||||
|
||||
// BENCHMARK_MAIN();
|
||||
@@ -1,4 +1,3 @@
// SPDX-License-Identifier: MPL-2.0
#include "aare/NDArray.hpp"
#include <benchmark/benchmark.h>
@@ -1,169 +0,0 @@
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
#include "aare/Cluster.hpp"
|
||||
#include <benchmark/benchmark.h>
|
||||
|
||||
using namespace aare;
|
||||
|
||||
class ClustersForReduceFixture : public benchmark::Fixture {
|
||||
public:
|
||||
Cluster<int, 5, 5> cluster_5x5{};
|
||||
Cluster<int, 3, 3> cluster_3x3{};
|
||||
|
||||
private:
|
||||
using benchmark::Fixture::SetUp;
|
||||
|
||||
void SetUp([[maybe_unused]] const benchmark::State &state) override {
|
||||
int temp_data[25] = {1, 1, 1, 1, 1, 1, 1, 2, 1, 1,
|
||||
1, 2, 3, 1, 2, 1, 1, 1, 1, 2};
|
||||
std::copy(std::begin(temp_data), std::end(temp_data),
|
||||
std::begin(cluster_5x5.data));
|
||||
|
||||
cluster_5x5.x = 5;
|
||||
cluster_5x5.y = 5;
|
||||
|
||||
int temp_data2[9] = {1, 1, 1, 2, 3, 1, 2, 2, 1};
|
||||
std::copy(std::begin(temp_data2), std::end(temp_data2),
|
||||
std::begin(cluster_3x3.data));
|
||||
|
||||
cluster_3x3.x = 5;
|
||||
cluster_3x3.y = 5;
|
||||
}
|
||||
|
||||
// void TearDown(::benchmark::State& state) {
|
||||
// }
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
Cluster<T, 3, 3, uint16_t> reduce_to_3x3(const Cluster<T, 5, 5, uint16_t> &c) {
|
||||
Cluster<T, 3, 3, uint16_t> result;
|
||||
|
||||
// Write out the sums in the hope that the compiler can optimize this
|
||||
std::array<T, 9> sum_3x3_subclusters;
|
||||
|
||||
// Write out the sums in the hope that the compiler can optimize this
|
||||
sum_3x3_subclusters[0] = c.data[0] + c.data[1] + c.data[2] + c.data[5] +
|
||||
c.data[6] + c.data[7] + c.data[10] + c.data[11] +
|
||||
c.data[12];
|
||||
sum_3x3_subclusters[1] = c.data[1] + c.data[2] + c.data[3] + c.data[6] +
|
||||
c.data[7] + c.data[8] + c.data[11] + c.data[12] +
|
||||
c.data[13];
|
||||
sum_3x3_subclusters[2] = c.data[2] + c.data[3] + c.data[4] + c.data[7] +
|
||||
c.data[8] + c.data[9] + c.data[12] + c.data[13] +
|
||||
c.data[14];
|
||||
sum_3x3_subclusters[3] = c.data[5] + c.data[6] + c.data[7] + c.data[10] +
|
||||
c.data[11] + c.data[12] + c.data[15] + c.data[16] +
|
||||
c.data[17];
|
||||
sum_3x3_subclusters[4] = c.data[6] + c.data[7] + c.data[8] + c.data[11] +
|
||||
c.data[12] + c.data[13] + c.data[16] + c.data[17] +
|
||||
c.data[18];
|
||||
sum_3x3_subclusters[5] = c.data[7] + c.data[8] + c.data[9] + c.data[12] +
|
||||
c.data[13] + c.data[14] + c.data[17] + c.data[18] +
|
||||
c.data[19];
|
||||
sum_3x3_subclusters[6] = c.data[10] + c.data[11] + c.data[12] + c.data[15] +
|
||||
c.data[16] + c.data[17] + c.data[20] + c.data[21] +
|
||||
c.data[22];
|
||||
sum_3x3_subclusters[7] = c.data[11] + c.data[12] + c.data[13] + c.data[16] +
|
||||
c.data[17] + c.data[18] + c.data[21] + c.data[22] +
|
||||
c.data[23];
|
||||
sum_3x3_subclusters[8] = c.data[12] + c.data[13] + c.data[14] + c.data[17] +
|
||||
c.data[18] + c.data[19] + c.data[22] + c.data[23] +
|
||||
c.data[24];
|
||||
|
||||
auto index = std::max_element(sum_3x3_subclusters.begin(),
|
||||
sum_3x3_subclusters.end()) -
|
||||
sum_3x3_subclusters.begin();
|
||||
|
||||
switch (index) {
|
||||
case 0:
|
||||
result.x = c.x - 1;
|
||||
result.y = c.y + 1;
|
||||
result.data = {c.data[0], c.data[1], c.data[2], c.data[5], c.data[6],
|
||||
c.data[7], c.data[10], c.data[11], c.data[12]};
|
||||
break;
|
||||
case 1:
|
||||
result.x = c.x;
|
||||
result.y = c.y + 1;
|
||||
result.data = {c.data[1], c.data[2], c.data[3], c.data[6], c.data[7],
|
||||
c.data[8], c.data[11], c.data[12], c.data[13]};
|
||||
break;
|
||||
case 2:
|
||||
result.x = c.x + 1;
|
||||
result.y = c.y + 1;
|
||||
result.data = {c.data[2], c.data[3], c.data[4], c.data[7], c.data[8],
|
||||
c.data[9], c.data[12], c.data[13], c.data[14]};
|
||||
break;
|
||||
case 3:
|
||||
result.x = c.x - 1;
|
||||
result.y = c.y;
|
||||
result.data = {c.data[5], c.data[6], c.data[7],
|
||||
c.data[10], c.data[11], c.data[12],
|
||||
c.data[15], c.data[16], c.data[17]};
|
||||
break;
|
||||
case 4:
|
||||
result.x = c.x + 1;
|
||||
result.y = c.y;
|
||||
result.data = {c.data[6], c.data[7], c.data[8],
|
||||
c.data[11], c.data[12], c.data[13],
|
||||
c.data[16], c.data[17], c.data[18]};
|
||||
break;
|
||||
case 5:
|
||||
result.x = c.x + 1;
|
||||
result.y = c.y;
|
||||
result.data = {c.data[7], c.data[8], c.data[9],
|
||||
c.data[12], c.data[13], c.data[14],
|
||||
c.data[17], c.data[18], c.data[19]};
|
||||
break;
|
||||
case 6:
|
||||
result.x = c.x + 1;
|
||||
result.y = c.y - 1;
|
||||
result.data = {c.data[10], c.data[11], c.data[12],
|
||||
c.data[15], c.data[16], c.data[17],
|
||||
c.data[20], c.data[21], c.data[22]};
|
||||
break;
|
||||
case 7:
|
||||
result.x = c.x + 1;
|
||||
result.y = c.y - 1;
|
||||
result.data = {c.data[11], c.data[12], c.data[13],
|
||||
c.data[16], c.data[17], c.data[18],
|
||||
c.data[21], c.data[22], c.data[23]};
|
||||
break;
|
||||
case 8:
|
||||
result.x = c.x + 1;
|
||||
result.y = c.y - 1;
|
||||
result.data = {c.data[12], c.data[13], c.data[14],
|
||||
c.data[17], c.data[18], c.data[19],
|
||||
c.data[22], c.data[23], c.data[24]};
|
||||
break;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
BENCHMARK_F(ClustersForReduceFixture, Reduce2x2)(benchmark::State &st) {
|
||||
for (auto _ : st) {
|
||||
// This code gets timed
|
||||
benchmark::DoNotOptimize(reduce_to_2x2<int, 3, 3, uint16_t>(
|
||||
cluster_3x3)); // make sure compiler evaluates the expression
|
||||
}
|
||||
}
|
||||
|
||||
BENCHMARK_F(ClustersForReduceFixture, SpecificReduce2x2)(benchmark::State &st) {
|
||||
for (auto _ : st) {
|
||||
// This code gets timed
|
||||
benchmark::DoNotOptimize(reduce_to_2x2<int>(cluster_3x3));
|
||||
}
|
||||
}
|
||||
|
||||
BENCHMARK_F(ClustersForReduceFixture, Reduce3x3)(benchmark::State &st) {
|
||||
for (auto _ : st) {
|
||||
// This code gets timed
|
||||
benchmark::DoNotOptimize(
|
||||
reduce_to_3x3<int, 5, 5, uint16_t>(cluster_5x5));
|
||||
}
|
||||
}
|
||||
|
||||
BENCHMARK_F(ClustersForReduceFixture, SpecificReduce3x3)(benchmark::State &st) {
|
||||
for (auto _ : st) {
|
||||
// This code gets timed
|
||||
benchmark::DoNotOptimize(reduce_to_3x3<int>(cluster_5x5));
|
||||
}
|
||||
}
|
||||
@@ -3,14 +3,3 @@ python:
  - 3.12
  - 3.13

c_compiler:
  - gcc # [linux]

c_stdlib:
  - sysroot # [linux]

cxx_compiler:
  - gxx # [linux]

c_stdlib_version: # [linux]
  - 2.17 # [linux]
@@ -16,8 +16,6 @@ build:

requirements:
  build:
    - {{ compiler('c') }}
    - {{ stdlib("c") }}
    - {{ compiler('cxx') }}
    - cmake
    - ninja

@@ -49,5 +47,4 @@ test:
    - python -m pytest python/tests

about:
  license: SPDX-License-Identifier MPL-2.0
  summary: Data analysis library for hybrid pixel detectors from PSI
@@ -1,4 +1,3 @@
|
||||
# SPDX-License-Identifier: MPL-2.0
|
||||
find_package(Doxygen REQUIRED)
|
||||
find_package(Sphinx REQUIRED)
|
||||
|
||||
@@ -12,19 +11,14 @@ set(SPHINX_SOURCE ${CMAKE_CURRENT_SOURCE_DIR}/src)
|
||||
set(SPHINX_BUILD ${CMAKE_CURRENT_BINARY_DIR})
|
||||
|
||||
|
||||
file(GLOB_RECURSE SPHINX_SOURCE_FILES
|
||||
CONFIGURE_DEPENDS
|
||||
RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}/src"
|
||||
"${CMAKE_CURRENT_SOURCE_DIR}/src/*.rst"
|
||||
)
|
||||
file(GLOB SPHINX_SOURCE_FILES CONFIGURE_DEPENDS "src/*.rst")
|
||||
|
||||
foreach(relpath IN LISTS SPHINX_SOURCE_FILES)
|
||||
set(src "${CMAKE_CURRENT_SOURCE_DIR}/src/${relpath}")
|
||||
set(dst "${SPHINX_BUILD}/src/${relpath}")
|
||||
|
||||
message(STATUS "Copying ${src} to ${dst}")
|
||||
configure_file("${src}" "${dst}" COPYONLY)
|
||||
endforeach()
|
||||
foreach(filename ${SPHINX_SOURCE_FILES})
|
||||
get_filename_component(fname ${filename} NAME)
|
||||
message(STATUS "Copying ${filename} to ${SPHINX_BUILD}/src/${fname}")
|
||||
configure_file(${filename} "${SPHINX_BUILD}/src/${fname}")
|
||||
endforeach(filename ${SPHINX_SOURCE_FILES})
|
||||
|
||||
configure_file(
|
||||
"${CMAKE_CURRENT_SOURCE_DIR}/conf.py.in"
|
||||
@@ -32,8 +26,6 @@ configure_file(
|
||||
@ONLY
|
||||
)
|
||||
|
||||
file(COPY "${CMAKE_CURRENT_SOURCE_DIR}/figures"
|
||||
DESTINATION "${SPHINX_BUILD}")
|
||||
|
||||
configure_file(
|
||||
"${CMAKE_CURRENT_SOURCE_DIR}/static/extra.css"
|
||||
@@ -52,3 +44,12 @@ add_custom_target(
|
||||
COMMENT "Generating documentation with Sphinx"
|
||||
)
|
||||
|
||||
add_custom_target(
|
||||
rst
|
||||
COMMAND ${SPHINX_EXECUTABLE} -a -b html
|
||||
-Dbreathe_projects.aare=${CMAKE_CURRENT_BINARY_DIR}/xml
|
||||
-c "${SPHINX_BUILD}"
|
||||
${SPHINX_BUILD}/src
|
||||
${SPHINX_BUILD}/html
|
||||
COMMENT "Generating documentation with Sphinx"
|
||||
)
|
||||
@@ -886,7 +886,7 @@ EXCLUDE_SYMLINKS = NO
# Note that the wildcards are matched against the file with absolute path, so to
# exclude all test directories for example use the pattern */test/*

EXCLUDE_PATTERNS = *build* */docs/* */tests/* *.test.cpp* */python/* */manual */slsDetectorServers/* */libs/* */integrationTests *README* *_deps* *TobiSchluter*
EXCLUDE_PATTERNS = */docs/* */tests/* */python/* */manual */slsDetectorServers/* */libs/* */integrationTests *README* */slsDetectorGui/* */ctbGui/* */slsDetectorCalibration/* *TobiSchluter*

# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
# (namespaces, classes, functions, etc.) that should be excluded from the
Four binary figure files removed (images not shown): 6.7 KiB, 10 KiB, 13 KiB, and 9.5 KiB.
@@ -1,15 +0,0 @@
Cluster
========

.. doxygenstruct:: aare::Cluster
   :members:
   :undoc-members:
   :private-members:


**Free Functions:**

.. doxygenfunction:: aare::reduce_to_3x3(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>&)

.. doxygenfunction:: aare::reduce_to_2x2(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>&)
@@ -4,5 +4,4 @@ ClusterFile
.. doxygenclass:: aare::ClusterFile
   :members:
   :undoc-members:
   :private-members:

   :private-members:
@@ -3,20 +3,4 @@ ClusterVector

.. doxygenclass:: aare::ClusterVector
   :members:
   :undoc-members:
   :private-members:


.. doxygenclass:: aare::ClusterVector< Cluster< T, ClusterSizeX, ClusterSizeY, CoordType > >
   :members:
   :undoc-members:
   :private-members:


**Free Functions:**

.. doxygenfunction:: aare::reduce_to_3x3(const ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>>&)

.. doxygenfunction:: aare::reduce_to_2x2(const ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>>&)

   :undoc-members:
docs/src/Hdf5File.rst (new file, 8 lines)
@@ -0,0 +1,8 @@
Hdf5File
===============


.. doxygenclass:: aare::Hdf5File
   :members:
   :undoc-members:
   :private-members:
docs/src/Hdf5MasterFile.rst (new file, 14 lines)
@@ -0,0 +1,14 @@
Hdf5MasterFile
===============


.. doxygenclass:: aare::Hdf5MasterFile
   :members:
   :undoc-members:
   :private-members:


.. doxygenclass:: aare::Hdf5FileNameComponents
   :members:
   :undoc-members:
   :private-members:
@@ -1,102 +0,0 @@
|
||||
Interpolation
|
||||
==============
|
||||
|
||||
Interpolation class for :math:`\eta` Interpolation.
|
||||
|
||||
The Interpolator class provides methods to interpolate the positions of photons based on their :math:`\eta` values.
|
||||
|
||||
.. warning::
|
||||
The interpolation might lead to erroneous photon positions for clusters at the boarders of a frame. Make sure to filter out such cases.
|
||||
|
||||
:math:`\eta`-Functions:
|
||||
---------------------------
|
||||
|
||||
.. doxygenstruct:: aare::Eta2
|
||||
:members:
|
||||
:undoc-members:
|
||||
:private-members:
|
||||
|
||||
.. note::
|
||||
The corner value ``c`` is only relevant when one uses ``calculate_eta_2`` or ``calculate_full_eta2``. Otherwise its default value is ``cTopLeft``.
|
||||
|
||||
Supported are the following :math:`\eta`-functions:
|
||||
|
||||
|
||||
.. image:: ../figures/Eta2x2.png
|
||||
:target: ../figures/Eta2x2.png
|
||||
:width: 650px
|
||||
:align: center
|
||||
:alt: Eta2x2
|
||||
|
||||
.. math::
|
||||
\begin{equation*}
|
||||
{\color{blue}{\eta_x}} = \frac{Q_{1,1}}{Q_{1,0} + Q_{1,1}} \quad \quad
|
||||
{\color{green}{\eta_y}} = \frac{Q_{1,1}}{Q_{0,1} + Q_{1,1}}
|
||||
\end{equation*}
|
||||
|
||||
|
||||
.. doxygenfunction:: aare::calculate_eta2(const ClusterVector<ClusterType>&)
|
||||
|
||||
.. doxygenfunction:: aare::calculate_eta2(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>&)
|
||||
|
||||
.. image:: ../figures/Eta2x2Full.png
|
||||
:target: ../figures/Eta2x2Full.png
|
||||
:width: 650px
|
||||
:align: center
|
||||
:alt: Eta2x2 Full
|
||||
|
||||
.. math::
|
||||
\begin{equation*}
|
||||
{\color{blue}{\eta_x}} = \frac{Q_{0,1} + Q_{1,1}}{\sum_i^{2}\sum_j^{2}Q_{i,j}} \quad \quad
|
||||
{\textcolor{green}{\eta_y}} = \frac{Q_{1,0} + Q_{1,1}}{\sum_i^{2}\sum_j^{2}Q_{i,j}}
|
||||
\end{equation*}
|
||||
|
||||
|
||||
.. doxygenfunction:: aare::calculate_full_eta2(const ClusterVector<ClusterType>&)
|
||||
|
||||
.. doxygenfunction:: aare::calculate_full_eta2(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>&)
|
||||
|
||||
.. image:: ../figures/Eta3x3.png
|
||||
:target: ../figures/Eta3x3.png
|
||||
:width: 650px
|
||||
:align: center
|
||||
:alt: Eta3x3
|
||||
|
||||
.. math::
|
||||
\begin{equation*}
|
||||
{\color{blue}{\eta_x}} = \frac{\sum_{i}^{3} Q_{i,2} - \sum_{i}^{3} Q_{i,0}}{\sum_{i}^{3}\sum_{j}^{3} Q_{i,j}} \quad \quad
|
||||
{\color{green}{\eta_y}} = \frac{\sum_{j}^{3} Q_{2,j} - \sum_{j}^{3} Q_{0,j}}{\sum_{i}^{3}\sum_{j}^{3} Q_{i,j}}
|
||||
\end{equation*}
|
||||
|
||||
.. doxygenfunction:: aare::calculate_eta3(const ClusterVector<Cluster<T, 3,3, CoordType>>&)
|
||||
|
||||
.. doxygenfunction:: aare::calculate_eta3(const Cluster<T, 3, 3, CoordType>&)
|
||||
|
||||
.. image:: ../figures/Eta3x3Cross.png
|
||||
:target: ../figures/Eta3x3Cross.png
|
||||
:width: 650px
|
||||
:align: center
|
||||
:alt: Cross Eta3x3
|
||||
|
||||
.. math::
|
||||
\begin{equation*}
|
||||
{\color{blue}{\eta_x}} = \frac{Q_{1,2} - Q_{1,0}}{Q_{1,0} + Q_{1,1} + Q_{1,0}} \quad \quad
|
||||
{\color{green}{\eta_y}} = \frac{Q_{0,2} - Q_{0,1}}{Q_{0,1} + Q_{1,1} + Q_{1,2}}
|
||||
\end{equation*}
|
||||
|
||||
.. doxygenfunction:: aare::calculate_cross_eta3(const ClusterVector<Cluster<T, 3,3, CoordType>>&)
|
||||
|
||||
.. doxygenfunction:: aare::calculate_cross_eta3(const Cluster<T, 3, 3, CoordType>&)
|
||||
|
||||
Interpolation class:
|
||||
---------------------
|
||||
|
||||
.. Warning::
|
||||
Make sure to use the same :math:`\eta`-function during interpolation as given by the joint :math:`\eta`-distribution passed to the constructor.
|
||||
|
||||
.. doxygenclass:: aare::Interpolator
|
||||
:members:
|
||||
:undoc-members:
|
||||
:private-members:
|
||||
|
||||
|
||||
@@ -2,7 +2,7 @@
Tests
****************

We test the code both from C++ and Python. By default only tests that does not require additional data are run.
We test the code both from the C++ and Python API. By default only tests that does not require image data is run.

C++
~~~~~~~~~~~~~~~~~~
@@ -15,7 +15,7 @@ C++
    make -j 4

    export AARE_TEST_DATA=/path/to/test/data
    ./run_test [.with-data] #or using ctest, [.with-data] is the option to include tests needing data
    ./run_test [.files] #or using ctest, [.files] is the option to include tests needing data


@@ -25,7 +25,7 @@ Python

.. code-block:: bash

    #From the root dir of the library
    python -m pytest python/tests --with-data # passing --with-data will run the tests needing data
    python -m pytest python/tests --files # passing --files will run the tests needing data


@@ -35,7 +35,7 @@ Getting the test data

.. attention ::

    The tests needing the test data are not run by default. To make the data available, you need to set the environment variable
    AARE_TEST_DATA to the path of the test data directory. Then pass either [.with-data] for the C++ tests or --files for Python
    AARE_TEST_DATA to the path of the test data directory. Then pass either [.files] for the C++ tests or --files for Python

The image files needed for the test are large and are not included in the repository. They are stored
using GIT LFS in a separate repository. To get the test data, you need to clone the repository.
@@ -37,7 +37,7 @@ unfamiliar steps.


Checklists for deployment
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~

**Feature:**
@@ -22,14 +22,20 @@ AARE
|
||||
|
||||
.. toctree::
|
||||
:caption: Python API
|
||||
:maxdepth: 3
|
||||
:hidden:
|
||||
|
||||
pycalibration
|
||||
python/cluster/index
|
||||
python/file/index
|
||||
pyFit
|
||||
:maxdepth: 1
|
||||
|
||||
pyFile
|
||||
pyCtbRawFile
|
||||
pyClusterFile
|
||||
pyClusterVector
|
||||
pyJungfrauDataFile
|
||||
pyRawFile
|
||||
pyRawMasterFile
|
||||
pyHdf5File
|
||||
pyHdf5MasterFile
|
||||
pyVarClusterFinder
|
||||
|
||||
pyFit
|
||||
|
||||
|
||||
.. toctree::
|
||||
@@ -42,17 +48,17 @@ AARE
|
||||
Frame
|
||||
File
|
||||
Dtype
|
||||
Cluster
|
||||
ClusterFinder
|
||||
ClusterFinderMT
|
||||
ClusterFile
|
||||
ClusterVector
|
||||
Interpolation
|
||||
JungfrauDataFile
|
||||
Pedestal
|
||||
RawFile
|
||||
RawSubFile
|
||||
RawMasterFile
|
||||
Hdf5File
|
||||
Hdf5MasterFile
|
||||
VarClusterFinder
|
||||
|
||||
|
||||
|
||||
docs/src/pyClusterFile.rst (new file, 11 lines)
@@ -0,0 +1,11 @@

ClusterFile
============

.. py:currentmodule:: aare

.. autoclass:: ClusterFile
   :members:
   :undoc-members:
   :show-inheritance:
   :inherited-members:
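The release notes in this compare mention that ``ClusterFile`` gained a ``tell()`` method returning the position in bytes. A minimal sketch; the file path and the ``read_clusters`` call are hypothetical placeholders, only ``tell()`` is taken from the notes.

.. code-block:: python

    import aare

    cf = aare.ClusterFile("path/to/clusters.clust")  # path is a placeholder
    clusters = cf.read_clusters(1000)  # hypothetical read call, for illustration only
    print(cf.tell())  # position in bytes, useful for debugging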
@@ -1,13 +1,9 @@
.. _py_clustervector:

ClusterVector
================

The ClusterVector, holds clusters from the ClusterFinder. Since it is templated
in C++ we use a suffix indicating the type of cluster it holds. The suffix follows
the same pattern as for ClusterFile i.e. ``ClusterVector_Cluster3x3i``
for a vector holding 3x3 integer clusters.

in C++ we use a suffix indicating the data type in python. The suffix is
``_i`` for integer, ``_f`` for float, and ``_d`` for double.

At the moment the functionality from python is limited and it is not supported
to push_back clusters to the vector. The intended use case is to pass it to
@@ -30,29 +26,8 @@ C++ functions that support the ClusterVector or to view it as a numpy array.

.. py:currentmodule:: aare

.. autoclass:: ClusterVector
   :members:
   :undoc-members:
   :inherited-members:

Below is the API of the ClusterVector_Cluster3x3i but all variants share the same API.

.. autoclass:: aare._aare.ClusterVector_Cluster3x3i
   :special-members: __init__
.. autoclass:: ClusterVector_i
   :members:
   :undoc-members:
   :show-inheritance:
   :inherited-members:


**Free Functions:**

.. autofunction:: reduce_to_3x3
   :noindex:

   Reduce a single Cluster to 3x3 by taking the 3x3 subcluster with highest photon energy.

.. autofunction:: reduce_to_2x2
   :noindex:

   Reduce a single Cluster to 2x2 by taking the 2x2 subcluster with highest photon energy.
   :inherited-members:
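A minimal sketch of the intended use described above, viewing the vector as a numpy array and applying the free reduction functions; how the vector is produced and the exact numpy view mechanism are assumptions.

.. code-block:: python

    import numpy as np
    import aare

    cv = ...  # a ClusterVector, e.g. ClusterVector_Cluster3x3i from a ClusterFinder (not shown)

    arr = np.array(cv)              # the clusters as a numpy array (exact view mechanism may differ)
    cv2x2 = aare.reduce_to_2x2(cv)  # keep, per cluster, the 2x2 subcluster with the highest photon energy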
docs/src/pyCtbRawFile.rst (new file, 11 lines)
@@ -0,0 +1,11 @@

CtbRawFile
============

.. py:currentmodule:: aare

.. autoclass:: CtbRawFile
   :members:
   :undoc-members:
   :show-inheritance:
   :inherited-members:
docs/src/pyHdf5File.rst (new file, 10 lines)
@@ -0,0 +1,10 @@
Hdf5File
===================

.. py:currentmodule:: aare

.. autoclass:: Hdf5File
   :members:
   :undoc-members:
   :show-inheritance:
   :inherited-members:
docs/src/pyHdf5MasterFile.rst (new file, 10 lines)
@@ -0,0 +1,10 @@
Hdf5MasterFile
===================

.. py:currentmodule:: aare

.. autoclass:: Hdf5MasterFile
   :members:
   :undoc-members:
   :show-inheritance:
   :inherited-members:
@@ -1,40 +0,0 @@

Calibration
==============

Functions for applying calibration to data.

.. code-block:: python

    import aare

    # Load calibration data for a single JF module (512x1024 pixels)
    calibration = aare.load_calibration('path/to/calibration/file.bin')

    raw_data = ... # Load your raw data here
    pedestal = ... # Load your pedestal data here

    # Apply calibration to raw data to convert from raw ADC values to keV
    data = aare.apply_calibration(raw_data, pd=pedestal, cal=calibration)

    # If you pass a 2D pedestal and calibration only G0 will be used for the conversion
    # Pixels that switched to G1 or G2 will be set to 0
    data = aare.apply_calibration(raw_data, pd=pedestal[0], cal=calibration[0])


.. py:currentmodule:: aare

.. autofunction:: apply_calibration

.. autofunction:: load_calibration

.. autofunction:: calculate_pedestal

.. autofunction:: calculate_pedestal_float

.. autofunction:: calculate_pedestal_g0

.. autofunction:: calculate_pedestal_g0_float

.. autofunction:: count_switching_pixels
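A sketch of how the pedestal helpers listed above might be combined with
``apply_calibration``; the argument of ``calculate_pedestal`` (a stack of raw
dark frames) is an assumption and not taken from this documentation:

.. code-block:: python

    import aare

    # dark_frames: (n_frames, 512, 1024) raw data taken without beam (hypothetical name)
    pedestal = aare.calculate_pedestal(dark_frames)      # assumed signature: per-gain pedestal
    calibration = aare.load_calibration('path/to/calibration/file.bin')

    data = aare.apply_calibration(raw_data, pd=pedestal, cal=calibration)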
@@ -1,11 +0,0 @@
Cluster & Interpolation
==========================

.. toctree::
    :caption: Cluster & Interpolation
    :maxdepth: 1

    pyCluster
    pyClusterVector
    pyInterpolation
    pyVarClusterFinder
@@ -1,23 +0,0 @@
Cluster
========

.. py:currentmodule:: aare

.. autoclass:: Cluster
    :members:
    :undoc-members:
    :inherited-members:


Below is the API of a cluster of size :math:`3\times 3` and type ``int`` but all variants share the same API.

.. autoclass:: aare._aare.Cluster3x3i
    :special-members: __init__
    :members:
    :undoc-members:
    :show-inheritance:
    :inherited-members:

.. note::
    More functions can be found in the :ref:`ClusterVector <py_clustervector>` documentation. Generally apply functions directly on the ``ClusterVector`` instead of looping over individual clusters.
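A hand-built cluster can be useful for quick tests. The constructor argument
order ``(x, y, data)`` below is an assumption based on the fields of the C++
``Cluster`` struct, not something documented on this page:

.. code-block:: python

    import numpy as np
    from aare import _aare

    data = np.array([1, 2, 1, 2, 10, 2, 1, 2, 1], dtype=np.int32)  # 3x3 pixel values, row major
    cl = _aare.Cluster3x3i(5, 7, data)  # hypothetical call: x=5, y=7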
@@ -1,94 +0,0 @@
Interpolation
==============

Interpolation class for :math:`\eta` Interpolation.

The Interpolator class provides methods to interpolate the positions of photons based on their :math:`\eta` values.

.. warning::
    The interpolation might lead to erroneous photon positions for clusters at the borders of a frame. Make sure to filter out such cases.

Below is an example of the Eta class of type ``double``. Supported are ``Etaf`` of type ``float`` and ``Etai`` of type ``int``.

.. autoclass:: aare._aare.Etad
    :members:
    :private-members:

.. note::
    The corner value ``c`` is only relevant when one uses ``calculate_eta2`` or ``calculate_full_eta2``. Otherwise its default value is ``cTopLeft``.

Supported are the following :math:`\eta`-functions:

.. py:currentmodule:: aare

.. image:: ../../../figures/Eta2x2.png
    :target: ../../../figures/Eta2x2.png
    :width: 650px
    :align: center
    :alt: Eta2x2

.. math::
    \begin{equation*}
    {\color{blue}{\eta_x}} = \frac{Q_{1,1}}{Q_{1,0} + Q_{1,1}} \quad \quad
    {\color{green}{\eta_y}} = \frac{Q_{1,1}}{Q_{0,1} + Q_{1,1}}
    \end{equation*}

.. autofunction:: calculate_eta2
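Written out directly in numpy the definition above reads as follows (this is
only an illustration of the formula, not the library implementation):

.. code-block:: python

    import numpy as np

    # Q[i, j]: pixel charges of the 2x2 subcluster, matching Q_{i,j} in the formula above
    Q = np.array([[3.0, 7.0],
                  [2.0, 12.0]])

    eta_x = Q[1, 1] / (Q[1, 0] + Q[1, 1])  # = 12 / (2 + 12)
    eta_y = Q[1, 1] / (Q[0, 1] + Q[1, 1])  # = 12 / (7 + 12)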

.. image:: ../../../figures/Eta2x2Full.png
    :target: ../../../figures/Eta2x2Full.png
    :width: 650px
    :align: center
    :alt: Eta2x2 Full

.. math::
    \begin{equation*}
    {\color{blue}{\eta_x}} = \frac{Q_{0,1} + Q_{1,1}}{\sum_i^{2}\sum_j^{2}Q_{i,j}} \quad \quad
    {\color{green}{\eta_y}} = \frac{Q_{1,0} + Q_{1,1}}{\sum_i^{2}\sum_j^{2}Q_{i,j}}
    \end{equation*}

.. autofunction:: calculate_full_eta2

.. image:: ../../../figures/Eta3x3.png
    :target: ../../../figures/Eta3x3.png
    :width: 650px
    :align: center
    :alt: Eta3x3

.. math::
    \begin{equation*}
    {\color{blue}{\eta_x}} = \frac{\sum_{i}^{3} Q_{i,2} - \sum_{i}^{3} Q_{i,0}}{\sum_{i}^{3}\sum_{j}^{3} Q_{i,j}} \quad \quad
    {\color{green}{\eta_y}} = \frac{\sum_{j}^{3} Q_{2,j} - \sum_{j}^{3} Q_{0,j}}{\sum_{i}^{3}\sum_{j}^{3} Q_{i,j}}
    \end{equation*}

.. autofunction:: calculate_eta3
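The 3x3 definition can be illustrated the same way (again only a numpy
transcription of the formula, not the library implementation):

.. code-block:: python

    import numpy as np

    # Q[i, j]: the full 3x3 cluster, matching Q_{i,j} in the formula above
    Q = np.array([[1.0, 2.0, 1.0],
                  [2.0, 9.0, 3.0],
                  [1.0, 2.0, 2.0]])

    eta_x = (Q[:, 2].sum() - Q[:, 0].sum()) / Q.sum()
    eta_y = (Q[2, :].sum() - Q[0, :].sum()) / Q.sum()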

.. image:: ../../../figures/Eta3x3Cross.png
    :target: ../../../figures/Eta3x3Cross.png
    :width: 650px
    :align: center
    :alt: Cross Eta3x3

.. math::
    \begin{equation*}
    {\color{blue}{\eta_x}} = \frac{Q_{1,2} - Q_{1,0}}{Q_{1,0} + Q_{1,1} + Q_{1,2}} \quad \quad
    {\color{green}{\eta_y}} = \frac{Q_{0,2} - Q_{0,1}}{Q_{0,1} + Q_{1,1} + Q_{1,2}}
    \end{equation*}

.. autofunction:: calculate_cross_eta3


Interpolation class for :math:`\eta`-Interpolation
----------------------------------------------------

.. Warning::
    Make sure to use the same :math:`\eta`-function during interpolation as given by the joint :math:`\eta`-distribution passed to the constructor.

.. py:currentmodule:: aare

.. autoclass:: Interpolator
    :special-members: __init__
    :members:
    :undoc-members:
    :inherited-members:
@@ -1,14 +0,0 @@
File I/O
===================

.. toctree::
    :caption: File I/O
    :maxdepth: 1

    pyClusterFile
    pyCtbRawFile
    pyFile
    pyJungfrauDataFile
    pyRawFile
    pyRawMasterFile
    pyTransform
@@ -1,26 +0,0 @@

ClusterFile
============


The :class:`ClusterFile` class is the main interface to read and write clusters in aare. Unfortunately the
old file format does not include metadata like the cluster size and the data type. This means that the
user has to know this information from other sources. Specifying the wrong cluster size or data type
will lead to garbage data being read.

.. py:currentmodule:: aare

.. autoclass:: ClusterFile
    :members:
    :undoc-members:
    :inherited-members:


Below is the API of the ClusterFile_Cluster3x3i but all variants share the same API.

.. autoclass:: aare._aare.ClusterFile_Cluster3x3i
    :special-members: __init__
    :members:
    :undoc-members:
    :show-inheritance:
    :inherited-members:
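A minimal reading sketch; the constructor argument (just the file path) and the
``read_clusters`` call are assumptions based on the C++ interface, so check the
generated API above for the exact signatures:

.. code-block:: python

    from aare._aare import ClusterFile_Cluster3x3i

    # The old format stores no metadata, so the 3x3/int32 layout must be known
    # beforehand; opening with the wrong variant silently yields garbage.
    f = ClusterFile_Cluster3x3i('clusters.clust')  # path is a placeholder
    clusters = f.read_clusters(1000)               # assumed: read up to 1000 clusters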
@@ -1,25 +0,0 @@

CtbRawFile
============

Read analog, digital and transceiver samples from a raw file containing
data from the Chip Test Board. Uses :mod:`aare.transform` to decode the
data into a format that the user can work with.

.. code:: python

    import aare
    from aare.transform import Mythen302Transform
    my302 = Mythen302Transform(offset = 4)

    with aare.CtbRawFile(fname, transform = my302) as f:
        for header, data in f:
            #do something with the data

.. py:currentmodule:: aare

.. autoclass:: CtbRawFile
    :members:
    :undoc-members:
    :show-inheritance:
    :inherited-members:
@@ -1,27 +0,0 @@
Transform
===================

The transform module takes data read by :class:`aare.CtbRawFile` and decodes it
to a useful image format. Depending on detector it supports both analog
and digital samples.

For convenience the following transform objects are defined with a short name

.. code:: python

    moench05 = Moench05Transform()
    moench05_1g = Moench05Transform1g()
    moench05_old = Moench05TransformOld()
    matterhorn02 = Matterhorn02Transform()
    adc_sar_04_64to16 = AdcSar04Transform64to16()
    adc_sar_05_64to16 = AdcSar05Transform64to16()

.. py:currentmodule:: aare

.. automodule:: aare.transform
    :members:
    :undoc-members:
    :private-members:
    :special-members: __call__
    :show-inheritance:
    :inherited-members:
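The exact call signature a transform has to provide is not spelled out here; the
sketch below assumes it is simply a callable that receives the raw sample array
for one frame and returns the decoded image:

.. code-block:: python

    import numpy as np

    class MyTransform:
        """Hypothetical user-defined transform."""
        def __call__(self, raw):
            # reshape / reorder the raw samples into an image (placeholder logic)
            return raw.reshape(-1, 32).astype(np.int32)

    # used like the predefined objects above:
    # with aare.CtbRawFile(fname, transform=MyTransform()) as f: ...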
@@ -1,103 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
import argparse
|
||||
import fnmatch
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
CPP_PATTERNS = ["*.h", "*.hpp", "*.cpp"]
|
||||
PY_PATTERNS = ["*.py"]
|
||||
CMAKE_PATTERNS = ["CMakeLists.txt"]
|
||||
|
||||
FILE_PATTERNS = CPP_PATTERNS + PY_PATTERNS + CMAKE_PATTERNS
|
||||
LICENSE_TEXT = "SPDX-License-Identifier: MPL-2.0"
|
||||
|
||||
|
||||
def get_comment_prefix(filename: str) -> str | None:
|
||||
if any(fnmatch.fnmatch(filename, p) for p in CPP_PATTERNS):
|
||||
return "// "
|
||||
if any(fnmatch.fnmatch(filename, p) for p in (PY_PATTERNS + CMAKE_PATTERNS)):
|
||||
return "# "
|
||||
return None
|
||||
|
||||
|
||||
def matches_pattern(filename: str) -> bool:
|
||||
return any(fnmatch.fnmatch(filename, p) for p in FILE_PATTERNS)
|
||||
|
||||
|
||||
def process_file(filepath: Path) -> bool:
|
||||
filename = filepath.name
|
||||
prefix = get_comment_prefix(filename)
|
||||
if not prefix:
|
||||
return False
|
||||
|
||||
license_line = f"{prefix}{LICENSE_TEXT}\n"
|
||||
|
||||
try:
|
||||
with filepath.open("r", encoding="utf-8") as f:
|
||||
lines = f.readlines()
|
||||
except Exception as e:
|
||||
print(f"⚠️ Error reading {filepath}: {e}")
|
||||
return False
|
||||
|
||||
# Skip if SPDX already present anywhere in the file
|
||||
if any("SPDX-License-Identifier" in line for line in lines):
|
||||
return False
|
||||
|
||||
insert_index = 0
|
||||
|
||||
# For Python, keep shebang on the very first line
|
||||
if filename.endswith(".py") and lines:
|
||||
if lines[0].startswith("#!"):
|
||||
insert_index = 1
|
||||
|
||||
lines.insert(insert_index, license_line)
|
||||
|
||||
try:
|
||||
with filepath.open("w", encoding="utf-8") as f:
|
||||
f.writelines(lines)
|
||||
except Exception as e:
|
||||
print(f"⚠️ Error writing {filepath}: {e}")
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
|
||||
def main() -> None:
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Add SPDX-License-Identifier: MPL-2.0 to source files."
|
||||
)
|
||||
parser.add_argument(
|
||||
"path",
|
||||
help="Root directory to recursively process "
|
||||
"(*.h, *.cpp, *.py, and CMakeLists.txt).",
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
root_path = Path(args.path).expanduser().resolve()
|
||||
|
||||
if not root_path.exists():
|
||||
print(f"Error: Path does not exist: {root_path}")
|
||||
raise SystemExit(1)
|
||||
|
||||
if not root_path.is_dir():
|
||||
print(f"Error: Path is not a directory: {root_path}")
|
||||
raise SystemExit(1)
|
||||
|
||||
print(f"Processing directory: {root_path}")
|
||||
modified = 0
|
||||
|
||||
for dirpath, _, files in os.walk(root_path):
|
||||
dirpath = Path(dirpath)
|
||||
for name in files:
|
||||
if matches_pattern(name):
|
||||
fullpath = dirpath / name
|
||||
if process_file(fullpath):
|
||||
print(f"✔ Added SPDX: {fullpath}")
|
||||
modified += 1
|
||||
|
||||
print(f"\nDone. Updated {modified} file(s).")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -3,16 +3,11 @@ channels:
|
||||
- conda-forge
|
||||
dependencies:
|
||||
- anaconda-client
|
||||
- catch2
|
||||
- conda-build
|
||||
- doxygen
|
||||
- sphinx
|
||||
- sphinx=7.1.2
|
||||
- breathe
|
||||
- sphinx_rtd_theme
|
||||
- furo
|
||||
- zeromq
|
||||
- pybind11
|
||||
- numpy
|
||||
- matplotlib
|
||||
- nlohmann_json
|
||||
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
#pragma once
|
||||
#include "aare/defs.hpp"
|
||||
#include <array>
|
||||
|
||||
@@ -1,13 +1,18 @@
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
#pragma once
|
||||
|
||||
#include "aare/Cluster.hpp"
|
||||
#include "aare/ClusterVector.hpp"
|
||||
#include "aare/NDArray.hpp"
|
||||
#include "aare/defs.hpp"
|
||||
|
||||
namespace aare {
|
||||
|
||||
enum class corner : int {
|
||||
cBottomLeft = 0,
|
||||
cBottomRight = 1,
|
||||
cTopLeft = 2,
|
||||
cTopRight = 3
|
||||
};
|
||||
|
||||
enum class pixel : int {
|
||||
pBottomLeft = 0,
|
||||
pBottom = 1,
|
||||
@@ -20,427 +25,146 @@ enum class pixel : int {
|
||||
pTopRight = 8
|
||||
};
|
||||
|
||||
// TODO: better to have sum after x,y
|
||||
/**
|
||||
* eta struct
|
||||
*/
|
||||
template <typename T> struct Eta2 {
|
||||
/// @brief eta in x direction
|
||||
double x{};
|
||||
/// @brief eta in y direction
|
||||
double y{};
|
||||
/// @brief index of subcluster given as corner relative to cluster center
|
||||
corner c{0};
|
||||
/// @brief photon energy (cluster sum)
|
||||
T sum{};
|
||||
double x;
|
||||
double y;
|
||||
int c;
|
||||
T sum;
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Calculate the eta2 values for all clusters in a ClusterVector
|
||||
* @brief Calculate the eta2 values for all clusters in a Clustervector
|
||||
*/
|
||||
template <typename ClusterType,
|
||||
typename = std::enable_if_t<is_cluster_v<ClusterType>>>
|
||||
std::vector<Eta2<typename ClusterType::value_type>>
|
||||
calculate_eta2(const ClusterVector<ClusterType> &clusters) {
|
||||
|
||||
std::vector<Eta2<typename ClusterType::value_type>> eta2{};
|
||||
eta2.reserve(clusters.size());
|
||||
NDArray<double, 2> calculate_eta2(const ClusterVector<ClusterType> &clusters) {
|
||||
NDArray<double, 2> eta2({static_cast<int64_t>(clusters.size()), 2});
|
||||
|
||||
for (size_t i = 0; i < clusters.size(); i++) {
|
||||
auto e = calculate_eta2(clusters[i]);
|
||||
eta2.push_back(e);
|
||||
eta2(i, 0) = e.x;
|
||||
eta2(i, 1) = e.y;
|
||||
}
|
||||
|
||||
return eta2;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Calculate the full eta2 values for all clusters in a ClusterVector
|
||||
*/
|
||||
template <typename ClusterType,
|
||||
typename = std::enable_if_t<is_cluster_v<ClusterType>>>
|
||||
std::vector<Eta2<typename ClusterType::value_type>>
|
||||
calculate_full_eta2(const ClusterVector<ClusterType> &clusters) {
|
||||
std::vector<Eta2<typename ClusterType::value_type>> eta2{};
|
||||
eta2.reserve(clusters.size());
|
||||
|
||||
for (size_t i = 0; i < clusters.size(); i++) {
|
||||
auto e = calculate_full_eta2(clusters[i]);
|
||||
eta2.push_back(e);
|
||||
}
|
||||
|
||||
return eta2;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Calculate eta3 for all 3x3 clusters in a ClusterVector
|
||||
*/
|
||||
template <typename ClusterType,
|
||||
typename = std::enable_if_t<is_cluster_v<ClusterType>>>
|
||||
std::vector<Eta2<typename ClusterType::value_type>>
|
||||
calculate_eta3(const ClusterVector<ClusterType> &clusters) {
|
||||
std::vector<Eta2<typename ClusterType::value_type>> eta2{};
|
||||
eta2.reserve(clusters.size());
|
||||
|
||||
for (size_t i = 0; i < clusters.size(); i++) {
|
||||
auto e = calculate_eta3(clusters[i]);
|
||||
eta2.push_back(e);
|
||||
}
|
||||
|
||||
return eta2;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Calculate cross eta3 for all 3x3 clusters in a ClusterVector
|
||||
*/
|
||||
template <typename ClusterType,
|
||||
typename = std::enable_if_t<is_cluster_v<ClusterType>>>
|
||||
std::vector<Eta2<typename ClusterType::value_type>>
|
||||
calculate_cross_eta3(const ClusterVector<ClusterType> &clusters) {
|
||||
std::vector<Eta2<typename ClusterType::value_type>> eta2{};
|
||||
eta2.reserve(clusters.size());
|
||||
|
||||
for (size_t i = 0; i < clusters.size(); i++) {
|
||||
auto e = calculate_cross_eta3(clusters[i]);
|
||||
eta2.push_back(e);
|
||||
}
|
||||
|
||||
return eta2;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief helper function to calculate eta2 x and y values
|
||||
* @param eta reference to the Eta2 object to update
|
||||
* @param left_x value of the left pixel
|
||||
* @param right_x value of the right pixel
|
||||
* @param bottom_y value of the bottom pixel
|
||||
* @param top_y value of the top pixel
|
||||
*/
|
||||
template <typename T>
|
||||
inline void calculate_eta2(Eta2<T> &eta, const T left_x, const T right_x,
|
||||
const T bottom_y, const T top_y) {
|
||||
if ((right_x + left_x) != 0)
|
||||
eta.x = static_cast<double>(right_x) /
|
||||
static_cast<double>(right_x + left_x); // between (0,1) the
|
||||
// closer to zero left
|
||||
// value probably larger
|
||||
if ((top_y + bottom_y) != 0)
|
||||
eta.y = static_cast<double>(top_y) /
|
||||
static_cast<double>(top_y + bottom_y); // between (0,1) the
|
||||
// closer to zero bottom
|
||||
// value probably larger
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Calculate the eta2 values for a generic sized cluster and return them
|
||||
* in a Eta2 struct containing etay, etax and the index (as corner) of the
|
||||
* respective 2x2 subcluster relative to the cluster center.
|
||||
* in a Eta2 struct containing etay, etax and the index of the respective 2x2
|
||||
* subcluster.
|
||||
*/
|
||||
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType = uint16_t>
|
||||
typename CoordType>
|
||||
Eta2<T>
|
||||
calculate_eta2(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cl) {
|
||||
|
||||
static_assert(ClusterSizeX > 1 && ClusterSizeY > 1);
|
||||
Eta2<T> eta{};
|
||||
|
||||
auto max_sum = cl.max_sum_2x2();
|
||||
eta.sum = max_sum.first;
|
||||
auto c = max_sum.second;
|
||||
|
||||
size_t cluster_center_index =
|
||||
(ClusterSizeX / 2) + (ClusterSizeY / 2) * ClusterSizeX;
|
||||
|
||||
auto max_sum = cl.max_sum_2x2();
|
||||
eta.sum = max_sum.sum;
|
||||
corner c = max_sum.index;
|
||||
size_t index_bottom_left_max_2x2_subcluster =
|
||||
(int(c / (ClusterSizeX - 1))) * ClusterSizeX + c % (ClusterSizeX - 1);
|
||||
|
||||
// subcluster top right from center
|
||||
switch (c) {
|
||||
case (corner::cTopLeft):
|
||||
calculate_eta2(eta, cl.data[cluster_center_index - 1],
|
||||
cl.data[cluster_center_index],
|
||||
cl.data[cluster_center_index - ClusterSizeX],
|
||||
cl.data[cluster_center_index]);
|
||||
// dx = -1
|
||||
// dy = -1
|
||||
break;
|
||||
case (corner::cTopRight):
|
||||
calculate_eta2(eta, cl.data[cluster_center_index],
|
||||
cl.data[cluster_center_index + 1],
|
||||
cl.data[cluster_center_index - ClusterSizeX],
|
||||
cl.data[cluster_center_index]);
|
||||
// dx = 0
|
||||
// dy = -1
|
||||
break;
|
||||
case (corner::cBottomLeft):
|
||||
calculate_eta2(eta, cl.data[cluster_center_index - 1],
|
||||
cl.data[cluster_center_index],
|
||||
cl.data[cluster_center_index],
|
||||
cl.data[cluster_center_index + ClusterSizeX]);
|
||||
// dx = -1
|
||||
// dy = 0
|
||||
break;
|
||||
case (corner::cBottomRight):
|
||||
calculate_eta2(eta, cl.data[cluster_center_index],
|
||||
cl.data[cluster_center_index + 1],
|
||||
cl.data[cluster_center_index],
|
||||
cl.data[cluster_center_index + ClusterSizeX]);
|
||||
// dx = 0
|
||||
// dy = 0
|
||||
break;
|
||||
// check that cluster center is in max subcluster
|
||||
if (cluster_center_index != index_bottom_left_max_2x2_subcluster &&
|
||||
cluster_center_index != index_bottom_left_max_2x2_subcluster + 1 &&
|
||||
cluster_center_index !=
|
||||
index_bottom_left_max_2x2_subcluster + ClusterSizeX &&
|
||||
cluster_center_index !=
|
||||
index_bottom_left_max_2x2_subcluster + ClusterSizeX + 1)
|
||||
throw std::runtime_error("Photon center is not in max 2x2_subcluster");
|
||||
|
||||
if ((cluster_center_index - index_bottom_left_max_2x2_subcluster) %
|
||||
ClusterSizeX ==
|
||||
0) {
|
||||
if ((cl.data[cluster_center_index + 1] +
|
||||
cl.data[cluster_center_index]) != 0)
|
||||
|
||||
eta.x = static_cast<double>(cl.data[cluster_center_index + 1]) /
|
||||
static_cast<double>((cl.data[cluster_center_index + 1] +
|
||||
cl.data[cluster_center_index]));
|
||||
} else {
|
||||
if ((cl.data[cluster_center_index] +
|
||||
cl.data[cluster_center_index - 1]) != 0)
|
||||
|
||||
eta.x = static_cast<double>(cl.data[cluster_center_index]) /
|
||||
static_cast<double>((cl.data[cluster_center_index - 1] +
|
||||
cl.data[cluster_center_index]));
|
||||
}
|
||||
|
||||
eta.c = c;
|
||||
|
||||
return eta;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Calculate the eta2 values for a generic sized cluster and return them
|
||||
* in a Eta2 struct containing etay, etax and the index (as corner) of the
|
||||
* respective 2x2 subcluster relative to the cluster center.
|
||||
*/
|
||||
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType>
|
||||
Eta2<T> calculate_full_eta2(
|
||||
const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cl) {
|
||||
|
||||
static_assert(ClusterSizeX > 1 && ClusterSizeY > 1);
|
||||
Eta2<T> eta{};
|
||||
|
||||
constexpr size_t cluster_center_index =
|
||||
(ClusterSizeX / 2) + (ClusterSizeY / 2) * ClusterSizeX;
|
||||
|
||||
auto max_sum = cl.max_sum_2x2();
|
||||
eta.sum = max_sum.sum;
|
||||
corner c = max_sum.index;
|
||||
|
||||
// subcluster top right from center
|
||||
switch (c) {
|
||||
case (corner::cTopLeft):
|
||||
if (eta.sum != 0) {
|
||||
eta.x = static_cast<double>(
|
||||
cl.data[cluster_center_index] +
|
||||
cl.data[cluster_center_index - ClusterSizeX]) /
|
||||
static_cast<double>(eta.sum);
|
||||
|
||||
eta.y = static_cast<double>(cl.data[cluster_center_index - 1] +
|
||||
cl.data[cluster_center_index]) /
|
||||
static_cast<double>(eta.sum);
|
||||
}
|
||||
// dx = -1
|
||||
// dy = -1
|
||||
break;
|
||||
case (corner::cTopRight):
|
||||
if (eta.sum != 0) {
|
||||
eta.x = static_cast<double>(
|
||||
cl.data[cluster_center_index + 1] +
|
||||
cl.data[cluster_center_index - ClusterSizeX + 1]) /
|
||||
static_cast<double>(eta.sum);
|
||||
eta.y = static_cast<double>(cl.data[cluster_center_index] +
|
||||
cl.data[cluster_center_index + 1]) /
|
||||
static_cast<double>(eta.sum);
|
||||
}
|
||||
// dx = 0
|
||||
// dy = -1
|
||||
break;
|
||||
case (corner::cBottomLeft):
|
||||
if (eta.sum != 0) {
|
||||
eta.x = static_cast<double>(
|
||||
cl.data[cluster_center_index] +
|
||||
if ((cluster_center_index - index_bottom_left_max_2x2_subcluster) /
|
||||
ClusterSizeX <
|
||||
1) {
|
||||
assert(cluster_center_index + ClusterSizeX <
|
||||
ClusterSizeX * ClusterSizeY); // suppress warning
|
||||
if ((cl.data[cluster_center_index] +
|
||||
cl.data[cluster_center_index + ClusterSizeX]) != 0)
|
||||
eta.y = static_cast<double>(
|
||||
cl.data[cluster_center_index + ClusterSizeX]) /
|
||||
static_cast<double>(eta.sum);
|
||||
eta.y = static_cast<double>(
|
||||
cl.data[cluster_center_index + ClusterSizeX] +
|
||||
cl.data[cluster_center_index + ClusterSizeX - 1]) /
|
||||
static_cast<double>(eta.sum);
|
||||
}
|
||||
// dx = -1
|
||||
// dy = 0
|
||||
break;
|
||||
case (corner::cBottomRight):
|
||||
if (eta.sum != 0) {
|
||||
eta.x = static_cast<double>(
|
||||
cl.data[cluster_center_index + 1] +
|
||||
cl.data[cluster_center_index + ClusterSizeX + 1]) /
|
||||
static_cast<double>(eta.sum);
|
||||
eta.y = static_cast<double>(
|
||||
cl.data[cluster_center_index + ClusterSizeX] +
|
||||
cl.data[cluster_center_index + ClusterSizeX + 1]) /
|
||||
static_cast<double>(eta.sum);
|
||||
}
|
||||
// dx = 0
|
||||
// dy = 0
|
||||
break;
|
||||
static_cast<double>(
|
||||
(cl.data[cluster_center_index] +
|
||||
cl.data[cluster_center_index + ClusterSizeX]));
|
||||
} else {
|
||||
if ((cl.data[cluster_center_index] +
|
||||
cl.data[cluster_center_index - ClusterSizeX]) != 0)
|
||||
eta.y = static_cast<double>(cl.data[cluster_center_index]) /
|
||||
static_cast<double>(
|
||||
(cl.data[cluster_center_index] +
|
||||
cl.data[cluster_center_index - ClusterSizeX]));
|
||||
}
|
||||
|
||||
eta.c = c;
|
||||
|
||||
eta.c = c; // TODO only supported for 2x2 and 3x3 clusters -> at least no
|
||||
// underlying enum class
|
||||
return eta;
|
||||
}
|
||||
|
||||
// TODO! Look up eta2 calculation - photon center should be top right corner
|
||||
template <typename T>
|
||||
Eta2<T> calculate_eta2(const Cluster<T, 2, 2, uint16_t> &cl) {
|
||||
Eta2<T> calculate_eta2(const Cluster<T, 2, 2, int16_t> &cl) {
|
||||
Eta2<T> eta{};
|
||||
|
||||
// TODO: maybe have as member function of cluster
|
||||
const uint8_t photon_hit_index =
|
||||
std::max_element(cl.data.begin(), cl.data.end()) - cl.data.begin();
|
||||
|
||||
eta.c = static_cast<corner>(3 - photon_hit_index);
|
||||
|
||||
switch (eta.c) {
|
||||
case corner::cTopLeft:
|
||||
calculate_eta2(eta, cl.data[2], cl.data[3], cl.data[1], cl.data[3]);
|
||||
break;
|
||||
case corner::cTopRight:
|
||||
calculate_eta2(eta, cl.data[2], cl.data[3], cl.data[0], cl.data[2]);
|
||||
break;
|
||||
case corner::cBottomLeft:
|
||||
calculate_eta2(eta, cl.data[0], cl.data[1], cl.data[1], cl.data[3]);
|
||||
break;
|
||||
case corner::cBottomRight:
|
||||
calculate_eta2(eta, cl.data[0], cl.data[1], cl.data[0], cl.data[2]);
|
||||
break;
|
||||
}
|
||||
|
||||
if ((cl.data[0] + cl.data[1]) != 0)
|
||||
eta.x = static_cast<double>(cl.data[1]) / (cl.data[0] + cl.data[1]);
|
||||
if ((cl.data[0] + cl.data[2]) != 0)
|
||||
eta.y = static_cast<double>(cl.data[2]) / (cl.data[0] + cl.data[2]);
|
||||
eta.sum = cl.sum();
|
||||
|
||||
eta.c = static_cast<int>(corner::cBottomLeft); // TODO! This is not correct,
|
||||
// but need to put something
|
||||
return eta;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
Eta2<T> calculate_full_eta2(const Cluster<T, 2, 2, uint16_t> &cl) {
|
||||
// calculates Eta3 for 3x3 cluster based on code from analyze_cluster
|
||||
// TODO only supported for 3x3 Clusters
|
||||
template <typename T> Eta2<T> calculate_eta3(const Cluster<T, 3, 3> &cl) {
|
||||
|
||||
Eta2<T> eta{};
|
||||
|
||||
eta.sum = cl.sum();
|
||||
T sum = 0;
|
||||
|
||||
const uint8_t photon_hit_index =
|
||||
std::max_element(cl.data.begin(), cl.data.end()) - cl.data.begin();
|
||||
std::for_each(std::begin(cl.data), std::end(cl.data),
|
||||
[&sum](T x) { sum += x; });
|
||||
|
||||
eta.c = static_cast<corner>(3 - photon_hit_index);
|
||||
eta.sum = sum;
|
||||
|
||||
if (eta.sum != 0) {
|
||||
eta.x = static_cast<double>(cl.data[1] + cl.data[3]) /
|
||||
static_cast<double>(eta.sum);
|
||||
eta.y = static_cast<double>(cl.data[2] + cl.data[3]) /
|
||||
static_cast<double>(eta.sum);
|
||||
}
|
||||
|
||||
return eta;
|
||||
}
|
||||
|
||||
// TODO generalize
|
||||
template <typename T>
|
||||
Eta2<T> calculate_eta2(const Cluster<T, 1, 2, uint16_t> &cl) {
|
||||
Eta2<T> eta{};
|
||||
|
||||
eta.x = 0;
|
||||
eta.y = static_cast<double>(cl.data[1]) / cl.data[0];
|
||||
eta.sum = cl.sum();
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
Eta2<T> calculate_eta2(const Cluster<T, 2, 1, uint16_t> &cl) {
|
||||
Eta2<T> eta{};
|
||||
|
||||
eta.x = static_cast<double>(cl.data[1]) / cl.data[0];
|
||||
eta.y = 0;
|
||||
eta.sum = cl.sum();
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief calculates cross Eta3 for 3x3 cluster
|
||||
* cross Eta3 calculates the eta by taking into account only the cross pixels
|
||||
* {top, bottom, left, right, center}
|
||||
*/
|
||||
template <typename T, typename CoordType = uint16_t>
|
||||
Eta2<T> calculate_cross_eta3(const Cluster<T, 3, 3, CoordType> &cl) {
|
||||
|
||||
Eta2<T> eta{};
|
||||
|
||||
T photon_energy = cl.sum();
|
||||
|
||||
eta.sum = photon_energy;
|
||||
eta.c = corner::cBottomLeft;
|
||||
|
||||
if ((cl.data[3] + cl.data[4] + cl.data[5]) != 0)
|
||||
|
||||
eta.x =
|
||||
static_cast<double>(-cl.data[3] + cl.data[3 + 2]) /
|
||||
eta.x = static_cast<double>(-cl.data[3] + cl.data[3 + 2]) /
|
||||
|
||||
static_cast<double>(cl.data[3] + cl.data[4] + cl.data[5]); // (-1,1)
|
||||
(cl.data[3] + cl.data[4] + cl.data[5]);
|
||||
|
||||
if ((cl.data[1] + cl.data[4] + cl.data[7]) != 0)
|
||||
|
||||
eta.y = static_cast<double>(-cl.data[1] + cl.data[2 * 3 + 1]) /
|
||||
|
||||
static_cast<double>(cl.data[1] + cl.data[4] + cl.data[7]);
|
||||
(cl.data[1] + cl.data[4] + cl.data[7]);
|
||||
|
||||
return eta;
|
||||
}
|
||||
|
||||
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType = uint16_t>
|
||||
Eta2<T> calculate_cross_eta3(
|
||||
const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cl) {
|
||||
|
||||
static_assert(ClusterSizeX > 2 && ClusterSizeY > 2,
|
||||
"calculate_eta3 only defined for clusters larger than 2x2");
|
||||
|
||||
if constexpr (ClusterSizeX != 3 || ClusterSizeY != 3) {
|
||||
auto reduced_cluster = reduce_cluster_to_3x3(cl);
|
||||
return calculate_cross_eta3(reduced_cluster);
|
||||
} else {
|
||||
return calculate_cross_eta3(cl);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief calculates Eta3 for 3x3 cluster
|
||||
* It calculates the eta by taking into account all pixels in the 3x3 cluster
|
||||
*/
|
||||
template <typename T, typename CoordType = uint16_t>
|
||||
Eta2<T> calculate_eta3(const Cluster<T, 3, 3, CoordType> &cl) {
|
||||
|
||||
Eta2<T> eta{};
|
||||
|
||||
T photon_energy = cl.sum();
|
||||
|
||||
eta.sum = photon_energy;
|
||||
|
||||
// TODO: how do we handle potential arithmetic overflows? - T could be
|
||||
// uint16
|
||||
if (photon_energy != 0) {
|
||||
std::array<T, 2> column_sums{
|
||||
static_cast<T>(cl.data[0] + cl.data[3] + cl.data[6]),
|
||||
static_cast<T>(cl.data[2] + cl.data[5] + cl.data[8])};
|
||||
|
||||
eta.x = static_cast<double>(-column_sums[0] + column_sums[1]) /
|
||||
static_cast<double>(photon_energy);
|
||||
|
||||
std::array<T, 2> row_sums{
|
||||
static_cast<T>(cl.data[0] + cl.data[1] + cl.data[2]),
|
||||
static_cast<T>(cl.data[6] + cl.data[7] + cl.data[8])};
|
||||
|
||||
eta.y = static_cast<double>(-row_sums[0] + row_sums[1]) /
|
||||
static_cast<double>(photon_energy);
|
||||
}
|
||||
|
||||
return eta;
|
||||
}
|
||||
|
||||
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType = uint16_t>
|
||||
Eta2<T>
|
||||
calculate_eta3(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cl) {
|
||||
|
||||
static_assert(ClusterSizeX > 2 && ClusterSizeY > 2,
|
||||
"calculate_eta3 only defined for clusters larger than 2x2");
|
||||
|
||||
if constexpr (ClusterSizeX != 3 || ClusterSizeY != 3) {
|
||||
auto reduced_cluster = reduce_cluster_to_3x3(cl);
|
||||
return calculate_eta3(reduced_cluster);
|
||||
} else {
|
||||
return calculate_eta3(cl);
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace aare
|
||||
@@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
#pragma once
|
||||
|
||||
#include <chrono>
|
||||
|
||||
183
include/aare/Cluster.hpp
Executable file → Normal file
@@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
/************************************************
|
||||
* @file Cluster.hpp
|
||||
@@ -9,7 +8,6 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "defs.hpp"
|
||||
#include <algorithm>
|
||||
#include <array>
|
||||
#include <cstdint>
|
||||
@@ -19,12 +17,8 @@
|
||||
namespace aare {
|
||||
|
||||
// requires clause c++20 maybe update
|
||||
|
||||
/**
|
||||
* @brief Cluster struct
|
||||
*/
|
||||
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType = uint16_t>
|
||||
typename CoordType = int16_t>
|
||||
struct Cluster {
|
||||
|
||||
static_assert(std::is_arithmetic_v<T>, "T needs to be an arithmetic type");
|
||||
@@ -33,11 +27,8 @@ struct Cluster {
|
||||
static_assert(ClusterSizeX > 0 && ClusterSizeY > 0,
|
||||
"Cluster sizes must be bigger than zero");
|
||||
|
||||
/// @brief Cluster center x coordinate (in pixel coordinates)
|
||||
CoordType x;
|
||||
/// @brief Cluster center y coordinate (in pixel coordinates)
|
||||
CoordType y;
|
||||
/// @brief Cluster data stored in row-major order starting from top-left
|
||||
std::array<T, ClusterSizeX * ClusterSizeY> data;
|
||||
|
||||
static constexpr uint8_t cluster_size_x = ClusterSizeX;
|
||||
@@ -45,18 +36,9 @@ struct Cluster {
|
||||
using value_type = T;
|
||||
using coord_type = CoordType;
|
||||
|
||||
/**
|
||||
* @brief Sum of all elements in the cluster
|
||||
*/
|
||||
T sum() const { return std::accumulate(data.begin(), data.end(), T{}); }
|
||||
|
||||
// TODO: handle 1 dimensional clusters
|
||||
/**
|
||||
* @brief sum of 2x2 subcluster with highest energy
|
||||
* @return photon energy of subcluster, 2x2 subcluster index relative to
|
||||
* cluster center
|
||||
*/
|
||||
Sum_index_pair<T, corner> max_sum_2x2() const {
|
||||
std::pair<T, int> max_sum_2x2() const {
|
||||
|
||||
if constexpr (cluster_size_x == 3 && cluster_size_y == 3) {
|
||||
std::array<T, 4> sum_2x2_subclusters;
|
||||
@@ -67,166 +49,31 @@ struct Cluster {
|
||||
int index = std::max_element(sum_2x2_subclusters.begin(),
|
||||
sum_2x2_subclusters.end()) -
|
||||
sum_2x2_subclusters.begin();
|
||||
return Sum_index_pair<T, corner>{sum_2x2_subclusters[index],
|
||||
corner{index}};
|
||||
return std::make_pair(sum_2x2_subclusters[index], index);
|
||||
} else if constexpr (cluster_size_x == 2 && cluster_size_y == 2) {
|
||||
return Sum_index_pair<T, corner>{
|
||||
data[0] + data[1] + data[2] + data[3], corner{0}};
|
||||
return std::make_pair(data[0] + data[1] + data[2] + data[3], 0);
|
||||
} else {
|
||||
constexpr size_t cluster_center_index =
|
||||
(ClusterSizeX / 2) + (ClusterSizeY / 2) * ClusterSizeX;
|
||||
constexpr size_t num_2x2_subclusters =
|
||||
(ClusterSizeX - 1) * (ClusterSizeY - 1);
|
||||
|
||||
std::array<T, 4> sum_2x2_subcluster{0};
|
||||
// subcluster top left from center
|
||||
sum_2x2_subcluster[0] =
|
||||
data[cluster_center_index] + data[cluster_center_index - 1] +
|
||||
data[cluster_center_index - ClusterSizeX] +
|
||||
data[cluster_center_index - 1 - ClusterSizeX];
|
||||
// subcluster top right from center
|
||||
if (ClusterSizeX > 2) {
|
||||
sum_2x2_subcluster[1] =
|
||||
data[cluster_center_index] +
|
||||
data[cluster_center_index + 1] +
|
||||
data[cluster_center_index - ClusterSizeX] +
|
||||
data[cluster_center_index - ClusterSizeX + 1];
|
||||
}
|
||||
// subcluster bottom left from center
|
||||
if (ClusterSizeY > 2) {
|
||||
sum_2x2_subcluster[2] =
|
||||
data[cluster_center_index] +
|
||||
data[cluster_center_index - 1] +
|
||||
data[cluster_center_index + ClusterSizeX] +
|
||||
data[cluster_center_index + ClusterSizeX - 1];
|
||||
}
|
||||
// subcluster bottom right from center
|
||||
if (ClusterSizeX > 2 && ClusterSizeY > 2) {
|
||||
sum_2x2_subcluster[3] =
|
||||
data[cluster_center_index] +
|
||||
data[cluster_center_index + 1] +
|
||||
data[cluster_center_index + ClusterSizeX] +
|
||||
data[cluster_center_index + ClusterSizeX + 1];
|
||||
std::array<T, num_2x2_subclusters> sum_2x2_subcluster;
|
||||
for (size_t i = 0; i < ClusterSizeY - 1; ++i) {
|
||||
for (size_t j = 0; j < ClusterSizeX - 1; ++j)
|
||||
sum_2x2_subcluster[i * (ClusterSizeX - 1) + j] =
|
||||
data[i * ClusterSizeX + j] +
|
||||
data[i * ClusterSizeX + j + 1] +
|
||||
data[(i + 1) * ClusterSizeX + j] +
|
||||
data[(i + 1) * ClusterSizeX + j + 1];
|
||||
}
|
||||
|
||||
int index = std::max_element(sum_2x2_subcluster.begin(),
|
||||
sum_2x2_subcluster.end()) -
|
||||
sum_2x2_subcluster.begin();
|
||||
return Sum_index_pair<T, corner>{sum_2x2_subcluster[index],
|
||||
corner{index}};
|
||||
return std::make_pair(sum_2x2_subcluster[index], index);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Reduce a cluster to a 2x2 cluster by selecting the 2x2 block with the
|
||||
* highest sum.
|
||||
* @param c Cluster to reduce
|
||||
* @return reduced cluster
|
||||
* @note The cluster is filled using row major ordering starting at the top-left
|
||||
* (thus for a max subcluster in the top left corner the photon hit is at
|
||||
* the fourth position)
|
||||
*/
|
||||
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType = uint16_t>
|
||||
Cluster<T, 2, 2, CoordType>
|
||||
reduce_to_2x2(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &c) {
|
||||
|
||||
static_assert(ClusterSizeX >= 2 && ClusterSizeY >= 2,
|
||||
"Cluster sizes must be at least 2x2 for reduction to 2x2");
|
||||
|
||||
Cluster<T, 2, 2, CoordType> result{};
|
||||
|
||||
auto [sum, index] = c.max_sum_2x2();
|
||||
|
||||
constexpr int16_t cluster_center_index =
|
||||
(ClusterSizeX / 2) + (ClusterSizeY / 2) * ClusterSizeX;
|
||||
|
||||
int16_t index_top_left_max_2x2_subcluster = cluster_center_index;
|
||||
switch (index) {
|
||||
case corner::cTopLeft:
|
||||
index_top_left_max_2x2_subcluster -= (ClusterSizeX + 1);
|
||||
break;
|
||||
case corner::cTopRight:
|
||||
index_top_left_max_2x2_subcluster -= ClusterSizeX;
|
||||
break;
|
||||
case corner::cBottomLeft:
|
||||
index_top_left_max_2x2_subcluster -= 1;
|
||||
break;
|
||||
case corner::cBottomRight:
|
||||
// no change needed
|
||||
break;
|
||||
}
|
||||
|
||||
result.x = c.x;
|
||||
result.y = c.y;
|
||||
|
||||
result.data = {
|
||||
c.data[index_top_left_max_2x2_subcluster],
|
||||
c.data[index_top_left_max_2x2_subcluster + 1],
|
||||
c.data[index_top_left_max_2x2_subcluster + ClusterSizeX],
|
||||
c.data[index_top_left_max_2x2_subcluster + ClusterSizeX + 1]};
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
Cluster<T, 2, 2, uint16_t> reduce_to_2x2(const Cluster<T, 3, 3, uint16_t> &c) {
|
||||
Cluster<T, 2, 2, uint16_t> result{};
|
||||
|
||||
auto [s, i] = c.max_sum_2x2();
|
||||
result.x = c.x;
|
||||
result.y = c.y;
|
||||
switch (i) {
|
||||
case corner::cTopLeft:
|
||||
result.data = {c.data[0], c.data[1], c.data[3], c.data[4]};
|
||||
break;
|
||||
case corner::cTopRight:
|
||||
result.data = {c.data[1], c.data[2], c.data[4], c.data[5]};
|
||||
break;
|
||||
case corner::cBottomLeft:
|
||||
result.data = {c.data[3], c.data[4], c.data[6], c.data[7]};
|
||||
break;
|
||||
case corner::cBottomRight:
|
||||
result.data = {c.data[4], c.data[5], c.data[7], c.data[8]};
|
||||
break;
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Reduce a cluster to a 3x3 cluster
|
||||
* @param c Cluster to reduce
|
||||
* @return reduced cluster
|
||||
*/
|
||||
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType = int16_t>
|
||||
Cluster<T, 3, 3, CoordType>
|
||||
reduce_to_3x3(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &c) {
|
||||
|
||||
static_assert(ClusterSizeX >= 3 && ClusterSizeY >= 3,
|
||||
"Cluster sizes must be at least 3x3 for reduction to 3x3");
|
||||
|
||||
Cluster<T, 3, 3, CoordType> result{};
|
||||
|
||||
int16_t cluster_center_index =
|
||||
(ClusterSizeX / 2) + (ClusterSizeY / 2) * ClusterSizeX;
|
||||
|
||||
result.x = c.x;
|
||||
result.y = c.y;
|
||||
|
||||
result.data = {c.data[cluster_center_index - ClusterSizeX - 1],
|
||||
c.data[cluster_center_index - ClusterSizeX],
|
||||
c.data[cluster_center_index - ClusterSizeX + 1],
|
||||
c.data[cluster_center_index - 1],
|
||||
c.data[cluster_center_index],
|
||||
c.data[cluster_center_index + 1],
|
||||
c.data[cluster_center_index + ClusterSizeX - 1],
|
||||
c.data[cluster_center_index + ClusterSizeX],
|
||||
c.data[cluster_center_index + ClusterSizeX + 1]};
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
// Type Traits for is_cluster_type
|
||||
template <typename T>
|
||||
struct is_cluster : std::false_type {}; // Default case: Not a Cluster
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
#pragma once
|
||||
#include <atomic>
|
||||
#include <thread>
|
||||
@@ -6,7 +5,6 @@
|
||||
#include "aare/ClusterFinderMT.hpp"
|
||||
#include "aare/ClusterVector.hpp"
|
||||
#include "aare/ProducerConsumerQueue.hpp"
|
||||
#include "aare/defs.hpp"
|
||||
|
||||
namespace aare {
|
||||
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
#pragma once
|
||||
|
||||
#include "aare/Cluster.hpp"
|
||||
@@ -15,7 +14,7 @@
|
||||
namespace aare {
|
||||
|
||||
/*
|
||||
Binary cluster file. Expects data to be laid out as:
|
||||
Binary cluster file. Expects data to be layed out as:
|
||||
int32_t frame_number
|
||||
uint32_t number_of_clusters
|
||||
int16_t x, int16_t y, int32_t data[9] x number_of_clusters
|
||||
@@ -190,16 +189,6 @@ class ClusterFile {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Return the current position in the file (bytes)
|
||||
*/
|
||||
int64_t tell() {
|
||||
if (!fp) {
|
||||
throw std::runtime_error(LOCATION + "File not opened");
|
||||
}
|
||||
return ftell(fp);
|
||||
}
|
||||
|
||||
/** @brief Open the file in specific mode
|
||||
*
|
||||
*/
|
||||
@@ -365,20 +354,15 @@ template <typename ClusterType, typename Enable>
|
||||
ClusterVector<ClusterType>
|
||||
ClusterFile<ClusterType, Enable>::read_frame_without_cut() {
|
||||
if (m_mode != "r") {
|
||||
throw std::runtime_error(LOCATION + "File not opened for reading");
|
||||
throw std::runtime_error("File not opened for reading");
|
||||
}
|
||||
if (m_num_left) {
|
||||
throw std::runtime_error(
|
||||
LOCATION + "There are still photons left in the last frame");
|
||||
"There are still photons left in the last frame");
|
||||
}
|
||||
int32_t frame_number;
|
||||
if (fread(&frame_number, sizeof(frame_number), 1, fp) != 1) {
|
||||
if (feof(fp))
|
||||
throw std::runtime_error(LOCATION + "Unexpected end of file");
|
||||
else if (ferror(fp))
|
||||
throw std::runtime_error(LOCATION + "Error reading from file");
|
||||
|
||||
throw std::runtime_error(LOCATION + "Unexpected error (not feof or ferror) when reading frame number");
|
||||
throw std::runtime_error(LOCATION + "Could not read frame number");
|
||||
}
|
||||
|
||||
int32_t n_clusters; // Saved as 32bit integer in the cluster file
|
||||
@@ -454,8 +438,8 @@ bool ClusterFile<ClusterType, Enable>::is_selected(ClusterType &cl) {
|
||||
|
||||
if (m_noise_map) {
|
||||
auto sum_1x1 = cl.data[cluster_center_index]; // central pixel
|
||||
auto sum_2x2 = cl.max_sum_2x2().sum; // highest sum of 2x2 subclusters
|
||||
auto total_sum = cl.sum(); // sum of all pixels
|
||||
auto sum_2x2 = cl.max_sum_2x2().first; // highest sum of 2x2 subclusters
|
||||
auto total_sum = cl.sum(); // sum of all pixels
|
||||
|
||||
auto noise =
|
||||
(*m_noise_map)(cl.y, cl.x); // TODO! check if this is correct
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
#pragma once
|
||||
#include <atomic>
|
||||
#include <filesystem>
|
||||
@@ -11,8 +10,7 @@
|
||||
namespace aare {
|
||||
|
||||
template <typename ClusterType,
|
||||
typename = std::enable_if_t<is_cluster_v<ClusterType>>,
|
||||
typename = std::enable_if_t<no_2x2_cluster<ClusterType>::value>>
|
||||
typename = std::enable_if_t<is_cluster_v<ClusterType>>>
|
||||
class ClusterFileSink {
|
||||
ProducerConsumerQueue<ClusterVector<ClusterType>> *m_source;
|
||||
std::atomic<bool> m_stop_requested{false};
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
#pragma once
|
||||
#include "aare/ClusterFile.hpp"
|
||||
#include "aare/ClusterVector.hpp"
|
||||
@@ -11,16 +10,8 @@
|
||||
|
||||
namespace aare {
|
||||
|
||||
template <typename ClusterType,
|
||||
typename = std::enable_if_t<is_cluster_v<ClusterType>>>
|
||||
struct no_2x2_cluster {
|
||||
constexpr static bool value =
|
||||
ClusterType::cluster_size_x > 2 && ClusterType::cluster_size_y > 2;
|
||||
};
|
||||
|
||||
template <typename ClusterType = Cluster<int32_t, 3, 3>,
|
||||
typename FRAME_TYPE = uint16_t, typename PEDESTAL_TYPE = double,
|
||||
typename = std::enable_if_t<no_2x2_cluster<ClusterType>::value>>
|
||||
typename FRAME_TYPE = uint16_t, typename PEDESTAL_TYPE = double>
|
||||
class ClusterFinder {
|
||||
Shape<2> m_image_size;
|
||||
const PEDESTAL_TYPE m_nSigma;
|
||||
@@ -145,7 +136,7 @@ class ClusterFinder {
|
||||
// don't have a photon
|
||||
int i = 0;
|
||||
for (int ir = -dy; ir < dy + has_center_pixel_y; ir++) {
|
||||
for (int ic = -dx; ic < dx + has_center_pixel_x; ic++) {
|
||||
for (int ic = -dx; ic < dx + has_center_pixel_y; ic++) {
|
||||
if (ix + ic >= 0 && ix + ic < frame.shape(1) &&
|
||||
iy + ir >= 0 && iy + ir < frame.shape(0)) {
|
||||
CT tmp =
|
||||
@@ -154,8 +145,8 @@ class ClusterFinder {
|
||||
m_pedestal.mean(iy + ir, ix + ic));
|
||||
cluster.data[i] =
|
||||
tmp; // Watch for out of bounds access
|
||||
i++;
|
||||
}
|
||||
i++;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
#pragma once
|
||||
#include <atomic>
|
||||
#include <cstdint>
|
||||
@@ -33,8 +32,7 @@ struct FrameWrapper {
|
||||
* @tparam CT type of the cluster data
|
||||
*/
|
||||
template <typename ClusterType = Cluster<int32_t, 3, 3>,
|
||||
typename FRAME_TYPE = uint16_t, typename PEDESTAL_TYPE = double,
|
||||
typename = std::enable_if_t<no_2x2_cluster<ClusterType>::value>>
|
||||
typename FRAME_TYPE = uint16_t, typename PEDESTAL_TYPE = double>
|
||||
class ClusterFinderMT {
|
||||
|
||||
protected:
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
#pragma once
|
||||
#include "aare/Cluster.hpp" //TODO maybe store in seperate file !!!
|
||||
#include <algorithm>
|
||||
@@ -29,7 +28,7 @@ class ClusterVector; // Forward declaration
|
||||
* needed.
|
||||
* @tparam T data type of the pixels in the cluster
|
||||
* @tparam CoordType data type of the x and y coordinates of the cluster
|
||||
* (normally uint16_t)
|
||||
* (normally int16_t)
|
||||
*/
|
||||
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType>
|
||||
@@ -87,14 +86,15 @@ class ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> {
|
||||
/**
|
||||
* @brief Sum the pixels in the 2x2 subcluster with the biggest pixel sum in
|
||||
* each cluster
|
||||
* @return vector of sums index pairs for each cluster
|
||||
* @return std::vector<T> vector of sums for each cluster
|
||||
*/
|
||||
std::vector<Sum_index_pair<T, corner>> sum_2x2() {
|
||||
std::vector<Sum_index_pair<T, corner>> sums_2x2(m_data.size());
|
||||
std::vector<T> sum_2x2() {
|
||||
std::vector<T> sums_2x2(m_data.size());
|
||||
|
||||
std::transform(
|
||||
m_data.begin(), m_data.end(), sums_2x2.begin(),
|
||||
[](const ClusterType &cluster) { return cluster.max_sum_2x2(); });
|
||||
std::transform(m_data.begin(), m_data.end(), sums_2x2.begin(),
|
||||
[](const ClusterType &cluster) {
|
||||
return cluster.max_sum_2x2().first;
|
||||
});
|
||||
|
||||
return sums_2x2;
|
||||
}
|
||||
@@ -122,11 +122,6 @@ class ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> {
|
||||
*/
|
||||
size_t size() const { return m_data.size(); }
|
||||
|
||||
/**
|
||||
* @brief Check if the vector is empty
|
||||
*/
|
||||
bool empty() const { return m_data.empty(); }
|
||||
|
||||
uint8_t cluster_size_x() const { return ClusterSizeX; }
|
||||
|
||||
uint8_t cluster_size_y() const { return ClusterSizeY; }
|
||||
@@ -172,42 +167,4 @@ class ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> {
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Reduce a cluster to a 2x2 cluster by selecting the 2x2 block with the
|
||||
* highest sum.
|
||||
* @param cv Clustervector containing clusters to reduce
|
||||
* @return Clustervector with reduced clusters
|
||||
* @note The cluster is filled using row major ordering starting at the top-left
* (thus for a max subcluster in the top left corner the photon hit is at
* the fourth position)
|
||||
*/
|
||||
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType>
|
||||
ClusterVector<Cluster<T, 2, 2, CoordType>> reduce_to_2x2(
|
||||
const ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>>
|
||||
&cv) {
|
||||
ClusterVector<Cluster<T, 2, 2, CoordType>> result;
|
||||
for (const auto &c : cv) {
|
||||
result.push_back(reduce_to_2x2(c));
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Reduce a cluster to a 3x3 cluster
|
||||
* @param cv Clustervector containing clusters to reduce
|
||||
* @return Clustervector with reduced clusters
|
||||
*/
|
||||
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||
typename CoordType>
|
||||
ClusterVector<Cluster<T, 3, 3, CoordType>> reduce_to_3x3(
|
||||
const ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>>
|
||||
&cv) {
|
||||
ClusterVector<Cluster<T, 3, 3, CoordType>> result;
|
||||
for (const auto &c : cv) {
|
||||
result.push_back(reduce_to_3x3(c));
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
} // namespace aare
|
||||
@@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
#pragma once
|
||||
|
||||
#include "aare/FileInterface.hpp"
|
||||
|
||||
@@ -1,82 +0,0 @@
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
#pragma once
|
||||
#include "aare/RawMasterFile.hpp" //ROI refactor away
|
||||
#include "aare/defs.hpp"
|
||||
namespace aare {
|
||||
|
||||
struct ModuleConfig {
|
||||
int module_gap_row{};
|
||||
int module_gap_col{};
|
||||
|
||||
bool operator==(const ModuleConfig &other) const {
|
||||
if (module_gap_col != other.module_gap_col)
|
||||
return false;
|
||||
if (module_gap_row != other.module_gap_row)
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Class to hold the geometry of a module. Where pixel 0 is located and
|
||||
* the size of the module
|
||||
*/
|
||||
struct ModuleGeometry {
|
||||
int origin_x{};
|
||||
int origin_y{};
|
||||
int height{};
|
||||
int width{};
|
||||
int row_index{};
|
||||
int col_index{};
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Class to hold the geometry of a detector. Number of modules, their
|
||||
* size and where pixel 0 for each module is located
|
||||
*/
|
||||
class DetectorGeometry {
|
||||
public:
|
||||
DetectorGeometry(const xy &geometry, const ssize_t module_pixels_x,
|
||||
const ssize_t module_pixels_y,
|
||||
const xy udp_interfaces_per_module = xy{1, 1},
|
||||
const bool quad = false);
|
||||
|
||||
~DetectorGeometry() = default;
|
||||
|
||||
/**
|
||||
* @brief Update the detector geometry given a region of interest
|
||||
*
|
||||
* @param roi
|
||||
* @return DetectorGeometry
|
||||
*/
|
||||
void update_geometry_with_roi(ROI roi);
|
||||
|
||||
size_t n_modules() const;
|
||||
|
||||
size_t n_modules_in_roi() const;
|
||||
|
||||
size_t pixels_x() const;
|
||||
size_t pixels_y() const;
|
||||
|
||||
size_t modules_x() const;
|
||||
size_t modules_y() const;
|
||||
|
||||
const std::vector<ssize_t> &get_modules_in_roi() const;
|
||||
|
||||
ssize_t get_modules_in_roi(const size_t index) const;
|
||||
|
||||
const std::vector<ModuleGeometry> &get_module_geometries() const;
|
||||
|
||||
const ModuleGeometry &get_module_geometries(const size_t index) const;
|
||||
|
||||
private:
|
||||
size_t m_modules_x{};
|
||||
size_t m_modules_y{};
|
||||
size_t m_pixels_x{};
|
||||
size_t m_pixels_y{};
|
||||
static constexpr ModuleConfig cfg{0, 0};
|
||||
std::vector<ModuleGeometry> module_geometries{};
|
||||
std::vector<ssize_t> modules_in_roi{};
|
||||
};
|
||||
|
||||
} // namespace aare
|
||||
@@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
#pragma once
|
||||
#include <cstdint>
|
||||
#include <map>
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
#pragma once
|
||||
#include "aare/FileInterface.hpp"
|
||||
#include <memory>
|
||||
@@ -8,7 +7,7 @@ namespace aare {
|
||||
/**
|
||||
* @brief RAII File class for reading, and in the future potentially writing
|
||||
* image files in various formats. Minimal generic interface. For special
|
||||
* functions please use the RawFile or NumpyFile classes directly. Wraps
|
||||
* functions please use the RawFile, NumpyFile or Hdf5File classes directly. Wraps
|
||||
* FileInterface to abstract the underlying file format
|
||||
* @note **frame_number** refers the the frame number sent by the detector while
|
||||
* **frame_index** is the position of the frame in the file
|
||||
|
||||
@@ -1,8 +1,7 @@
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
#pragma once
|
||||
#include "aare/Dtype.hpp"
|
||||
#include "aare/Frame.hpp"
|
||||
#include "aare/defs.hpp"
|
||||
#include "aare/to_string.hpp"
|
||||
|
||||
#include <filesystem>
|
||||
#include <vector>
|
||||
@@ -47,7 +46,7 @@ struct FileConfig {
|
||||
|
||||
/**
|
||||
* @brief FileInterface class to define the interface for file operations
|
||||
* @note parent class for NumpyFile and RawFile
|
||||
* @note parent class for NumpyFile, RawFile and Hdf5File
|
||||
* @note all functions are pure virtual and must be implemented by the derived
|
||||
* classes
|
||||
*/
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
#pragma once
|
||||
#include <cstdio>
|
||||
#include <filesystem>
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
#pragma once
|
||||
|
||||
#include <cmath>
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
#pragma once
|
||||
#include "aare/Dtype.hpp"
|
||||
#include "aare/NDArray.hpp"
|
||||
@@ -106,7 +105,7 @@ class Frame {
|
||||
* @tparam T type of the pixels
|
||||
* @return NDView<T, 2>
|
||||
*/
|
||||
template <typename T> NDView<T, 2> view() & {
|
||||
template <typename T> NDView<T, 2> view() {
|
||||
std::array<ssize_t, 2> shape = {static_cast<ssize_t>(m_rows),
|
||||
static_cast<ssize_t>(m_cols)};
|
||||
T *data = reinterpret_cast<T *>(m_data);
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
/************************************************
|
||||
* @file GainMap.hpp
|
||||
* @short function to apply gain map of image size to a vector of clusters -
|
||||
|
||||
211
include/aare/Hdf5File.hpp
Normal file
@@ -0,0 +1,211 @@
|
||||
#pragma once
|
||||
#include "aare/FileInterface.hpp"
|
||||
#include "aare/Frame.hpp"
|
||||
#include "aare/Hdf5MasterFile.hpp"
|
||||
#include "aare/NDArray.hpp" //for pixel map
|
||||
|
||||
#include <optional>
|
||||
|
||||
namespace aare {
|
||||
|
||||
class H5Handles {
|
||||
std::string file_name;
|
||||
std::string dataset_name;
|
||||
H5::H5File file;
|
||||
H5::DataSet dataset;
|
||||
H5::DataSpace dataspace;
|
||||
H5::DataType datatype;
|
||||
std::unique_ptr<H5::DataSpace> memspace;
|
||||
std::vector<hsize_t> dims;
|
||||
std::vector<hsize_t> count;
|
||||
std::vector<hsize_t> offset;
|
||||
|
||||
public:
|
||||
H5Handles(const std::string &fname, const std::string &dname)
|
||||
: file_name(fname), dataset_name(dname), file(fname, H5F_ACC_RDONLY),
|
||||
dataset(file.openDataSet(dname)), dataspace(dataset.getSpace()),
|
||||
datatype(dataset.getDataType()) {
|
||||
intialize_dimensions();
|
||||
initialize_memspace();
|
||||
}
|
||||
|
||||
std::vector<hsize_t> get_dims() const { return dims; }
|
||||
|
||||
void seek(size_t frame_index) {
|
||||
if (frame_index >= dims[0]) {
|
||||
throw std::runtime_error(LOCATION + "Invalid frame number");
|
||||
}
|
||||
offset[0] = static_cast<hsize_t>(frame_index);
|
||||
}
|
||||
|
||||
void get_data_into(size_t frame_index, std::byte *frame_buffer,
|
||||
size_t n_frames = 1) {
|
||||
seek(frame_index);
|
||||
count[0] = static_cast<hsize_t>(n_frames);
|
||||
// std::cout << "offset:" << ToString(offset) << " count:" <<
|
||||
// ToString(count) << std::endl;
|
||||
dataspace.selectHyperslab(H5S_SELECT_SET, count.data(), offset.data());
|
||||
dataset.read(frame_buffer, datatype, *memspace, dataspace);
|
||||
}
|
||||
|
||||
void get_header_into(size_t frame_index, int part_index,
|
||||
std::byte *header_buffer) {
|
||||
seek(frame_index);
|
||||
offset[1] = static_cast<hsize_t>(part_index);
|
||||
// std::cout << "offset:" << ToString(offset) << " count:" <<
|
||||
// ToString(count) << std::endl;
|
||||
dataspace.selectHyperslab(H5S_SELECT_SET, count.data(), offset.data());
|
||||
dataset.read(header_buffer, datatype, *memspace, dataspace);
|
||||
}
|
||||
|
||||
private:
|
||||
void intialize_dimensions() {
|
||||
int rank = dataspace.getSimpleExtentNdims();
|
||||
dims.resize(rank);
|
||||
dataspace.getSimpleExtentDims(dims.data(), nullptr);
|
||||
}
|
||||
|
||||
void initialize_memspace() {
|
||||
int rank = dataspace.getSimpleExtentNdims();
|
||||
count.clear();
|
||||
offset.clear();
|
||||
|
||||
// header datasets or header virtual datasets
|
||||
if (rank == 1 || rank == 2) {
|
||||
count = std::vector<hsize_t>(rank, 1); // slice 1 value
|
||||
offset = std::vector<hsize_t>(rank, 0);
|
||||
memspace = std::make_unique<H5::DataSpace>(H5S_SCALAR);
|
||||
} else if (rank >= 3) {
|
||||
// data dataset (frame x height x width)
|
||||
count = {1, dims[1], dims[2]};
|
||||
offset = {0, 0, 0};
|
||||
hsize_t dims_image[2] = {dims[1], dims[2]};
|
||||
memspace = std::make_unique<H5::DataSpace>(2, dims_image);
|
||||
} else {
|
||||
throw std::runtime_error(
|
||||
LOCATION + "Invalid rank for dataset: " + std::to_string(rank));
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Fn>
|
||||
void read_hdf5_header_fields(DetectorHeader *header, Fn &&fn_read_field) {
|
||||
fn_read_field(0, reinterpret_cast<std::byte *>(&(header->frameNumber)));
|
||||
fn_read_field(1, reinterpret_cast<std::byte *>(&(header->expLength)));
|
||||
fn_read_field(2, reinterpret_cast<std::byte *>(&(header->packetNumber)));
|
||||
fn_read_field(3, reinterpret_cast<std::byte *>(&(header->bunchId)));
|
||||
fn_read_field(4, reinterpret_cast<std::byte *>(&(header->timestamp)));
|
||||
fn_read_field(5, reinterpret_cast<std::byte *>(&(header->modId)));
|
||||
fn_read_field(6, reinterpret_cast<std::byte *>(&(header->row)));
|
||||
fn_read_field(7, reinterpret_cast<std::byte *>(&(header->column)));
|
||||
fn_read_field(8, reinterpret_cast<std::byte *>(&(header->reserved)));
|
||||
fn_read_field(9, reinterpret_cast<std::byte *>(&(header->debug)));
|
||||
fn_read_field(10, reinterpret_cast<std::byte *>(&(header->roundRNumber)));
|
||||
fn_read_field(11, reinterpret_cast<std::byte *>(&(header->detType)));
|
||||
fn_read_field(12, reinterpret_cast<std::byte *>(&(header->version)));
|
||||
fn_read_field(13, reinterpret_cast<std::byte *>(&(header->packetMask)));
|
||||
}
|
||||
|
||||
/**
 * @brief Class to read .h5 files. The class will parse the master file
 * to find the correct geometry for the frames.
 * @note A more generic interface is available in the aare::File class.
 * Consider using that unless you need HDF5-file-specific functionality.
 */
|
||||
class Hdf5File : public FileInterface {
|
||||
Hdf5MasterFile m_master;
|
||||
|
||||
size_t m_current_frame{};
|
||||
size_t m_total_frames{};
|
||||
size_t m_rows{};
|
||||
size_t m_cols{};
|
||||
|
||||
static const std::string metadata_group_name;
|
||||
static const std::vector<std::string> header_dataset_names;
|
||||
std::unique_ptr<H5Handles> m_data_dataset{nullptr};
|
||||
std::vector<std::unique_ptr<H5Handles>> m_header_datasets{};
|
||||
|
||||
public:
|
||||
/**
 * @brief Hdf5File constructor
 * @param fname path to the master file (.h5)
 * @param mode file mode (only "r" is supported at the moment)
 */
|
||||
Hdf5File(const std::filesystem::path &fname, const std::string &mode = "r");
|
||||
virtual ~Hdf5File() override;
|
||||
|
||||
Frame read_frame() override;
|
||||
Frame read_frame(size_t frame_number) override;
|
||||
std::vector<Frame> read_n(size_t n_frames) override;
|
||||
void read_into(std::byte *image_buf) override;
|
||||
void read_into(std::byte *image_buf, size_t n_frames) override;
|
||||
|
||||
// TODO! do we need to adapt the API?
|
||||
void read_into(std::byte *image_buf, DetectorHeader *header);
|
||||
void read_into(std::byte *image_buf, size_t n_frames,
|
||||
DetectorHeader *header);
|
||||
|
||||
size_t frame_number(size_t frame_index) override;
|
||||
size_t bytes_per_frame() override;
|
||||
size_t pixels_per_frame() override;
|
||||
size_t bytes_per_pixel() const;
|
||||
void seek(size_t frame_index) override;
|
||||
size_t tell() override;
|
||||
size_t total_frames() const override;
|
||||
size_t rows() const override;
|
||||
size_t cols() const override;
|
||||
size_t bitdepth() const override;
|
||||
xy geometry();
|
||||
size_t n_modules() const;
|
||||
Hdf5MasterFile master() const;
|
||||
|
||||
DetectorType detector_type() const override;
|
||||
|
||||
private:
|
||||
/**
 * @brief get the frame at the given frame index
 * @param frame_index index of the frame to read
 * @return Frame
 */
|
||||
Frame get_frame(size_t frame_index);
|
||||
|
||||
/**
 * @brief read the frame at the given frame index into the frame buffer
 * @param frame_index index of the frame to read
 * @param frame_buffer buffer to store the frame(s)
 * @param n_frames number of frames to read (default is 1)
 * @param header optional buffer for the detector header(s)
 */
|
||||
void get_frame_into(size_t frame_index, std::byte *frame_buffer,
|
||||
size_t n_frames = 1, DetectorHeader *header = nullptr);
|
||||
|
||||
/**
|
||||
* @brief read the frame at the given frame index into the image buffer
|
||||
* @param frame_index frame number to read
|
||||
* @param n_frames number of frames to read (default is 1)
|
||||
* @param frame_buffer buffer to store the frame
|
||||
*/
|
||||
void get_data_into(size_t frame_index, std::byte *frame_buffer,
|
||||
size_t n_frames = 1);
|
||||
|
||||
/**
|
||||
* @brief read the header at the given frame index into the header buffer
|
||||
* @param frame_index frame number to read
|
||||
* @param part_index part index to read (for virtual datasets)
|
||||
* @param header buffer to store the header
|
||||
*/
|
||||
void get_header_into(size_t frame_index, int part_index,
|
||||
DetectorHeader *header);
|
||||
|
||||
/**
|
||||
* @brief read the header of the file
|
||||
* @param fname path to the data subfile
|
||||
* @return DetectorHeader
|
||||
*/
|
||||
static DetectorHeader read_header(const std::filesystem::path &fname);
|
||||
|
||||
void open_data_file();
|
||||
void open_header_files();
|
||||
};
|
||||
|
||||
} // namespace aare
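For orientation, a minimal usage sketch of the interface declared above (the file name is hypothetical, and only mode "r" is supported):

#include "aare/Hdf5File.hpp"

void dump_frames() {
    aare::Hdf5File f("run_master_0.h5"); // hypothetical master file, mode defaults to "r"
    for (size_t i = 0; i < f.total_frames(); ++i) {
        aare::Frame frame = f.read_frame(); // reads the current frame and advances
        // ... process the frame ...
    }
}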
|
||||
135
include/aare/Hdf5MasterFile.hpp
Normal file
@@ -0,0 +1,135 @@
|
||||
#pragma once
|
||||
#include "aare/defs.hpp"
|
||||
#include "aare/scan_parameters.hpp"
|
||||
|
||||
#include "H5Cpp.h"
|
||||
#include <filesystem>
|
||||
#include <fmt/format.h>
|
||||
#include <fstream>
|
||||
#include <optional>
|
||||
|
||||
namespace aare {
|
||||
|
||||
using ns = std::chrono::nanoseconds;
|
||||
|
||||
/**
|
||||
 * @brief Class for parsing a master file in the HDF5 (.h5) format
|
||||
*/
|
||||
class Hdf5MasterFile {
|
||||
std::filesystem::path m_file_name{};
|
||||
std::string m_version;
|
||||
DetectorType m_type;
|
||||
TimingMode m_timing_mode;
|
||||
xy m_geometry{};
|
||||
int m_image_size_in_bytes{};
|
||||
int m_pixels_y{};
|
||||
int m_pixels_x{};
|
||||
int m_max_frames_per_file{};
|
||||
FrameDiscardPolicy m_frame_discard_policy{};
|
||||
int m_frame_padding{};
|
||||
std::optional<ScanParameters> m_scan_parameters{};
|
||||
size_t m_total_frames_expected{};
|
||||
std::optional<ns> m_exptime{};
|
||||
std::optional<ns> m_period{};
|
||||
std::optional<BurstMode> m_burst_mode{};
|
||||
std::optional<int> m_number_of_udp_interfaces{};
|
||||
int m_bitdepth{};
|
||||
std::optional<bool> m_ten_giga{};
|
||||
std::optional<int> m_threshold_energy{};
|
||||
std::optional<std::vector<int>> m_threshold_energy_all{};
|
||||
std::optional<ns> m_subexptime{};
|
||||
std::optional<ns> m_subperiod{};
|
||||
std::optional<bool> m_quad{};
|
||||
std::optional<int> m_number_of_rows{};
|
||||
std::optional<std::vector<size_t>> m_rate_corrections{};
|
||||
std::optional<uint32_t> m_adc_mask{};
|
||||
bool m_analog_flag{};
|
||||
std::optional<int> m_analog_samples{};
|
||||
bool m_digital_flag{};
|
||||
std::optional<int> m_digital_samples{};
|
||||
std::optional<int> m_dbit_offset{};
|
||||
std::optional<size_t> m_dbit_list{};
|
||||
std::optional<int> m_transceiver_mask{};
|
||||
bool m_transceiver_flag{};
|
||||
std::optional<int> m_transceiver_samples{};
|
||||
// g1 roi - will not be implemented?
|
||||
std::optional<ROI> m_roi{};
|
||||
std::optional<int> m_counter_mask{};
|
||||
std::optional<std::vector<ns>> m_exptime_array{};
|
||||
std::optional<std::vector<ns>> m_gate_delay_array{};
|
||||
std::optional<int> m_gates{};
|
||||
std::optional<std::map<std::string, std::string>>
|
||||
m_additional_json_header{};
|
||||
size_t m_frames_in_file{};
|
||||
|
||||
// TODO! should these be bool?
|
||||
|
||||
public:
|
||||
Hdf5MasterFile(const std::filesystem::path &fpath);
|
||||
|
||||
std::filesystem::path file_name() const;
|
||||
|
||||
const std::string &version() const; //!< For example "7.2"
|
||||
const DetectorType &detector_type() const;
|
||||
const TimingMode &timing_mode() const;
|
||||
xy geometry() const;
|
||||
int image_size_in_bytes() const;
|
||||
int pixels_y() const;
|
||||
int pixels_x() const;
|
||||
int max_frames_per_file() const;
|
||||
const FrameDiscardPolicy &frame_discard_policy() const;
|
||||
int frame_padding() const;
|
||||
std::optional<ScanParameters> scan_parameters() const;
|
||||
size_t total_frames_expected() const;
|
||||
std::optional<ns> exptime() const;
|
||||
std::optional<ns> period() const;
|
||||
std::optional<BurstMode> burst_mode() const;
|
||||
std::optional<int> number_of_udp_interfaces() const;
|
||||
int bitdepth() const;
|
||||
std::optional<bool> ten_giga() const;
|
||||
std::optional<int> threshold_energy() const;
|
||||
std::optional<std::vector<int>> threshold_energy_all() const;
|
||||
std::optional<ns> subexptime() const;
|
||||
std::optional<ns> subperiod() const;
|
||||
std::optional<bool> quad() const;
|
||||
std::optional<int> number_of_rows() const;
|
||||
std::optional<std::vector<size_t>> rate_corrections() const;
|
||||
std::optional<uint32_t> adc_mask() const;
|
||||
bool analog_flag() const;
|
||||
std::optional<int> analog_samples() const;
|
||||
bool digital_flag() const;
|
||||
std::optional<int> digital_samples() const;
|
||||
std::optional<int> dbit_offset() const;
|
||||
std::optional<size_t> dbit_list() const;
|
||||
std::optional<int> transceiver_mask() const;
|
||||
bool transceiver_flag() const;
|
||||
std::optional<int> transceiver_samples() const;
|
||||
// g1 roi - will not be implemented?
|
||||
std::optional<ROI> roi() const;
|
||||
std::optional<int> counter_mask() const;
|
||||
std::optional<std::vector<ns>> exptime_array() const;
|
||||
std::optional<std::vector<ns>> gate_delay_array() const;
|
||||
std::optional<int> gates() const;
|
||||
std::optional<std::map<std::string, std::string>>
|
||||
additional_json_header() const;
|
||||
size_t frames_in_file() const;
|
||||
size_t n_modules() const;
|
||||
|
||||
private:
|
||||
static const std::string metadata_group_name;
|
||||
void parse_acquisition_metadata(const std::filesystem::path &fpath);
|
||||
|
||||
template <typename T>
|
||||
T h5_read_scalar_dataset(const H5::DataSet &dataset,
|
||||
const H5::DataType &data_type);
|
||||
|
||||
template <typename T>
|
||||
T h5_get_scalar_dataset(const H5::H5File &file,
|
||||
const std::string &dataset_name);
|
||||
};
|
||||
|
||||
template <>
|
||||
std::string Hdf5MasterFile::h5_read_scalar_dataset<std::string>(
|
||||
const H5::DataSet &dataset, const H5::DataType &data_type);
|
||||
} // namespace aare
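A short sketch of reading acquisition metadata through this class (the path is hypothetical; the optional getters are empty when the corresponding field is missing from the master file):

#include "aare/Hdf5MasterFile.hpp"
#include <fmt/format.h>

void print_metadata() {
    aare::Hdf5MasterFile m("run_master_0.h5"); // hypothetical path
    fmt::print("bitdepth: {}\n", m.bitdepth());
    fmt::print("frames in file: {}\n", m.frames_in_file());
    if (auto t = m.exptime()) // std::optional, set only if present in the file
        fmt::print("exposure time: {} ns\n", t->count());
}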
|
||||
@@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
#pragma once
|
||||
|
||||
#include "aare/CalculateEta.hpp"
|
||||
@@ -18,10 +17,7 @@ struct Photon {
|
||||
};
|
||||
|
||||
class Interpolator {
|
||||
// marginal CDF of eta_x (if rosenblatt applied), conditional
|
||||
// CDF of eta_x conditioned on eta_y
|
||||
NDArray<double, 3> m_ietax;
|
||||
// conditional CDF of eta_y conditioned on eta_x
|
||||
NDArray<double, 3> m_ietay;
|
||||
|
||||
NDArray<double, 1> m_etabinsx;
|
||||
@@ -29,210 +25,106 @@ class Interpolator {
|
||||
NDArray<double, 1> m_energy_bins;
|
||||
|
||||
public:
|
||||
/**
|
||||
* @brief Constructor for the Interpolator class
|
||||
* @param etacube joint distribution of etaX, etaY and photon energy
|
||||
* @param xbins bin edges for etaX
|
||||
* @param ybins bin edges for etaY
|
||||
* @param ebins bin edges for photon energy
|
||||
 * @note the first dimension is etaX, the second etaY, the third photon energy
|
||||
*/
|
||||
Interpolator(NDView<double, 3> etacube, NDView<double, 1> xbins,
|
||||
NDView<double, 1> ybins, NDView<double, 1> ebins);
|
||||
|
||||
/**
|
||||
* @brief Constructor for the Interpolator class
|
||||
* @param xbins bin edges for etaX
|
||||
* @param ybins bin edges for etaY
|
||||
* @param ebins bin edges for photon energy
|
||||
*/
|
||||
Interpolator(NDView<double, 1> xbins, NDView<double, 1> ybins,
|
||||
NDView<double, 1> ebins);
|
||||
|
||||
/**
|
||||
* @brief transforms the joint eta distribution of etaX and etaY to the two
|
||||
 * independent uniform distributions based on the Rosenblatt transform for
|
||||
* each energy level
|
||||
* @param etacube joint distribution of etaX, etaY and photon energy
|
||||
 * @note the first dimension is etaX, the second etaY, the third photon energy
|
||||
*/
|
||||
void rosenblatttransform(NDView<double, 3> etacube);
|
||||
|
||||
NDArray<double, 3> get_ietax() { return m_ietax; }
|
||||
NDArray<double, 3> get_ietay() { return m_ietay; }
|
||||
|
||||
/**
|
||||
* @brief interpolates the cluster centers for all clusters to a better
|
||||
* precision
|
||||
* @tparam ClusterType Type of Clusters to interpolate
|
||||
 * @tparam EtaFunction Function object that calculates the desired eta
 * (default: calculate_eta2)
|
||||
* @return interpolated photons (photon positions are given as double but
|
||||
* following row column format e.g. x=0, y=0 means top row and first column
|
||||
* of frame)
|
||||
*/
|
||||
template <auto EtaFunction = calculate_eta2, typename ClusterType,
|
||||
template <typename ClusterType,
|
||||
typename Enable = std::enable_if_t<is_cluster_v<ClusterType>>>
|
||||
std::vector<Photon> interpolate(const ClusterVector<ClusterType> &clusters);
|
||||
|
||||
private:
|
||||
/**
|
||||
* @brief implements underlying interpolation logic based on EtaFunction
|
||||
* Type
|
||||
 * @tparam EtaFunction Function object that calculates the desired eta
|
||||
* @param u: transformed photon position in x between [0,1]
|
||||
* @param v: transformed photon position in y between [0,1]
|
||||
* @param c: corner of eta
|
||||
*/
|
||||
template <auto EtaFunction, typename ClusterType>
|
||||
void interpolation_logic(Photon &photon, const double u, const double v,
|
||||
const corner c = corner::cTopLeft);
|
||||
|
||||
/**
|
||||
* @brief bilinear interpolation of the transformed eta values
|
||||
* @param ix index of etaX bin
|
||||
* @param iy index of etaY bin
|
||||
* @param ie index of energy bin
|
||||
* @return pair of interpolated transformed eta values (ietax, ietay)
|
||||
*/
|
||||
template <typename T>
|
||||
std::pair<double, double>
|
||||
bilinear_interpolation(const size_t ix, const size_t iy, const size_t ie,
|
||||
const Eta2<T> &eta);
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
std::pair<double, double>
|
||||
Interpolator::bilinear_interpolation(const size_t ix, const size_t iy,
|
||||
const size_t ie, const Eta2<T> &eta) {
|
||||
auto next_index_y = static_cast<ssize_t>(iy + 1) >= m_ietax.shape(1)
|
||||
? m_ietax.shape(1) - 1
|
||||
: iy + 1;
|
||||
auto next_index_x = static_cast<ssize_t>(ix + 1) >= m_ietax.shape(0)
|
||||
? m_ietax.shape(0) - 1
|
||||
: ix + 1;
|
||||
|
||||
// bilinear interpolation
|
||||
double ietax_interp_left = linear_interpolation(
|
||||
{m_etabinsy(iy), m_etabinsy(iy + 1)},
|
||||
{m_ietax(ix, iy, ie), m_ietax(ix, next_index_y, ie)}, eta.y);
|
||||
double ietax_interp_right =
|
||||
linear_interpolation({m_etabinsy(iy), m_etabinsy(iy + 1)},
|
||||
{m_ietax(next_index_x, iy, ie),
|
||||
m_ietax(next_index_x, next_index_y, ie)},
|
||||
eta.y);
|
||||
|
||||
// transformed photon position x between [0,1]
|
||||
double ietax_interpolated =
|
||||
linear_interpolation({m_etabinsx(ix), m_etabinsx(ix + 1)},
|
||||
{ietax_interp_left, ietax_interp_right}, eta.x);
|
||||
|
||||
double ietay_interp_left = linear_interpolation(
|
||||
{m_etabinsx(ix), m_etabinsx(ix + 1)},
|
||||
{m_ietay(ix, iy, ie), m_ietay(next_index_x, iy, ie)}, eta.x);
|
||||
double ietay_interp_right =
|
||||
linear_interpolation({m_etabinsx(ix), m_etabinsx(ix + 1)},
|
||||
{m_ietay(ix, next_index_y, ie),
|
||||
m_ietay(next_index_x, next_index_y, ie)},
|
||||
eta.x);
|
||||
|
||||
// transformed photon position y between [0,1]
|
||||
double ietay_interpolated =
|
||||
linear_interpolation({m_etabinsy(iy), m_etabinsy(iy + 1)},
|
||||
{ietay_interp_left, ietay_interp_right}, eta.y);
|
||||
|
||||
return {ietax_interpolated, ietay_interpolated};
|
||||
}
|
||||
|
||||
template <auto EtaFunction, typename ClusterType, typename Enable>
|
||||
// TODO: generalize to support any clustertype!!! otherwise add std::enable_if_t
|
||||
// to only take Cluster2x2 and Cluster3x3
|
||||
template <typename ClusterType, typename Enable>
|
||||
std::vector<Photon>
|
||||
Interpolator::interpolate(const ClusterVector<ClusterType> &clusters) {
|
||||
std::vector<Photon> photons;
|
||||
photons.reserve(clusters.size());
|
||||
|
||||
for (const ClusterType &cluster : clusters) {
|
||||
if (clusters.cluster_size_x() == 3 || clusters.cluster_size_y() == 3) {
|
||||
for (const ClusterType &cluster : clusters) {
|
||||
|
||||
auto eta = EtaFunction(cluster);
|
||||
auto eta = calculate_eta2(cluster);
|
||||
|
||||
Photon photon;
|
||||
photon.x = cluster.x;
|
||||
photon.y = cluster.y;
|
||||
photon.energy = static_cast<decltype(photon.energy)>(eta.sum);
|
||||
Photon photon;
|
||||
photon.x = cluster.x;
|
||||
photon.y = cluster.y;
|
||||
photon.energy = static_cast<decltype(photon.energy)>(eta.sum);
|
||||
|
||||
// std::cout << "eta.x: " << eta.x << " eta.y: " << eta.y << std::endl;
|
||||
// auto ie = nearest_index(m_energy_bins, photon.energy)-1;
|
||||
// auto ix = nearest_index(m_etabinsx, eta.x)-1;
|
||||
// auto iy = nearest_index(m_etabinsy, eta.y)-1;
|
||||
// Finding the index of the last element that is smaller
|
||||
// should work fine as long as we have many bins
|
||||
auto ie = last_smaller(m_energy_bins, photon.energy);
|
||||
auto ix = last_smaller(m_etabinsx, eta.x);
|
||||
auto iy = last_smaller(m_etabinsy, eta.y);
|
||||
|
||||
// Finding the index of the last element that is smaller
|
||||
// should work fine as long as we have many bins
|
||||
auto ie = last_smaller(m_energy_bins, photon.energy);
|
||||
auto ix = last_smaller(m_etabinsx, eta.x);
|
||||
auto iy = last_smaller(m_etabinsy, eta.y);
|
||||
// fmt::print("ex: {}, ix: {}, iy: {}\n", ie, ix, iy);
|
||||
|
||||
// std::cout << "ix: " << ix << " iy: " << iy << std::endl;
|
||||
double dX, dY;
|
||||
// cBottomLeft = 0,
|
||||
// cBottomRight = 1,
|
||||
// cTopLeft = 2,
|
||||
// cTopRight = 3
|
||||
switch (static_cast<corner>(eta.c)) {
|
||||
case corner::cTopLeft:
|
||||
dX = -1.;
|
||||
dY = 0;
|
||||
break;
|
||||
case corner::cTopRight:;
|
||||
dX = 0;
|
||||
dY = 0;
|
||||
break;
|
||||
case corner::cBottomLeft:
|
||||
dX = -1.;
|
||||
dY = -1.;
|
||||
break;
|
||||
case corner::cBottomRight:
|
||||
dX = 0.;
|
||||
dY = -1.;
|
||||
break;
|
||||
}
|
||||
photon.x += m_ietax(ix, iy, ie) * 2 + dX;
|
||||
photon.y += m_ietay(ix, iy, ie) * 2 + dY;
|
||||
photons.push_back(photon);
|
||||
}
|
||||
} else if (clusters.cluster_size_x() == 2 ||
|
||||
clusters.cluster_size_y() == 2) {
|
||||
for (const ClusterType &cluster : clusters) {
|
||||
auto eta = calculate_eta2(cluster);
|
||||
|
||||
// TODO: bilinear interpolation only works if all bins have a size > 1 -
// otherwise the bilinear interpolation uses zero values, which skews the
// results
|
||||
// TODO: maybe trim the bins at the edges with zero values beforehand
|
||||
// auto [ietax_interpolated, ietay_interpolated] =
|
||||
// bilinear_interpolation(ix, iy, ie, eta);
|
||||
Photon photon;
|
||||
photon.x = cluster.x;
|
||||
photon.y = cluster.y;
|
||||
photon.energy = static_cast<decltype(photon.energy)>(eta.sum);
|
||||
|
||||
double ietax_interpolated = m_ietax(ix, iy, ie);
|
||||
double ietay_interpolated = m_ietay(ix, iy, ie);
|
||||
// Now do some actual interpolation.
|
||||
// Find which energy bin the cluster is in
|
||||
// auto ie = nearest_index(m_energy_bins, photon.energy)-1;
|
||||
// auto ix = nearest_index(m_etabinsx, eta.x)-1;
|
||||
// auto iy = nearest_index(m_etabinsy, eta.y)-1;
|
||||
// Finding the index of the last element that is smaller
|
||||
// should work fine as long as we have many bins
|
||||
auto ie = last_smaller(m_energy_bins, photon.energy);
|
||||
auto ix = last_smaller(m_etabinsx, eta.x);
|
||||
auto iy = last_smaller(m_etabinsy, eta.y);
|
||||
|
||||
interpolation_logic<EtaFunction, ClusterType>(
|
||||
photon, ietax_interpolated, ietay_interpolated, eta.c);
|
||||
photon.x += m_ietax(ix, iy, ie) *
|
||||
2; // eta goes between 0 and 1 but we could move the hit
|
||||
// anywhere in the 2x2
|
||||
photon.y += m_ietay(ix, iy, ie) * 2;
|
||||
photons.push_back(photon);
|
||||
}
|
||||
|
||||
photons.push_back(photon);
|
||||
} else {
|
||||
throw std::runtime_error(
|
||||
"Only 3x3 and 2x2 clusters are supported for interpolation");
|
||||
}
|
||||
|
||||
return photons;
|
||||
}
|
||||
|
||||
template <auto EtaFunction, typename ClusterType>
|
||||
void Interpolator::interpolation_logic(Photon &photon, const double u,
|
||||
const double v, const corner c) {
|
||||
|
||||
// std::cout << "u: " << u << " v: " << v << std::endl;
|
||||
|
||||
// TODO: try to call this with std::is_same_v and have it constexpr if
|
||||
// possible
|
||||
if (EtaFunction == &calculate_eta2<typename ClusterType::value_type,
|
||||
ClusterType::cluster_size_x,
|
||||
ClusterType::cluster_size_y,
|
||||
typename ClusterType::coord_type> ||
|
||||
EtaFunction == &calculate_full_eta2<typename ClusterType::value_type,
|
||||
ClusterType::cluster_size_x,
|
||||
ClusterType::cluster_size_y,
|
||||
typename ClusterType::coord_type>) {
|
||||
double dX{}, dY{};
|
||||
|
||||
// TODO: could also change the sign of the eta calculation
|
||||
switch (c) {
|
||||
case corner::cTopLeft:
|
||||
dX = -1.0;
|
||||
dY = -1.0;
|
||||
break;
|
||||
case corner::cTopRight:;
|
||||
dX = 0.0;
|
||||
dY = -1.0;
|
||||
break;
|
||||
case corner::cBottomLeft:
|
||||
dX = -1.0;
|
||||
dY = 0.0;
|
||||
break;
|
||||
case corner::cBottomRight:
|
||||
dX = 0.0;
|
||||
dY = 0.0;
|
||||
break;
|
||||
}
|
||||
photon.x = photon.x + 0.5 + u + dX; // use pixel center + 0.5
|
||||
photon.y = photon.y + 0.5 + v +
|
||||
dY; // eta2 calculates the ratio between bottom and sum of
|
||||
// bottom and top shift by 1 add eta value correctly
|
||||
} else {
|
||||
photon.x += u;
|
||||
photon.y += v;
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace aare
|
||||
@@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
#pragma once
|
||||
#include <cstdint>
|
||||
#include <filesystem>
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
#pragma once
|
||||
/*
|
||||
Container holding image data, or a time series of image data in contiguous
|
||||
@@ -26,7 +25,7 @@ template <typename T, ssize_t Ndim = 2>
|
||||
class NDArray : public ArrayExpr<NDArray<T, Ndim>, Ndim> {
|
||||
std::array<ssize_t, Ndim> shape_;
|
||||
std::array<ssize_t, Ndim> strides_;
|
||||
size_t size_{}; //TODO! do we need to store size when we have shape?
|
||||
size_t size_{};
|
||||
T *data_;
|
||||
|
||||
public:
|
||||
@@ -34,7 +33,7 @@ class NDArray : public ArrayExpr<NDArray<T, Ndim>, Ndim> {
|
||||
* @brief Default constructor. Will construct an empty NDArray.
|
||||
*
|
||||
*/
|
||||
NDArray() : shape_(), strides_(c_strides<Ndim>(shape_)), data_(nullptr) {};
|
||||
NDArray() : shape_(), strides_(c_strides<Ndim>(shape_)), data_(nullptr){};
|
||||
|
||||
/**
|
||||
* @brief Construct a new NDArray object with a given shape.
|
||||
@@ -44,7 +43,8 @@ class NDArray : public ArrayExpr<NDArray<T, Ndim>, Ndim> {
|
||||
*/
|
||||
explicit NDArray(std::array<ssize_t, Ndim> shape)
|
||||
: shape_(shape), strides_(c_strides<Ndim>(shape_)),
|
||||
size_(num_elements(shape_)),
|
||||
size_(std::accumulate(shape_.begin(), shape_.end(), 1,
|
||||
std::multiplies<>())),
|
||||
data_(new T[size_]) {}
|
||||
|
||||
/**
|
||||
@@ -79,24 +79,6 @@ class NDArray : public ArrayExpr<NDArray<T, Ndim>, Ndim> {
|
||||
other.reset(); // TODO! is this necessary?
|
||||
}
|
||||
|
||||
|
||||
//Move constructor from an an array with Ndim + 1
|
||||
template <ssize_t M, typename = std::enable_if_t<(M == Ndim + 1)>>
|
||||
NDArray(NDArray<T, M> &&other)
|
||||
: shape_(drop_first_dim(other.shape())),
|
||||
strides_(c_strides<Ndim>(shape_)), size_(num_elements(shape_)),
|
||||
data_(other.data()) {
|
||||
|
||||
// For now only allow move if the size matches, to avoid unreachable data
|
||||
// if the use case arises we can remove this check
|
||||
if(size() != other.size()) {
|
||||
data_ = nullptr; // avoid double free, other will clean up the memory in its destructor
|
||||
throw std::runtime_error(LOCATION +
|
||||
"Size mismatch in move constructor of NDArray<T, Ndim-1>");
|
||||
}
|
||||
other.reset();
|
||||
}
|
||||
|
||||
// Copy constructor
|
||||
NDArray(const NDArray &other)
|
||||
: shape_(other.shape_), strides_(c_strides<Ndim>(shape_)),
|
||||
@@ -398,6 +380,12 @@ NDArray<T, Ndim> NDArray<T, Ndim>::operator*(const T &value) {
|
||||
result *= value;
|
||||
return result;
|
||||
}
|
||||
// template <typename T, ssize_t Ndim> void NDArray<T, Ndim>::Print() {
|
||||
// if (shape_[0] < 20 && shape_[1] < 20)
|
||||
// Print_all();
|
||||
// else
|
||||
// Print_some();
|
||||
// }
|
||||
|
||||
template <typename T, ssize_t Ndim>
|
||||
std::ostream &operator<<(std::ostream &os, const NDArray<T, Ndim> &arr) {
|
||||
@@ -449,23 +437,4 @@ NDArray<T, Ndim> load(const std::string &pathname,
|
||||
return img;
|
||||
}
|
||||
|
||||
template <typename RT, typename NT, typename DT, ssize_t Ndim>
|
||||
NDArray<RT, Ndim> safe_divide(const NDArray<NT, Ndim> &numerator,
|
||||
const NDArray<DT, Ndim> &denominator) {
|
||||
if (numerator.shape() != denominator.shape()) {
|
||||
throw std::runtime_error(
|
||||
"Shapes of numerator and denominator must match");
|
||||
}
|
||||
NDArray<RT, Ndim> result(numerator.shape());
|
||||
for (ssize_t i = 0; i < numerator.size(); ++i) {
|
||||
if (denominator[i] != 0) {
|
||||
result[i] =
|
||||
static_cast<RT>(numerator[i]) / static_cast<RT>(denominator[i]);
|
||||
} else {
|
||||
result[i] = RT{0}; // or handle division by zero as needed
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
} // namespace aare
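As an illustration of safe_divide (a sketch, not part of the header): dividing an accumulated sum by a per-pixel count, where pixels with a zero count simply stay zero:

#include "aare/NDArray.hpp"
#include <array>

aare::NDArray<double, 2> normalise() {
    aare::NDArray<double, 2> sum(std::array<ssize_t, 2>{2, 2}, 10.0);
    aare::NDArray<int, 2> cnt(std::array<ssize_t, 2>{2, 2}, 0);
    cnt(0, 0) = 5;
    // element-wise: 10 / 5 == 2.0 at (0,0); the remaining pixels have count 0 and stay 0.0
    return aare::safe_divide<double>(sum, cnt);
}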
|
||||
@@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
#pragma once
|
||||
#include "aare/ArrayExpr.hpp"
|
||||
#include "aare/defs.hpp"
|
||||
@@ -27,33 +26,6 @@ Shape<Ndim> make_shape(const std::vector<size_t> &shape) {
|
||||
return arr;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* @brief Helper function to drop the first dimension of a shape.
|
||||
* This is useful when you want to create a 2D view from a 3D array.
|
||||
* @param shape The shape to drop the first dimension from.
|
||||
* @return A new shape with the first dimension dropped.
|
||||
*/
|
||||
template<size_t Ndim>
|
||||
Shape<Ndim-1> drop_first_dim(const Shape<Ndim> &shape) {
|
||||
static_assert(Ndim > 1, "Cannot drop first dimension from a 1D shape");
|
||||
Shape<Ndim - 1> new_shape;
|
||||
std::copy(shape.begin() + 1, shape.end(), new_shape.begin());
|
||||
return new_shape;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Helper function when constructing NDArray/NDView. Calculates the number
|
||||
* of elements in the resulting array from a shape.
|
||||
* @param shape The shape to calculate the number of elements for.
|
||||
 * @return The number of elements in an NDArray/NDView of that shape.
|
||||
*/
|
||||
template <size_t Ndim>
|
||||
size_t num_elements(const Shape<Ndim> &shape) {
|
||||
return std::accumulate(shape.begin(), shape.end(), 1,
|
||||
std::multiplies<size_t>());
|
||||
}
|
||||
|
||||
template <ssize_t Dim = 0, typename Strides>
|
||||
ssize_t element_offset(const Strides & /*unused*/) {
|
||||
return 0;
|
||||
@@ -94,28 +66,24 @@ class NDView : public ArrayExpr<NDView<T, Ndim>, Ndim> {
|
||||
: buffer_(buffer), strides_(c_strides<Ndim>(shape)), shape_(shape),
|
||||
size_(std::accumulate(std::begin(shape), std::end(shape), 1,
|
||||
std::multiplies<>())) {}
|
||||
|
||||
|
||||
// NDView(T *buffer, const std::vector<ssize_t> &shape)
|
||||
// : buffer_(buffer),
|
||||
// strides_(c_strides<Ndim>(make_array<Ndim>(shape))),
|
||||
// shape_(make_array<Ndim>(shape)),
|
||||
// size_(std::accumulate(std::begin(shape), std::end(shape), 1,
|
||||
// std::multiplies<>())) {}
|
||||
|
||||
template <typename... Ix>
|
||||
std::enable_if_t<sizeof...(Ix) == Ndim, T &> operator()(Ix... index) {
|
||||
return buffer_[element_offset(strides_, index...)];
|
||||
}
|
||||
|
||||
template <typename... Ix>
|
||||
std::enable_if_t<sizeof...(Ix) == 1 && (Ndim > 1), NDView<T, Ndim - 1>> operator()(Ix... index) {
|
||||
// return a view of the next dimension
|
||||
std::array<ssize_t, Ndim - 1> new_shape{};
|
||||
std::copy_n(shape_.begin() + 1, Ndim - 1, new_shape.begin());
|
||||
return NDView<T, Ndim - 1>(&buffer_[element_offset(strides_, index...)],
|
||||
new_shape);
|
||||
|
||||
}
|
||||
|
||||
template <typename... Ix>
|
||||
std::enable_if_t<sizeof...(Ix) == Ndim, const T &> operator()(Ix... index) const {
|
||||
std::enable_if_t<sizeof...(Ix) == Ndim, T &> operator()(Ix... index) const {
|
||||
return buffer_[element_offset(strides_, index...)];
|
||||
}
|
||||
|
||||
|
||||
ssize_t size() const { return static_cast<ssize_t>(size_); }
|
||||
size_t total_bytes() const { return size_ * sizeof(T); }
|
||||
std::array<ssize_t, Ndim> strides() const noexcept { return strides_; }
|
||||
@@ -124,27 +92,13 @@ class NDView : public ArrayExpr<NDView<T, Ndim>, Ndim> {
|
||||
T *end() { return buffer_ + size_; }
|
||||
T const *begin() const { return buffer_; }
|
||||
T const *end() const { return buffer_ + size_; }
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* @brief Access element at index i.
|
||||
*/
|
||||
T &operator[](ssize_t i) { return buffer_[i]; }
|
||||
|
||||
/**
|
||||
* @brief Access element at index i.
|
||||
*/
|
||||
const T &operator[](ssize_t i) const { return buffer_[i]; }
|
||||
T &operator()(ssize_t i) const { return buffer_[i]; }
|
||||
T &operator[](ssize_t i) const { return buffer_[i]; }
|
||||
|
||||
bool operator==(const NDView &other) const {
|
||||
if (size_ != other.size_)
|
||||
return false;
|
||||
if (shape_ != other.shape_)
|
||||
return false;
|
||||
for (size_t i = 0; i != size_; ++i) {
|
||||
for (uint64_t i = 0; i != size_; ++i) {
|
||||
if (buffer_[i] != other.buffer_[i])
|
||||
return false;
|
||||
}
|
||||
@@ -203,25 +157,8 @@ class NDView : public ArrayExpr<NDView<T, Ndim>, Ndim> {
|
||||
auto shape(ssize_t i) const { return shape_[i]; }
|
||||
|
||||
T *data() { return buffer_; }
|
||||
const T *data() const { return buffer_; }
|
||||
void print_all() const;
|
||||
|
||||
/**
|
||||
* @brief Create a subview of a range of the first dimension.
|
||||
* This is useful for splitting a batches of frames in parallel processing.
|
||||
* @param first The first index of the subview (inclusive).
|
||||
* @param last The last index of the subview (exclusive).
|
||||
* @return A new NDView that is a subview of the current view.
|
||||
* @throws std::runtime_error if the range is invalid.
|
||||
*/
|
||||
NDView sub_view(ssize_t first, ssize_t last) const {
|
||||
if (first < 0 || last > shape_[0] || first >= last)
|
||||
throw std::runtime_error(LOCATION + "Invalid sub_view range");
|
||||
auto new_shape = shape_;
|
||||
new_shape[0] = last - first;
|
||||
return NDView(buffer_ + first * strides_[0], new_shape);
|
||||
}
|
||||
|
||||
private:
|
||||
T *buffer_{nullptr};
|
||||
std::array<ssize_t, Ndim> strides_{};
|
||||
@@ -243,7 +180,6 @@ class NDView : public ArrayExpr<NDView<T, Ndim>, Ndim> {
|
||||
return *this;
|
||||
}
|
||||
};
|
||||
|
||||
template <typename T, ssize_t Ndim> void NDView<T, Ndim>::print_all() const {
|
||||
for (auto row = 0; row < shape_[0]; ++row) {
|
||||
for (auto col = 0; col < shape_[1]; ++col) {
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
#pragma once
|
||||
#include "aare/Dtype.hpp"
|
||||
#include "aare/FileInterface.hpp"
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
#pragma once
|
||||
#include <algorithm>
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
#pragma once
|
||||
#include "aare/Frame.hpp"
|
||||
#include "aare/NDArray.hpp"
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
#pragma once
|
||||
|
||||
#include "aare/NDArray.hpp"
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
/*
|
||||
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
*
|
||||
|
||||
@@ -1,20 +1,27 @@
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
#pragma once
|
||||
#include "aare/DetectorGeometry.hpp"
|
||||
#include "aare/FileInterface.hpp"
|
||||
#include "aare/Frame.hpp"
|
||||
#include "aare/NDArray.hpp" //for pixel map
|
||||
#include "aare/RawMasterFile.hpp"
|
||||
#include "aare/RawSubFile.hpp"
|
||||
|
||||
#ifdef AARE_TESTS
|
||||
#include "../tests/friend_test.hpp"
|
||||
#endif
|
||||
|
||||
#include <optional>
|
||||
|
||||
namespace aare {
|
||||
|
||||
struct ModuleConfig {
|
||||
int module_gap_row{};
|
||||
int module_gap_col{};
|
||||
|
||||
bool operator==(const ModuleConfig &other) const {
|
||||
if (module_gap_col != other.module_gap_col)
|
||||
return false;
|
||||
if (module_gap_row != other.module_gap_row)
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Class to read .raw files. The class will parse the master file
|
||||
* to find the correct geometry for the frames.
|
||||
@@ -22,12 +29,11 @@ namespace aare {
|
||||
* Consider using that unless you need raw file specific functionality.
|
||||
*/
|
||||
class RawFile : public FileInterface {
|
||||
|
||||
std::vector<std::unique_ptr<RawSubFile>> m_subfiles;
|
||||
|
||||
ModuleConfig cfg{0, 0};
|
||||
RawMasterFile m_master;
|
||||
size_t m_current_frame{};
|
||||
|
||||
size_t m_current_subfile{};
|
||||
DetectorGeometry m_geometry;
|
||||
|
||||
public:
|
||||
@@ -61,21 +67,13 @@ class RawFile : public FileInterface {
|
||||
size_t rows() const override;
|
||||
size_t cols() const override;
|
||||
size_t bitdepth() const override;
|
||||
xy geometry();
|
||||
size_t n_modules() const;
|
||||
size_t n_modules_in_roi() const;
|
||||
xy geometry() const;
|
||||
|
||||
RawMasterFile master() const;
|
||||
|
||||
DetectorType detector_type() const override;
|
||||
|
||||
/**
|
||||
* @brief read the header of the file
|
||||
* @param fname path to the data subfile
|
||||
* @return DetectorHeader
|
||||
*/
|
||||
static DetectorHeader read_header(const std::filesystem::path &fname);
|
||||
|
||||
private:
|
||||
/**
|
||||
* @brief read the frame at the given frame index into the image buffer
|
||||
@@ -93,7 +91,15 @@ class RawFile : public FileInterface {
|
||||
*/
|
||||
Frame get_frame(size_t frame_index);
|
||||
|
||||
/**
|
||||
* @brief read the header of the file
|
||||
* @param fname path to the data subfile
|
||||
* @return DetectorHeader
|
||||
*/
|
||||
static DetectorHeader read_header(const std::filesystem::path &fname);
|
||||
|
||||
void open_subfiles();
|
||||
void find_geometry();
|
||||
};
|
||||
|
||||
} // namespace aare
|
||||
@@ -1,7 +1,7 @@
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
#pragma once
|
||||
#include "aare/defs.hpp"
|
||||
#include <algorithm>
|
||||
#include "aare/scan_parameters.hpp"
|
||||
|
||||
#include <filesystem>
|
||||
#include <fmt/format.h>
|
||||
#include <fstream>
|
||||
@@ -41,31 +41,6 @@ class RawFileNameComponents {
|
||||
void set_old_scheme(bool old_scheme);
|
||||
};
|
||||
|
||||
class ScanParameters {
|
||||
bool m_enabled = false;
|
||||
DACIndex m_dac{};
|
||||
int m_start = 0;
|
||||
int m_stop = 0;
|
||||
int m_step = 0;
|
||||
int64_t m_settleTime = 0; // [ns]
|
||||
|
||||
public:
|
||||
ScanParameters(const std::string &par);
|
||||
ScanParameters(const bool enabled, const DACIndex dac, const int start,
|
||||
const int stop, const int step, const int64_t settleTime);
|
||||
ScanParameters() = default;
|
||||
ScanParameters(const ScanParameters &) = default;
|
||||
ScanParameters &operator=(const ScanParameters &) = default;
|
||||
ScanParameters(ScanParameters &&) = default;
|
||||
int start() const;
|
||||
int stop() const;
|
||||
int step() const;
|
||||
DACIndex dac() const;
|
||||
bool enabled() const;
|
||||
int64_t settleTime() const;
|
||||
void increment_stop();
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Class for parsing a master file either in our .json format or the old
|
||||
* .raw format
|
||||
@@ -82,10 +57,8 @@ class RawMasterFile {
|
||||
size_t m_pixels_y{};
|
||||
size_t m_pixels_x{};
|
||||
size_t m_bitdepth{};
|
||||
uint8_t m_quad = 0;
|
||||
|
||||
xy m_geometry{};
|
||||
xy m_udp_interfaces_per_module{1, 1};
|
||||
|
||||
size_t m_max_frames_per_file{};
|
||||
// uint32_t m_adc_mask{}; // TODO! implement reading
|
||||
@@ -103,7 +76,7 @@ class RawMasterFile {
|
||||
std::optional<size_t> m_digital_samples;
|
||||
std::optional<size_t> m_transceiver_samples;
|
||||
std::optional<size_t> m_number_of_rows;
|
||||
std::optional<uint8_t> m_counter_mask;
|
||||
std::optional<uint8_t> m_quad;
|
||||
|
||||
std::optional<ROI> m_roi;
|
||||
|
||||
@@ -122,19 +95,17 @@ class RawMasterFile {
|
||||
size_t max_frames_per_file() const;
|
||||
size_t bitdepth() const;
|
||||
size_t frame_padding() const;
|
||||
xy udp_interfaces_per_module() const;
|
||||
const FrameDiscardPolicy &frame_discard_policy() const;
|
||||
|
||||
size_t total_frames_expected() const;
|
||||
xy geometry() const;
|
||||
size_t n_modules() const;
|
||||
uint8_t quad() const;
|
||||
|
||||
std::optional<size_t> analog_samples() const;
|
||||
std::optional<size_t> digital_samples() const;
|
||||
std::optional<size_t> transceiver_samples() const;
|
||||
std::optional<size_t> number_of_rows() const;
|
||||
std::optional<uint8_t> counter_mask() const;
|
||||
std::optional<uint8_t> quad() const;
|
||||
|
||||
std::optional<ROI> roi() const;
|
||||
|
||||
@@ -143,7 +114,6 @@ class RawMasterFile {
|
||||
private:
|
||||
void parse_json(const std::filesystem::path &fpath);
|
||||
void parse_raw(const std::filesystem::path &fpath);
|
||||
void retrieve_geometry();
|
||||
};
|
||||
|
||||
} // namespace aare
|
||||
@@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
#pragma once
|
||||
#include "aare/Frame.hpp"
|
||||
#include "aare/defs.hpp"
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
#pragma once
|
||||
|
||||
#include <algorithm>
|
||||
@@ -241,14 +240,14 @@ template <typename T> void VarClusterFinder<T>::first_pass() {
|
||||
|
||||
for (ssize_t i = 0; i < original_.size(); ++i) {
|
||||
if (use_noise_map)
|
||||
threshold_ = 5 * noiseMap[i];
|
||||
binary_[i] = (original_[i] > threshold_);
|
||||
threshold_ = 5 * noiseMap(i);
|
||||
binary_(i) = (original_(i) > threshold_);
|
||||
}
|
||||
|
||||
for (int i = 0; i < shape_[0]; ++i) {
|
||||
for (int j = 0; j < shape_[1]; ++j) {
|
||||
|
||||
// do we have something to process?
|
||||
if (binary_(i, j)) {
|
||||
auto tmp = check_neighbours(i, j);
|
||||
if (tmp != 0) {
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
#pragma once
|
||||
#include <aare/NDArray.hpp>
|
||||
@@ -110,19 +109,4 @@ template <typename Container> bool all_equal(const Container &c) {
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* linear interpolation
|
||||
* @param bin_edge left and right bin edges
|
||||
* @param bin_values function values at bin edges
|
||||
* @param coord coordinate to interpolate at
|
||||
* @return interpolated value at coord
|
||||
*/
|
||||
inline double linear_interpolation(const std::pair<double, double> &bin_edge,
|
||||
const std::pair<double, double> &bin_values,
|
||||
const double coord) {
|
||||
const double bin_width = bin_edge.second - bin_edge.first;
|
||||
return bin_values.first * (1 - (coord - bin_edge.first) / bin_width) +
|
||||
bin_values.second * (coord - bin_edge.first) / bin_width;
|
||||
}
|
||||
|
||||
} // namespace aare
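A worked example of the helper above (a sketch): with bin edges 0.0 and 1.0, values 2.0 and 4.0 at those edges, and coord = 0.25, the result is 2.0 * 0.75 + 4.0 * 0.25 = 2.5.

double v = aare::linear_interpolation({0.0, 1.0}, {2.0, 4.0}, 0.25);
// v == 2.5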
|
||||
@@ -1,210 +0,0 @@
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
#pragma once
|
||||
|
||||
#include "aare/NDArray.hpp"
|
||||
#include "aare/NDView.hpp"
|
||||
#include "aare/defs.hpp"
|
||||
#include "aare/utils/par.hpp"
|
||||
#include "aare/utils/task.hpp"
|
||||
#include <cstdint>
|
||||
#include <future>
|
||||
|
||||
namespace aare {
|
||||
|
||||
// Really try to convince the compiler to inline this function
|
||||
// TODO! Clang?
|
||||
#if (defined(_MSC_VER) || defined(__INTEL_COMPILER))
|
||||
#define STRONG_INLINE __forceinline
|
||||
#else
|
||||
#define STRONG_INLINE inline
|
||||
#endif
|
||||
|
||||
#if defined(__GNUC__)
|
||||
#define ALWAYS_INLINE __attribute__((always_inline)) inline
|
||||
#else
|
||||
#define ALWAYS_INLINE STRONG_INLINE
|
||||
#endif
|
||||
|
||||
/**
|
||||
* @brief Get the gain from the raw ADC value. In Jungfrau the gain is
|
||||
* encoded in the left most 2 bits of the raw value.
|
||||
* 00 -> gain 0
|
||||
* 01 -> gain 1
|
||||
* 11 -> gain 2
|
||||
* @param raw the raw ADC value
|
||||
* @return the gain as an integer
|
||||
*/
|
||||
ALWAYS_INLINE int get_gain(uint16_t raw) {
|
||||
switch (raw >> 14) {
|
||||
case 0:
|
||||
return 0;
|
||||
case 1:
|
||||
return 1;
|
||||
case 3:
|
||||
return 2;
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
ALWAYS_INLINE uint16_t get_value(uint16_t raw) { return raw & ADC_MASK; }
|
||||
|
||||
ALWAYS_INLINE std::pair<uint16_t, int16_t> get_value_and_gain(uint16_t raw) {
|
||||
static_assert(
|
||||
sizeof(std::pair<uint16_t, int16_t>) ==
|
||||
sizeof(uint16_t) + sizeof(int16_t),
|
||||
"Size of pair<uint16_t, int16_t> does not match expected size");
|
||||
return {get_value(raw), get_gain(raw)};
|
||||
}
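For illustration (not part of the header): a raw Jungfrau sample with both gain bits set decodes to gain 2, and the ADC value is the lower 14 bits.

uint16_t raw = 0xC123;                            // bits 15..14 == 0b11 -> gain 2
auto [value, gain] = aare::get_value_and_gain(raw);
// value == 0x0123 (raw & ADC_MASK), gain == 2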
|
||||
|
||||
template <class T>
|
||||
void apply_calibration_impl(NDView<T, 3> res, NDView<uint16_t, 3> raw_data,
|
||||
NDView<T, 3> ped, NDView<T, 3> cal, int start,
|
||||
int stop) {
|
||||
|
||||
for (int frame_nr = start; frame_nr != stop; ++frame_nr) {
|
||||
for (int row = 0; row != raw_data.shape(1); ++row) {
|
||||
for (int col = 0; col != raw_data.shape(2); ++col) {
|
||||
auto [value, gain] =
|
||||
get_value_and_gain(raw_data(frame_nr, row, col));
|
||||
|
||||
// Using multiplication does not seem to speed up the code here
|
||||
// ADU/keV is the standard unit for the calibration which
|
||||
// means rewriting the formula is not worth it.
|
||||
res(frame_nr, row, col) =
|
||||
(value - ped(gain, row, col)) / cal(gain, row, col);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template <class T>
|
||||
void apply_calibration_impl(NDView<T, 3> res, NDView<uint16_t, 3> raw_data,
|
||||
NDView<T, 2> ped, NDView<T, 2> cal, int start,
|
||||
int stop) {
|
||||
|
||||
for (int frame_nr = start; frame_nr != stop; ++frame_nr) {
|
||||
for (int row = 0; row != raw_data.shape(1); ++row) {
|
||||
for (int col = 0; col != raw_data.shape(2); ++col) {
|
||||
auto [value, gain] =
|
||||
get_value_and_gain(raw_data(frame_nr, row, col));
|
||||
|
||||
// Using multiplication does not seem to speed up the code here
|
||||
// ADU/keV is the standard unit for the calibration which
|
||||
// means rewriting the formula is not worth it.
|
||||
|
||||
// Set the value to 0 if the gain is not 0
|
||||
if (gain == 0)
|
||||
res(frame_nr, row, col) =
|
||||
(value - ped(row, col)) / cal(row, col);
|
||||
else
|
||||
res(frame_nr, row, col) = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template <class T, ssize_t Ndim = 3>
|
||||
void apply_calibration(NDView<T, 3> res, NDView<uint16_t, 3> raw_data,
|
||||
NDView<T, Ndim> ped, NDView<T, Ndim> cal,
|
||||
ssize_t n_threads = 4) {
|
||||
std::vector<std::future<void>> futures;
|
||||
futures.reserve(n_threads);
|
||||
auto limits = split_task(0, raw_data.shape(0), n_threads);
|
||||
for (const auto &lim : limits)
|
||||
futures.push_back(std::async(
|
||||
static_cast<void (*)(NDView<T, 3>, NDView<uint16_t, 3>,
|
||||
NDView<T, Ndim>, NDView<T, Ndim>, int, int)>(
|
||||
apply_calibration_impl),
|
||||
res, raw_data, ped, cal, lim.first, lim.second));
|
||||
for (auto &f : futures)
|
||||
f.get();
|
||||
}
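A sketch of calling the threaded calibration above; the shapes and contents are made up, and the views are built from NDArray data() and shape() since the declared interface takes NDViews:

aare::NDArray<uint16_t, 3> raw(std::array<ssize_t, 3>{100, 512, 1024}, 0);  // 100 frames
aare::NDArray<float, 3> ped(std::array<ssize_t, 3>{3, 512, 1024}, 0.f);     // per-gain pedestal
aare::NDArray<float, 3> cal(std::array<ssize_t, 3>{3, 512, 1024}, 1.f);     // per-gain calibration
aare::NDArray<float, 3> res(std::array<ssize_t, 3>{100, 512, 1024});

aare::apply_calibration(aare::NDView<float, 3>(res.data(), res.shape()),
                        aare::NDView<uint16_t, 3>(raw.data(), raw.shape()),
                        aare::NDView<float, 3>(ped.data(), ped.shape()),
                        aare::NDView<float, 3>(cal.data(), cal.shape()),
                        4 /* n_threads */);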
|
||||
|
||||
template <bool only_gain0>
|
||||
std::pair<NDArray<size_t, 3>, NDArray<size_t, 3>>
|
||||
sum_and_count_per_gain(NDView<uint16_t, 3> raw_data) {
|
||||
constexpr ssize_t num_gains = only_gain0 ? 1 : 3;
|
||||
NDArray<size_t, 3> accumulator(
|
||||
std::array<ssize_t, 3>{num_gains, raw_data.shape(1), raw_data.shape(2)},
|
||||
0);
|
||||
NDArray<size_t, 3> count(
|
||||
std::array<ssize_t, 3>{num_gains, raw_data.shape(1), raw_data.shape(2)},
|
||||
0);
|
||||
for (int frame_nr = 0; frame_nr != raw_data.shape(0); ++frame_nr) {
|
||||
for (int row = 0; row != raw_data.shape(1); ++row) {
|
||||
for (int col = 0; col != raw_data.shape(2); ++col) {
|
||||
auto [value, gain] =
|
||||
get_value_and_gain(raw_data(frame_nr, row, col));
|
||||
if (gain != 0 && only_gain0)
|
||||
continue;
|
||||
accumulator(gain, row, col) += value;
|
||||
count(gain, row, col) += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return {std::move(accumulator), std::move(count)};
|
||||
}
|
||||
|
||||
template <typename T, bool only_gain0 = false>
|
||||
NDArray<T, 3 - static_cast<ssize_t>(only_gain0)>
|
||||
calculate_pedestal(NDView<uint16_t, 3> raw_data, ssize_t n_threads) {
|
||||
|
||||
constexpr ssize_t num_gains = only_gain0 ? 1 : 3;
|
||||
std::vector<std::future<std::pair<NDArray<size_t, 3>, NDArray<size_t, 3>>>>
|
||||
futures;
|
||||
futures.reserve(n_threads);
|
||||
|
||||
auto subviews = make_subviews(raw_data, n_threads);
|
||||
|
||||
for (auto view : subviews) {
|
||||
futures.push_back(std::async(
|
||||
static_cast<std::pair<NDArray<size_t, 3>, NDArray<size_t, 3>> (*)(
|
||||
NDView<uint16_t, 3>)>(&sum_and_count_per_gain<only_gain0>),
|
||||
view));
|
||||
}
|
||||
Shape<3> shape{num_gains, raw_data.shape(1), raw_data.shape(2)};
|
||||
NDArray<size_t, 3> accumulator(shape, 0);
|
||||
NDArray<size_t, 3> count(shape, 0);
|
||||
|
||||
// Combine the results from the futures
|
||||
for (auto &f : futures) {
|
||||
auto [acc, cnt] = f.get();
|
||||
accumulator += acc;
|
||||
count += cnt;
|
||||
}
|
||||
|
||||
|
||||
// Will move to a NDArray<T, 3 - static_cast<ssize_t>(only_gain0)>
|
||||
// if only_gain0 is true
|
||||
return safe_divide<T>(accumulator, count);
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Count the number of switching pixels in the raw data.
|
||||
* This function counts the number of pixels that switch between G1 and G2 gain.
|
||||
* It returns an NDArray with the number of switching pixels per pixel.
|
||||
* @param raw_data The NDView containing the raw data
|
||||
* @return An NDArray with the number of switching pixels per pixel
|
||||
*/
|
||||
NDArray<int, 2> count_switching_pixels(NDView<uint16_t, 3> raw_data);
|
||||
|
||||
/**
|
||||
* @brief Count the number of switching pixels in the raw data.
|
||||
* This function counts the number of pixels that switch between G1 and G2 gain.
|
||||
* It returns an NDArray with the number of switching pixels per pixel.
|
||||
* @param raw_data The NDView containing the raw data
|
||||
* @param n_threads The number of threads to use for parallel processing
|
||||
* @return An NDArray with the number of switching pixels per pixel
|
||||
*/
|
||||
NDArray<int, 2> count_switching_pixels(NDView<uint16_t, 3> raw_data,
|
||||
ssize_t n_threads);
|
||||
|
||||
template <typename T>
|
||||
auto calculate_pedestal_g0(NDView<uint16_t, 3> raw_data, ssize_t n_threads) {
|
||||
return calculate_pedestal<T, true>(raw_data, n_threads);
|
||||
}
|
||||
|
||||
} // namespace aare
|
||||
@@ -1,12 +1,10 @@
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
#pragma once
|
||||
#include "aare/defs.hpp"
|
||||
|
||||
#include <aare/NDView.hpp>
|
||||
#include <cstdint>
|
||||
#include <vector>
|
||||
namespace aare {
|
||||
|
||||
|
||||
uint16_t adc_sar_05_decode64to16(uint64_t input);
|
||||
uint16_t adc_sar_04_decode64to16(uint64_t input);
|
||||
void adc_sar_05_decode64to16(NDView<uint64_t, 2> input,
|
||||
@@ -14,25 +12,6 @@ void adc_sar_05_decode64to16(NDView<uint64_t, 2> input,
|
||||
void adc_sar_04_decode64to16(NDView<uint64_t, 2> input,
|
||||
NDView<uint16_t, 2> output);
|
||||
|
||||
/**
|
||||
 * @brief Called with a 32-bit unsigned integer, shift by offset
 * and then return the lower 24 bits as a 32-bit integer
 * @param input 32-bit input value
|
||||
* @param offset (should be in range 0-7 to allow for full 24 bits)
|
||||
* @return uint32_t
|
||||
*/
|
||||
uint32_t mask32to24bits(uint32_t input, BitOffset offset={});
|
||||
|
||||
/**
|
||||
* @brief Expand 24 bit values in a 8bit buffer to 32bit unsigned integers
|
||||
* Used for detectors with 24bit counters in combination with CTB
|
||||
*
|
||||
* @param input View of the 24 bit data as uint8_t (no 24bit native data type exists)
|
||||
* @param output Destination of the expanded data (32bit, unsigned)
|
||||
* @param offset Offset within the first byte to where the data starts (0-7 bits)
|
||||
*/
|
||||
void expand24to32bit(NDView<uint8_t,1> input, NDView<uint32_t,1> output, BitOffset offset={});
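A usage sketch of the declaration above (buffer contents are made up; the default BitOffset means the packed data starts at bit 0):

#include <array>
#include <cstdint>

void unpack_example() {
    std::array<uint8_t, 6> packed{0x01, 0x02, 0x03, 0x04, 0x05, 0x06}; // two packed 24-bit values
    std::array<uint32_t, 2> expanded{};
    aare::NDView<uint8_t, 1> in(packed.data(), std::array<ssize_t, 1>{6});
    aare::NDView<uint32_t, 1> out(expanded.data(), std::array<ssize_t, 1>{2});
    aare::expand24to32bit(in, out); // each output element now holds one 24-bit counter value
}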
|
||||
|
||||
/**
|
||||
* @brief Apply custom weights to a 16-bit input value. Will sum up
|
||||
* weights[i]**i for each bit i that is set in the input value.
|
||||
|
||||
@@ -1,12 +1,15 @@
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
#pragma once
|
||||
|
||||
#include "aare/Dtype.hpp"
|
||||
#include "aare/type_traits.hpp"
|
||||
|
||||
#include <algorithm>
|
||||
#include <array>
|
||||
#include <cassert>
|
||||
#include <cstdint>
|
||||
#include <cstring>
|
||||
#include <iostream>
|
||||
#include <sstream>
|
||||
#include <stdexcept>
|
||||
#include <string>
|
||||
#include <string_view>
|
||||
@@ -175,6 +178,35 @@ template <typename T> struct t_xy {
|
||||
};
|
||||
using xy = t_xy<uint32_t>;
|
||||
|
||||
/**
|
||||
* @brief Class to hold the geometry of a module. Where pixel 0 is located and
|
||||
* the size of the module
|
||||
*/
|
||||
struct ModuleGeometry {
|
||||
int origin_x{};
|
||||
int origin_y{};
|
||||
int height{};
|
||||
int width{};
|
||||
int row_index{};
|
||||
int col_index{};
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Class to hold the geometry of a detector. Number of modules, their
|
||||
* size and where pixel 0 for each module is located
|
||||
*/
|
||||
struct DetectorGeometry {
|
||||
int modules_x{};
|
||||
int modules_y{};
|
||||
int pixels_x{};
|
||||
int pixels_y{};
|
||||
int module_gap_row{};
|
||||
int module_gap_col{};
|
||||
std::vector<ModuleGeometry> module_pixel_0;
|
||||
|
||||
auto size() const { return module_pixel_0.size(); }
|
||||
};
|
||||
|
||||
struct ROI {
|
||||
ssize_t xmin{};
|
||||
ssize_t xmax{};
|
||||
@@ -216,171 +248,15 @@ enum class DetectorType {
|
||||
Unknown
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Enum class to define the Digital to Analog converter
|
||||
* The values are the same as in slsDetectorPackage
|
||||
*/
|
||||
enum DACIndex {
|
||||
DAC_0,
|
||||
DAC_1,
|
||||
DAC_2,
|
||||
DAC_3,
|
||||
DAC_4,
|
||||
DAC_5,
|
||||
DAC_6,
|
||||
DAC_7,
|
||||
DAC_8,
|
||||
DAC_9,
|
||||
DAC_10,
|
||||
DAC_11,
|
||||
DAC_12,
|
||||
DAC_13,
|
||||
DAC_14,
|
||||
DAC_15,
|
||||
DAC_16,
|
||||
DAC_17,
|
||||
VSVP,
|
||||
VTRIM,
|
||||
VRPREAMP,
|
||||
VRSHAPER,
|
||||
VSVN,
|
||||
VTGSTV,
|
||||
VCMP_LL,
|
||||
VCMP_LR,
|
||||
VCAL,
|
||||
VCMP_RL,
|
||||
RXB_RB,
|
||||
RXB_LB,
|
||||
VCMP_RR,
|
||||
VCP,
|
||||
VCN,
|
||||
VISHAPER,
|
||||
VTHRESHOLD,
|
||||
IO_DELAY,
|
||||
VREF_DS,
|
||||
VOUT_CM,
|
||||
VIN_CM,
|
||||
VREF_COMP,
|
||||
VB_COMP,
|
||||
VDD_PROT,
|
||||
VIN_COM,
|
||||
VREF_PRECH,
|
||||
VB_PIXBUF,
|
||||
VB_DS,
|
||||
VREF_H_ADC,
|
||||
VB_COMP_FE,
|
||||
VB_COMP_ADC,
|
||||
VCOM_CDS,
|
||||
VREF_RSTORE,
|
||||
VB_OPA_1ST,
|
||||
VREF_COMP_FE,
|
||||
VCOM_ADC1,
|
||||
VREF_L_ADC,
|
||||
VREF_CDS,
|
||||
VB_CS,
|
||||
VB_OPA_FD,
|
||||
VCOM_ADC2,
|
||||
VCASSH,
|
||||
VTH2,
|
||||
VRSHAPER_N,
|
||||
VIPRE_OUT,
|
||||
VTH3,
|
||||
VTH1,
|
||||
VICIN,
|
||||
VCAS,
|
||||
VCAL_N,
|
||||
VIPRE,
|
||||
VCAL_P,
|
||||
VDCSH,
|
||||
VBP_COLBUF,
|
||||
VB_SDA,
|
||||
VCASC_SFP,
|
||||
VIPRE_CDS,
|
||||
IBIAS_SFP,
|
||||
ADC_VPP,
|
||||
HIGH_VOLTAGE,
|
||||
TEMPERATURE_ADC,
|
||||
TEMPERATURE_FPGA,
|
||||
TEMPERATURE_FPGAEXT,
|
||||
TEMPERATURE_10GE,
|
||||
TEMPERATURE_DCDC,
|
||||
TEMPERATURE_SODL,
|
||||
TEMPERATURE_SODR,
|
||||
TEMPERATURE_FPGA2,
|
||||
TEMPERATURE_FPGA3,
|
||||
TRIMBIT_SCAN,
|
||||
V_POWER_A = 100,
|
||||
V_POWER_B = 101,
|
||||
V_POWER_C = 102,
|
||||
V_POWER_D = 103,
|
||||
V_POWER_IO = 104,
|
||||
V_POWER_CHIP = 105,
|
||||
I_POWER_A = 106,
|
||||
I_POWER_B = 107,
|
||||
I_POWER_C = 108,
|
||||
I_POWER_D = 109,
|
||||
I_POWER_IO = 110,
|
||||
V_LIMIT = 111,
|
||||
SLOW_ADC0 = 1000,
|
||||
SLOW_ADC1,
|
||||
SLOW_ADC2,
|
||||
SLOW_ADC3,
|
||||
SLOW_ADC4,
|
||||
SLOW_ADC5,
|
||||
SLOW_ADC6,
|
||||
SLOW_ADC7,
|
||||
SLOW_ADC_TEMP
|
||||
};
|
||||
|
||||
// helper pair class to easily expose in python
|
||||
template <typename T1, typename T2> struct Sum_index_pair {
|
||||
T1 sum;
|
||||
T2 index;
|
||||
};
|
||||
|
||||
enum class corner : int {
|
||||
cTopLeft = 0,
|
||||
cTopRight = 1,
|
||||
cBottomLeft = 2,
|
||||
cBottomRight = 3
|
||||
};
|
||||
|
||||
enum class TimingMode { Auto, Trigger };
|
||||
enum class FrameDiscardPolicy { NoDiscard, Discard, DiscardPartial };
|
||||
|
||||
template <class T> T StringTo(const std::string &arg) { return T(arg); }
|
||||
|
||||
template <class T> std::string ToString(T arg) { return T(arg); }
|
||||
|
||||
template <> DetectorType StringTo(const std::string & /*name*/);
|
||||
template <> std::string ToString(DetectorType arg);
|
||||
|
||||
template <> TimingMode StringTo(const std::string & /*mode*/);
|
||||
|
||||
template <> FrameDiscardPolicy StringTo(const std::string & /*mode*/);
|
||||
enum class BurstMode {
|
||||
Burst_Interal,
|
||||
Burst_External,
|
||||
Continuous_Internal,
|
||||
Continuous_External
|
||||
};
|
||||
|
||||
using DataTypeVariants = std::variant<uint16_t, uint32_t>;
|
||||
|
||||
constexpr uint16_t ADC_MASK =
|
||||
0x3FFF; // used to mask out the gain bits in Jungfrau
|
||||
|
||||
/**
|
||||
* @brief Convert a string to a DACIndex
|
||||
* @param arg string representation of the dacIndex
|
||||
* @return DACIndex
|
||||
* @throw invalid argument error if the string does not match any DACIndex
|
||||
*/
|
||||
template <> DACIndex StringTo(const std::string &arg);
|
||||
|
||||
class BitOffset{
|
||||
uint8_t m_offset{};
|
||||
public:
|
||||
BitOffset() = default;
|
||||
explicit BitOffset(uint32_t offset);
|
||||
uint8_t value() const {return m_offset;}
|
||||
bool operator==(const BitOffset& other) const;
|
||||
bool operator<(const BitOffset& other) const;
|
||||
|
||||
};
|
||||
|
||||
} // namespace aare
|
||||
15
include/aare/geo_helpers.hpp
Normal file
@@ -0,0 +1,15 @@
|
||||
#pragma once
|
||||
#include "aare/RawMasterFile.hpp" //ROI refactor away
|
||||
#include "aare/defs.hpp"
|
||||
namespace aare {
|
||||
|
||||
/**
|
||||
* @brief Update the detector geometry given a region of interest
|
||||
*
|
||||
* @param geo
|
||||
* @param roi
|
||||
* @return DetectorGeometry
|
||||
*/
|
||||
DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, ROI roi);
|
||||
|
||||
} // namespace aare
|
||||
@@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
#pragma once
|
||||
/*Utility to log to console*/
|
||||
|
||||
|
||||
51
include/aare/scan_parameters.hpp
Normal file
@@ -0,0 +1,51 @@
#pragma once

#include <string>
#include <sstream>

namespace aare {

class ScanParameters {
    bool m_enabled = false;
    std::string m_dac;
    int m_start = 0;
    int m_stop = 0;
    int m_step = 0;
    // ns m_dac_settle_time{0};
    // TODO! add settleTime, requires string to time conversion

  public:
    // "[enabled\ndac dac 4\nstart 500\nstop 2200\nstep 5\nsettleTime 100us\n]"
    // TODO: use StringTo<ScanParameters> and move this to to_string
    // add ways of setting the members of the class

    ScanParameters(const std::string &par) {
        std::istringstream iss(par.substr(1, par.size() - 2));
        std::string line;
        while (std::getline(iss, line)) {
            if (line == "enabled") {
                m_enabled = true;
            } else if (line.find("dac") != std::string::npos) {
                m_dac = line.substr(4);
            } else if (line.find("start") != std::string::npos) {
                m_start = std::stoi(line.substr(6));
            } else if (line.find("stop") != std::string::npos) {
                m_stop = std::stoi(line.substr(5));
            } else if (line.find("step") != std::string::npos) {
                m_step = std::stoi(line.substr(5));
            }
        }
    };
    ScanParameters() = default;
    ScanParameters(const ScanParameters &) = default;
    ScanParameters &operator=(const ScanParameters &) = default;
    ScanParameters(ScanParameters &&) = default;
    int start() const { return m_start; };
    int stop() const { return m_stop; };
    int step() const { return m_step; };
    const std::string &dac() const { return m_dac; };
    bool enabled() const { return m_enabled; };
    void increment_stop() { m_stop += 1; };
};

} // namespace aare

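A minimal usage sketch for ScanParameters, parsing the bracketed format shown in the constructor comment above; the input string is illustrative:

    #include "aare/scan_parameters.hpp"

    // Parse a scan description as it appears in a raw master file.
    aare::ScanParameters sp("[enabled\ndac dac 4\nstart 500\nstop 2200\nstep 5\n]");
    // sp.enabled() == true, sp.dac() == "dac 4",
    // sp.start() == 500, sp.stop() == 2200, sp.step() == 5
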
11
include/aare/string_utils.hpp
Normal file
11
include/aare/string_utils.hpp
Normal file
@@ -0,0 +1,11 @@
#pragma once

#include <string>

namespace aare {

std::string RemoveUnit(std::string &str);

void TrimWhiteSpaces(std::string &s);

} // namespace aare

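A minimal usage sketch, inferred from how these helpers are used in to_string.hpp further down in this diff; the exact in-place semantics of RemoveUnit are an assumption:

    #include "aare/string_utils.hpp"

    std::string s = "  100us ";
    aare::TrimWhiteSpaces(s);        // s == "100us"
    auto unit = aare::RemoveUnit(s); // presumably unit == "us" and s == "100"
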
288
include/aare/to_string.hpp
Normal file
288
include/aare/to_string.hpp
Normal file
@@ -0,0 +1,288 @@
#pragma once

#include "aare/defs.hpp"
#include "aare/scan_parameters.hpp"
#include "aare/string_utils.hpp"

#include <optional>
#include <chrono>

namespace aare {

// generic
template <class T, typename = std::enable_if_t<!is_duration<T>::value>>
std::string ToString(T arg) {
    return T(arg);
}

template <typename T,
          std::enable_if_t<!is_duration<T>::value && !is_container<T>::value,
                           int> = 0>
T StringTo(const std::string &arg) {
    return T(arg);
}

// time

/** Convert std::chrono::duration with specified output unit */
template <typename T, typename Rep = double>
typename std::enable_if<is_duration<T>::value, std::string>::type
ToString(T t, const std::string &unit) {
    using std::chrono::duration;
    using std::chrono::duration_cast;
    std::ostringstream os;
    if (unit == "ns")
        os << duration_cast<duration<Rep, std::nano>>(t).count() << unit;
    else if (unit == "us")
        os << duration_cast<duration<Rep, std::micro>>(t).count() << unit;
    else if (unit == "ms")
        os << duration_cast<duration<Rep, std::milli>>(t).count() << unit;
    else if (unit == "s")
        os << duration_cast<duration<Rep>>(t).count() << unit;
    else
        throw std::runtime_error("Unknown unit: " + unit);
    return os.str();
}

/** Convert std::chrono::duration automatically selecting the unit */
template <typename From>
typename std::enable_if<is_duration<From>::value, std::string>::type
ToString(From t) {

    using std::chrono::abs;
    using std::chrono::duration_cast;
    using std::chrono::microseconds;
    using std::chrono::milliseconds;
    using std::chrono::nanoseconds;
    auto tns = duration_cast<nanoseconds>(t);
    if (abs(tns) < microseconds(1)) {
        return ToString(tns, "ns");
    } else if (abs(tns) < milliseconds(1)) {
        return ToString(tns, "us");
    } else if (abs(tns) < milliseconds(99)) {
        return ToString(tns, "ms");
    } else {
        return ToString(tns, "s");
    }
}

template <class Rep, class Period>
std::ostream &operator<<(std::ostream &os,
                         const std::chrono::duration<Rep, Period> &d) {
    return os << ToString(d);
}

template <typename T>
T StringTo(const std::string &t, const std::string &unit) {
    double tval{0};
    try {
        tval = std::stod(t);
    } catch (const std::invalid_argument &e) {
        throw std::invalid_argument("[ERROR] Could not convert string to time");
    }

    using std::chrono::duration;
    using std::chrono::duration_cast;
    if (unit == "ns") {
        return duration_cast<T>(duration<double, std::nano>(tval));
    } else if (unit == "us") {
        return duration_cast<T>(duration<double, std::micro>(tval));
    } else if (unit == "ms") {
        return duration_cast<T>(duration<double, std::milli>(tval));
    } else if (unit == "s" || unit.empty()) {
        return duration_cast<T>(std::chrono::duration<double>(tval));
    } else {
        throw std::invalid_argument("[ERROR] Invalid unit in conversion from "
                                    "string to std::chrono::duration");
    }
}

template <typename T, std::enable_if_t<is_duration<T>::value, int> = 0>
T StringTo(const std::string &t) {
    std::string tmp{t};
    auto unit = RemoveUnit(tmp);
    return StringTo<T>(tmp, unit);
}

template <> inline bool StringTo(const std::string &s) {
    int i = std::stoi(s, nullptr, 10);
    switch (i) {
    case 0:
        return false;
    case 1:
        return true;
    default:
        throw std::runtime_error("Unknown boolean. Expecting be 0 or 1.");
    }
}

template <> inline uint8_t StringTo(const std::string &s) {
    int base = s.find("0x") != std::string::npos ? 16 : 10;
    int value = std::stoi(s, nullptr, base);
    if (value < std::numeric_limits<uint8_t>::min() ||
        value > std::numeric_limits<uint8_t>::max()) {
        throw std::runtime_error("Cannot scan uint8_t from string '" + s +
                                 "'. Value must be in range 0 - 255.");
    }
    return static_cast<uint8_t>(value);
}

template <> inline uint16_t StringTo(const std::string &s) {
    int base = s.find("0x") != std::string::npos ? 16 : 10;
    int value = std::stoi(s, nullptr, base);
    if (value < std::numeric_limits<uint16_t>::min() ||
        value > std::numeric_limits<uint16_t>::max()) {
        throw std::runtime_error("Cannot scan uint16_t from string '" + s +
                                 "'. Value must be in range 0 - 65535.");
    }
    return static_cast<uint16_t>(value);
}

template <> inline uint32_t StringTo(const std::string &s) {
    int base = s.find("0x") != std::string::npos ? 16 : 10;
    return std::stoul(s, nullptr, base);
}

template <> inline uint64_t StringTo(const std::string &s) {
    int base = s.find("0x") != std::string::npos ? 16 : 10;
    return std::stoull(s, nullptr, base);
}

template <> inline int StringTo(const std::string &s) {
    int base = s.find("0x") != std::string::npos ? 16 : 10;
    return std::stoi(s, nullptr, base);
}

/*template <> inline size_t StringTo(const std::string &s) {
    int base = s.find("0x") != std::string::npos ? 16 : 10;
    return std::stoull(s, nullptr, base);
}*/

// vector
template <typename T> std::string ToString(const std::vector<T> &vec) {
    std::ostringstream oss;
    oss << "[";
    for (size_t i = 0; i < vec.size(); ++i) {
        oss << vec[i];
        if (i != vec.size() - 1)
            oss << ", ";
    }
    oss << "]";
    return oss.str();
}

template <typename T>
std::ostream &operator<<(std::ostream &os, const std::vector<T> &v) {
    return os << ToString(v);
}

template <typename Container,
          std::enable_if_t<is_container<Container>::value &&
                               !is_std_string_v<Container> /*&&
                               !is_map_v<Container>*/
                           ,
                           int> = 0>
Container StringTo(const std::string &s) {
    using Value = typename Container::value_type;

    // strip outer brackets
    std::string str = s;
    str.erase(
        std::remove_if(str.begin(), str.end(),
                       [](unsigned char c) { return c == '[' || c == ']'; }),
        str.end());

    std::stringstream ss(str);
    std::string item;
    Container result;

    while (std::getline(ss, item, ',')) {
        TrimWhiteSpaces(item);
        if (!item.empty()) {
            result.push_back(StringTo<Value>(item));
        }
    }
    return result;
}

// map
template <typename KeyType, typename ValueType>
std::string ToString(const std::map<KeyType, ValueType> &m) {
    std::ostringstream os;
    os << '{';
    if (!m.empty()) {
        auto it = m.cbegin();
        os << ToString(it->first) << ": " << ToString(it->second);
        it++;
        while (it != m.cend()) {
            os << ", " << ToString(it->first) << ": " << ToString(it->second);
            it++;
        }
    }
    os << '}';
    return os.str();
}

template <>
inline std::map<std::string, std::string> StringTo(const std::string &s) {
    std::map<std::string, std::string> result;
    std::string str = s;

    // Remove outer braces if present
    if (!str.empty() && str.front() == '{' && str.back() == '}') {
        str = str.substr(1, str.size() - 2);
    }

    std::stringstream ss(str);
    std::string item;

    while (std::getline(ss, item, ',')) {
        auto colon_pos = item.find(':');
        if (colon_pos == std::string::npos)
            throw std::runtime_error("Missing ':' in item: " + item);

        std::string key = item.substr(0, colon_pos);
        std::string value = item.substr(colon_pos + 1);

        TrimWhiteSpaces(key);
        TrimWhiteSpaces(value);

        result[key] = value;
    }
    return result;
}

// optional
template <class T> std::string ToString(const std::optional<T> &opt) {
    return opt ? ToString(*opt) : "nullopt";
}

template <typename T>
std::ostream &operator<<(std::ostream &os, const std::optional<T> &opt) {
    if (opt)
        os << *opt;
    else
        os << "nullopt";
    return os;
}

// enums
template <> std::string ToString(DetectorType arg);
template <> DetectorType StringTo(const std::string & /*name*/);

template <> std::string ToString(TimingMode arg);
template <> TimingMode StringTo(const std::string & /*mode*/);

template <> std::string ToString(FrameDiscardPolicy arg);
template <> FrameDiscardPolicy StringTo(const std::string & /*mode*/);

template <> std::string ToString(BurstMode arg);
template <> BurstMode StringTo(const std::string & /*mode*/);

template <> std::string ToString(ROI arg);
std::ostream &operator<<(std::ostream &os, const ROI &roi);

template <> std::string ToString(ScanParameters arg);
std::ostream &operator<<(std::ostream &os, const ScanParameters &r);

} // namespace aare

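A minimal usage sketch of the round-trip helpers above; the concrete values are illustrative:

    #include "aare/to_string.hpp"
    #include <chrono>
    #include <vector>

    using namespace std::chrono_literals;

    auto s  = aare::ToString(1500us);                             // "1.5ms" (unit auto-selected)
    auto t  = aare::StringTo<std::chrono::nanoseconds>("100us");  // 100000ns
    auto v  = aare::StringTo<std::vector<int>>("[1, 2, 3]");      // {1, 2, 3}
    auto sv = aare::ToString(v);                                  // "[1, 2, 3]"
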
72
include/aare/type_traits.hpp
Normal file
72
include/aare/type_traits.hpp
Normal file
@@ -0,0 +1,72 @@
#pragma once

#include <type_traits>

namespace aare {

/**
 * Type trait to check if a template parameter is a std::chrono::duration
 */

template <typename T, typename _ = void>
struct is_duration : std::false_type {};

template <typename... Ts> struct is_duration_helper {};

template <typename T>
struct is_duration<T,
                   typename std::conditional<
                       false,
                       is_duration_helper<typename T::rep, typename T::period,
                                          decltype(std::declval<T>().min()),
                                          decltype(std::declval<T>().max()),
                                          decltype(std::declval<T>().zero())>,
                       void>::type> : public std::true_type {};

/**
 * Type trait to evaluate if template parameter is
 * complying with a standard container
 */
template <typename T, typename _ = void>
struct is_container : std::false_type {};

template <typename... Ts> struct is_container_helper {};

template <typename T>
struct is_container<
    T, typename std::conditional<
           false,
           is_container_helper<
               typename std::remove_reference<T>::type::value_type,
               typename std::remove_reference<T>::type::size_type,
               typename std::remove_reference<T>::type::iterator,
               typename std::remove_reference<T>::type::const_iterator,
               decltype(std::declval<T>().size()),
               decltype(std::declval<T>().begin()),
               decltype(std::declval<T>().end()),
               decltype(std::declval<T>().cbegin()),
               decltype(std::declval<T>().cend()),
               decltype(std::declval<T>().empty())>,
           void>::type> : public std::true_type {};

/**
 * Type trait to evaluate if template parameter is
 * complying with a std::string
 */
template <typename T>
inline constexpr bool is_std_string_v =
    std::is_same_v<std::decay_t<T>, std::string>;

/**
 * Type trait to evaluate if template parameter is
 * complying with std::map
 */
template <typename T> struct is_map : std::false_type {};

template <typename K, typename V, typename... Args>
struct is_map<std::map<K, V, Args...>> : std::true_type {};

template <typename T>
inline constexpr bool is_map_v = is_map<std::decay_t<T>>::value;

} // namespace aare

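A minimal sketch of how these traits evaluate for common types; the standard-library includes below are only for the example:

    #include "aare/type_traits.hpp"
    #include <chrono>
    #include <map>
    #include <string>
    #include <vector>

    static_assert(aare::is_duration<std::chrono::milliseconds>::value);
    static_assert(!aare::is_duration<int>::value);
    static_assert(aare::is_container<std::vector<int>>::value);
    static_assert(aare::is_std_string_v<std::string>);
    static_assert(aare::is_map_v<std::map<std::string, int>>);
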
@@ -1,4 +1,3 @@
// SPDX-License-Identifier: MPL-2.0
#pragma once

#include <fstream>

@@ -1,11 +1,7 @@
// SPDX-License-Identifier: MPL-2.0
#pragma once
#include <thread>
#include <utility>
#include <vector>

#include "aare/utils/task.hpp"

namespace aare {

template <typename F>
@@ -19,17 +15,4 @@ void RunInParallel(F func, const std::vector<std::pair<int, int>> &tasks) {
        thread.join();
    }
}


template <typename T>
std::vector<NDView<T,3>> make_subviews(NDView<T, 3> &data, ssize_t n_threads) {
    std::vector<NDView<T, 3>> subviews;
    subviews.reserve(n_threads);
    auto limits = split_task(0, data.shape(0), n_threads);
    for (const auto &lim : limits) {
        subviews.push_back(data.sub_view(lim.first, lim.second));
    }
    return subviews;
}

} // namespace aare

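A minimal usage sketch for make_subviews, splitting a 3D view along the first (frame) axis so each thread can work on its own chunk; the NDView construction and the loop body are illustrative assumptions:

    // aare::NDView<uint16_t, 3> data(buffer, {n_frames, rows, cols});
    auto chunks = aare::make_subviews(data, /*n_threads=*/4);
    for (auto &chunk : chunks) {
        // hand each chunk to a worker; chunk.shape(0) frames per chunk
    }
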
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: MPL-2.0
#pragma once

#include <utility>
#include <vector>

@@ -16,7 +16,6 @@ dependencies = [
    "numpy",
    "matplotlib",
]
license = { file = "LICENSE" }

[tool.cibuildwheel]

Some files were not shown because too many files have changed in this diff.