Mirror of https://github.com/slsdetectorgroup/aare.git, synced 2025-12-21 20:41:26 +01:00

Compare commits: developer_ ... dev/reuse (411 commits)
[Commit table: 411 commits with Author / SHA1 / Date columns. Only the abbreviated SHA1s survived in this dump, from 1f2d937d74 through b1b020ad60; the author, date, and message cells were not captured.]
.clang-format (new file, 7 lines)
@@ -0,0 +1,7 @@
BasedOnStyle: LLVM
IndentWidth: 4

UseTab: Never
ColumnLimit: 80
AlignConsecutiveAssignments: false
AlignConsecutiveMacros: true
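The CMakeLists.txt added later in this diff wires this config into check-format and format-files custom targets. A minimal sketch of using it by hand, assuming clang-format 14 or newer is on PATH and the project has already been configured into ./build (the file paths are just examples taken from the source list below):

clang-format -i -style=file:.clang-format src/Frame.cpp include/aare/Frame.hpp
cmake --build build --target format-files    # reformat all sources in place
cmake --build build --target check-format    # fail if anything is mis-formatted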
.clang-tidy (new file, 42 lines)
@@ -0,0 +1,42 @@

---
Checks: '*,
-altera-*,
-android-cloexec-fopen,
-cppcoreguidelines-pro-bounds-array-to-pointer-decay,
-cppcoreguidelines-pro-bounds-pointer-arithmetic,
-fuchsia*,
-readability-else-after-return,
-readability-avoid-const-params-in-decls,
-readability-identifier-length,
-cppcoreguidelines-pro-bounds-constant-array-index,
-cppcoreguidelines-pro-type-reinterpret-cast,
-llvm-header-guard,
-modernize-use-nodiscard,
-misc-non-private-member-variables-in-classes,
-readability-static-accessed-through-instance,
-readability-braces-around-statements,
-readability-isolate-declaration,
-readability-implicit-bool-conversion,
-readability-identifier-length,
-readability-identifier-naming,
-hicpp-signed-bitwise,
-hicpp-no-array-decay,
-hicpp-braces-around-statements,
-google-runtime-references,
-google-readability-todo,
-google-readability-braces-around-statements,
-modernize-use-trailing-return-type,
-llvmlibc-*'

HeaderFilterRegex: \.hpp
FormatStyle: none
CheckOptions:
- { key: readability-identifier-naming.NamespaceCase, value: lower_case }
# - { key: readability-identifier-naming.FunctionCase, value: lower_case }
- { key: readability-identifier-naming.ClassCase, value: CamelCase }
# - { key: readability-identifier-naming.MethodCase, value: CamelCase }
# - { key: readability-identifier-naming.StructCase, value: CamelCase }
# - { key: readability-identifier-naming.VariableCase, value: lower_case }
- { key: readability-identifier-naming.GlobalConstantCase, value: UPPER_CASE }
...
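The CMakeLists.txt further down enables CMAKE_EXPORT_COMPILE_COMMANDS and defines a clang-tidy custom target that runs against this config with -p build. A minimal local sketch, assuming clang-tidy is on PATH and a build directory named ./build:

cmake -S . -B build                      # writes build/compile_commands.json
cmake --build build --target clang-tidy  # lint the sources selected by the custom target
clang-tidy --config-file=.clang-tidy -p build src/Frame.cpp   # or lint a single file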
.gitea/workflows/cmake_build.yml (new file, 58 lines)
@@ -0,0 +1,58 @@
name: Build the package using cmake then documentation

on:
  workflow_dispatch:

permissions:
  contents: read
  pages: write
  id-token: write

jobs:
  build:
    strategy:
      fail-fast: false
      matrix:
        platform: [ubuntu-latest, ]
        python-version: ["3.12", ]

    runs-on: ${{ matrix.platform }}

    defaults:
      run:
        shell: "bash -l {0}"

    steps:
      - uses: actions/checkout@v4

      - name: Setup dev env
        run: |
          sudo apt-get update
          sudo apt-get -y install cmake gcc g++

      - name: Get conda
        uses: conda-incubator/setup-miniconda@v3
        with:
          python-version: ${{ matrix.python-version }}
          environment-file: etc/dev-env.yml
          miniforge-version: latest
          channels: conda-forge
          conda-remove-defaults: "true"

      - name: Build library
        run: |
          mkdir build
          cd build
          cmake .. -DAARE_SYSTEM_LIBRARIES=ON -DAARE_DOCS=ON
          make -j 2
          make docs
.gitea/workflows/rh8-native.yml (new file, 36 lines)
@@ -0,0 +1,36 @@
name: Build on RHEL8

on:
  push:
  workflow_dispatch:

permissions:
  contents: read

jobs:
  build:
    runs-on: "ubuntu-latest"
    container:
      image: gitea.psi.ch/images/rhel8-developer-gitea-actions
    steps:
      # workaround until actions/checkout@v4 is available for RH8
      # - uses: actions/checkout@v4
      - name: Clone repository
        run: |
          echo Cloning ${{ github.ref_name }}
          git clone https://${{secrets.GITHUB_TOKEN}}@gitea.psi.ch/${{ github.repository }}.git --branch=${{ github.ref_name }} .

      - name: Install dependencies
        run: |
          dnf install -y cmake python3.12 python3.12-devel python3.12-pip

      - name: Build library
        run: |
          mkdir build && cd build
          cmake .. -DAARE_PYTHON_BINDINGS=ON -DAARE_TESTS=ON -DPython_FIND_VIRTUALENV=FIRST
          make -j 2

      - name: C++ unit tests
        working-directory: ${{gitea.workspace}}/build
        run: ctest
.gitea/workflows/rh9-native.yml (new file, 31 lines)
@@ -0,0 +1,31 @@
name: Build on RHEL9

on:
  push:
  workflow_dispatch:

permissions:
  contents: read

jobs:
  build:
    runs-on: "ubuntu-latest"
    container:
      image: gitea.psi.ch/images/rhel9-developer-gitea-actions
    steps:
      - uses: actions/checkout@v4

      - name: Install dependencies
        run: |
          dnf install -y cmake python3.12 python3.12-devel python3.12-pip

      - name: Build library
        run: |
          mkdir build && cd build
          cmake .. -DAARE_PYTHON_BINDINGS=ON -DAARE_TESTS=ON
          make -j 2

      - name: C++ unit tests
        working-directory: ${{gitea.workspace}}/build
        run: ctest
.github/workflows/build_and_deploy_conda.yml (new file, vendored, 42 lines)
@@ -0,0 +1,42 @@
name: Build pkgs and deploy if on main

on:
  release:
    types:
      - published

jobs:
  build:
    strategy:
      fail-fast: false
      matrix:
        platform: [ubuntu-latest] # macos-12, windows-2019]
        python-version: ["3.12",]

    runs-on: ${{ matrix.platform }}

    # The setup-miniconda action needs this to activate miniconda
    defaults:
      run:
        shell: "bash -l {0}"

    steps:
      - uses: actions/checkout@v4

      - name: Get conda
        uses: conda-incubator/setup-miniconda@v3
        with:
          python-version: ${{ matrix.python-version }}
          environment-file: etc/dev-env.yml
          miniforge-version: latest
          channels: conda-forge
          conda-remove-defaults: "true"

      - name: Enable upload
        run: conda config --set anaconda_upload yes

      - name: Build
        env:
          CONDA_TOKEN: ${{ secrets.CONDA_TOKEN }}
        run: conda build conda-recipe --user slsdetectorgroup --token ${CONDA_TOKEN}
.github/workflows/build_conda.yml (new file, vendored, 41 lines)
@@ -0,0 +1,41 @@
name: Build pkgs and deploy if on main

on:
  push:
    branches:
      - developer

jobs:
  build:
    strategy:
      fail-fast: false
      matrix:
        platform: [ubuntu-latest, ] # macos-12, windows-2019]
        python-version: ["3.12",]

    runs-on: ${{ matrix.platform }}

    # The setup-miniconda action needs this to activate miniconda
    defaults:
      run:
        shell: "bash -l {0}"

    steps:
      - uses: actions/checkout@v4

      - name: Get conda
        uses: conda-incubator/setup-miniconda@v3
        with:
          python-version: ${{ matrix.python-version }}
          environment-file: etc/dev-env.yml
          miniforge-version: latest
          channels: conda-forge
          conda-remove-defaults: "true"

      - name: Disable upload
        run: conda config --set anaconda_upload no

      - name: Build
        run: conda build conda-recipe
.github/workflows/build_wheel.yml (new file, vendored, 64 lines)
@@ -0,0 +1,64 @@
name: Build wheel

on:
  workflow_dispatch:
  pull_request:
  push:
    branches:
      - main
  release:
    types:
      - published

jobs:
  build_wheels:
    name: Build wheels on ${{ matrix.os }}
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest,]

    steps:
      - uses: actions/checkout@v4

      - name: Build wheels
        run: pipx run cibuildwheel==2.23.0

      - uses: actions/upload-artifact@v4
        with:
          name: cibw-wheels-${{ matrix.os }}-${{ strategy.job-index }}
          path: ./wheelhouse/*.whl

  build_sdist:
    name: Build source distribution
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Build sdist
        run: pipx run build --sdist

      - uses: actions/upload-artifact@v4
        with:
          name: cibw-sdist
          path: dist/*.tar.gz

  upload_pypi:
    needs: [build_wheels, build_sdist]
    runs-on: ubuntu-latest
    environment: pypi
    permissions:
      id-token: write
    if: github.event_name == 'release' && github.event.action == 'published'
    # or, alternatively, upload to PyPI on every tag starting with 'v' (remove on: release above to use this)
    # if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v')
    steps:
      - uses: actions/download-artifact@v4
        with:
          # unpacks all CIBW artifacts into dist/
          pattern: cibw-*
          path: dist
          merge-multiple: true

      - uses: pypa/gh-action-pypi-publish@release/v1
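For reference, the two build jobs above can be reproduced locally. A minimal sketch, assuming pipx is installed and a container engine such as Docker is available (cibuildwheel builds Linux wheels inside manylinux containers):

pipx run cibuildwheel==2.23.0   # wheels are written to ./wheelhouse/
pipx run build --sdist          # the source tarball is written to ./dist/

The wheelhouse/ and dist/ output directories match the entries added to .gitignore later in this diff.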
.github/workflows/build_with_docs.yml (new file, vendored, 78 lines)
@@ -0,0 +1,78 @@
name: Build the package using cmake then documentation

on:
  workflow_dispatch:
  push:
  pull_request:
  release:
    types:
      - published

env:
  # Customize the CMake build type here (Release, Debug, RelWithDebInfo, etc.)
  BUILD_TYPE: Debug

permissions:
  contents: read
  pages: write
  id-token: write

jobs:
  build:
    strategy:
      fail-fast: false
      matrix:
        platform: [ubuntu-latest, macos-latest]
        python-version: ["3.12",]

    runs-on: ${{ matrix.platform }}

    defaults:
      run:
        shell: "bash -l {0}"

    steps:
      - uses: actions/checkout@v4

      - name: Get conda
        uses: conda-incubator/setup-miniconda@v3
        with:
          python-version: ${{ matrix.python-version }}
          environment-file: etc/dev-env.yml
          miniforge-version: latest
          channels: conda-forge
          conda-remove-defaults: "true"

      - name: Build library and docs
        run: |
          mkdir build
          cd build
          cmake .. -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} -DAARE_SYSTEM_LIBRARIES=ON -DAARE_PYTHON_BINDINGS=ON -DAARE_DOCS=ON -DAARE_TESTS=ON
          make -j 4
          make docs

      - name: C++ unit tests
        working-directory: ${{github.workspace}}/build
        run: ctest -C ${{env.BUILD_TYPE}} -j4

      - name: Upload static files as artifact
        if: matrix.platform == 'ubuntu-latest'
        id: deployment
        uses: actions/upload-pages-artifact@v3
        with:
          path: build/docs/html/

  deploy:
    environment:
      name: github-pages
      url: ${{ steps.deployment.outputs.page_url }}
    runs-on: ubuntu-latest
    needs: build
    if: (github.event_name == 'release' && github.event.action == 'published') || (github.event_name == 'workflow_dispatch' )
    steps:
      - name: Deploy to GitHub Pages
        id: deployment
        uses: actions/deploy-pages@v4
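The build job above maps onto a straightforward local sequence. A minimal sketch, assuming conda is installed; the environment name aare-dev is an arbitrary choice here, not something taken from the repository:

conda env create -f etc/dev-env.yml -n aare-dev
conda activate aare-dev
mkdir build && cd build
cmake .. -DCMAKE_BUILD_TYPE=Debug -DAARE_SYSTEM_LIBRARIES=ON -DAARE_PYTHON_BINDINGS=ON -DAARE_DOCS=ON -DAARE_TESTS=ON
make -j 4
make docs
ctest -C Debug -j4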
.gitignore (new file, vendored, 25 lines)
@@ -0,0 +1,25 @@
install/
.cproject
.project
bin/
.settings
*.aux
*.log
*.out
*.toc
*.o
*.so
.*
build/
RELEASE.txt
Testing/

ctbDict.cpp
ctbDict.h

wheelhouse/
dist/

*.pyc
*/__pycache__/*
CMakeLists.txt (new file, 536 lines)
@@ -0,0 +1,536 @@
# SPDX-License-Identifier: MPL-2.0
cmake_minimum_required(VERSION 3.15)

project(aare
    DESCRIPTION "Data processing library for PSI detectors"
    HOMEPAGE_URL "https://github.com/slsdetectorgroup/aare"
    LANGUAGES C CXX
)

# Read VERSION file into project version
set(VERSION_FILE "${CMAKE_CURRENT_SOURCE_DIR}/VERSION")
file(READ "${VERSION_FILE}" VERSION_CONTENT)
string(STRIP "${VERSION_CONTENT}" PROJECT_VERSION_STRING)
set(PROJECT_VERSION ${PROJECT_VERSION_STRING})

set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF)

execute_process(
    COMMAND git log -1 --format=%h
    WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
    OUTPUT_VARIABLE GIT_HASH
    OUTPUT_STRIP_TRAILING_WHITESPACE
)
message(STATUS "Building from git hash: ${GIT_HASH}")

if (${CMAKE_VERSION} VERSION_GREATER "3.24")
    cmake_policy(SET CMP0135 NEW) #Fetch content download timestamp
endif()
cmake_policy(SET CMP0079 NEW)

include(GNUInstallDirs)
include(FetchContent)

#Set default build type if none was specified
include(cmake/helpers.cmake)

default_build_type("Release")
set_std_fs_lib()
message(STATUS "Extra linking to fs lib:${STD_FS_LIB}")

set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake" ${CMAKE_MODULE_PATH})

# General options
option(AARE_PYTHON_BINDINGS "Build python bindings" OFF)
option(AARE_TESTS "Build tests" OFF)
option(AARE_BENCHMARKS "Build benchmarks" OFF)
option(AARE_EXAMPLES "Build examples" OFF)
option(AARE_IN_GITHUB_ACTIONS "Running in Github Actions" OFF)
option(AARE_DOCS "Build documentation" OFF)
option(AARE_VERBOSE "Verbose output" OFF)
option(AARE_CUSTOM_ASSERT "Use custom assert" OFF)
option(AARE_INSTALL_PYTHONEXT "Install the python extension in the install tree under CMAKE_INSTALL_PREFIX/aare/" OFF)
option(AARE_ASAN "Enable AddressSanitizer" OFF)

# Configure which of the dependencies to use FetchContent for
option(AARE_FETCH_FMT "Use FetchContent to download fmt" ON)
option(AARE_FETCH_PYBIND11 "Use FetchContent to download pybind11" ON)
option(AARE_FETCH_CATCH "Use FetchContent to download catch2" ON)
option(AARE_FETCH_JSON "Use FetchContent to download nlohmann::json" ON)
option(AARE_FETCH_ZMQ "Use FetchContent to download libzmq" ON)
option(AARE_FETCH_LMFIT "Use FetchContent to download lmfit" ON)

#Convenience option to use system libraries only (no FetchContent)
option(AARE_SYSTEM_LIBRARIES "Use system libraries" OFF)
if(AARE_SYSTEM_LIBRARIES)
    message(STATUS "Build using system libraries")
    set(AARE_FETCH_FMT OFF CACHE BOOL "Disabled FetchContent for FMT" FORCE)
    set(AARE_FETCH_PYBIND11 OFF CACHE BOOL "Disabled FetchContent for pybind11" FORCE)
    set(AARE_FETCH_CATCH OFF CACHE BOOL "Disabled FetchContent for catch2" FORCE)
    set(AARE_FETCH_JSON OFF CACHE BOOL "Disabled FetchContent for nlohmann::json" FORCE)
    set(AARE_FETCH_ZMQ OFF CACHE BOOL "Disabled FetchContent for libzmq" FORCE)
    # Still fetch lmfit when setting AARE_SYSTEM_LIBRARIES since this is not available
    # on conda-forge
endif()

if(AARE_BENCHMARKS)
    add_subdirectory(benchmarks)
endif()

set(CMAKE_EXPORT_COMPILE_COMMANDS ON)

if(AARE_FETCH_LMFIT)
    #TODO! Should we fetch lmfit from the web or inlcude a tar.gz in the repo?
    set(LMFIT_PATCH_COMMAND git apply ${CMAKE_CURRENT_SOURCE_DIR}/patches/lmfit.patch)

    # For cmake < 3.28 we can't supply EXCLUDE_FROM_ALL to FetchContent_Declare
    # so we need this workaround
    if (${CMAKE_VERSION} VERSION_LESS "3.28")
        FetchContent_Declare(
            lmfit
            GIT_REPOSITORY https://jugit.fz-juelich.de/mlz/lmfit.git
            GIT_TAG main
            PATCH_COMMAND ${LMFIT_PATCH_COMMAND}
            UPDATE_DISCONNECTED 1
        )
    else()
        FetchContent_Declare(
            lmfit
            GIT_REPOSITORY https://jugit.fz-juelich.de/mlz/lmfit.git
            GIT_TAG main
            PATCH_COMMAND ${LMFIT_PATCH_COMMAND}
            UPDATE_DISCONNECTED 1
            EXCLUDE_FROM_ALL 1
        )
    endif()

    #Disable what we don't need from lmfit
    set(BUILD_TESTING OFF CACHE BOOL "")
    set(LMFIT_CPPTEST OFF CACHE BOOL "")
    set(LIB_MAN OFF CACHE BOOL "")
    set(LMFIT_CPPTEST OFF CACHE BOOL "")
    set(BUILD_SHARED_LIBS OFF CACHE BOOL "")

    if (${CMAKE_VERSION} VERSION_LESS "3.28")
        if(NOT lmfit_POPULATED)
            FetchContent_Populate(lmfit)
            add_subdirectory(${lmfit_SOURCE_DIR} ${lmfit_BINARY_DIR} EXCLUDE_FROM_ALL)
        endif()
    else()
        FetchContent_MakeAvailable(lmfit)
    endif()

    set_property(TARGET lmfit PROPERTY POSITION_INDEPENDENT_CODE ON)
else()
    find_package(lmfit REQUIRED)
endif()

if(AARE_FETCH_ZMQ)
    # Fetchcontent_Declare is deprecated need to find a way to update this
    # for now setting the policy to old is enough
    if (${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.30")
        cmake_policy(SET CMP0169 OLD)
    endif()
    set(ZMQ_PATCH_COMMAND git apply ${CMAKE_CURRENT_SOURCE_DIR}/patches/libzmq_cmake_version.patch)
    FetchContent_Declare(
        libzmq
        GIT_REPOSITORY https://github.com/zeromq/libzmq.git
        GIT_TAG v4.3.4
        PATCH_COMMAND ${ZMQ_PATCH_COMMAND}
        UPDATE_DISCONNECTED 1
    )
    # Disable unwanted options from libzmq
    set(BUILD_TESTS OFF CACHE BOOL "Switch off libzmq test build")
    set(BUILD_SHARED OFF CACHE BOOL "Switch off libzmq shared libs")
    set(WITH_PERF_TOOL OFF CACHE BOOL "")
    set(ENABLE_CPACK OFF CACHE BOOL "")
    set(ENABLE_CLANG OFF CACHE BOOL "")
    set(ENABLE_CURVE OFF CACHE BOOL "")
    set(ENABLE_DRAFTS OFF CACHE BOOL "")

    # TODO! Verify that this is what we want to do in aare
    # Using GetProperties and Populate to be able to exclude zmq
    # from install (not possible with FetchContent_MakeAvailable(libzmq))
    FetchContent_GetProperties(libzmq)
    if(NOT libzmq_POPULATED)
        FetchContent_Populate(libzmq)
        add_subdirectory(${libzmq_SOURCE_DIR} ${libzmq_BINARY_DIR} EXCLUDE_FROM_ALL)
    endif()
else()
    find_package(ZeroMQ 4 REQUIRED)
endif()

if (AARE_FETCH_FMT)
    set(FMT_TEST OFF CACHE INTERNAL "disabling fmt tests")
    FetchContent_Declare(
        fmt
        GIT_REPOSITORY https://github.com/fmtlib/fmt.git
        GIT_TAG 10.2.1
        GIT_PROGRESS TRUE
        USES_TERMINAL_DOWNLOAD TRUE
    )
    set(FMT_INSTALL ON CACHE BOOL "")
    # set(FMT_CMAKE_DIR "")
    FetchContent_MakeAvailable(fmt)
    set_property(TARGET fmt PROPERTY POSITION_INDEPENDENT_CODE ON)
    install(TARGETS fmt
        EXPORT ${project}-targets
        LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
        ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
        RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
        INCLUDES DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}
    )
else()
    find_package(fmt 6 REQUIRED)
endif()

if (AARE_FETCH_JSON)
    FetchContent_Declare(
        json
        URL https://github.com/nlohmann/json/releases/download/v3.11.3/json.tar.xz
    )
    set(JSON_Install ON CACHE BOOL "")
    FetchContent_MakeAvailable(json)
    set(NLOHMANN_JSON_TARGET_NAME nlohmann_json)

    install(
        TARGETS nlohmann_json
        EXPORT "${TARGETS_EXPORT_NAME}"
    )
    message(STATUS "target: ${NLOHMANN_JSON_TARGET_NAME}")
else()
    find_package(nlohmann_json 3.11.3 REQUIRED)
endif()

include(GNUInstallDirs)

# If conda build, always set lib dir to 'lib'
if($ENV{CONDA_BUILD})
    set(CMAKE_INSTALL_LIBDIR "lib")
endif()

# Set lower / upper case project names
string(TOUPPER "${PROJECT_NAME}" PROJECT_NAME_UPPER)
string(TOLOWER "${PROJECT_NAME}" PROJECT_NAME_LOWER)

# Set targets export name (used by slsDetectorPackage and dependencies)
set(TARGETS_EXPORT_NAME "${PROJECT_NAME_LOWER}-targets")
set(namespace "aare::")

set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake" ${CMAKE_MODULE_PATH})

# Check if project is being used directly or via add_subdirectory
set(AARE_MASTER_PROJECT OFF)
if (CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_SOURCE_DIR)
    set(AARE_MASTER_PROJECT ON)
endif()

add_library(aare_compiler_flags INTERFACE)
target_compile_features(aare_compiler_flags INTERFACE cxx_std_17)

if(AARE_PYTHON_BINDINGS)
    add_subdirectory(python)
endif()

#################
# MSVC specific #
#################
if(MSVC)
    add_compile_definitions(AARE_MSVC)
    if(CMAKE_BUILD_TYPE STREQUAL "Release")
        message(STATUS "Release build")
        target_compile_options(aare_compiler_flags INTERFACE /O2)
    else()
        message(STATUS "Debug build")
        target_compile_options(
            aare_compiler_flags
            INTERFACE
                /Od
                /Zi
                /MDd
                /D_ITERATOR_DEBUG_LEVEL=2
        )
        target_link_options(
            aare_compiler_flags
            INTERFACE
                /DEBUG:FULL
        )
    endif()
    target_compile_options(
        aare_compiler_flags
        INTERFACE
            /w # disable warnings
    )

else()
    ######################
    # GCC/Clang specific #
    ######################

    if(CMAKE_BUILD_TYPE STREQUAL "Release")
        message(STATUS "Release build")
        target_compile_options(aare_compiler_flags INTERFACE -O3)
    else()
        message(STATUS "Debug build")
    endif()

    # Common flags for GCC and Clang
    target_compile_options(
        aare_compiler_flags
        INTERFACE
            -Wall
            -Wextra
            -pedantic
            -Wshadow
            -Wold-style-cast
            -Wnon-virtual-dtor
            -Woverloaded-virtual
            -Wdouble-promotion
            -Wformat=2
            -Wredundant-decls
            -Wvla
            -Wdouble-promotion
            -Werror=return-type #important can cause segfault in optimzed builds
    )

endif() #GCC/Clang specific

if(AARE_ASAN)
    message(STATUS "AddressSanitizer enabled")
    target_compile_options(
        aare_compiler_flags
        INTERFACE
            -fsanitize=address,undefined,pointer-compare
            -fno-omit-frame-pointer
    )
    target_link_libraries(
        aare_compiler_flags
        INTERFACE
            -fsanitize=address,undefined,pointer-compare
            -fno-omit-frame-pointer
    )
endif()

if(AARE_TESTS)
    enable_testing()
    add_subdirectory(tests)
    target_compile_definitions(tests PRIVATE AARE_TESTS)
endif()

###------------------------------------------------------------------------------MAIN LIBRARY
###------------------------------------------------------------------------------------------

set(PUBLICHEADERS
    include/aare/ArrayExpr.hpp
    include/aare/CalculateEta.hpp
    include/aare/Cluster.hpp
    include/aare/ClusterFinder.hpp
    include/aare/ClusterFile.hpp
    include/aare/CtbRawFile.hpp
    include/aare/ClusterVector.hpp
    include/aare/decode.hpp
    include/aare/defs.hpp
    include/aare/Dtype.hpp
    include/aare/File.hpp
    include/aare/Fit.hpp
    include/aare/FileInterface.hpp
    include/aare/FilePtr.hpp
    include/aare/Frame.hpp
    include/aare/GainMap.hpp
    include/aare/DetectorGeometry.hpp
    include/aare/JungfrauDataFile.hpp
    include/aare/logger.hpp
    include/aare/NDArray.hpp
    include/aare/NDView.hpp
    include/aare/NumpyFile.hpp
    include/aare/NumpyHelpers.hpp
    include/aare/Pedestal.hpp
    include/aare/PixelMap.hpp
    include/aare/RawFile.hpp
    include/aare/RawMasterFile.hpp
    include/aare/RawSubFile.hpp
    include/aare/VarClusterFinder.hpp
    include/aare/utils/task.hpp
)

set(SourceFiles
    ${CMAKE_CURRENT_SOURCE_DIR}/src/calibration.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/CtbRawFile.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/decode.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/defs.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/DetectorGeometry.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/Dtype.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/File.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/FilePtr.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/Fit.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/Frame.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/Interpolator.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/JungfrauDataFile.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyHelpers.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/PixelMap.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/RawFile.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/RawMasterFile.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/RawSubFile.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/to_string.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/utils/task.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/src/utils/ifstream_helpers.cpp
)

add_library(aare_core STATIC ${SourceFiles})
target_include_directories(aare_core PUBLIC
    "$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>"
    "$<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>"
)

set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads REQUIRED)

target_link_libraries(
    aare_core
    PUBLIC
        fmt::fmt
        nlohmann_json::nlohmann_json
        ${STD_FS_LIB} # from helpers.cmake
    PRIVATE
        aare_compiler_flags
        Threads::Threads
        $<BUILD_INTERFACE:lmfit>
)

set_property(TARGET aare_core PROPERTY POSITION_INDEPENDENT_CODE ON)

if(AARE_TESTS)
    target_compile_definitions(aare_core PRIVATE AARE_TESTS)
endif()
if(AARE_VERBOSE)
    target_compile_definitions(aare_core PUBLIC AARE_VERBOSE)
    target_compile_definitions(aare_core PUBLIC AARE_LOG_LEVEL=aare::logDEBUG5)
else()
    target_compile_definitions(aare_core PUBLIC AARE_LOG_LEVEL=aare::logERROR)
endif()

if(AARE_CUSTOM_ASSERT)
    target_compile_definitions(aare_core PUBLIC AARE_CUSTOM_ASSERT)
endif()

set_target_properties(aare_core PROPERTIES
    ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}
    PUBLIC_HEADER "${PUBLICHEADERS}"
)

if(AARE_TESTS)
    set(TestSources
        ${CMAKE_CURRENT_SOURCE_DIR}/src/algorithm.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/calibration.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/defs.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/decode.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/Dtype.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/Frame.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/DetectorGeometry.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/Interpolation.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/RawMasterFile.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/NDArray.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/NDView.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFinder.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterVector.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/Cluster.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/CalculateEta.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFile.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFinderMT.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/Pedestal.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/JungfrauDataFile.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyHelpers.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/RawFile.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/RawSubFile.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/utils/task.test.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/src/to_string.test.cpp
    )
    target_sources(tests PRIVATE ${TestSources} )
endif()

if(AARE_MASTER_PROJECT)
    install(TARGETS aare_core aare_compiler_flags
        EXPORT "${TARGETS_EXPORT_NAME}"
        LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
        ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
        PUBLIC_HEADER DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/aare
    )
endif()

set(CMAKE_INSTALL_RPATH $ORIGIN)
set(CMAKE_BUILD_WITH_INSTALL_RPATH FALSE)

# #Overall target to link to when using the library
# add_library(aare INTERFACE)
# target_link_libraries(aare INTERFACE aare_core aare_compiler_flags)
# target_include_directories(aare INTERFACE
#     $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
#     $<INSTALL_INTERFACE:include>
# )

# add_subdirectory(examples)

if(AARE_DOCS)
    add_subdirectory(docs)
endif()

# custom target to run check formatting with clang-format
add_custom_target(
    check-format
    COMMAND find \( -name "*.cpp" -o -name "*.hpp" \) -not -path "./build/*" | xargs -I {} -n 1 -P 10 bash -c "clang-format -Werror -style=\"file:.clang-format\" {} | diff {} -"
    WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
    COMMENT "Checking code formatting with clang-format"
    VERBATIM
)

add_custom_target(
    format-files
    COMMAND find \( -name "*.cpp" -o -name "*.hpp" \) -not -path "./build/*" | xargs -I {} -n 1 -P 10 bash -c "clang-format -i -style=\"file:.clang-format\" {}"
    WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
    COMMENT "Formatting with clang-format"
    VERBATIM
)

if (AARE_IN_GITHUB_ACTIONS)
    message(STATUS "Running in Github Actions")
    set(CLANG_TIDY_COMMAND "clang-tidy-17")
else()
    set(CLANG_TIDY_COMMAND "clang-tidy")
endif()

add_custom_target(
    clang-tidy
    COMMAND find \( -path "./src/*" -a -not -path "./src/python/*" -a \( -name "*.cpp" -not -name "*.test.cpp" \) \) -not -name "CircularFifo.hpp" -not -name "ProducerConsumerQueue.hpp" -not -name "VariableSizeClusterFinder.hpp" | xargs -I {} -n 1 -P 10 bash -c "${CLANG_TIDY_COMMAND} --config-file=.clang-tidy -p build {}"
    WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
    COMMENT "linting with clang-tidy"
    VERBATIM
)

if(AARE_MASTER_PROJECT)
    set(CMAKE_INSTALL_DIR "share/cmake/${PROJECT_NAME}")
    set(PROJECT_LIBRARIES aare-core aare-compiler-flags )
    include(cmake/package_config.cmake)
endif()
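A minimal out-of-source configure, build, test, and install sequence using the options defined above; this is a sketch rather than anything taken from the repository docs, and the install prefix is an arbitrary example:

cmake -S . -B build -DCMAKE_BUILD_TYPE=Release -DAARE_PYTHON_BINDINGS=ON -DAARE_TESTS=ON
cmake --build build -j 4
ctest --test-dir build
cmake --install build --prefix ~/sw/aare   # arbitrary example prefix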
LICENSE (new file, 373 lines)
@@ -0,0 +1,373 @@
Mozilla Public License Version 2.0
==================================
[The remainder of the file is the unmodified standard MPL-2.0 license text; this dump of the diff breaks off partway through Section 8 (Litigation).]
|
||||||
|
|
||||||
|
9. Miscellaneous
|
||||||
|
----------------
|
||||||
|
|
||||||
|
This License represents the complete agreement concerning the subject
|
||||||
|
matter hereof. If any provision of this License is held to be
|
||||||
|
unenforceable, such provision shall be reformed only to the extent
|
||||||
|
necessary to make it enforceable. Any law or regulation which provides
|
||||||
|
that the language of a contract shall be construed against the drafter
|
||||||
|
shall not be used to construe this License against a Contributor.
|
||||||
|
|
||||||
|
10. Versions of the License
|
||||||
|
---------------------------
|
||||||
|
|
||||||
|
10.1. New Versions
|
||||||
|
|
||||||
|
Mozilla Foundation is the license steward. Except as provided in Section
|
||||||
|
10.3, no one other than the license steward has the right to modify or
|
||||||
|
publish new versions of this License. Each version will be given a
|
||||||
|
distinguishing version number.
|
||||||
|
|
||||||
|
10.2. Effect of New Versions
|
||||||
|
|
||||||
|
You may distribute the Covered Software under the terms of the version
|
||||||
|
of the License under which You originally received the Covered Software,
|
||||||
|
or under the terms of any subsequent version published by the license
|
||||||
|
steward.
|
||||||
|
|
||||||
|
10.3. Modified Versions
|
||||||
|
|
||||||
|
If you create software not governed by this License, and you want to
|
||||||
|
create a new license for such software, you may create and use a
|
||||||
|
modified version of this License if you rename the license and remove
|
||||||
|
any references to the name of the license steward (except to note that
|
||||||
|
such modified license differs from this License).
|
||||||
|
|
||||||
|
10.4. Distributing Source Code Form that is Incompatible With Secondary
|
||||||
|
Licenses
|
||||||
|
|
||||||
|
If You choose to distribute Source Code Form that is Incompatible With
|
||||||
|
Secondary Licenses under the terms of this version of the License, the
|
||||||
|
notice described in Exhibit B of this License must be attached.
|
||||||
|
|
||||||
|
Exhibit A - Source Code Form License Notice
|
||||||
|
-------------------------------------------
|
||||||
|
|
||||||
|
This Source Code Form is subject to the terms of the Mozilla Public
|
||||||
|
License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||||
|
file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||||
|
|
||||||
|
If it is not possible or desirable to put the notice in a particular
|
||||||
|
file, then You may include the notice in a location (such as a LICENSE
|
||||||
|
file in a relevant directory) where a recipient would be likely to look
|
||||||
|
for such a notice.
|
||||||
|
|
||||||
|
You may add additional accurate notices of copyright ownership.
|
||||||
|
|
||||||
|
Exhibit B - "Incompatible With Secondary Licenses" Notice
|
||||||
|
---------------------------------------------------------
|
||||||
|
|
||||||
|
This Source Code Form is "Incompatible With Secondary Licenses", as
|
||||||
|
defined by the Mozilla Public License, v. 2.0.
|
||||||
README.md
@@ -1,2 +1,79 @@

# aare
Data analysis library for PSI hybrid detectors

## Documentation

Detailed documentation including installation can be found in [Documentation](https://slsdetectorgroup.github.io/aare/)

## License

This project is licensed under the MPL-2.0 license.
See the LICENSE file or https://www.mozilla.org/en-US/MPL/ for details.

## Build and install

Prerequisites
- cmake >= 3.14
- C++17 compiler (gcc >= 8)
- python >= 3.10

### Development install (for Python)

```bash
git clone git@github.com:slsdetectorgroup/aare.git --branch=v1 #or using http...
mkdir build
cd build

#configure using cmake
cmake ../aare

#build (replace 4 with the number of threads you want to use)
make -j4
```

Now you can use the Python module from your build directory

```python
import aare
f = aare.File('Some/File/I/Want_to_open_master_0.json')
```

To run from other folders, either add the path to your conda environment using conda-build or add it to your PYTHONPATH, for example as sketched below.
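
A minimal sketch of the PYTHONPATH route from within Python (the path below is a placeholder; replace it with your actual build directory):

```python
import sys
sys.path.append("/path/to/aare/build")  # placeholder: use your own build directory

import aare
print(aare.__file__)  # should now resolve to the module in the build directory
```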

### Install using conda/mamba

```bash
#enable your env first!
conda install aare=2024.10.29.dev0 -c slsdetectorgroup
```

### Install to a custom location and use in your project

Working example in: https://github.com/slsdetectorgroup/aare-examples

```bash
#build and install aare
git clone git@github.com:slsdetectorgroup/aare.git --branch=v1 #or using http...
mkdir build
cd build

#configure using cmake
cmake ../aare -DCMAKE_INSTALL_PREFIX=/where/to/put/aare

#build (replace 4 with the number of threads you want to use)
make -j4

#install
make install


#Now configure your project
cmake .. -DCMAKE_PREFIX_PATH=SOME_PATH
```

### Local build of conda pkgs

```bash
conda build . --variants="{python: [3.11, 3.12, 3.13]}"
```

RELEASE.md (new file, 119 lines)
@@ -0,0 +1,119 @@

# Release notes

## head

### New Features:

- Expanding 24 to 32 bit data
- Decoding digital data from Mythen 302
- Added ``transform_eta_values``. The function transforms :math:`\eta` to uniform spatial coordinates. Should only be used for easier debugging.
- New to_string and string_to functions for aare
- Added exptime and period members to RawMasterFile including decoding
- Removed redundant arr.value(ix,iy...) on NDArray; use arr(ix,iy...) instead
- Removed Print/Print_some/Print_all from NDArray (operator << still works)
- Added const* version of .data()


### 2025.11.21

### New Features:

- Added SPDX-License-Identifier: MPL-2.0 to source files
- Calculate Eta3 supports all cluster types
- Interpolation class supports using cross eta3x3 and eta3x3 on the full cluster as well as eta2x2 on the full cluster
- Interpolation class has an option to calculate the Rosenblatt transform
- Reduction operations to reduce clusters of general size to 2x2 or 3x3 clusters
- `max_sum_2x2` including the index of the subcluster with the highest energy is now available from the Python API
- Interpolation supports bilinear interpolation of eta values for more fine-grained transformed uniform coordinates
- Interpolation is documented
- Added tell to ClusterFile. Returns the position in bytes, for debugging

### Resolved Features:

- calculate_eta coincides with the theoretical definition

### Bugfixes:

- eta calculation assumes correct photon center
- eta transformation to uniform coordinates starts at 0
- Bug in interpolation
- File supports reading the new master json file format (multiple ROIs not supported yet)


### API Changes:

- ClusterFinder for 2x2 Cluster disabled
- eta stores corner as enum class cTopLeft, cTopRight, BottomLeft, cBottomRight indicating the 2x2 subcluster with largest energy relative to the cluster center
- max_sum_2x2 returns corner as index

### 2025.8.22

Features:

- Apply calibration works in G0 if passed a 2D calibration and pedestal
- Count pixels that switch
- Calculate pedestal (also g0 version)
- NDArray::view() needs an lvalue to reduce issues with the view outliving the array


Bugfixes:

- Now using glibc 2.17 in conda builds (was using the host)
- Fixed shifted pixels in clusters close to the edge of a frame

### 2025.7.18

Features:

- Cluster finder now works with 5x5, 7x7 and 9x9 clusters
- Added ClusterVector::empty() member
- Added apply_calibration function for Jungfrau data

Bugfixes:
- Fixed reading RawFiles with ROI fully excluding some sub files.
- Decoding of MH02 files placed the pixels in the wrong position
- Removed unused file: ClusterFile.cpp


### 2025.5.22

Features:

- Added scurve fitting

Bugfixes:

- Fixed crash when opening raw files with a large number of data files

## Download, Documentation & Support

### Download

The Source Code:
https://github.com/slsdetectorgroup/aare


### Documentation

Documentation including installation details:
https://github.com/slsdetectorgroup/aare


### Support

erik.frojdh@psi.ch \
alice.mazzoleni@psi.ch \
dhanya.thattil@psi.ch

benchmarks/CMakeLists.txt (new file, 28 lines)
@@ -0,0 +1,28 @@

# SPDX-License-Identifier: MPL-2.0

include(FetchContent)

FetchContent_Declare(
    benchmark
    GIT_REPOSITORY https://github.com/google/benchmark.git
    GIT_TAG v1.8.3 # Change to the latest version if needed
)

# Ensure Google Benchmark is built correctly
set(BENCHMARK_ENABLE_TESTING OFF CACHE BOOL "" FORCE)

FetchContent_MakeAvailable(benchmark)

add_executable(benchmarks)

target_sources(benchmarks PRIVATE ndarray_benchmark.cpp calculateeta_benchmark.cpp reduce_benchmark.cpp)

# Link Google Benchmark and other necessary libraries
target_link_libraries(benchmarks PRIVATE benchmark::benchmark aare_core aare_compiler_flags)

# Set output properties
set_target_properties(benchmarks PROPERTIES
    RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}
    OUTPUT_NAME run_benchmarks
)

benchmarks/calculateeta_benchmark.cpp (new file, 104 lines)
@@ -0,0 +1,104 @@

// SPDX-License-Identifier: MPL-2.0
#include "aare/CalculateEta.hpp"
#include "aare/ClusterFile.hpp"
#include <benchmark/benchmark.h>

using namespace aare;

class ClusterFixture : public benchmark::Fixture {
  public:
    Cluster<int, 2, 2> cluster_2x2{};
    Cluster<int, 3, 3> cluster_3x3{};
    Cluster<int, 4, 4> cluster_4x4{};

  private:
    using benchmark::Fixture::SetUp;

    void SetUp([[maybe_unused]] const benchmark::State &state) override {
        int temp_data[4] = {1, 2, 3, 1};
        std::copy(std::begin(temp_data), std::end(temp_data),
                  std::begin(cluster_2x2.data));

        cluster_2x2.x = 0;
        cluster_2x2.y = 0;

        int temp_data2[9] = {1, 2, 3, 1, 3, 4, 5, 1, 20};
        std::copy(std::begin(temp_data2), std::end(temp_data2),
                  std::begin(cluster_3x3.data));

        cluster_3x3.x = 0;
        cluster_3x3.y = 0;

        int temp_data3[16] = {1, 2,  3,  4,  5,  6,  7,  8,
                              9, 10, 11, 12, 13, 14, 15, 16};
        std::copy(std::begin(temp_data3), std::end(temp_data3),
                  std::begin(cluster_4x4.data));
        cluster_4x4.x = 0;
        cluster_4x4.y = 0;
    }

    // void TearDown(::benchmark::State& state) {
    // }
};

BENCHMARK_F(ClusterFixture, Calculate2x2Eta)(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        Eta2 eta = calculate_eta2(cluster_2x2);
        benchmark::DoNotOptimize(eta);
    }
}

// almost takes double the time
BENCHMARK_F(ClusterFixture, CalculateGeneralEtaFor2x2Cluster)
(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        Eta2 eta = calculate_eta2<int, 2, 2>(cluster_2x2);
        benchmark::DoNotOptimize(eta);
    }
}

BENCHMARK_F(ClusterFixture, Calculate3x3Eta)(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        Eta2 eta = calculate_eta2(cluster_3x3);
        benchmark::DoNotOptimize(eta);
    }
}

// almost takes double the time
BENCHMARK_F(ClusterFixture, CalculateGeneralEtaFor3x3Cluster)
(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        Eta2 eta = calculate_eta2<int, 3, 3>(cluster_3x3);
        benchmark::DoNotOptimize(eta);
    }
}

BENCHMARK_F(ClusterFixture, Calculate2x2Etawithreduction)
(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        auto reduced_cluster = reduce_to_2x2(cluster_4x4);
        Eta2 eta = calculate_eta2(reduced_cluster);
        auto reduced_cluster_from_3x3 = reduce_to_2x2(cluster_3x3);
        Eta2 eta2 = calculate_eta2(reduced_cluster_from_3x3);
        benchmark::DoNotOptimize(eta);
        benchmark::DoNotOptimize(eta2);
    }
}

BENCHMARK_F(ClusterFixture, Calculate2x2Etawithoutreduction)
(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        Eta2 eta = calculate_eta2(cluster_4x4);
        Eta2 eta2 = calculate_eta2(cluster_3x3);
        benchmark::DoNotOptimize(eta);
        benchmark::DoNotOptimize(eta2);
    }
}

// BENCHMARK_MAIN();

benchmarks/ndarray_benchmark.cpp (new file, 133 lines)
@@ -0,0 +1,133 @@

// SPDX-License-Identifier: MPL-2.0
#include "aare/NDArray.hpp"
#include <benchmark/benchmark.h>

using aare::NDArray;

constexpr ssize_t size = 1024;
class TwoArrays : public benchmark::Fixture {
  public:
    NDArray<int, 2> a{{size, size}, 0};
    NDArray<int, 2> b{{size, size}, 0};
    void SetUp(::benchmark::State &state) {
        for (uint32_t i = 0; i < size; i++) {
            for (uint32_t j = 0; j < size; j++) {
                a(i, j) = i * j + 1;
                b(i, j) = i * j + 1;
            }
        }
    }

    // void TearDown(::benchmark::State& state) {
    // }
};

BENCHMARK_F(TwoArrays, AddWithOperator)(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        NDArray<int, 2> res = a + b;
        benchmark::DoNotOptimize(res);
    }
}
BENCHMARK_F(TwoArrays, AddWithIndex)(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        NDArray<int, 2> res(a.shape());
        for (uint32_t i = 0; i < a.size(); i++) {
            res(i) = a(i) + b(i);
        }
        benchmark::DoNotOptimize(res);
    }
}

BENCHMARK_F(TwoArrays, SubtractWithOperator)(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        NDArray<int, 2> res = a - b;
        benchmark::DoNotOptimize(res);
    }
}
BENCHMARK_F(TwoArrays, SubtractWithIndex)(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        NDArray<int, 2> res(a.shape());
        for (uint32_t i = 0; i < a.size(); i++) {
            res(i) = a(i) - b(i);
        }
        benchmark::DoNotOptimize(res);
    }
}

BENCHMARK_F(TwoArrays, MultiplyWithOperator)(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        NDArray<int, 2> res = a * b;
        benchmark::DoNotOptimize(res);
    }
}
BENCHMARK_F(TwoArrays, MultiplyWithIndex)(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        NDArray<int, 2> res(a.shape());
        for (uint32_t i = 0; i < a.size(); i++) {
            res(i) = a(i) * b(i);
        }
        benchmark::DoNotOptimize(res);
    }
}

BENCHMARK_F(TwoArrays, DivideWithOperator)(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        NDArray<int, 2> res = a / b;
        benchmark::DoNotOptimize(res);
    }
}
BENCHMARK_F(TwoArrays, DivideWithIndex)(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        NDArray<int, 2> res(a.shape());
        for (uint32_t i = 0; i < a.size(); i++) {
            res(i) = a(i) / b(i);
        }
        benchmark::DoNotOptimize(res);
    }
}

BENCHMARK_F(TwoArrays, FourAddWithOperator)(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        NDArray<int, 2> res = a + b + a + b;
        benchmark::DoNotOptimize(res);
    }
}
BENCHMARK_F(TwoArrays, FourAddWithIndex)(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        NDArray<int, 2> res(a.shape());
        for (uint32_t i = 0; i < a.size(); i++) {
            res(i) = a(i) + b(i) + a(i) + b(i);
        }
        benchmark::DoNotOptimize(res);
    }
}

BENCHMARK_F(TwoArrays, MultiplyAddDivideWithOperator)(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        NDArray<int, 2> res = a * a + b / a;
        benchmark::DoNotOptimize(res);
    }
}
BENCHMARK_F(TwoArrays, MultiplyAddDivideWithIndex)(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        NDArray<int, 2> res(a.shape());
        for (uint32_t i = 0; i < a.size(); i++) {
            res(i) = a(i) * a(i) + b(i) / a(i);
        }
        benchmark::DoNotOptimize(res);
    }
}

BENCHMARK_MAIN();

benchmarks/reduce_benchmark.cpp (new file, 169 lines)
@@ -0,0 +1,169 @@

// SPDX-License-Identifier: MPL-2.0
#include "aare/Cluster.hpp"
#include <benchmark/benchmark.h>

using namespace aare;

class ClustersForReduceFixture : public benchmark::Fixture {
  public:
    Cluster<int, 5, 5> cluster_5x5{};
    Cluster<int, 3, 3> cluster_3x3{};

  private:
    using benchmark::Fixture::SetUp;

    void SetUp([[maybe_unused]] const benchmark::State &state) override {
        int temp_data[25] = {1, 1, 1, 1, 1, 1, 1, 2, 1, 1,
                             1, 2, 3, 1, 2, 1, 1, 1, 1, 2};
        std::copy(std::begin(temp_data), std::end(temp_data),
                  std::begin(cluster_5x5.data));

        cluster_5x5.x = 5;
        cluster_5x5.y = 5;

        int temp_data2[9] = {1, 1, 1, 2, 3, 1, 2, 2, 1};
        std::copy(std::begin(temp_data2), std::end(temp_data2),
                  std::begin(cluster_3x3.data));

        cluster_3x3.x = 5;
        cluster_3x3.y = 5;
    }

    // void TearDown(::benchmark::State& state) {
    // }
};

template <typename T>
Cluster<T, 3, 3, uint16_t> reduce_to_3x3(const Cluster<T, 5, 5, uint16_t> &c) {
    Cluster<T, 3, 3, uint16_t> result;

    // Write out the sums in the hope that the compiler can optimize this
    std::array<T, 9> sum_3x3_subclusters;
    sum_3x3_subclusters[0] = c.data[0] + c.data[1] + c.data[2] + c.data[5] +
                             c.data[6] + c.data[7] + c.data[10] + c.data[11] +
                             c.data[12];
    sum_3x3_subclusters[1] = c.data[1] + c.data[2] + c.data[3] + c.data[6] +
                             c.data[7] + c.data[8] + c.data[11] + c.data[12] +
                             c.data[13];
    sum_3x3_subclusters[2] = c.data[2] + c.data[3] + c.data[4] + c.data[7] +
                             c.data[8] + c.data[9] + c.data[12] + c.data[13] +
                             c.data[14];
    sum_3x3_subclusters[3] = c.data[5] + c.data[6] + c.data[7] + c.data[10] +
                             c.data[11] + c.data[12] + c.data[15] + c.data[16] +
                             c.data[17];
    sum_3x3_subclusters[4] = c.data[6] + c.data[7] + c.data[8] + c.data[11] +
                             c.data[12] + c.data[13] + c.data[16] + c.data[17] +
                             c.data[18];
    sum_3x3_subclusters[5] = c.data[7] + c.data[8] + c.data[9] + c.data[12] +
                             c.data[13] + c.data[14] + c.data[17] + c.data[18] +
                             c.data[19];
    sum_3x3_subclusters[6] = c.data[10] + c.data[11] + c.data[12] + c.data[15] +
                             c.data[16] + c.data[17] + c.data[20] + c.data[21] +
                             c.data[22];
    sum_3x3_subclusters[7] = c.data[11] + c.data[12] + c.data[13] + c.data[16] +
                             c.data[17] + c.data[18] + c.data[21] + c.data[22] +
                             c.data[23];
    sum_3x3_subclusters[8] = c.data[12] + c.data[13] + c.data[14] + c.data[17] +
                             c.data[18] + c.data[19] + c.data[22] + c.data[23] +
                             c.data[24];

    auto index = std::max_element(sum_3x3_subclusters.begin(),
                                  sum_3x3_subclusters.end()) -
                 sum_3x3_subclusters.begin();

    switch (index) {
    case 0:
        result.x = c.x - 1;
        result.y = c.y + 1;
        result.data = {c.data[0], c.data[1],  c.data[2],  c.data[5], c.data[6],
                       c.data[7], c.data[10], c.data[11], c.data[12]};
        break;
    case 1:
        result.x = c.x;
        result.y = c.y + 1;
        result.data = {c.data[1], c.data[2],  c.data[3],  c.data[6], c.data[7],
                       c.data[8], c.data[11], c.data[12], c.data[13]};
        break;
    case 2:
        result.x = c.x + 1;
        result.y = c.y + 1;
        result.data = {c.data[2], c.data[3],  c.data[4],  c.data[7], c.data[8],
                       c.data[9], c.data[12], c.data[13], c.data[14]};
        break;
    case 3:
        result.x = c.x - 1;
        result.y = c.y;
        result.data = {c.data[5],  c.data[6],  c.data[7],
                       c.data[10], c.data[11], c.data[12],
                       c.data[15], c.data[16], c.data[17]};
        break;
    case 4:
        result.x = c.x + 1;
        result.y = c.y;
        result.data = {c.data[6],  c.data[7],  c.data[8],
                       c.data[11], c.data[12], c.data[13],
                       c.data[16], c.data[17], c.data[18]};
        break;
    case 5:
        result.x = c.x + 1;
        result.y = c.y;
        result.data = {c.data[7],  c.data[8],  c.data[9],
                       c.data[12], c.data[13], c.data[14],
                       c.data[17], c.data[18], c.data[19]};
        break;
    case 6:
        result.x = c.x + 1;
        result.y = c.y - 1;
        result.data = {c.data[10], c.data[11], c.data[12],
                       c.data[15], c.data[16], c.data[17],
                       c.data[20], c.data[21], c.data[22]};
        break;
    case 7:
        result.x = c.x + 1;
        result.y = c.y - 1;
        result.data = {c.data[11], c.data[12], c.data[13],
                       c.data[16], c.data[17], c.data[18],
                       c.data[21], c.data[22], c.data[23]};
        break;
    case 8:
        result.x = c.x + 1;
        result.y = c.y - 1;
        result.data = {c.data[12], c.data[13], c.data[14],
                       c.data[17], c.data[18], c.data[19],
                       c.data[22], c.data[23], c.data[24]};
        break;
    }
    return result;
}

BENCHMARK_F(ClustersForReduceFixture, Reduce2x2)(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        benchmark::DoNotOptimize(reduce_to_2x2<int, 3, 3, uint16_t>(
            cluster_3x3)); // make sure compiler evaluates the expression
    }
}

BENCHMARK_F(ClustersForReduceFixture, SpecificReduce2x2)(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        benchmark::DoNotOptimize(reduce_to_2x2<int>(cluster_3x3));
    }
}

BENCHMARK_F(ClustersForReduceFixture, Reduce3x3)(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        benchmark::DoNotOptimize(
            reduce_to_3x3<int, 5, 5, uint16_t>(cluster_5x5));
    }
}

BENCHMARK_F(ClustersForReduceFixture, SpecificReduce3x3)(benchmark::State &st) {
    for (auto _ : st) {
        // This code gets timed
        benchmark::DoNotOptimize(reduce_to_3x3<int>(cluster_5x5));
    }
}

cmake/FindSphinx.cmake (new file, 11 lines)
@@ -0,0 +1,11 @@

#Look for an executable called sphinx-build
find_program(SPHINX_EXECUTABLE
             NAMES sphinx-build sphinx-build-3.6
             DOC "Path to sphinx-build executable")

include(FindPackageHandleStandardArgs)

#Handle standard arguments to find_package like REQUIRED and QUIET
find_package_handle_standard_args(Sphinx
                                  "Failed to find sphinx-build executable"
                                  SPHINX_EXECUTABLE)

cmake/helpers.cmake (new file, 46 lines)
@@ -0,0 +1,46 @@

function(default_build_type val)
    if (NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES)
        message(STATUS "No build type selected, default to Release")
        set(CMAKE_BUILD_TYPE ${val} CACHE STRING "Build type (default ${val})" FORCE)
    endif()
endfunction()

function(set_std_fs_lib)
    # from pybind11
    # Check if we need to add -lstdc++fs or -lc++fs or nothing
    if(DEFINED CMAKE_CXX_STANDARD AND CMAKE_CXX_STANDARD LESS 17)
        set(STD_FS_NO_LIB_NEEDED TRUE)
    elseif(MSVC)
        set(STD_FS_NO_LIB_NEEDED TRUE)
    else()
        file(
            WRITE ${CMAKE_CURRENT_BINARY_DIR}/main.cpp
            "#include <filesystem>\nint main(int argc, char ** argv) {\n std::filesystem::path p(argv[0]);\n return p.string().length();\n}"
        )
        try_compile(
            STD_FS_NO_LIB_NEEDED ${CMAKE_CURRENT_BINARY_DIR}
            SOURCES ${CMAKE_CURRENT_BINARY_DIR}/main.cpp
            COMPILE_DEFINITIONS -std=c++17)
        try_compile(
            STD_FS_NEEDS_STDCXXFS ${CMAKE_CURRENT_BINARY_DIR}
            SOURCES ${CMAKE_CURRENT_BINARY_DIR}/main.cpp
            COMPILE_DEFINITIONS -std=c++17
            LINK_LIBRARIES stdc++fs)
        try_compile(
            STD_FS_NEEDS_CXXFS ${CMAKE_CURRENT_BINARY_DIR}
            SOURCES ${CMAKE_CURRENT_BINARY_DIR}/main.cpp
            COMPILE_DEFINITIONS -std=c++17
            LINK_LIBRARIES c++fs)
    endif()

    if(${STD_FS_NEEDS_STDCXXFS})
        set(STD_FS_LIB stdc++fs PARENT_SCOPE)
    elseif(${STD_FS_NEEDS_CXXFS})
        set(STD_FS_LIB c++fs PARENT_SCOPE)
    elseif(${STD_FS_NO_LIB_NEEDED})
        set(STD_FS_LIB "" PARENT_SCOPE)
    else()
        message(WARNING "Unknown C++17 compiler - not passing -lstdc++fs")
        set(STD_FS_LIB "")
    endif()
endfunction()

cmake/package_config.cmake (new file, 35 lines)
@@ -0,0 +1,35 @@

# This cmake code creates the configuration that is found and used by
# find_package() of another cmake project

# get lower and upper case project name for the configuration files

# configure and install the configuration files
include(CMakePackageConfigHelpers)

configure_package_config_file(
    "${CMAKE_SOURCE_DIR}/cmake/project-config.cmake.in"
    "${PROJECT_BINARY_DIR}/${PROJECT_NAME_LOWER}-config.cmake"
    INSTALL_DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME_LOWER}
    PATH_VARS CMAKE_INSTALL_DIR)

write_basic_package_version_file(
    "${PROJECT_BINARY_DIR}/${PROJECT_NAME_LOWER}-config-version.cmake"
    VERSION ${PROJECT_VERSION}
    COMPATIBILITY SameMajorVersion
)

install(FILES
    "${PROJECT_BINARY_DIR}/${PROJECT_NAME_LOWER}-config.cmake"
    "${PROJECT_BINARY_DIR}/${PROJECT_NAME_LOWER}-config-version.cmake"
    COMPONENT devel
    DESTINATION ${CMAKE_INSTALL_DIR}
)


if (PROJECT_LIBRARIES OR PROJECT_STATIC_LIBRARIES)
    install(
        EXPORT "${TARGETS_EXPORT_NAME}"
        FILE ${PROJECT_NAME_LOWER}-targets.cmake
        DESTINATION ${CMAKE_INSTALL_DIR}
    )
endif ()

cmake/project-config.cmake.in (new file, 28 lines)
@@ -0,0 +1,28 @@

# Config file for @PROJECT_NAME_LOWER@
#
# It defines the following variables:
#
# @PROJECT_NAME_UPPER@_INCLUDE_DIRS - include directory
# @PROJECT_NAME_UPPER@_LIBRARIES - all dynamic libraries
# @PROJECT_NAME_UPPER@_STATIC_LIBRARIES - all static libraries

@PACKAGE_INIT@

include(CMakeFindDependencyMacro)

set(SLS_USE_HDF5 "@SLS_USE_HDF5@")

# List dependencies
find_dependency(Threads)
find_dependency(fmt)
find_dependency(nlohmann_json)

# Add optional dependencies here
if (SLS_USE_HDF5)
    find_dependency(HDF5)
endif ()

set_and_check(@PROJECT_NAME_UPPER@_CMAKE_INCLUDE_DIRS "@PACKAGE_CMAKE_INSTALL_DIR@")

include("${CMAKE_CURRENT_LIST_DIR}/@TARGETS_EXPORT_NAME@.cmake")
check_required_components("@PROJECT_NAME@")

conda-recipe/conda_build_config.yaml (new file, 16 lines)
@@ -0,0 +1,16 @@

python:
  - 3.11
  - 3.12
  - 3.13

c_compiler:
  - gcc          # [linux]

c_stdlib:
  - sysroot      # [linux]

cxx_compiler:
  - gxx          # [linux]

c_stdlib_version:  # [linux]
  - 2.17           # [linux]

conda-recipe/meta.yaml (new file, 53 lines)
@@ -0,0 +1,53 @@

source:
  path: ../

{% set version = load_file_regex(load_file = 'VERSION', regex_pattern = '(\d+(?:\.\d+)*(?:[\+\w\.]+))').group(1) %}
package:
  name: aare
  version: {{version}}

source:
  path: ..

build:
  number: 0
  script:
    - unset CMAKE_GENERATOR && {{ PYTHON }} -m pip install . -vv

requirements:
  build:
    - {{ compiler('c') }}
    - {{ stdlib("c") }}
    - {{ compiler('cxx') }}
    - cmake
    - ninja

  host:
    - python
    - pip
    - numpy=2.1
    - scikit-build-core
    - pybind11 >=2.13.0
    - matplotlib # needed in host to solve the environment for run

  run:
    - python
    - {{ pin_compatible('numpy') }}
    - matplotlib

test:
  imports:
    - aare
  requires:
    - pytest
    - boost-histogram
  source_files:
    - python/tests
  commands:
    - python -m pytest python/tests

about:
  license: SPDX-License-Identifier MPL-2.0
  summary: Data analysis library for hybrid pixel detectors from PSI

docs/CMakeLists.txt (new file, 54 lines)
@@ -0,0 +1,54 @@

# SPDX-License-Identifier: MPL-2.0
find_package(Doxygen REQUIRED)
find_package(Sphinx REQUIRED)

#Doxygen
set(DOXYGEN_IN ${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile.in)
set(DOXYGEN_OUT ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile)
configure_file(${DOXYGEN_IN} ${DOXYGEN_OUT} @ONLY)

#Sphinx
set(SPHINX_SOURCE ${CMAKE_CURRENT_SOURCE_DIR}/src)
set(SPHINX_BUILD ${CMAKE_CURRENT_BINARY_DIR})


file(GLOB_RECURSE SPHINX_SOURCE_FILES
    CONFIGURE_DEPENDS
    RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}/src"
    "${CMAKE_CURRENT_SOURCE_DIR}/src/*.rst"
)

foreach(relpath IN LISTS SPHINX_SOURCE_FILES)
    set(src "${CMAKE_CURRENT_SOURCE_DIR}/src/${relpath}")
    set(dst "${SPHINX_BUILD}/src/${relpath}")

    message(STATUS "Copying ${src} to ${dst}")
    configure_file("${src}" "${dst}" COPYONLY)
endforeach()

configure_file(
    "${CMAKE_CURRENT_SOURCE_DIR}/conf.py.in"
    "${SPHINX_BUILD}/conf.py"
    @ONLY
)

file(COPY "${CMAKE_CURRENT_SOURCE_DIR}/figures"
     DESTINATION "${SPHINX_BUILD}")

configure_file(
    "${CMAKE_CURRENT_SOURCE_DIR}/static/extra.css"
    "${SPHINX_BUILD}/static/css/extra.css"
    @ONLY
)

add_custom_target(
    docs
    COMMAND ${DOXYGEN_EXECUTABLE} ${DOXYGEN_OUT}
    COMMAND ${SPHINX_EXECUTABLE} -a -b html
            -Dbreathe_projects.aare=${CMAKE_CURRENT_BINARY_DIR}/xml
            -c "${SPHINX_BUILD}"
            ${SPHINX_BUILD}/src
            ${SPHINX_BUILD}/html
    COMMENT "Generating documentation with Sphinx"
)

docs/Doxyfile.in (new file, 1917 lines)
File diff suppressed because it is too large.
docs/conf.py.in (new file, 63 lines)
@@ -0,0 +1,63 @@

# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))

print(sys.path)

# -- Project information -----------------------------------------------------

project = 'aare'
copyright = '2024, CPS Detector Group'
author = 'CPS Detector Group'
version = '@PROJECT_VERSION@'

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['breathe',
              'sphinx.ext.autodoc',
              'sphinx.ext.napoleon',
]

breathe_default_project = "aare"
napoleon_use_ivar = True


# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']


# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "furo"

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']


def setup(app):
    app.add_css_file('css/extra.css')  # may also be an URL

New binary files (diff not shown):
- docs/figures/Eta2x2.pdf
- docs/figures/Eta2x2.png (9.3 KiB)
- docs/figures/Eta2x2Full.pdf
- docs/figures/Eta2x2Full.png (14 KiB)
- docs/figures/Eta3x3.pdf
- docs/figures/Eta3x3.png (20 KiB)
- docs/figures/Eta3x3Cross.pdf
- docs/figures/Eta3x3Cross.png (9.5 KiB)
docs/src/Cluster.rst (new file, 15 lines)
@@ -0,0 +1,15 @@

Cluster
========

.. doxygenstruct:: aare::Cluster
    :members:
    :undoc-members:
    :private-members:


**Free Functions:**

.. doxygenfunction:: aare::reduce_to_3x3(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>&)

.. doxygenfunction:: aare::reduce_to_2x2(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>&)

docs/src/ClusterFile.rst (new file, 8 lines)
@@ -0,0 +1,8 @@

ClusterFile
=============

.. doxygenclass:: aare::ClusterFile
    :members:
    :undoc-members:
    :private-members:


docs/src/ClusterFinder.rst (new file, 7 lines)
@@ -0,0 +1,7 @@

ClusterFinder
=============


.. doxygenclass:: aare::ClusterFinder
    :members:
    :undoc-members:


docs/src/ClusterFinderMT.rst (new file, 7 lines)
@@ -0,0 +1,7 @@

ClusterFinderMT
==================


.. doxygenclass:: aare::ClusterFinderMT
    :members:
    :undoc-members:


docs/src/ClusterVector.rst (new file, 22 lines)
@@ -0,0 +1,22 @@

ClusterVector
=============

.. doxygenclass:: aare::ClusterVector
    :members:
    :undoc-members:
    :private-members:


.. doxygenclass:: aare::ClusterVector< Cluster< T, ClusterSizeX, ClusterSizeY, CoordType > >
    :members:
    :undoc-members:
    :private-members:


**Free Functions:**

.. doxygenfunction:: aare::reduce_to_3x3(const ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>>&)

.. doxygenfunction:: aare::reduce_to_2x2(const ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>>&)

docs/src/Consume.rst (new file, 19 lines)
@@ -0,0 +1,19 @@

Use from C++
========================

There are a few different ways to use aare in your C++ project. Which one you choose
depends on how you intend to work with the library and how you manage your dependencies.


Install and use cmake with find_package(aare)
-------------------------------------------------

https://github.com/slsdetectorgroup/aare-examples

.. include:: _install.rst


Use as a submodule
-------------------

Coming soon...


docs/src/Dtype.rst (new file, 7 lines)
@@ -0,0 +1,7 @@

Dtype
=============


.. doxygenclass:: aare::Dtype
    :members:
    :undoc-members:


docs/src/File.rst (new file, 8 lines)
@@ -0,0 +1,8 @@

File
=============


.. doxygenclass:: aare::File
    :members:
    :undoc-members:
    :private-members:


docs/src/Frame.rst (new file, 8 lines)
@@ -0,0 +1,8 @@

Frame
=============


.. doxygenclass:: aare::Frame
    :members:
    :undoc-members:
    :private-members:

docs/src/Installation.rst (new file, 106 lines)
@@ -0,0 +1,106 @@

****************
Installation
****************

.. attention ::

    - https://cliutils.gitlab.io/modern-cmake/README.html

conda/mamba
~~~~~~~~~~~~~~~~~~

This is the recommended way to install aare. Using a package manager makes it easy to
switch between versions and is (one of) the most convenient ways to install up to date
dependencies on older distributions.

.. note ::

    aare is developing rapidly. Check for the latest release by
    using: **conda search aare -c slsdetectorgroup**


.. code-block:: bash

    # Install a specific version:
    conda install aare=2024.11.11.dev0 -c slsdetectorgroup


cmake build (development install)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

If you are working on aare or want to test out a version that doesn't yet have
a conda package, build using cmake and then run from the build folder.

.. code-block:: bash

    git clone git@github.com:slsdetectorgroup/aare.git --branch=v1 #or using http...
    mkdir build
    cd build

    #configure using cmake
    cmake ../aare

    #build (replace 4 with the number of threads you want to use)
    make -j4


    # add the build folder to your PYTHONPATH and then you should be able to
    # import aare in python

cmake build + install and use in your C++ project
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. warning ::

    When building aare with default settings we also include fmt and nlohmann_json.
    Installation to a custom location is highly recommended.


.. note ::

    It is also possible to install aare with conda and then use it in your C++ project.

.. include:: _install.rst


cmake options
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

For detailed options see the CMakeLists.txt file in the root directory of the project.

.. code-block:: bash

    # usage (or edit with ccmake .)
    cmake ../aare -DOPTION1=ON -DOPTION2=OFF


**AARE_SYSTEM_LIBRARIES "Use system libraries" OFF**

Use system libraries instead of using FetchContent to pull in dependencies. Default option is off.


**AARE_PYTHON_BINDINGS "Build python bindings" ON**

Build the Python bindings. Default option is on.

.. warning ::

    If you have a newer system Python compared to the one in your virtual environment,
    you might have to pass -DPython_FIND_VIRTUALENV=ONLY to cmake.

**AARE_TESTS "Build tests" OFF**

Build unit tests. Default option is off.

**AARE_EXAMPLES "Build examples" OFF**

**AARE_DOCS "Build documentation" OFF**

Build documentation. Needs doxygen, sphinx and breathe. Default option is off.
Requires a separate make docs.

**AARE_VERBOSE "Verbose output" OFF**

**AARE_CUSTOM_ASSERT "Use custom assert" OFF**

Enable custom assert macro to check for errors. Default option is off.
docs/src/Interpolation.rst (new file, 160 lines)
@@ -0,0 +1,160 @@

.. _Interpolation_C++API:

Interpolation
==============

The Interpolation class implements the :math:`\eta`-interpolation method.
This interpolation technique is based on charge sharing: for detected photon hits (e.g. clusters), it refines the estimated photon hit using information from neighboring pixels.

The method relies on the so-called :math:`\eta`-functions, which describe the relationship between the energy measured in the central cluster pixel (the initially estimated photon hit) and the energies measured in its neighboring pixels.
Depending on how much energy each neighboring pixel receives relative to the central pixel, the estimated photon hit is shifted toward that neighbor by a certain offset, yielding the actual photon hit position :math:`(x, y)` within the pixel.

The mapping between the :math:`\eta` values and the corresponding spatial photon position :math:`(x,y)` can be viewed as an optimal transport problem.

One can readily compute the probability distribution :math:`P_{\eta}` of the :math:`\eta` values by forming a 2D histogram.
However, the probability distribution :math:`P_{x,y}` of the true photon positions is generally unknown unless the detector is illuminated uniformly (i.e. under flat-field conditions).
In a flat field, the photon positions are uniformly distributed.

With this assumption, the problem reduces to determining a transport map :math:`T:(\eta_x,\eta_y) \rightarrow (x,y)` that pushes forward the distribution of :math:`(\eta_x, \eta_y)` to the known uniform distribution of photon positions of a flat field.

The map :math:`T` is given by:

.. math::
    \begin{align*}
    T_1: & F_{x}^{-1} F_{\eta_x|\eta_y} \\
    T_2: & F_{y}^{-1} F_{\eta_y|\eta_x},
    \end{align*}

where :math:`F_{\eta_x|\eta_y}` and :math:`F_{\eta_y|\eta_x}` are the conditional cumulative distribution functions, e.g. :math:`F_{\eta_x|\eta_y}(\eta_x', \eta_y') = P_{\eta_x, \eta_y}(\eta_x \leq \eta_x' | \eta_y = \eta_y')`,
and :math:`F_{x}` and :math:`F_{y}` are the cumulative distribution functions of :math:`x` and :math:`y`. Note that, as :math:`x` and :math:`y` are uniformly distributed, :math:`F_{x}` and :math:`F_{y}` are the identity functions. The map :math:`T` thus simplifies to

.. math::
    \begin{align*}
    T_1: & F_{\eta_x|\eta_y} \\
    T_2: & F_{\eta_y|\eta_x}.
    \end{align*}

Note that for the implementation :math:`P_{\eta}` is not only a distribution of :math:`\eta_x`, :math:`\eta_y` but also of the estimated photon energy :math:`e`.
The energy level correlates slightly with the z-depth. Higher z-depth leads to more charge sharing and a different :math:`\eta` distribution. Thus we create a mapping :math:`T` for each energy level.
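
The following is a minimal numpy sketch of this construction (illustrative only, not the aare API; the sample :math:`\eta` values, the binning and all names are placeholders). It histograms :math:`(\eta_x, \eta_y)` and forms the two conditional CDFs that play the role of :math:`T_1` and :math:`T_2` under the flat-field assumption:

.. code-block:: python

    # Illustrative sketch: build the transport map T from a 2D histogram
    # of (eta_x, eta_y). All inputs are placeholder data.
    import numpy as np

    rng = np.random.default_rng(0)
    eta_x = rng.uniform(0, 1, 100_000)   # placeholder eta samples
    eta_y = rng.uniform(0, 1, 100_000)

    counts, x_edges, y_edges = np.histogram2d(eta_x, eta_y, bins=64)

    # F_{eta_x | eta_y}: cumulative sum along eta_x, normalized per eta_y bin
    cdf_x_given_y = np.cumsum(counts, axis=0)
    cdf_x_given_y /= np.maximum(cdf_x_given_y[-1, :], 1)

    # F_{eta_y | eta_x}: cumulative sum along eta_y, normalized per eta_x bin
    cdf_y_given_x = np.cumsum(counts, axis=1)
    cdf_y_given_x /= np.maximum(cdf_y_given_x[:, -1:], 1)

    # For a cluster with eta values (0.3, 0.7), the interpolated sub-pixel
    # position is T(eta_x, eta_y) = (F_{x|y}(0.3 | 0.7), F_{y|x}(0.7 | 0.3)).
    ix = np.searchsorted(x_edges, 0.3) - 1
    iy = np.searchsorted(y_edges, 0.7) - 1
    print(cdf_x_given_y[ix, iy], cdf_y_given_x[ix, iy])

In aare the joint distribution is additionally binned in the estimated photon energy :math:`e`, as described above, so one such map is built per energy bin.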

:math:`\eta`-Functions:
---------------------------

.. doxygenstruct:: aare::Eta2
    :members:
    :undoc-members:
    :private-members:

.. note::
    The corner value ``c`` is only relevant when one uses ``calculate_eta_2`` or ``calculate_full_eta2``. Otherwise its default value is ``cTopLeft``.

Supported are the following :math:`\eta`-functions:

:math:`\eta`-Function on 2x2 Clusters:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. image:: ../figures/Eta2x2.png
    :target: ../figures/Eta2x2.png
    :width: 650px
    :align: center
    :alt: Eta2x2

.. math::
    \begin{equation*}
    {\color{blue}{\eta_x}} = \frac{Q_{1,1}}{Q_{1,0} + Q_{1,1}} \quad \quad
    {\color{green}{\eta_y}} = \frac{Q_{1,1}}{Q_{0,1} + Q_{1,1}}
    \end{equation*}

The :math:`\eta` values can range between 0 and 1. Note they only range between 0 and 1 because the position of the center pixel (red) can change.
If the center pixel is in the bottom left, :math:`\eta_x` will be close to zero. If the center pixel is in the bottom right, :math:`\eta_y` will be close to 1.

One can apply this :math:`\eta` not only to 2x2 clusters but to clusters of any size. Then the 2x2 subcluster with maximum energy is chosen and the :math:`\eta` function applied to the subcluster.

.. doxygenfunction:: aare::calculate_eta2(const ClusterVector<ClusterType>&)

.. doxygenfunction:: aare::calculate_eta2(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>&)
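
A quick numerical illustration of the 2x2 formula above (made-up charge values, purely illustrative; this is not the aare API):

.. code-block:: python

    # Charges of a 2x2 cluster, following the Q_{i,j} labels in the figure above.
    q01 = 7.0              # hypothetical values
    q10, q11 = 2.0, 8.0

    eta_x = q11 / (q10 + q11)   # 8 / (2 + 8) = 0.8
    eta_y = q11 / (q01 + q11)   # 8 / (7 + 8) ~ 0.53
    print(eta_x, eta_y)         # both lie between 0 and 1, as stated above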
|
||||||
|
|
||||||
|
Full :math:`\eta`-Function on 2x2 Clusters:
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
.. image:: ../figures/Eta2x2Full.png
|
||||||
|
:target: ../figures/Eta2x2Full.png
|
||||||
|
:width: 650px
|
||||||
|
:align: center
|
||||||
|
:alt: Eta2x2 Full
|
||||||
|
|
||||||
|
.. math::
|
||||||
|
\begin{equation*}
|
||||||
|
{\color{blue}{\eta_x}} = \frac{Q_{0,1} + Q_{1,1}}{\sum_i^{1}\sum_j^{1}Q_{i,j}} \quad \quad
|
||||||
|
{\textcolor{green}{\eta_y}} = \frac{Q_{1,0} + Q_{1,1}}{\sum_i^{1}\sum_j^{1}Q_{i,j}}
|
||||||
|
\end{equation*}
|
||||||
|
|
||||||
|
The :math:`\eta` values can range between 0,1. Note they only range between 0,1 because the position of the center pixel (red) can change.
|
||||||
|
If the center pixel is the bottom-left pixel, :math:`\eta_x` will be close to zero; if it is the bottom-right pixel, :math:`\eta_y` will be close to 1.
|
||||||
|
|
||||||
|
.. doxygenfunction:: aare::calculate_full_eta2(const ClusterVector<ClusterType>&)
|
||||||
|
|
||||||
|
.. doxygenfunction:: aare::calculate_full_eta2(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>&)
|
||||||
|
|
||||||
|
Full :math:`\eta`-Function on 3x3 Clusters:
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
.. image:: ../figures/Eta3x3.png
|
||||||
|
:target: ../figures/Eta3x3.png
|
||||||
|
:width: 650px
|
||||||
|
:align: center
|
||||||
|
:alt: Eta3x3
|
||||||
|
|
||||||
|
.. math::
|
||||||
|
\begin{equation*}
|
||||||
|
{\color{blue}{\eta_x}} = \frac{\sum_{i=0}^{2} Q_{i,2} - \sum_{i=0}^{2} Q_{i,0}}{\sum_{i=0}^{2}\sum_{j=0}^{2} Q_{i,j}} \quad \quad
|
||||||
|
{\color{green}{\eta_y}} = \frac{\sum_{j=0}^{2} Q_{2,j} - \sum_{j=0}^{2} Q_{0,j}}{\sum_{i=0}^{2}\sum_{j=0}^{2} Q_{i,j}}
|
||||||
|
\end{equation*}
|
||||||
|
|
||||||
|
The :math:`\eta` values range between -0.5 and 0.5.
|
||||||
|
|
||||||
|
.. doxygenfunction:: aare::calculate_eta3(const ClusterVector<ClusterType>&)
|
||||||
|
|
||||||
|
.. doxygenfunction:: aare::calculate_eta3(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>&)
|
||||||
|
|
||||||
|
Cross :math:`\eta`-Function on 3x3 Clusters:
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
.. image:: ../figures/Eta3x3Cross.png
|
||||||
|
:target: ../figures/Eta3x3Cross.png
|
||||||
|
:width: 650px
|
||||||
|
:align: center
|
||||||
|
:alt: Cross Eta3x3
|
||||||
|
|
||||||
|
.. math::
|
||||||
|
\begin{equation*}
|
||||||
|
{\color{blue}{\eta_x}} = \frac{Q_{1,2} - Q_{1,0}}{Q_{1,0} + Q_{1,1} + Q_{1,2}} \quad \quad
|
||||||
|
{\color{green}{\eta_y}} = \frac{Q_{2,1} - Q_{0,1}}{Q_{0,1} + Q_{1,1} + Q_{2,1}}
|
||||||
|
\end{equation*}
|
||||||
|
|
||||||
|
The :math:`\eta` values range between -0.5 and 0.5.
|
||||||
|
|
||||||
|
.. doxygenfunction:: aare::calculate_cross_eta3(const ClusterVector<ClusterType>&)
|
||||||
|
|
||||||
|
.. doxygenfunction:: aare::calculate_cross_eta3(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>&)
|
||||||
|
|
||||||
|
Interpolation class:
|
||||||
|
---------------------
|
||||||
|
|
||||||
|
.. warning::
|
||||||
|
The interpolation might lead to erroneous photon positions for clusters at the borders of a frame. Make sure to filter out such cases.
|
||||||
|
|
||||||
|
.. Warning::
|
||||||
|
Make sure to use the same :math:`\eta`-function during interpolation as given by the joint :math:`\eta`-distribution passed to the constructor.
|
||||||
|
|
||||||
|
.. Note::
|
||||||
|
Make sure to use reasonable energy bins when constructing the joint distribution. If the data is too sparse for a given energy, the interpolation will lead to erroneous results.
|
||||||
|
|
||||||
|
.. doxygenclass:: aare::Interpolator
|
||||||
|
:members:
|
||||||
|
:undoc-members:
|
||||||
|
:private-members:
|
||||||
|
|
||||||
|
|
||||||
25
docs/src/JungfrauDataFile.rst
Normal file
25
docs/src/JungfrauDataFile.rst
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
JungfrauDataFile
|
||||||
|
==================
|
||||||
|
|
||||||
|
JungfrauDataFile is a class to read the .dat files that are produced by Aldo's receiver.
|
||||||
|
It is mostly used for calibration.
|
||||||
|
|
||||||
|
The structure of the file is:
|
||||||
|
|
||||||
|
* JungfrauDataHeader
|
||||||
|
* Binary data (256x256, 256x1024 or 512x1024)
|
||||||
|
* JungfrauDataHeader
|
||||||
|
* ...
|
||||||
|
|
||||||
|
There is no metadata indicating the number of frames or the size of the image, but this
|
||||||
|
will be inferred by the reader.
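As an illustration, the frame count can be derived from the file size alone. Below is a hedged Python sketch; the header size and the 16-bit pixel depth used here are assumptions for the example, not a description of the actual on-disk layout.

.. code-block:: python

    import os

    HEADER_BYTES = 48        # assumed size of JungfrauDataHeader
    ROWS, COLS = 512, 1024   # one of the supported image sizes
    FRAME_BYTES = HEADER_BYTES + ROWS * COLS * 2  # header + 16-bit pixels

    def infer_number_of_frames(path: str) -> int:
        """Each frame is header + image, so the count follows from the file size."""
        return os.path.getsize(path) // FRAME_BYTES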
|
||||||
|
|
||||||
|
.. doxygenstruct:: aare::JungfrauDataHeader
|
||||||
|
:members:
|
||||||
|
:undoc-members:
|
||||||
|
:private-members:
|
||||||
|
|
||||||
|
.. doxygenclass:: aare::JungfrauDataFile
|
||||||
|
:members:
|
||||||
|
:undoc-members:
|
||||||
|
:private-members:
|
||||||
7
docs/src/NDArray.rst
Normal file
7
docs/src/NDArray.rst
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
NDArray
|
||||||
|
=============
|
||||||
|
|
||||||
|
|
||||||
|
.. doxygenclass:: aare::NDArray
|
||||||
|
:members:
|
||||||
|
:undoc-members:
|
||||||
7
docs/src/NDView.rst
Normal file
7
docs/src/NDView.rst
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
NDView
|
||||||
|
=============
|
||||||
|
|
||||||
|
|
||||||
|
.. doxygenclass:: aare::NDView
|
||||||
|
:members:
|
||||||
|
:undoc-members:
|
||||||
8
docs/src/Pedestal.rst
Normal file
8
docs/src/Pedestal.rst
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
Pedestal
|
||||||
|
=============
|
||||||
|
|
||||||
|
|
||||||
|
.. doxygenclass:: aare::Pedestal
|
||||||
|
:members:
|
||||||
|
:undoc-members:
|
||||||
|
:private-members:
|
||||||
47
docs/src/Philosophy.rst
Normal file
47
docs/src/Philosophy.rst
Normal file
@@ -0,0 +1,47 @@
|
|||||||
|
****************
|
||||||
|
Philosophy
|
||||||
|
****************
|
||||||
|
|
||||||
|
|
||||||
|
Fast code with a simple interface
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
Aare should be fast and efficient, but also easy to use. We strive to keep a simple interface that feels intuitive.
|
||||||
|
Internally we use C++ for performance and the ability to integrate the library in other programs, but we see most
|
||||||
|
users using the Python interface.
|
||||||
|
|
||||||
|
Live at head
|
||||||
|
~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
As a user of the library you should be able to, and are expected to, use the latest version. Bug fixes will rarely be backported
|
||||||
|
to older releases. By upgrading frequently you will benefit from the latest features and minimize the effort to maintain your scripts/code
|
||||||
|
by doing several small upgrades instead of one big upgrade.
|
||||||
|
|
||||||
|
API
|
||||||
|
~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
We aim to keep the API stable and only break it for good reasons. But especially now, in the early stages of development,
|
||||||
|
the API will change. On those occasions it will be clearly stated in the release notes. However, the norm should be a
|
||||||
|
backward compatible API.
|
||||||
|
|
||||||
|
Documentation
|
||||||
|
~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
Since aare is a library, it is important to have a well documented API. We use Doxygen to generate the C++ documentation
|
||||||
|
and Sphinx for the Python part. Breathe is used to integrate the two into one Sphinx html site. The documentation is built
|
||||||
|
automatically on release by the CI and published to GitHub pages. In addition to the generated API documentation,
|
||||||
|
certain classes might need more descriptions of the usage. This is then placed in the .rst files in the docs/src directory.
|
||||||
|
|
||||||
|
.. attention::
|
||||||
|
|
||||||
|
The code should be well documented, but using descriptive names is more important. In the same spirit
|
||||||
|
if a function is called `getNumberOfFrames()` you don't need to write a comment saying that it gets the
|
||||||
|
number of frames.
|
||||||
|
|
||||||
|
|
||||||
|
Dependencies
|
||||||
|
~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
Deployment in the scientific community is often tricky, either due to old OS versions or the lack of package managers.
|
||||||
|
We strive to keep the dependencies to a minimum and will vendor some libraries to simplify deployment even though it comes
|
||||||
|
at a cost of build time.
|
||||||
8
docs/src/RawFile.rst
Normal file
8
docs/src/RawFile.rst
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
RawFile
|
||||||
|
===============
|
||||||
|
|
||||||
|
|
||||||
|
.. doxygenclass:: aare::RawFile
|
||||||
|
:members:
|
||||||
|
:undoc-members:
|
||||||
|
:private-members:
|
||||||
14
docs/src/RawMasterFile.rst
Normal file
14
docs/src/RawMasterFile.rst
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
RawMasterFile
|
||||||
|
===============
|
||||||
|
|
||||||
|
|
||||||
|
.. doxygenclass:: aare::RawMasterFile
|
||||||
|
:members:
|
||||||
|
:undoc-members:
|
||||||
|
:private-members:
|
||||||
|
|
||||||
|
|
||||||
|
.. doxygenclass:: aare::RawFileNameComponents
|
||||||
|
:members:
|
||||||
|
:undoc-members:
|
||||||
|
:private-members:
|
||||||
8
docs/src/RawSubFile.rst
Normal file
8
docs/src/RawSubFile.rst
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
RawSubFile
|
||||||
|
===============
|
||||||
|
|
||||||
|
|
||||||
|
.. doxygenclass:: aare::RawSubFile
|
||||||
|
:members:
|
||||||
|
:undoc-members:
|
||||||
|
:private-members:
|
||||||
26
docs/src/Requirements.rst
Normal file
26
docs/src/Requirements.rst
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
Requirements
|
||||||
|
==============================================
|
||||||
|
|
||||||
|
- C++17 compiler (gcc 8/clang 7)
|
||||||
|
- CMake 3.15+
|
||||||
|
|
||||||
|
**Internally used libraries**
|
||||||
|
|
||||||
|
.. note ::
|
||||||
|
|
||||||
|
To save compile time some of the dependencies can also be picked up from the system/conda environment by specifying:
|
||||||
|
``-DAARE_SYSTEM_LIBRARIES=ON`` during the CMake configuration.
|
||||||
|
|
||||||
|
To simplify deployment we build and statically link a few libraries.
|
||||||
|
|
||||||
|
- fmt
|
||||||
|
- lmfit - https://jugit.fz-juelich.de/mlz/lmfit
|
||||||
|
- nlohmann_json
|
||||||
|
- pybind11
|
||||||
|
- ZeroMQ
|
||||||
|
|
||||||
|
**Extra dependencies for building documentation**
|
||||||
|
|
||||||
|
- Sphinx
|
||||||
|
- Breathe
|
||||||
|
- Doxygen
|
||||||
47
docs/src/Tests.rst
Normal file
47
docs/src/Tests.rst
Normal file
@@ -0,0 +1,47 @@
|
|||||||
|
****************
|
||||||
|
Tests
|
||||||
|
****************
|
||||||
|
|
||||||
|
We test the code from both C++ and Python. By default, only tests that do not require additional data are run.
|
||||||
|
|
||||||
|
C++
|
||||||
|
~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
.. code-block:: bash
|
||||||
|
|
||||||
|
mkdir build
|
||||||
|
cd build
|
||||||
|
cmake .. -DAARE_TESTS=ON
|
||||||
|
make -j 4
|
||||||
|
|
||||||
|
export AARE_TEST_DATA=/path/to/test/data
|
||||||
|
./run_test [.with-data] #or using ctest, [.with-data] is the option to include tests needing data
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
Python
|
||||||
|
~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
.. code-block:: bash
|
||||||
|
|
||||||
|
#From the root dir of the library
|
||||||
|
python -m pytest python/tests --with-data # passing --with-data will run the tests needing data
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
Getting the test data
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
.. attention ::
|
||||||
|
|
||||||
|
The tests needing the test data are not run by default. To make the data available, you need to set the environment variable
|
||||||
|
AARE_TEST_DATA to the path of the test data directory. Then pass [.with-data] for the C++ tests or --with-data for Python.
|
||||||
|
|
||||||
|
The image files needed for the test are large and are not included in the repository. They are stored
|
||||||
|
using GIT LFS in a separate repository. To get the test data, you need to clone the repository.
|
||||||
|
To do this, you need to have GIT LFS installed. You can find instructions on how to install it here: https://git-lfs.github.com/
|
||||||
|
Once you have GIT LFS installed, you can clone the repository like any normal repo using:
|
||||||
|
|
||||||
|
.. code-block:: bash
|
||||||
|
|
||||||
|
git clone https://gitea.psi.ch/detectors/aare-test-data.git
|
||||||
7
docs/src/VarClusterFinder.rst
Normal file
7
docs/src/VarClusterFinder.rst
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
VarClusterFinder
|
||||||
|
====================
|
||||||
|
|
||||||
|
|
||||||
|
.. doxygenclass:: aare::VarClusterFinder
|
||||||
|
:members:
|
||||||
|
:undoc-members:
|
||||||
86
docs/src/Workflow.rst
Normal file
86
docs/src/Workflow.rst
Normal file
@@ -0,0 +1,86 @@
|
|||||||
|
****************
|
||||||
|
Workflow
|
||||||
|
****************
|
||||||
|
|
||||||
|
This page describes how we develop aare.
|
||||||
|
|
||||||
|
GitHub centric
|
||||||
|
~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
We use GitHub for all development. Issues and pull requests provide a platform for collaboration as well
|
||||||
|
as a record of the development process. Even if we discuss things in person, we record the outcome in an issue.
|
||||||
|
If a particular implementation is chosen over another, the reason should be recorded in the pull request.
|
||||||
|
|
||||||
|
|
||||||
|
Branches
|
||||||
|
~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
We aim for as lightweight a branching strategy as possible. Short-lived feature branches are merged back into main.
|
||||||
|
The main branch is expected to always be in a releasable state. A release is simply a tag on main which provides a
|
||||||
|
reference and triggers the CI to build the release artifacts (conda, pypi etc.). For large features consider merging
|
||||||
|
smaller chunks into main as they are completed, rather than waiting for the entire feature to be finished. In the worst case,
|
||||||
|
make sure your feature branch merges with main regularly to avoid large merge conflicts later on.
|
||||||
|
|
||||||
|
.. note::
|
||||||
|
|
||||||
|
The main branch is expected to always work. Feel free to pull from main instead of sticking to a
|
||||||
|
release.
|
||||||
|
|
||||||
|
|
||||||
|
Releases
|
||||||
|
~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
Release early, release often. As soon as "enough" new features have been implemented, a release is created.
|
||||||
|
A release should not be a big thing, rather a routine part of development that does not require any special person or
|
||||||
|
unfamiliar steps.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
Checklists for deployment
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
**Feature:**
|
||||||
|
|
||||||
|
#. Create a new issue for the feature (label feature)
|
||||||
|
#. Create a new branch from main.
|
||||||
|
#. Implement the feature including test and documentation
|
||||||
|
#. Add the feature to RELEASE.md under head
|
||||||
|
#. Create a pull request linked to the issue
|
||||||
|
#. Code is reviewed by at least one other person
|
||||||
|
#. Once approved, the branch is merged into main
|
||||||
|
|
||||||
|
|
||||||
|
**BugFix:**
|
||||||
|
|
||||||
|
Essentially the same as for a feature, if possible start with
|
||||||
|
a failing test that demonstrates the bug.
|
||||||
|
|
||||||
|
#. Create a new issue for the bug (label bug)
|
||||||
|
#. Create a new branch from main.
|
||||||
|
#. **Write a test that fails for the bug**
|
||||||
|
#. Implement the fix
|
||||||
|
#. **Run the test to ensure it passes**
|
||||||
|
#. Add the bugfix to RELEASE.md under head
|
||||||
|
#. Create a pull request linked to the issue.
|
||||||
|
#. Code is reviewed by at least one other person
|
||||||
|
#. Once approved, the branch is merged into main
|
||||||
|
|
||||||
|
**Release:**
|
||||||
|
|
||||||
|
#. Once "enough" new features have been implemented, a release is created
|
||||||
|
#. Update RELEASE.md with the tag of the release and verify that it is complete
|
||||||
|
#. Create the release in GitHub describing the new features and bug fixes
|
||||||
|
#. CI makes magic
|
||||||
|
|
||||||
|
|
||||||
|
**Update documentation only:**
|
||||||
|
|
||||||
|
.. attention::
|
||||||
|
|
||||||
|
It's possible to update the documentation without changing the code, but take
|
||||||
|
care since the docs will reflect the code in main and not the latest release.
|
||||||
|
|
||||||
|
#. Create a PR to main with the documentation changes
|
||||||
|
#. Create a pull request linked to the issue.
|
||||||
|
#. Code is reviewed by at least one other person
|
||||||
|
#. Once merged you can manually trigger the CI workflow for documentation
|
||||||
23
docs/src/_install.rst
Normal file
23
docs/src/_install.rst
Normal file
@@ -0,0 +1,23 @@
|
|||||||
|
.. code-block:: bash
|
||||||
|
|
||||||
|
#build and install aare
|
||||||
|
git clone git@github.com:slsdetectorgroup/aare.git --branch=developer #or using http...
|
||||||
|
mkdir build
|
||||||
|
cd build
|
||||||
|
|
||||||
|
#configure using cmake
|
||||||
|
cmake ../aare -DCMAKE_INSTALL_PREFIX=/where/to/put/aare
|
||||||
|
|
||||||
|
#build (replace 4 with the number of threads you want to use)
|
||||||
|
make -j4
|
||||||
|
|
||||||
|
#install
|
||||||
|
make install
|
||||||
|
|
||||||
|
#Go to your project
|
||||||
|
cd /your/project/source
|
||||||
|
|
||||||
|
#Now configure your project
|
||||||
|
mkdir build
|
||||||
|
cd build
|
||||||
|
cmake .. -DCMAKE_PREFIX_PATH=SOME_PATH
|
||||||
5
docs/src/algorithm.rst
Normal file
5
docs/src/algorithm.rst
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
algorithm
|
||||||
|
=============
|
||||||
|
|
||||||
|
.. doxygenfile:: algorithm.hpp
|
||||||
|
|
||||||
66
docs/src/index.rst
Normal file
66
docs/src/index.rst
Normal file
@@ -0,0 +1,66 @@
|
|||||||
|
AARE
|
||||||
|
==============================================
|
||||||
|
|
||||||
|
.. note ::
|
||||||
|
|
||||||
|
**Examples:**
|
||||||
|
|
||||||
|
- `jupyter notebooks <https://github.com/slsdetectorgroup/aare-notebooks>`_
|
||||||
|
- `cmake+install <https://github.com/slsdetectorgroup/aare-examples>`_
|
||||||
|
- `git submodule <https://github.com/slsdetectorgroup/aare-submodule>`_
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
.. toctree::
|
||||||
|
:caption: Installation
|
||||||
|
:maxdepth: 3
|
||||||
|
|
||||||
|
Installation
|
||||||
|
Requirements
|
||||||
|
Consume
|
||||||
|
|
||||||
|
.. toctree::
|
||||||
|
:caption: Python API
|
||||||
|
:maxdepth: 3
|
||||||
|
:hidden:
|
||||||
|
|
||||||
|
pycalibration
|
||||||
|
python/cluster/index
|
||||||
|
python/file/index
|
||||||
|
pyFit
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
.. toctree::
|
||||||
|
:caption: C++ API
|
||||||
|
:maxdepth: 1
|
||||||
|
|
||||||
|
algorithm
|
||||||
|
NDArray
|
||||||
|
NDView
|
||||||
|
Frame
|
||||||
|
File
|
||||||
|
Dtype
|
||||||
|
Cluster
|
||||||
|
ClusterFinder
|
||||||
|
ClusterFinderMT
|
||||||
|
ClusterFile
|
||||||
|
ClusterVector
|
||||||
|
Interpolation
|
||||||
|
JungfrauDataFile
|
||||||
|
Pedestal
|
||||||
|
RawFile
|
||||||
|
RawSubFile
|
||||||
|
RawMasterFile
|
||||||
|
VarClusterFinder
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
.. toctree::
|
||||||
|
:caption: Developer
|
||||||
|
:maxdepth: 3
|
||||||
|
|
||||||
|
Philosophy
|
||||||
|
Workflow
|
||||||
|
Tests
|
||||||
19
docs/src/pyFit.rst
Normal file
19
docs/src/pyFit.rst
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
|
||||||
|
Fit
|
||||||
|
========
|
||||||
|
|
||||||
|
.. py:currentmodule:: aare
|
||||||
|
|
||||||
|
|
||||||
|
**Functions**
|
||||||
|
|
||||||
|
.. autofunction:: gaus
|
||||||
|
|
||||||
|
.. autofunction:: pol1
|
||||||
|
|
||||||
|
|
||||||
|
**Fitting**
|
||||||
|
|
||||||
|
.. autofunction:: fit_gaus
|
||||||
|
|
||||||
|
.. autofunction:: fit_pol1
|
||||||
40
docs/src/pycalibration.rst
Normal file
40
docs/src/pycalibration.rst
Normal file
@@ -0,0 +1,40 @@
|
|||||||
|
|
||||||
|
Calibration
|
||||||
|
==============
|
||||||
|
|
||||||
|
Functions for applying calibration to data.
|
||||||
|
|
||||||
|
.. code-block:: python
|
||||||
|
|
||||||
|
import aare
|
||||||
|
|
||||||
|
# Load calibration data for a single JF module (512x1024 pixels)
|
||||||
|
calibration = aare.load_calibration('path/to/calibration/file.bin')
|
||||||
|
|
||||||
|
raw_data = ... # Load your raw data here
|
||||||
|
pedestal = ... # Load your pedestal data here
|
||||||
|
|
||||||
|
# Apply calibration to raw data to convert from raw ADC values to keV
|
||||||
|
data = aare.apply_calibration(raw_data, pd=pedestal, cal=calibration)
|
||||||
|
|
||||||
|
# If you pass a 2D pedestal and calibration only G0 will be used for the conversion
|
||||||
|
# Pixels that switched to G1 or G2 will be set to 0
|
||||||
|
data = aare.apply_calibration(raw_data, pd=pedestal[0], cal=calibration[0])
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
.. py:currentmodule:: aare
|
||||||
|
|
||||||
|
.. autofunction:: apply_calibration
|
||||||
|
|
||||||
|
.. autofunction:: load_calibration
|
||||||
|
|
||||||
|
.. autofunction:: calculate_pedestal
|
||||||
|
|
||||||
|
.. autofunction:: calculate_pedestal_float
|
||||||
|
|
||||||
|
.. autofunction:: calculate_pedestal_g0
|
||||||
|
|
||||||
|
.. autofunction:: calculate_pedestal_g0_float
|
||||||
|
|
||||||
|
.. autofunction:: count_switching_pixels
|
||||||
11
docs/src/python/cluster/index.rst
Normal file
11
docs/src/python/cluster/index.rst
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
Cluster & Interpolation
|
||||||
|
==========================
|
||||||
|
|
||||||
|
.. toctree::
|
||||||
|
:caption: Cluster & Interpolation
|
||||||
|
:maxdepth: 1
|
||||||
|
|
||||||
|
pyCluster
|
||||||
|
pyClusterVector
|
||||||
|
pyInterpolation
|
||||||
|
pyVarClusterFinder
|
||||||
23
docs/src/python/cluster/pyCluster.rst
Normal file
23
docs/src/python/cluster/pyCluster.rst
Normal file
@@ -0,0 +1,23 @@
|
|||||||
|
Cluster
|
||||||
|
========
|
||||||
|
|
||||||
|
.. py:currentmodule:: aare
|
||||||
|
|
||||||
|
.. autoclass:: Cluster
|
||||||
|
:members:
|
||||||
|
:undoc-members:
|
||||||
|
:inherited-members:
|
||||||
|
|
||||||
|
|
||||||
|
Below is the API of a cluster of size :math:`3\times 3` and type ``int`` but all variants share the same API.
|
||||||
|
|
||||||
|
.. autoclass:: aare._aare.Cluster3x3i
|
||||||
|
:special-members: __init__
|
||||||
|
:members:
|
||||||
|
:undoc-members:
|
||||||
|
:show-inheritance:
|
||||||
|
:inherited-members:
|
||||||
|
|
||||||
|
.. note::
|
||||||
|
More functions can be found in the :ref:`ClusterVector <py_clustervector>` documentation. Generally, apply functions directly to the ``ClusterVector`` instead of looping over individual clusters, as sketched below.
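A minimal sketch of the preferred pattern, reusing the :class:`ClusterFile` reading example from the file I/O documentation:

.. code-block:: python

    import aare

    with aare.ClusterFile("path/to/file") as f:
        clusters = f.read_frame()   # a ClusterVector

    # Apply the eta function to the whole vector at once; the loop over
    # the individual clusters then runs on the C++ side.
    etas = aare.calculate_eta2(clusters)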
|
||||||
|
|
||||||
58
docs/src/python/cluster/pyClusterVector.rst
Normal file
58
docs/src/python/cluster/pyClusterVector.rst
Normal file
@@ -0,0 +1,58 @@
|
|||||||
|
.. _py_clustervector:
|
||||||
|
|
||||||
|
ClusterVector
|
||||||
|
================
|
||||||
|
|
||||||
|
The ClusterVector holds clusters from the ClusterFinder. Since it is templated
|
||||||
|
in C++ we use a suffix indicating the type of cluster it holds. The suffix follows
|
||||||
|
the same pattern as for ClusterFile i.e. ``ClusterVector_Cluster3x3i``
|
||||||
|
for a vector holding 3x3 integer clusters.
|
||||||
|
|
||||||
|
|
||||||
|
At the moment the functionality from Python is limited and it is not supported
|
||||||
|
to push_back clusters to the vector. The intended use case is to pass it to
|
||||||
|
C++ functions that support the ClusterVector or to view it as a numpy array.
|
||||||
|
|
||||||
|
**View ClusterVector as numpy array**
|
||||||
|
|
||||||
|
.. code:: python
|
||||||
|
|
||||||
|
from aare import ClusterFile
|
||||||
|
with ClusterFile("path/to/file") as f:
|
||||||
|
cluster_vector = f.read_frame()
|
||||||
|
|
||||||
|
# Create a copy of the cluster data in a numpy array
|
||||||
|
clusters = np.array(cluster_vector)
|
||||||
|
|
||||||
|
# Avoid copying the data by passing copy=False
|
||||||
|
clusters = np.array(cluster_vector, copy = False)
|
||||||
|
|
||||||
|
|
||||||
|
.. py:currentmodule:: aare
|
||||||
|
|
||||||
|
.. autoclass:: ClusterVector
|
||||||
|
:members:
|
||||||
|
:undoc-members:
|
||||||
|
:inherited-members:
|
||||||
|
|
||||||
|
Below is the API of the ClusterVector_Cluster3x3i but all variants share the same API.
|
||||||
|
|
||||||
|
.. autoclass:: aare._aare.ClusterVector_Cluster3x3i
|
||||||
|
:special-members: __init__
|
||||||
|
:members:
|
||||||
|
:undoc-members:
|
||||||
|
:show-inheritance:
|
||||||
|
:inherited-members:
|
||||||
|
|
||||||
|
|
||||||
|
**Free Functions:**
|
||||||
|
|
||||||
|
.. autofunction:: reduce_to_3x3
|
||||||
|
:noindex:
|
||||||
|
|
||||||
|
Reduce a single Cluster to 3x3 by taking the 3x3 subcluster with highest photon energy.
|
||||||
|
|
||||||
|
.. autofunction:: reduce_to_2x2
|
||||||
|
:noindex:
|
||||||
|
|
||||||
|
Reduce a single Cluster to 2x2 by taking the 2x2 subcluster with highest photon energy.
|
||||||
124
docs/src/python/cluster/pyInterpolation.rst
Normal file
124
docs/src/python/cluster/pyInterpolation.rst
Normal file
@@ -0,0 +1,124 @@
|
|||||||
|
Interpolation
|
||||||
|
==============
|
||||||
|
|
||||||
|
The Interpolation class implements the :math:`\eta`-interpolation method.
|
||||||
|
This interpolation technique is based on charge sharing: for detected photon hits (i.e. clusters), it refines the estimated photon position using information from neighboring pixels.
|
||||||
|
|
||||||
|
See :ref:`Interpolation_C++API` for a more elaborate documentation and explanation of the method.
|
||||||
|
|
||||||
|
:math:`\eta`-Functions:
|
||||||
|
--------------------------
|
||||||
|
|
||||||
|
Below is the API of the ``Etad`` class (of type ``double``); ``Etaf`` (``float``) and ``Etai`` (``int``) are also supported and share the same API.
|
||||||
|
|
||||||
|
.. autoclass:: aare._aare.Etad
|
||||||
|
:members:
|
||||||
|
:private-members:
|
||||||
|
|
||||||
|
.. note::
|
||||||
|
The corner value ``c`` is only relevant when one uses ``calculate_eta_2`` or ``calculate_full_eta2``. Otherwise its default value is ``cTopLeft``.
|
||||||
|
|
||||||
|
Supported are the following :math:`\eta`-functions:
|
||||||
|
|
||||||
|
:math:`\eta`-Function on 2x2 Clusters:
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
.. py:currentmodule:: aare
|
||||||
|
|
||||||
|
.. image:: ../../../figures/Eta2x2.png
|
||||||
|
:target: ../../../figures/Eta2x2.png
|
||||||
|
:width: 650px
|
||||||
|
:align: center
|
||||||
|
:alt: Eta2x2
|
||||||
|
|
||||||
|
.. math::
|
||||||
|
\begin{equation*}
|
||||||
|
{\color{blue}{\eta_x}} = \frac{Q_{1,1}}{Q_{1,0} + Q_{1,1}} \quad \quad
|
||||||
|
{\color{green}{\eta_y}} = \frac{Q_{1,1}}{Q_{0,1} + Q_{1,1}}
|
||||||
|
\end{equation*}
|
||||||
|
|
||||||
|
The :math:`\eta` values range between 0 and 1. Note that they cover this full range only because the position of the center pixel (red) can change.
|
||||||
|
If the center pixel is the bottom-left pixel, :math:`\eta_x` will be close to zero; if it is the bottom-right pixel, :math:`\eta_y` will be close to 1.
|
||||||
|
|
||||||
|
.. autofunction:: calculate_eta2
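A hedged usage sketch; it assumes the objects returned for each cluster expose the same fields as the C++ ``Eta2`` struct (``x``, ``y``, ``c`` and ``sum``).

.. code-block:: python

    import aare

    with aare.ClusterFile("path/to/file") as f:
        clusters = f.read_frame()

    for eta in aare.calculate_eta2(clusters):
        # eta value in x and y plus the energy (sum) of the chosen 2x2 subcluster
        print(eta.x, eta.y, eta.sum)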
|
||||||
|
|
||||||
|
Full :math:`\eta`-Function on 2x2 Clusters:
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
.. image:: ../../../figures/Eta2x2Full.png
|
||||||
|
:target: ../../../figures/Eta2x2Full.png
|
||||||
|
:width: 650px
|
||||||
|
:align: center
|
||||||
|
:alt: Eta2x2 Full
|
||||||
|
|
||||||
|
.. math::
|
||||||
|
\begin{equation*}
|
||||||
|
{\color{blue}{\eta_x}} = \frac{Q_{0,1} + Q_{1,1}}{\sum_{i=0}^{1}\sum_{j=0}^{1}Q_{i,j}} \quad \quad
|
||||||
|
{\textcolor{green}{\eta_y}} = \frac{Q_{1,0} + Q_{1,1}}{\sum_{i=0}^{1}\sum_{j=0}^{1}Q_{i,j}}
|
||||||
|
\end{equation*}
|
||||||
|
|
||||||
|
The :math:`\eta` values range between 0 and 1. Note that they cover this full range only because the position of the center pixel (red) can change.
|
||||||
|
If the center pixel is the bottom-left pixel, :math:`\eta_x` will be close to zero; if it is the bottom-right pixel, :math:`\eta_y` will be close to 1.
|
||||||
|
|
||||||
|
.. autofunction:: calculate_full_eta2
|
||||||
|
|
||||||
|
Full :math:`\eta`-Function on 3x3 Clusters:
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
.. image:: ../../../figures/Eta3x3.png
|
||||||
|
:target: ../../../figures/Eta3x3.png
|
||||||
|
:width: 650px
|
||||||
|
:align: center
|
||||||
|
:alt: Eta3x3
|
||||||
|
|
||||||
|
.. math::
|
||||||
|
\begin{equation*}
|
||||||
|
{\color{blue}{\eta_x}} = \frac{\sum_{i=0}^{2} Q_{i,2} - \sum_{i=0}^{2} Q_{i,0}}{\sum_{i=0}^{2}\sum_{j=0}^{2} Q_{i,j}} \quad \quad
|
||||||
|
{\color{green}{\eta_y}} = \frac{\sum_{j=0}^{2} Q_{2,j} - \sum_{j=0}^{2} Q_{0,j}}{\sum_{i=0}^{2}\sum_{j=0}^{2} Q_{i,j}}
|
||||||
|
\end{equation*}
|
||||||
|
|
||||||
|
The :math:`\eta` values range between -0.5 and 0.5.
|
||||||
|
|
||||||
|
.. autofunction:: calculate_eta3
|
||||||
|
|
||||||
|
Cross :math:`\eta`-Function on 3x3 Clusters:
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
.. image:: ../../../figures/Eta3x3Cross.png
|
||||||
|
:target: ../../../figures/Eta3x3Cross.png
|
||||||
|
:width: 650px
|
||||||
|
:align: center
|
||||||
|
:alt: Cross Eta3x3
|
||||||
|
|
||||||
|
.. math::
|
||||||
|
\begin{equation*}
|
||||||
|
{\color{blue}{\eta_x}} = \frac{Q_{1,2} - Q_{1,0}}{Q_{1,0} + Q_{1,1} + Q_{1,2}} \quad \quad
|
||||||
|
{\color{green}{\eta_y}} = \frac{Q_{2,1} - Q_{0,1}}{Q_{0,1} + Q_{1,1} + Q_{2,1}}
|
||||||
|
\end{equation*}
|
||||||
|
|
||||||
|
The :math:`\eta` values range between -0.5 and 0.5.
|
||||||
|
|
||||||
|
.. autofunction:: calculate_cross_eta3
|
||||||
|
|
||||||
|
|
||||||
|
Interpolation class for :math:`\eta`-Interpolation
|
||||||
|
----------------------------------------------------
|
||||||
|
|
||||||
|
.. Warning::
|
||||||
|
Make sure to use the same :math:`\eta`-function during interpolation as given by the joint :math:`\eta`-distribution passed to the constructor.
|
||||||
|
|
||||||
|
.. Warning::
|
||||||
|
The interpolation might lead to erroneous photon positions for clusters at the borders of a frame. Make sure to filter out such cases.
|
||||||
|
|
||||||
|
.. Note::
|
||||||
|
Make sure to use reasonable energy bins when constructing the joint distribution. If the data is too sparse for a given energy, the interpolation will lead to erroneous results.
|
||||||
|
|
||||||
|
|
||||||
|
.. py:currentmodule:: aare
|
||||||
|
|
||||||
|
.. autoclass:: Interpolator
|
||||||
|
:special-members: __init__
|
||||||
|
:members:
|
||||||
|
:undoc-members:
|
||||||
|
:inherited-members:
|
||||||
|
|
||||||
10
docs/src/python/cluster/pyVarClusterFinder.rst
Normal file
10
docs/src/python/cluster/pyVarClusterFinder.rst
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
VarClusterFinder
|
||||||
|
===================
|
||||||
|
|
||||||
|
.. py:currentmodule:: aare
|
||||||
|
|
||||||
|
.. autoclass:: VarClusterFinder
|
||||||
|
:members:
|
||||||
|
:undoc-members:
|
||||||
|
:show-inheritance:
|
||||||
|
:inherited-members:
|
||||||
14
docs/src/python/file/index.rst
Normal file
14
docs/src/python/file/index.rst
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
File I/O
|
||||||
|
===================
|
||||||
|
|
||||||
|
.. toctree::
|
||||||
|
:caption: File I/O
|
||||||
|
:maxdepth: 1
|
||||||
|
|
||||||
|
pyClusterFile
|
||||||
|
pyCtbRawFile
|
||||||
|
pyFile
|
||||||
|
pyJungfrauDataFile
|
||||||
|
pyRawFile
|
||||||
|
pyRawMasterFile
|
||||||
|
pyTransform
|
||||||
26
docs/src/python/file/pyClusterFile.rst
Normal file
26
docs/src/python/file/pyClusterFile.rst
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
|
||||||
|
ClusterFile
|
||||||
|
============
|
||||||
|
|
||||||
|
|
||||||
|
The :class:`ClusterFile` class is the main interface to read and write clusters in aare. Unfortunately the
|
||||||
|
old file format does not include metadata like the cluster size and the data type. This means that the
|
||||||
|
user has to know this information from other sources. Specifying the wrong cluster size or data type
|
||||||
|
will lead to garbage data being read.
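For example, a file written with 3x3 integer clusters would be read through the correspondingly suffixed class. Treat the call below as a sketch rather than a reference; the suffix convention itself is described further down.

.. code-block:: python

    from aare._aare import ClusterFile_Cluster3x3i

    # The file format carries no metadata, so the chosen cluster type
    # (3x3, int here) must match what was written.
    with ClusterFile_Cluster3x3i("path/to/file") as f:
        clusters = f.read_frame()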
|
||||||
|
|
||||||
|
.. py:currentmodule:: aare
|
||||||
|
|
||||||
|
.. autoclass:: ClusterFile
|
||||||
|
:members:
|
||||||
|
:undoc-members:
|
||||||
|
:inherited-members:
|
||||||
|
|
||||||
|
|
||||||
|
Below is the API of the ClusterFile_Cluster3x3i but all variants share the same API.
|
||||||
|
|
||||||
|
.. autoclass:: aare._aare.ClusterFile_Cluster3x3i
|
||||||
|
:special-members: __init__
|
||||||
|
:members:
|
||||||
|
:undoc-members:
|
||||||
|
:show-inheritance:
|
||||||
|
:inherited-members:
|
||||||
25
docs/src/python/file/pyCtbRawFile.rst
Normal file
25
docs/src/python/file/pyCtbRawFile.rst
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
|
||||||
|
CtbRawFile
|
||||||
|
============
|
||||||
|
|
||||||
|
Read analog, digital and transceiver samples from a raw file containing
|
||||||
|
data from the Chip Test Board. Uses :mod:`aare.transform` to decode the
|
||||||
|
data into a format that the user can work with.
|
||||||
|
|
||||||
|
.. code:: python
|
||||||
|
|
||||||
|
import aare
|
||||||
|
from aare.transform import Mythen302Transform
|
||||||
|
my302 = Mythen302Transform(offset = 4)
|
||||||
|
|
||||||
|
with aare.CtbRawFile(fname, transform = my302) as f:
|
||||||
|
for header, data in f:
|
||||||
|
#do something with the data
|
||||||
|
|
||||||
|
.. py:currentmodule:: aare
|
||||||
|
|
||||||
|
.. autoclass:: CtbRawFile
|
||||||
|
:members:
|
||||||
|
:undoc-members:
|
||||||
|
:show-inheritance:
|
||||||
|
:inherited-members:
|
||||||
11
docs/src/python/file/pyFile.rst
Normal file
11
docs/src/python/file/pyFile.rst
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
|
||||||
|
File
|
||||||
|
========
|
||||||
|
|
||||||
|
.. py:currentmodule:: aare
|
||||||
|
|
||||||
|
.. autoclass:: File
|
||||||
|
:members:
|
||||||
|
:undoc-members:
|
||||||
|
:show-inheritance:
|
||||||
|
:inherited-members:
|
||||||
10
docs/src/python/file/pyJungfrauDataFile.rst
Normal file
10
docs/src/python/file/pyJungfrauDataFile.rst
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
JungfrauDataFile
|
||||||
|
===================
|
||||||
|
|
||||||
|
.. py:currentmodule:: aare
|
||||||
|
|
||||||
|
.. autoclass:: JungfrauDataFile
|
||||||
|
:members:
|
||||||
|
:undoc-members:
|
||||||
|
:show-inheritance:
|
||||||
|
:inherited-members:
|
||||||
10
docs/src/python/file/pyRawFile.rst
Normal file
10
docs/src/python/file/pyRawFile.rst
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
RawFile
|
||||||
|
===================
|
||||||
|
|
||||||
|
.. py:currentmodule:: aare
|
||||||
|
|
||||||
|
.. autoclass:: RawFile
|
||||||
|
:members:
|
||||||
|
:undoc-members:
|
||||||
|
:show-inheritance:
|
||||||
|
:inherited-members:
|
||||||
10
docs/src/python/file/pyRawMasterFile.rst
Normal file
10
docs/src/python/file/pyRawMasterFile.rst
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
RawMasterFile
|
||||||
|
===================
|
||||||
|
|
||||||
|
.. py:currentmodule:: aare
|
||||||
|
|
||||||
|
.. autoclass:: RawMasterFile
|
||||||
|
:members:
|
||||||
|
:undoc-members:
|
||||||
|
:show-inheritance:
|
||||||
|
:inherited-members:
|
||||||
27
docs/src/python/file/pyTransform.rst
Normal file
27
docs/src/python/file/pyTransform.rst
Normal file
@@ -0,0 +1,27 @@
|
|||||||
|
Transform
|
||||||
|
===================
|
||||||
|
|
||||||
|
The transform module takes data read by :class:`aare.CtbRawFile` and decodes it
|
||||||
|
to a useful image format. Depending on the detector it supports both analog
|
||||||
|
and digital samples.
|
||||||
|
|
||||||
|
For convenience the following transform objects are defined with a short name
|
||||||
|
|
||||||
|
.. code:: python
|
||||||
|
|
||||||
|
moench05 = Moench05Transform()
|
||||||
|
moench05_1g = Moench05Transform1g()
|
||||||
|
moench05_old = Moench05TransformOld()
|
||||||
|
matterhorn02 = Matterhorn02Transform()
|
||||||
|
adc_sar_04_64to16 = AdcSar04Transform64to16()
|
||||||
|
adc_sar_05_64to16 = AdcSar05Transform64to16()
|
||||||
|
|
||||||
|
.. py:currentmodule:: aare
|
||||||
|
|
||||||
|
.. automodule:: aare.transform
|
||||||
|
:members:
|
||||||
|
:undoc-members:
|
||||||
|
:private-members:
|
||||||
|
:special-members: __call__
|
||||||
|
:show-inheritance:
|
||||||
|
:inherited-members:
|
||||||
4
docs/static/extra.css
vendored
Normal file
4
docs/static/extra.css
vendored
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
/* override table no-wrap */
|
||||||
|
.wy-table-responsive table td, .wy-table-responsive table th {
|
||||||
|
white-space: normal;
|
||||||
|
}
|
||||||
103
etc/add_license.py
Normal file
103
etc/add_license.py
Normal file
@@ -0,0 +1,103 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
import argparse
|
||||||
|
import fnmatch
|
||||||
|
import os
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
CPP_PATTERNS = ["*.h", "*.hpp", "*.cpp"]
|
||||||
|
PY_PATTERNS = ["*.py"]
|
||||||
|
CMAKE_PATTERNS = ["CMakeLists.txt"]
|
||||||
|
|
||||||
|
FILE_PATTERNS = CPP_PATTERNS + PY_PATTERNS + CMAKE_PATTERNS
|
||||||
|
LICENSE_TEXT = "SPDX-License-Identifier: MPL-2.0"
|
||||||
|
|
||||||
|
|
||||||
|
def get_comment_prefix(filename: str) -> str | None:
|
||||||
|
if any(fnmatch.fnmatch(filename, p) for p in CPP_PATTERNS):
|
||||||
|
return "// "
|
||||||
|
if any(fnmatch.fnmatch(filename, p) for p in (PY_PATTERNS + CMAKE_PATTERNS)):
|
||||||
|
return "# "
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
def matches_pattern(filename: str) -> bool:
|
||||||
|
return any(fnmatch.fnmatch(filename, p) for p in FILE_PATTERNS)
|
||||||
|
|
||||||
|
|
||||||
|
def process_file(filepath: Path) -> bool:
|
||||||
|
filename = filepath.name
|
||||||
|
prefix = get_comment_prefix(filename)
|
||||||
|
if not prefix:
|
||||||
|
return False
|
||||||
|
|
||||||
|
license_line = f"{prefix}{LICENSE_TEXT}\n"
|
||||||
|
|
||||||
|
try:
|
||||||
|
with filepath.open("r", encoding="utf-8") as f:
|
||||||
|
lines = f.readlines()
|
||||||
|
except Exception as e:
|
||||||
|
print(f"⚠️ Error reading {filepath}: {e}")
|
||||||
|
return False
|
||||||
|
|
||||||
|
# Skip if SPDX already present anywhere in the file
|
||||||
|
if any("SPDX-License-Identifier" in line for line in lines):
|
||||||
|
return False
|
||||||
|
|
||||||
|
insert_index = 0
|
||||||
|
|
||||||
|
# For Python, keep shebang on the very first line
|
||||||
|
if filename.endswith(".py") and lines:
|
||||||
|
if lines[0].startswith("#!"):
|
||||||
|
insert_index = 1
|
||||||
|
|
||||||
|
lines.insert(insert_index, license_line)
|
||||||
|
|
||||||
|
try:
|
||||||
|
with filepath.open("w", encoding="utf-8") as f:
|
||||||
|
f.writelines(lines)
|
||||||
|
except Exception as e:
|
||||||
|
print(f"⚠️ Error writing {filepath}: {e}")
|
||||||
|
return False
|
||||||
|
|
||||||
|
return True
|
||||||
|
|
||||||
|
|
||||||
|
def main() -> None:
|
||||||
|
parser = argparse.ArgumentParser(
|
||||||
|
description="Add SPDX-License-Identifier: MPL-2.0 to source files."
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"path",
|
||||||
|
help="Root directory to recursively process "
|
||||||
|
"(*.h, *.cpp, *.py, and CMakeLists.txt).",
|
||||||
|
)
|
||||||
|
|
||||||
|
args = parser.parse_args()
|
||||||
|
|
||||||
|
root_path = Path(args.path).expanduser().resolve()
|
||||||
|
|
||||||
|
if not root_path.exists():
|
||||||
|
print(f"Error: Path does not exist: {root_path}")
|
||||||
|
raise SystemExit(1)
|
||||||
|
|
||||||
|
if not root_path.is_dir():
|
||||||
|
print(f"Error: Path is not a directory: {root_path}")
|
||||||
|
raise SystemExit(1)
|
||||||
|
|
||||||
|
print(f"Processing directory: {root_path}")
|
||||||
|
modified = 0
|
||||||
|
|
||||||
|
for dirpath, _, files in os.walk(root_path):
|
||||||
|
dirpath = Path(dirpath)
|
||||||
|
for name in files:
|
||||||
|
if matches_pattern(name):
|
||||||
|
fullpath = dirpath / name
|
||||||
|
if process_file(fullpath):
|
||||||
|
print(f"✔ Added SPDX: {fullpath}")
|
||||||
|
modified += 1
|
||||||
|
|
||||||
|
print(f"\nDone. Updated {modified} file(s).")
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
||||||
18
etc/dev-env.yml
Normal file
18
etc/dev-env.yml
Normal file
@@ -0,0 +1,18 @@
|
|||||||
|
name: dev-environment
|
||||||
|
channels:
|
||||||
|
- conda-forge
|
||||||
|
dependencies:
|
||||||
|
- anaconda-client
|
||||||
|
- catch2
|
||||||
|
- conda-build
|
||||||
|
- doxygen
|
||||||
|
- sphinx
|
||||||
|
- breathe
|
||||||
|
- sphinx_rtd_theme
|
||||||
|
- furo
|
||||||
|
- zeromq
|
||||||
|
- pybind11
|
||||||
|
- numpy
|
||||||
|
- matplotlib
|
||||||
|
- nlohmann_json
|
||||||
|
|
||||||
60
etc/update_version.py
Normal file
60
etc/update_version.py
Normal file
@@ -0,0 +1,60 @@
|
|||||||
|
# SPDX-License-Identifier: MPL-2.0
|
||||||
|
# Copyright (C) 2021 Contributors to the Aare Package
|
||||||
|
"""
|
||||||
|
Script to update the VERSION file with a semantic version if provided as an argument, or with today's date (in the form YYYY.M.D) if no argument is provided.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
from datetime import datetime
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
from packaging.version import Version, InvalidVersion
|
||||||
|
|
||||||
|
|
||||||
|
SCRIPT_DIR = Path(__file__).absolute().parent.parent
|
||||||
|
|
||||||
|
def is_integer(value):
|
||||||
|
try:
|
||||||
|
int(value)
|
||||||
|
except ValueError:
|
||||||
|
return False
|
||||||
|
else:
|
||||||
|
return True
|
||||||
|
|
||||||
|
|
||||||
|
def get_version():
|
||||||
|
|
||||||
|
# Check at least one argument is passed
|
||||||
|
if len(sys.argv) < 2:
|
||||||
|
version = datetime.today().strftime('%Y.%-m.%-d')
|
||||||
|
else:
|
||||||
|
version = sys.argv[1]
|
||||||
|
|
||||||
|
try:
|
||||||
|
v = Version(version) # normalize check if version follows PEP 440 specification
|
||||||
|
|
||||||
|
version_normalized = version.replace("-", ".")
|
||||||
|
|
||||||
|
version_normalized = re.sub(r'0*(\d+)', lambda m : str(int(m.group(0))), version_normalized) #remove leading zeros
|
||||||
|
|
||||||
|
return version_normalized
|
||||||
|
|
||||||
|
except InvalidVersion as e:
|
||||||
|
print(f"Invalid version {version}. Version format must follow semantic versioning format of python PEP 440 version identification specification.")
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
|
||||||
|
def write_version_to_file(version):
|
||||||
|
version_file_path = SCRIPT_DIR/"VERSION"
|
||||||
|
print(version_file_path)
|
||||||
|
with open(version_file_path, "w") as version_file:
|
||||||
|
version_file.write(version)
|
||||||
|
print(f"Version {version} written to VERSION file.")
|
||||||
|
|
||||||
|
# Main script
|
||||||
|
if __name__ == "__main__":
|
||||||
|
|
||||||
|
version = get_version()
|
||||||
|
write_version_to_file(version)
|
||||||
139
include/aare/ArrayExpr.hpp
Normal file
139
include/aare/ArrayExpr.hpp
Normal file
@@ -0,0 +1,139 @@
|
|||||||
|
// SPDX-License-Identifier: MPL-2.0
|
||||||
|
#pragma once
|
||||||
|
#include "aare/defs.hpp"
|
||||||
|
#include <array>
|
||||||
|
#include <cassert>
|
||||||
|
#include <cstddef>
|
||||||
|
#include <cstdint>
|
||||||
|
#include <type_traits>
|
||||||
|
|
||||||
|
namespace aare {
|
||||||
|
|
||||||
|
template <ssize_t Dim = 0, typename Strides>
|
||||||
|
ssize_t element_offset(const Strides & /*unused*/) {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <ssize_t Dim = 0, typename Strides, typename... Ix>
|
||||||
|
ssize_t element_offset(const Strides &strides, ssize_t i, Ix... index) {
|
||||||
|
return i * strides[Dim] + element_offset<Dim + 1>(strides, index...);
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename Derived, typename T, ssize_t Ndim>
|
||||||
|
class NDIndexOps {
|
||||||
|
public:
|
||||||
|
template <typename... Ix>
|
||||||
|
std::enable_if_t<sizeof...(Ix) == Ndim, T &> operator()(Ix... index) {
|
||||||
|
return derived().data()[element_offset(derived().strides(), index...)];
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename... Ix>
|
||||||
|
std::enable_if_t<sizeof...(Ix) == Ndim, const T &> operator()(Ix... index) const {
|
||||||
|
return derived().data()[element_offset(derived().strides(), index...)];
|
||||||
|
}
|
||||||
|
|
||||||
|
T &operator()(ssize_t i) {
|
||||||
|
return derived().data()[i];
|
||||||
|
}
|
||||||
|
|
||||||
|
const T &operator()(ssize_t i) const {
|
||||||
|
return derived().data()[i];
|
||||||
|
}
|
||||||
|
|
||||||
|
T &operator[](ssize_t i) { return derived().data()[i]; }
|
||||||
|
const T &operator[](ssize_t i) const { return derived().data()[i]; }
|
||||||
|
|
||||||
|
private:
|
||||||
|
Derived &derived() { return static_cast<Derived &>(*this); }
|
||||||
|
const Derived &derived() const { return static_cast<const Derived &>(*this); }
|
||||||
|
};
|
||||||
|
|
||||||
|
template <typename E, ssize_t Ndim> class ArrayExpr {
|
||||||
|
public:
|
||||||
|
static constexpr bool is_leaf = false;
|
||||||
|
|
||||||
|
auto operator[](size_t i) const { return static_cast<E const &>(*this)[i]; }
|
||||||
|
auto operator()(size_t i) const { return static_cast<E const &>(*this)[i]; }
|
||||||
|
auto size() const { return static_cast<E const &>(*this).size(); }
|
||||||
|
std::array<ssize_t, Ndim> shape() const {
|
||||||
|
return static_cast<E const &>(*this).shape();
|
||||||
|
}
|
||||||
|
};
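// The expression classes below (ArrayAdd, ArraySub, ArrayMul, ArrayDiv) are
// lazy: they only hold references to their operands and combine the elements
// in operator[], so chained arithmetic is evaluated element by element when
// the result is finally assigned, without intermediate temporaries.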
|
||||||
|
|
||||||
|
template <typename A, typename B, ssize_t Ndim>
|
||||||
|
class ArrayAdd : public ArrayExpr<ArrayAdd<A, B, Ndim>, Ndim> {
|
||||||
|
const A &arr1_;
|
||||||
|
const B &arr2_;
|
||||||
|
|
||||||
|
public:
|
||||||
|
ArrayAdd(const A &arr1, const B &arr2) : arr1_(arr1), arr2_(arr2) {
|
||||||
|
assert(arr1.size() == arr2.size());
|
||||||
|
}
|
||||||
|
auto operator[](int i) const { return arr1_[i] + arr2_[i]; }
|
||||||
|
size_t size() const { return arr1_.size(); }
|
||||||
|
std::array<ssize_t, Ndim> shape() const { return arr1_.shape(); }
|
||||||
|
};
|
||||||
|
|
||||||
|
template <typename A, typename B, ssize_t Ndim>
|
||||||
|
class ArraySub : public ArrayExpr<ArraySub<A, B, Ndim>, Ndim> {
|
||||||
|
const A &arr1_;
|
||||||
|
const B &arr2_;
|
||||||
|
|
||||||
|
public:
|
||||||
|
ArraySub(const A &arr1, const B &arr2) : arr1_(arr1), arr2_(arr2) {
|
||||||
|
assert(arr1.size() == arr2.size());
|
||||||
|
}
|
||||||
|
auto operator[](int i) const { return arr1_[i] - arr2_[i]; }
|
||||||
|
size_t size() const { return arr1_.size(); }
|
||||||
|
std::array<ssize_t, Ndim> shape() const { return arr1_.shape(); }
|
||||||
|
};
|
||||||
|
|
||||||
|
template <typename A, typename B, ssize_t Ndim>
|
||||||
|
class ArrayMul : public ArrayExpr<ArrayMul<A, B, Ndim>, Ndim> {
|
||||||
|
const A &arr1_;
|
||||||
|
const B &arr2_;
|
||||||
|
|
||||||
|
public:
|
||||||
|
ArrayMul(const A &arr1, const B &arr2) : arr1_(arr1), arr2_(arr2) {
|
||||||
|
assert(arr1.size() == arr2.size());
|
||||||
|
}
|
||||||
|
auto operator[](int i) const { return arr1_[i] * arr2_[i]; }
|
||||||
|
size_t size() const { return arr1_.size(); }
|
||||||
|
std::array<ssize_t, Ndim> shape() const { return arr1_.shape(); }
|
||||||
|
};
|
||||||
|
|
||||||
|
template <typename A, typename B, ssize_t Ndim>
|
||||||
|
class ArrayDiv : public ArrayExpr<ArrayDiv<A, B, Ndim>, Ndim> {
|
||||||
|
const A &arr1_;
|
||||||
|
const B &arr2_;
|
||||||
|
|
||||||
|
public:
|
||||||
|
ArrayDiv(const A &arr1, const B &arr2) : arr1_(arr1), arr2_(arr2) {
|
||||||
|
assert(arr1.size() == arr2.size());
|
||||||
|
}
|
||||||
|
auto operator[](int i) const { return arr1_[i] / arr2_[i]; }
|
||||||
|
size_t size() const { return arr1_.size(); }
|
||||||
|
std::array<ssize_t, Ndim> shape() const { return arr1_.shape(); }
|
||||||
|
};
|
||||||
|
|
||||||
|
template <typename A, typename B, ssize_t Ndim>
|
||||||
|
auto operator+(const ArrayExpr<A, Ndim> &arr1, const ArrayExpr<B, Ndim> &arr2) {
|
||||||
|
return ArrayAdd<ArrayExpr<A, Ndim>, ArrayExpr<B, Ndim>, Ndim>(arr1, arr2);
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename A, typename B, ssize_t Ndim>
|
||||||
|
auto operator-(const ArrayExpr<A, Ndim> &arr1, const ArrayExpr<B, Ndim> &arr2) {
|
||||||
|
return ArraySub<ArrayExpr<A, Ndim>, ArrayExpr<B, Ndim>, Ndim>(arr1, arr2);
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename A, typename B, ssize_t Ndim>
|
||||||
|
auto operator*(const ArrayExpr<A, Ndim> &arr1, const ArrayExpr<B, Ndim> &arr2) {
|
||||||
|
return ArrayMul<ArrayExpr<A, Ndim>, ArrayExpr<B, Ndim>, Ndim>(arr1, arr2);
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename A, typename B, ssize_t Ndim>
|
||||||
|
auto operator/(const ArrayExpr<A, Ndim> &arr1, const ArrayExpr<B, Ndim> &arr2) {
|
||||||
|
return ArrayDiv<ArrayExpr<A, Ndim>, ArrayExpr<B, Ndim>, Ndim>(arr1, arr2);
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace aare
|
||||||
447
include/aare/CalculateEta.hpp
Normal file
447
include/aare/CalculateEta.hpp
Normal file
@@ -0,0 +1,447 @@
|
|||||||
|
// SPDX-License-Identifier: MPL-2.0
|
||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include "aare/Cluster.hpp"
|
||||||
|
#include "aare/ClusterVector.hpp"
|
||||||
|
#include "aare/NDArray.hpp"
|
||||||
|
#include "aare/defs.hpp"
|
||||||
|
|
||||||
|
namespace aare {
|
||||||
|
|
||||||
|
enum class pixel : int {
|
||||||
|
pBottomLeft = 0,
|
||||||
|
pBottom = 1,
|
||||||
|
pBottomRight = 2,
|
||||||
|
pLeft = 3,
|
||||||
|
pCenter = 4,
|
||||||
|
pRight = 5,
|
||||||
|
pTopLeft = 6,
|
||||||
|
pTop = 7,
|
||||||
|
pTopRight = 8
|
||||||
|
};
|
||||||
|
|
||||||
|
// TODO: better to have sum after x,y
|
||||||
|
/**
|
||||||
|
* eta struct
|
||||||
|
*/
|
||||||
|
template <typename T> struct Eta2 {
|
||||||
|
/// @brief eta in x direction
|
||||||
|
double x{};
|
||||||
|
/// @brief eta in y direction
|
||||||
|
double y{};
|
||||||
|
/// @brief index of subcluster with highest energy value (given as corner
|
||||||
|
/// relative to cluster center)
|
||||||
|
corner c{0};
|
||||||
|
/// @brief photon energy (cluster sum)
|
||||||
|
T sum{};
|
||||||
|
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief Calculate the eta2 values for all clusters in a ClusterVector
|
||||||
|
*/
|
||||||
|
template <typename ClusterType,
|
||||||
|
typename = std::enable_if_t<is_cluster_v<ClusterType>>>
|
||||||
|
std::vector<Eta2<typename ClusterType::value_type>>
|
||||||
|
calculate_eta2(const ClusterVector<ClusterType> &clusters) {
|
||||||
|
|
||||||
|
std::vector<Eta2<typename ClusterType::value_type>> eta2{};
|
||||||
|
eta2.reserve(clusters.size());
|
||||||
|
|
||||||
|
for (size_t i = 0; i < clusters.size(); i++) {
|
||||||
|
auto e = calculate_eta2(clusters[i]);
|
||||||
|
eta2.push_back(e);
|
||||||
|
}
|
||||||
|
|
||||||
|
return eta2;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief Calculate the full eta2 values for all clusters in a ClusterVector
|
||||||
|
*/
|
||||||
|
template <typename ClusterType,
|
||||||
|
typename = std::enable_if_t<is_cluster_v<ClusterType>>>
|
||||||
|
std::vector<Eta2<typename ClusterType::value_type>>
|
||||||
|
calculate_full_eta2(const ClusterVector<ClusterType> &clusters) {
|
||||||
|
std::vector<Eta2<typename ClusterType::value_type>> eta2{};
|
||||||
|
eta2.reserve(clusters.size());
|
||||||
|
|
||||||
|
for (size_t i = 0; i < clusters.size(); i++) {
|
||||||
|
auto e = calculate_full_eta2(clusters[i]);
|
||||||
|
eta2.push_back(e);
|
||||||
|
}
|
||||||
|
|
||||||
|
return eta2;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief Calculate eta3 for all 3x3 clusters in a ClusterVector
|
||||||
|
*/
|
||||||
|
template <typename ClusterType,
|
||||||
|
typename = std::enable_if_t<is_cluster_v<ClusterType>>>
|
||||||
|
std::vector<Eta2<typename ClusterType::value_type>>
|
||||||
|
calculate_eta3(const ClusterVector<ClusterType> &clusters) {
|
||||||
|
std::vector<Eta2<typename ClusterType::value_type>> eta2{};
|
||||||
|
eta2.reserve(clusters.size());
|
||||||
|
|
||||||
|
for (size_t i = 0; i < clusters.size(); i++) {
|
||||||
|
auto e = calculate_eta3(clusters[i]);
|
||||||
|
eta2.push_back(e);
|
||||||
|
}
|
||||||
|
|
||||||
|
return eta2;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief Calculate cross eta3 for all 3x3 clusters in a ClusterVector
|
||||||
|
*/
|
||||||
|
template <typename ClusterType,
|
||||||
|
typename = std::enable_if_t<is_cluster_v<ClusterType>>>
|
||||||
|
std::vector<Eta2<typename ClusterType::value_type>>
|
||||||
|
calculate_cross_eta3(const ClusterVector<ClusterType> &clusters) {
|
||||||
|
std::vector<Eta2<typename ClusterType::value_type>> eta2{};
|
||||||
|
eta2.reserve(clusters.size());
|
||||||
|
|
||||||
|
for (size_t i = 0; i < clusters.size(); i++) {
|
||||||
|
auto e = calculate_cross_eta3(clusters[i]);
|
||||||
|
eta2.push_back(e);
|
||||||
|
}
|
||||||
|
|
||||||
|
return eta2;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief helper function to calculate eta2 x and y values
|
||||||
|
* @param eta reference to the Eta2 object to update
|
||||||
|
* @param left_x value of the left pixel
|
||||||
|
* @param right_x value of the right pixel
|
||||||
|
* @param bottom_y value of the bottom pixel
|
||||||
|
* @param top_y value of the top pixel
|
||||||
|
*/
|
||||||
|
template <typename T>
|
||||||
|
inline void calculate_eta2(Eta2<T> &eta, const T left_x, const T right_x,
|
||||||
|
const T bottom_y, const T top_y) {
|
||||||
|
if ((right_x + left_x) != 0)
|
||||||
|
eta.x = static_cast<double>(right_x) /
|
||||||
|
static_cast<double>(right_x + left_x); // between (0,1) the
|
||||||
|
// closer to zero left
|
||||||
|
// value probably larger
|
||||||
|
if ((top_y + bottom_y) != 0)
|
||||||
|
eta.y = static_cast<double>(top_y) /
|
||||||
|
static_cast<double>(top_y + bottom_y); // between (0,1) the
|
||||||
|
// closer to zero bottom
|
||||||
|
// value probably larger
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief Calculate the eta2 values for a generic sized cluster and return them
|
||||||
|
* in a Eta2 struct containing etay, etax and the index (as corner) of the
|
||||||
|
* respective 2x2 subcluster relative to the cluster center.
|
||||||
|
*/
|
||||||
|
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||||
|
typename CoordType = uint16_t>
|
||||||
|
Eta2<T>
|
||||||
|
calculate_eta2(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cl) {
|
||||||
|
|
||||||
|
static_assert(ClusterSizeX > 1 && ClusterSizeY > 1);
|
||||||
|
Eta2<T> eta{};
|
||||||
|
|
||||||
|
size_t cluster_center_index =
|
||||||
|
(ClusterSizeX / 2) + (ClusterSizeY / 2) * ClusterSizeX;
|
||||||
|
|
||||||
|
auto max_sum = cl.max_sum_2x2();
|
||||||
|
eta.sum = max_sum.sum;
|
||||||
|
corner c = max_sum.index;
|
||||||
|
|
||||||
|
// subcluster top right from center
|
||||||
|
switch (c) {
|
||||||
|
case (corner::cTopLeft):
|
||||||
|
calculate_eta2(eta, cl.data[cluster_center_index - 1],
|
||||||
|
cl.data[cluster_center_index],
|
||||||
|
cl.data[cluster_center_index - ClusterSizeX],
|
||||||
|
cl.data[cluster_center_index]);
|
||||||
|
// dx = -1
|
||||||
|
// dy = -1
|
||||||
|
break;
|
||||||
|
case (corner::cTopRight):
|
||||||
|
calculate_eta2(eta, cl.data[cluster_center_index],
|
||||||
|
cl.data[cluster_center_index + 1],
|
||||||
|
cl.data[cluster_center_index - ClusterSizeX],
|
||||||
|
cl.data[cluster_center_index]);
|
||||||
|
// dx = 0
|
||||||
|
// dy = -1
|
||||||
|
break;
|
||||||
|
case (corner::cBottomLeft):
|
||||||
|
calculate_eta2(eta, cl.data[cluster_center_index - 1],
|
||||||
|
cl.data[cluster_center_index],
|
||||||
|
cl.data[cluster_center_index],
|
||||||
|
cl.data[cluster_center_index + ClusterSizeX]);
|
||||||
|
// dx = -1
|
||||||
|
// dy = 0
|
||||||
|
break;
|
||||||
|
case (corner::cBottomRight):
|
||||||
|
calculate_eta2(eta, cl.data[cluster_center_index],
|
||||||
|
cl.data[cluster_center_index + 1],
|
||||||
|
cl.data[cluster_center_index],
|
||||||
|
cl.data[cluster_center_index + ClusterSizeX]);
|
||||||
|
// dx = 0
|
||||||
|
// dy = 0
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
eta.c = c;
|
||||||
|
|
||||||
|
return eta;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief Calculate the eta2 values for a generic sized cluster and return them
|
||||||
|
* in a Eta2 struct containing etay, etax and the index (as corner) of the
|
||||||
|
* respective 2x2 subcluster relative to the cluster center.
|
||||||
|
*/
|
||||||
|
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||||
|
typename CoordType>
|
||||||
|
Eta2<T> calculate_full_eta2(
|
||||||
|
const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cl) {
|
||||||
|
|
||||||
|
static_assert(ClusterSizeX > 1 && ClusterSizeY > 1);
|
||||||
|
Eta2<T> eta{};
|
||||||
|
|
||||||
|
constexpr size_t cluster_center_index =
|
||||||
|
(ClusterSizeX / 2) + (ClusterSizeY / 2) * ClusterSizeX;
|
||||||
|
|
||||||
|
auto max_sum = cl.max_sum_2x2();
|
||||||
|
eta.sum = max_sum.sum;
|
||||||
|
corner c = max_sum.index;
|
||||||
|
|
||||||
|
// subcluster top right from center
|
||||||
|
switch (c) {
|
||||||
|
case (corner::cTopLeft):
|
||||||
|
if (eta.sum != 0) {
|
||||||
|
eta.x = static_cast<double>(
|
||||||
|
cl.data[cluster_center_index] +
|
||||||
|
cl.data[cluster_center_index - ClusterSizeX]) /
|
||||||
|
static_cast<double>(eta.sum);
|
||||||
|
|
||||||
|
eta.y = static_cast<double>(cl.data[cluster_center_index - 1] +
|
||||||
|
cl.data[cluster_center_index]) /
|
||||||
|
static_cast<double>(eta.sum);
|
||||||
|
}
|
||||||
|
// dx = -1
|
||||||
|
// dy = -1
|
||||||
|
break;
|
||||||
|
case (corner::cTopRight):
|
||||||
|
if (eta.sum != 0) {
|
||||||
|
eta.x = static_cast<double>(
|
||||||
|
cl.data[cluster_center_index + 1] +
|
||||||
|
cl.data[cluster_center_index - ClusterSizeX + 1]) /
|
||||||
|
static_cast<double>(eta.sum);
|
||||||
|
eta.y = static_cast<double>(cl.data[cluster_center_index] +
|
||||||
|
cl.data[cluster_center_index + 1]) /
|
||||||
|
static_cast<double>(eta.sum);
|
||||||
|
}
|
||||||
|
// dx = 0
|
||||||
|
// dy = -1
|
||||||
|
break;
|
||||||
|
case (corner::cBottomLeft):
|
||||||
|
if (eta.sum != 0) {
|
||||||
|
eta.x = static_cast<double>(
|
||||||
|
cl.data[cluster_center_index] +
|
||||||
|
cl.data[cluster_center_index + ClusterSizeX]) /
|
||||||
|
static_cast<double>(eta.sum);
|
||||||
|
eta.y = static_cast<double>(
|
||||||
|
cl.data[cluster_center_index + ClusterSizeX] +
|
||||||
|
cl.data[cluster_center_index + ClusterSizeX - 1]) /
|
||||||
|
static_cast<double>(eta.sum);
|
||||||
|
}
|
||||||
|
// dx = -1
|
||||||
|
// dy = 0
|
||||||
|
break;
|
||||||
|
case (corner::cBottomRight):
|
||||||
|
if (eta.sum != 0) {
|
||||||
|
eta.x = static_cast<double>(
|
||||||
|
cl.data[cluster_center_index + 1] +
|
||||||
|
cl.data[cluster_center_index + ClusterSizeX + 1]) /
|
||||||
|
static_cast<double>(eta.sum);
|
||||||
|
eta.y = static_cast<double>(
|
||||||
|
cl.data[cluster_center_index + ClusterSizeX] +
|
||||||
|
cl.data[cluster_center_index + ClusterSizeX + 1]) /
|
||||||
|
static_cast<double>(eta.sum);
|
||||||
|
}
|
||||||
|
// dx = 0
|
||||||
|
// dy = 0
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
eta.c = c;
|
||||||
|
|
||||||
|
return eta;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
Eta2<T> calculate_eta2(const Cluster<T, 2, 2, uint16_t> &cl) {
|
||||||
|
Eta2<T> eta{};
|
||||||
|
|
||||||
|
// TODO: maybe have as member function of cluster
|
||||||
|
const uint8_t photon_hit_index =
|
||||||
|
std::max_element(cl.data.begin(), cl.data.end()) - cl.data.begin();
|
||||||
|
|
||||||
|
eta.c = static_cast<corner>(3 - photon_hit_index);
|
||||||
|
|
||||||
|
switch (eta.c) {
|
||||||
|
case corner::cTopLeft:
|
||||||
|
calculate_eta2(eta, cl.data[2], cl.data[3], cl.data[1], cl.data[3]);
|
||||||
|
break;
|
||||||
|
case corner::cTopRight:
|
||||||
|
calculate_eta2(eta, cl.data[2], cl.data[3], cl.data[0], cl.data[2]);
|
||||||
|
break;
|
||||||
|
case corner::cBottomLeft:
|
||||||
|
calculate_eta2(eta, cl.data[0], cl.data[1], cl.data[1], cl.data[3]);
|
||||||
|
break;
|
||||||
|
case corner::cBottomRight:
|
||||||
|
calculate_eta2(eta, cl.data[0], cl.data[1], cl.data[0], cl.data[2]);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
eta.sum = cl.sum();
|
||||||
|
|
||||||
|
return eta;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
Eta2<T> calculate_full_eta2(const Cluster<T, 2, 2, uint16_t> &cl) {
|
||||||
|
|
||||||
|
Eta2<T> eta{};
|
||||||
|
|
||||||
|
eta.sum = cl.sum();
|
||||||
|
|
||||||
|
const uint8_t photon_hit_index =
|
||||||
|
std::max_element(cl.data.begin(), cl.data.end()) - cl.data.begin();
|
||||||
|
|
||||||
|
eta.c = static_cast<corner>(3 - photon_hit_index);
|
||||||
|
|
||||||
|
if (eta.sum != 0) {
|
||||||
|
eta.x = static_cast<double>(cl.data[1] + cl.data[3]) /
|
||||||
|
static_cast<double>(eta.sum);
|
||||||
|
eta.y = static_cast<double>(cl.data[2] + cl.data[3]) /
|
||||||
|
static_cast<double>(eta.sum);
|
||||||
|
}
|
||||||
|
|
||||||
|
return eta;
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO generalize
|
||||||
|
template <typename T>
|
||||||
|
Eta2<T> calculate_eta2(const Cluster<T, 1, 2, uint16_t> &cl) {
|
||||||
|
Eta2<T> eta{};
|
||||||
|
|
||||||
|
eta.x = 0;
|
||||||
|
eta.y = static_cast<double>(cl.data[1]) / cl.data[0];
|
||||||
|
eta.sum = cl.sum();

return eta;
}
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
Eta2<T> calculate_eta2(const Cluster<T, 2, 1, uint16_t> &cl) {
|
||||||
|
Eta2<T> eta{};
|
||||||
|
|
||||||
|
eta.x = static_cast<double>(cl.data[1]) / cl.data[0];
|
||||||
|
eta.y = 0;
|
||||||
|
eta.sum = cl.sum();

return eta;
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief calculates cross Eta3 for 3x3 cluster
|
||||||
|
* cross Eta3 calculates the eta by taking into account only the cross pixels
|
||||||
|
* {top, bottom, left, right, center}
|
||||||
|
*/
|
||||||
|
template <typename T, typename CoordType = uint16_t>
|
||||||
|
Eta2<T> calculate_cross_eta3(const Cluster<T, 3, 3, CoordType> &cl) {
|
||||||
|
|
||||||
|
Eta2<T> eta{};
|
||||||
|
|
||||||
|
T photon_energy = cl.sum();
|
||||||
|
|
||||||
|
eta.sum = photon_energy;
|
||||||
|
|
||||||
|
if ((cl.data[3] + cl.data[4] + cl.data[5]) != 0)
|
||||||
|
|
||||||
|
eta.x =
|
||||||
|
static_cast<double>(-cl.data[3] + cl.data[3 + 2]) /
|
||||||
|
|
||||||
|
static_cast<double>(cl.data[3] + cl.data[4] + cl.data[5]); // (-1,1)
|
||||||
|
|
||||||
|
if ((cl.data[1] + cl.data[4] + cl.data[7]) != 0)
|
||||||
|
|
||||||
|
eta.y = static_cast<double>(-cl.data[1] + cl.data[2 * 3 + 1]) /
|
||||||
|
|
||||||
|
static_cast<double>(cl.data[1] + cl.data[4] + cl.data[7]);
|
||||||
|
|
||||||
|
return eta;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||||
|
typename CoordType = uint16_t>
|
||||||
|
Eta2<T> calculate_cross_eta3(
|
||||||
|
const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cl) {
|
||||||
|
|
||||||
|
static_assert(ClusterSizeX > 2 && ClusterSizeY > 2,
|
||||||
|
"calculate_eta3 only defined for clusters larger than 2x2");
|
||||||
|
|
||||||
|
if constexpr (ClusterSizeX != 3 || ClusterSizeY != 3) {
|
||||||
|
auto reduced_cluster = reduce_cluster_to_3x3(cl);
|
||||||
|
return calculate_cross_eta3(reduced_cluster);
|
||||||
|
} else {
|
||||||
|
return calculate_cross_eta3(cl);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief calculates Eta3 for 3x3 cluster
|
||||||
|
* It calculates the eta by taking into account all pixels in the 3x3 cluster
|
||||||
|
*/
|
||||||
|
template <typename T, typename CoordType = uint16_t>
|
||||||
|
Eta2<T> calculate_eta3(const Cluster<T, 3, 3, CoordType> &cl) {
|
||||||
|
|
||||||
|
Eta2<T> eta{};
|
||||||
|
|
||||||
|
T photon_energy = cl.sum();
|
||||||
|
|
||||||
|
eta.sum = photon_energy;
|
||||||
|
|
||||||
|
// TODO: how do we handle potential arithmetic overflows? - T could be
|
||||||
|
// uint16
|
||||||
|
if (photon_energy != 0) {
|
||||||
|
std::array<T, 2> column_sums{
|
||||||
|
static_cast<T>(cl.data[0] + cl.data[3] + cl.data[6]),
|
||||||
|
static_cast<T>(cl.data[2] + cl.data[5] + cl.data[8])};
|
||||||
|
|
||||||
|
eta.x = static_cast<double>(-column_sums[0] + column_sums[1]) /
|
||||||
|
static_cast<double>(photon_energy);
|
||||||
|
|
||||||
|
std::array<T, 2> row_sums{
|
||||||
|
static_cast<T>(cl.data[0] + cl.data[1] + cl.data[2]),
|
||||||
|
static_cast<T>(cl.data[6] + cl.data[7] + cl.data[8])};
|
||||||
|
|
||||||
|
eta.y = static_cast<double>(-row_sums[0] + row_sums[1]) /
|
||||||
|
static_cast<double>(photon_energy);
|
||||||
|
}
|
||||||
|
|
||||||
|
return eta;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||||
|
typename CoordType = uint16_t>
|
||||||
|
Eta2<T>
|
||||||
|
calculate_eta3(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &cl) {
|
||||||
|
|
||||||
|
static_assert(ClusterSizeX > 2 && ClusterSizeY > 2,
|
||||||
|
"calculate_eta3 only defined for clusters larger than 2x2");
|
||||||
|
|
||||||
|
if constexpr (ClusterSizeX != 3 || ClusterSizeY != 3) {
|
||||||
|
auto reduced_cluster = reduce_cluster_to_3x3(cl);
|
||||||
|
return calculate_eta3(reduced_cluster);
|
||||||
|
} else {
|
||||||
|
return calculate_eta3(cl);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace aare
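The eta functions above are easiest to see on a concrete cluster. Below is a minimal sketch, assuming the functions live in a header such as aare/CalculateEta.hpp (the include path is not shown in this diff) and using made-up pixel values.

// Usage sketch; include path and pixel values are assumptions, not part of the diff.
#include "aare/CalculateEta.hpp" // hypothetical location of the eta functions above
#include <fmt/format.h>

int main() {
    using namespace aare;
    // 3x3 cluster centred on pixel (10, 20); charge is shared with the
    // right and bottom neighbours of the centre pixel.
    Cluster<int32_t, 3, 3> cl{10, 20, {0, 5, 0, 0, 50, 30, 0, 10, 0}};

    auto eta = calculate_eta2(cl);
    fmt::print("etax: {}, etay: {}, sum: {}, corner: {}\n", eta.x, eta.y,
               eta.sum, static_cast<int>(eta.c));
}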
|
||||||
100
include/aare/CircularFifo.hpp
Normal file
@@ -0,0 +1,100 @@
|
|||||||
|
// SPDX-License-Identifier: MPL-2.0
|
||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include <chrono>
|
||||||
|
#include <fmt/color.h>
|
||||||
|
#include <fmt/format.h>
|
||||||
|
#include <memory>
|
||||||
|
#include <thread>
|
||||||
|
|
||||||
|
#include "aare/ProducerConsumerQueue.hpp"
|
||||||
|
|
||||||
|
namespace aare {
|
||||||
|
|
||||||
|
template <class ItemType> class CircularFifo {
|
||||||
|
uint32_t fifo_size;
|
||||||
|
aare::ProducerConsumerQueue<ItemType> free_slots;
|
||||||
|
aare::ProducerConsumerQueue<ItemType> filled_slots;
|
||||||
|
|
||||||
|
public:
|
||||||
|
CircularFifo() : CircularFifo(100){};
|
||||||
|
CircularFifo(uint32_t size)
|
||||||
|
: fifo_size(size), free_slots(size + 1), filled_slots(size + 1) {
|
||||||
|
|
||||||
|
// TODO! how do we deal with alignment for writing? alignas???
|
||||||
|
// Do we give the user a chance to provide memory locations?
|
||||||
|
// Templated allocator?
|
||||||
|
for (size_t i = 0; i < fifo_size; ++i) {
|
||||||
|
free_slots.write(ItemType{});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
bool next() {
|
||||||
|
// TODO! avoid default constructing ItemType
|
||||||
|
ItemType it;
|
||||||
|
if (!filled_slots.read(it))
|
||||||
|
return false;
|
||||||
|
if (!free_slots.write(std::move(it)))
|
||||||
|
return false;
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
~CircularFifo() {}
|
||||||
|
|
||||||
|
using value_type = ItemType;
|
||||||
|
|
||||||
|
auto numFilledSlots() const noexcept { return filled_slots.sizeGuess(); }
|
||||||
|
auto numFreeSlots() const noexcept { return free_slots.sizeGuess(); }
|
||||||
|
auto isFull() const noexcept { return filled_slots.isFull(); }
|
||||||
|
|
||||||
|
ItemType pop_free() {
|
||||||
|
ItemType v;
|
||||||
|
while (!free_slots.read(v))
|
||||||
|
;
|
||||||
|
return std::move(v);
|
||||||
|
// return v;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool try_pop_free(ItemType &v) { return free_slots.read(v); }
|
||||||
|
|
||||||
|
ItemType pop_value(std::chrono::nanoseconds wait,
|
||||||
|
std::atomic<bool> &stopped) {
|
||||||
|
ItemType v;
|
||||||
|
while (!filled_slots.read(v) && !stopped) {
|
||||||
|
std::this_thread::sleep_for(wait);
|
||||||
|
}
|
||||||
|
return std::move(v);
|
||||||
|
}
|
||||||
|
|
||||||
|
ItemType pop_value() {
|
||||||
|
ItemType v;
|
||||||
|
while (!filled_slots.read(v))
|
||||||
|
;
|
||||||
|
return std::move(v);
|
||||||
|
}
|
||||||
|
|
||||||
|
ItemType *frontPtr() { return filled_slots.frontPtr(); }
|
||||||
|
|
||||||
|
// TODO! Add function to move item from filled to free to be used
|
||||||
|
// with the frontPtr function
|
||||||
|
|
||||||
|
template <class... Args> void push_value(Args &&...recordArgs) {
|
||||||
|
while (!filled_slots.write(std::forward<Args>(recordArgs)...))
|
||||||
|
;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <class... Args> bool try_push_value(Args &&...recordArgs) {
|
||||||
|
return filled_slots.write(std::forward<Args>(recordArgs)...);
|
||||||
|
}
|
||||||
|
|
||||||
|
template <class... Args> void push_free(Args &&...recordArgs) {
|
||||||
|
while (!free_slots.write(std::forward<Args>(recordArgs)...))
|
||||||
|
;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <class... Args> bool try_push_free(Args &&...recordArgs) {
|
||||||
|
return free_slots.write(std::forward<Args>(recordArgs)...);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace aare
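A short producer/consumer sketch of the CircularFifo above; the item type, slot count and buffer contents are illustrative only.

// Usage sketch under assumed values; not part of the diff above.
#include "aare/CircularFifo.hpp"
#include <fmt/format.h>
#include <vector>

int main() {
    aare::CircularFifo<std::vector<int>> fifo(4); // 4 pre-allocated slots

    // Producer side: take a free slot, fill it, hand it to the consumer.
    auto buf = fifo.pop_free();
    buf.assign(10, 42);
    fifo.push_value(std::move(buf));

    // Consumer side: take the filled slot, use it, return it to the free pool.
    auto out = fifo.pop_value();
    fmt::print("got {} elements\n", out.size());
    fifo.push_free(std::move(out));
}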
|
||||||
239
include/aare/Cluster.hpp
Executable file
@@ -0,0 +1,239 @@
|
|||||||
|
// SPDX-License-Identifier: MPL-2.0
|
||||||
|
|
||||||
|
/************************************************
|
||||||
|
* @file Cluster.hpp
|
||||||
|
* @short definition of cluster, where CoordType (x,y) give
|
||||||
|
* the cluster center coordinates and data the actual cluster data
|
||||||
|
* cluster size is given as template parameters
|
||||||
|
***********************************************/
|
||||||
|
|
||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include "defs.hpp"
|
||||||
|
#include <algorithm>
|
||||||
|
#include <array>
|
||||||
|
#include <cstdint>
|
||||||
|
#include <numeric>
|
||||||
|
#include <type_traits>
|
||||||
|
|
||||||
|
namespace aare {
|
||||||
|
|
||||||
|
// TODO: replace the enable_if/static_assert checks with a C++20 requires clause
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief Cluster struct
|
||||||
|
*/
|
||||||
|
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||||
|
typename CoordType = uint16_t>
|
||||||
|
struct Cluster {
|
||||||
|
|
||||||
|
static_assert(std::is_arithmetic_v<T>, "T needs to be an arithmetic type");
|
||||||
|
static_assert(std::is_integral_v<CoordType>,
|
||||||
|
"CoordType needs to be an integral type");
|
||||||
|
static_assert(ClusterSizeX > 0 && ClusterSizeY > 0,
|
||||||
|
"Cluster sizes must be bigger than zero");
|
||||||
|
|
||||||
|
/// @brief Cluster center x coordinate (in pixel coordinates)
|
||||||
|
CoordType x;
|
||||||
|
/// @brief Cluster center y coordinate (in pixel coordinates)
|
||||||
|
CoordType y;
|
||||||
|
/// @brief Cluster data stored in row-major order starting from top-left
|
||||||
|
std::array<T, ClusterSizeX * ClusterSizeY> data;
|
||||||
|
|
||||||
|
static constexpr uint8_t cluster_size_x = ClusterSizeX;
|
||||||
|
static constexpr uint8_t cluster_size_y = ClusterSizeY;
|
||||||
|
using value_type = T;
|
||||||
|
using coord_type = CoordType;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief Sum of all elements in the cluster
|
||||||
|
*/
|
||||||
|
T sum() const { return std::accumulate(data.begin(), data.end(), T{}); }
|
||||||
|
|
||||||
|
// TODO: handle 1 dimensional clusters
|
||||||
|
/**
|
||||||
|
* @brief sum of 2x2 subcluster with highest energy
|
||||||
|
* @return photon energy of subcluster, 2x2 subcluster index relative to
|
||||||
|
* cluster center
|
||||||
|
*/
|
||||||
|
Sum_index_pair<T, corner> max_sum_2x2() const {
|
||||||
|
|
||||||
|
if constexpr (cluster_size_x == 3 && cluster_size_y == 3) {
|
||||||
|
std::array<T, 4> sum_2x2_subclusters;
|
||||||
|
sum_2x2_subclusters[0] = data[0] + data[1] + data[3] + data[4];
|
||||||
|
sum_2x2_subclusters[1] = data[1] + data[2] + data[4] + data[5];
|
||||||
|
sum_2x2_subclusters[2] = data[3] + data[4] + data[6] + data[7];
|
||||||
|
sum_2x2_subclusters[3] = data[4] + data[5] + data[7] + data[8];
|
||||||
|
int index = std::max_element(sum_2x2_subclusters.begin(),
|
||||||
|
sum_2x2_subclusters.end()) -
|
||||||
|
sum_2x2_subclusters.begin();
|
||||||
|
return Sum_index_pair<T, corner>{sum_2x2_subclusters[index],
|
||||||
|
corner{index}};
|
||||||
|
} else if constexpr (cluster_size_x == 2 && cluster_size_y == 2) {
|
||||||
|
return Sum_index_pair<T, corner>{
|
||||||
|
data[0] + data[1] + data[2] + data[3], corner{0}};
|
||||||
|
} else {
|
||||||
|
constexpr size_t cluster_center_index =
|
||||||
|
(ClusterSizeX / 2) + (ClusterSizeY / 2) * ClusterSizeX;
|
||||||
|
|
||||||
|
std::array<T, 4> sum_2x2_subcluster{0};
|
||||||
|
// subcluster top left from center
|
||||||
|
sum_2x2_subcluster[0] =
|
||||||
|
data[cluster_center_index] + data[cluster_center_index - 1] +
|
||||||
|
data[cluster_center_index - ClusterSizeX] +
|
||||||
|
data[cluster_center_index - 1 - ClusterSizeX];
|
||||||
|
// subcluster top right from center
|
||||||
|
if (ClusterSizeX > 2) {
|
||||||
|
sum_2x2_subcluster[1] =
|
||||||
|
data[cluster_center_index] +
|
||||||
|
data[cluster_center_index + 1] +
|
||||||
|
data[cluster_center_index - ClusterSizeX] +
|
||||||
|
data[cluster_center_index - ClusterSizeX + 1];
|
||||||
|
}
|
||||||
|
// subcluster bottom left from center
|
||||||
|
if (ClusterSizeY > 2) {
|
||||||
|
sum_2x2_subcluster[2] =
|
||||||
|
data[cluster_center_index] +
|
||||||
|
data[cluster_center_index - 1] +
|
||||||
|
data[cluster_center_index + ClusterSizeX] +
|
||||||
|
data[cluster_center_index + ClusterSizeX - 1];
|
||||||
|
}
|
||||||
|
// subcluster bottom right from center
|
||||||
|
if (ClusterSizeX > 2 && ClusterSizeY > 2) {
|
||||||
|
sum_2x2_subcluster[3] =
|
||||||
|
data[cluster_center_index] +
|
||||||
|
data[cluster_center_index + 1] +
|
||||||
|
data[cluster_center_index + ClusterSizeX] +
|
||||||
|
data[cluster_center_index + ClusterSizeX + 1];
|
||||||
|
}
|
||||||
|
|
||||||
|
int index = std::max_element(sum_2x2_subcluster.begin(),
|
||||||
|
sum_2x2_subcluster.end()) -
|
||||||
|
sum_2x2_subcluster.begin();
|
||||||
|
return Sum_index_pair<T, corner>{sum_2x2_subcluster[index],
|
||||||
|
corner{index}};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief Reduce a cluster to a 2x2 cluster by selecting the 2x2 block with the
|
||||||
|
* highest sum.
|
||||||
|
* @param c Cluster to reduce
|
||||||
|
* @return reduced cluster
|
||||||
|
* @note The cluster is filled using row major ordering starting at the top-left
|
||||||
|
* (thus for a max subcluster in the top left corner the photon hit is at
|
||||||
|
* the fourth position)
|
||||||
|
*/
|
||||||
|
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||||
|
typename CoordType = uint16_t>
|
||||||
|
Cluster<T, 2, 2, CoordType>
|
||||||
|
reduce_to_2x2(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &c) {
|
||||||
|
|
||||||
|
static_assert(ClusterSizeX >= 2 && ClusterSizeY >= 2,
|
||||||
|
"Cluster sizes must be at least 2x2 for reduction to 2x2");
|
||||||
|
|
||||||
|
Cluster<T, 2, 2, CoordType> result{};
|
||||||
|
|
||||||
|
auto [sum, index] = c.max_sum_2x2();
|
||||||
|
|
||||||
|
constexpr int16_t cluster_center_index =
|
||||||
|
(ClusterSizeX / 2) + (ClusterSizeY / 2) * ClusterSizeX;
|
||||||
|
|
||||||
|
int16_t index_top_left_max_2x2_subcluster = cluster_center_index;
|
||||||
|
switch (index) {
|
||||||
|
case corner::cTopLeft:
|
||||||
|
index_top_left_max_2x2_subcluster -= (ClusterSizeX + 1);
|
||||||
|
break;
|
||||||
|
case corner::cTopRight:
|
||||||
|
index_top_left_max_2x2_subcluster -= ClusterSizeX;
|
||||||
|
break;
|
||||||
|
case corner::cBottomLeft:
|
||||||
|
index_top_left_max_2x2_subcluster -= 1;
|
||||||
|
break;
|
||||||
|
case corner::cBottomRight:
|
||||||
|
// no change needed
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
result.x = c.x;
|
||||||
|
result.y = c.y;
|
||||||
|
|
||||||
|
result.data = {
|
||||||
|
c.data[index_top_left_max_2x2_subcluster],
|
||||||
|
c.data[index_top_left_max_2x2_subcluster + 1],
|
||||||
|
c.data[index_top_left_max_2x2_subcluster + ClusterSizeX],
|
||||||
|
c.data[index_top_left_max_2x2_subcluster + ClusterSizeX + 1]};
|
||||||
|
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
Cluster<T, 2, 2, uint16_t> reduce_to_2x2(const Cluster<T, 3, 3, uint16_t> &c) {
|
||||||
|
Cluster<T, 2, 2, uint16_t> result{};
|
||||||
|
|
||||||
|
auto [s, i] = c.max_sum_2x2();
|
||||||
|
result.x = c.x;
|
||||||
|
result.y = c.y;
|
||||||
|
switch (i) {
|
||||||
|
case corner::cTopLeft:
|
||||||
|
result.data = {c.data[0], c.data[1], c.data[3], c.data[4]};
|
||||||
|
break;
|
||||||
|
case corner::cTopRight:
|
||||||
|
result.data = {c.data[1], c.data[2], c.data[4], c.data[5]};
|
||||||
|
break;
|
||||||
|
case corner::cBottomLeft:
|
||||||
|
result.data = {c.data[3], c.data[4], c.data[6], c.data[7]};
|
||||||
|
break;
|
||||||
|
case corner::cBottomRight:
|
||||||
|
result.data = {c.data[4], c.data[5], c.data[7], c.data[8]};
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief Reduce a cluster to a 3x3 cluster
|
||||||
|
* @param c Cluster to reduce
|
||||||
|
* @return reduced cluster
|
||||||
|
*/
|
||||||
|
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
|
||||||
|
typename CoordType = int16_t>
|
||||||
|
Cluster<T, 3, 3, CoordType>
|
||||||
|
reduce_to_3x3(const Cluster<T, ClusterSizeX, ClusterSizeY, CoordType> &c) {
|
||||||
|
|
||||||
|
static_assert(ClusterSizeX >= 3 && ClusterSizeY >= 3,
|
||||||
|
"Cluster sizes must be at least 3x3 for reduction to 3x3");
|
||||||
|
|
||||||
|
Cluster<T, 3, 3, CoordType> result{};
|
||||||
|
|
||||||
|
int16_t cluster_center_index =
|
||||||
|
(ClusterSizeX / 2) + (ClusterSizeY / 2) * ClusterSizeX;
|
||||||
|
|
||||||
|
result.x = c.x;
|
||||||
|
result.y = c.y;
|
||||||
|
|
||||||
|
result.data = {c.data[cluster_center_index - ClusterSizeX - 1],
|
||||||
|
c.data[cluster_center_index - ClusterSizeX],
|
||||||
|
c.data[cluster_center_index - ClusterSizeX + 1],
|
||||||
|
c.data[cluster_center_index - 1],
|
||||||
|
c.data[cluster_center_index],
|
||||||
|
c.data[cluster_center_index + 1],
|
||||||
|
c.data[cluster_center_index + ClusterSizeX - 1],
|
||||||
|
c.data[cluster_center_index + ClusterSizeX],
|
||||||
|
c.data[cluster_center_index + ClusterSizeX + 1]};
|
||||||
|
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Type Traits for is_cluster_type
|
||||||
|
template <typename T>
|
||||||
|
struct is_cluster : std::false_type {}; // Default case: Not a Cluster
|
||||||
|
|
||||||
|
template <typename T, uint8_t X, uint8_t Y, typename CoordType>
|
||||||
|
struct is_cluster<Cluster<T, X, Y, CoordType>> : std::true_type {}; // Cluster
|
||||||
|
|
||||||
|
template <typename T> constexpr bool is_cluster_v = is_cluster<T>::value;
|
||||||
|
|
||||||
|
} // namespace aare
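A sketch of max_sum_2x2() and reduce_to_2x2() on a 3x3 cluster; the coordinates and pixel values are made up for illustration.

// Usage sketch with illustrative values; not part of the diff above.
#include "aare/Cluster.hpp"
#include <fmt/format.h>

int main() {
    using namespace aare;
    Cluster<int32_t, 3, 3> cl{100, 200, {1, 2, 3, 4, 50, 60, 7, 80, 90}};

    auto [sum, idx] = cl.max_sum_2x2();  // highest 2x2 sum and its corner index
    auto small = reduce_to_2x2(cl);      // 2x2 cluster built from that corner
    fmt::print("max 2x2 sum: {}, reduced cluster sum: {}\n", sum, small.sum());
    static_assert(is_cluster_v<decltype(small)>);
}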
|
||||||
60
include/aare/ClusterCollector.hpp
Normal file
@@ -0,0 +1,60 @@
|
|||||||
|
// SPDX-License-Identifier: MPL-2.0
|
||||||
|
#pragma once
|
||||||
|
#include <atomic>
|
||||||
|
#include <thread>
|
||||||
|
|
||||||
|
#include "aare/ClusterFinderMT.hpp"
|
||||||
|
#include "aare/ClusterVector.hpp"
|
||||||
|
#include "aare/ProducerConsumerQueue.hpp"
|
||||||
|
#include "aare/defs.hpp"
|
||||||
|
|
||||||
|
namespace aare {
|
||||||
|
|
||||||
|
template <typename ClusterType,
|
||||||
|
typename = std::enable_if_t<is_cluster_v<ClusterType>>>
|
||||||
|
class ClusterCollector {
|
||||||
|
ProducerConsumerQueue<ClusterVector<ClusterType>> *m_source;
|
||||||
|
std::atomic<bool> m_stop_requested{false};
|
||||||
|
std::atomic<bool> m_stopped{true};
|
||||||
|
std::chrono::milliseconds m_default_wait{1};
|
||||||
|
std::thread m_thread;
|
||||||
|
std::vector<ClusterVector<ClusterType>> m_clusters;
|
||||||
|
|
||||||
|
void process() {
|
||||||
|
m_stopped = false;
|
||||||
|
fmt::print("ClusterCollector started\n");
|
||||||
|
while (!m_stop_requested || !m_source->isEmpty()) {
|
||||||
|
if (ClusterVector<ClusterType> *clusters = m_source->frontPtr();
|
||||||
|
clusters != nullptr) {
|
||||||
|
m_clusters.push_back(std::move(*clusters));
|
||||||
|
m_source->popFront();
|
||||||
|
} else {
|
||||||
|
std::this_thread::sleep_for(m_default_wait);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fmt::print("ClusterCollector stopped\n");
|
||||||
|
m_stopped = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
public:
|
||||||
|
ClusterCollector(ClusterFinderMT<ClusterType, uint16_t, double> *source) {
|
||||||
|
m_source = source->sink();
|
||||||
|
m_thread =
|
||||||
|
std::thread(&ClusterCollector::process,
|
||||||
|
this); // only one process does this, so why isn't it
       // automatically written to m_clusters in collect
       // instead of first writing to m_sink?
|
||||||
|
}
|
||||||
|
void stop() {
|
||||||
|
m_stop_requested = true;
|
||||||
|
m_thread.join();
|
||||||
|
}
|
||||||
|
std::vector<ClusterVector<ClusterType>> steal_clusters() {
|
||||||
|
if (!m_stopped) {
|
||||||
|
throw std::runtime_error("ClusterCollector is still running");
|
||||||
|
}
|
||||||
|
return std::move(m_clusters);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace aare
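A sketch of wiring a ClusterFinderMT (defined further down in this diff) to a ClusterCollector. The image shape, frame counts and the NDArray shape/fill constructor used here are assumptions, not taken from the diff.

// Wiring sketch under stated assumptions.
#include "aare/ClusterCollector.hpp"
#include "aare/ClusterFinderMT.hpp"
#include "aare/NDArray.hpp"

int main() {
    using namespace aare;
    using ClusterType = Cluster<int32_t, 3, 3>;

    ClusterFinderMT<ClusterType> finder({512, 1024}); // starts its threads in the constructor
    ClusterCollector<ClusterType> collector(&finder); // drains finder.sink() on its own thread

    NDArray<uint16_t, 2> frame({512, 1024}, 0); // assumed shape/fill constructor
    for (int i = 0; i < 100; ++i)
        finder.push_pedestal_frame(frame.view()); // build up the pedestal from dark frames
    finder.find_clusters(frame.view(), /*frame_number=*/1);

    finder.sync();    // wait until all queues are drained
    finder.stop();    // stop the processing threads
    collector.stop(); // stop the collector thread
    auto clusters = collector.steal_clusters();
}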
|
||||||
471
include/aare/ClusterFile.hpp
Normal file
@@ -0,0 +1,471 @@
|
|||||||
|
// SPDX-License-Identifier: MPL-2.0
|
||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include "aare/Cluster.hpp"
|
||||||
|
#include "aare/ClusterVector.hpp"
|
||||||
|
#include "aare/GainMap.hpp"
|
||||||
|
#include "aare/NDArray.hpp"
|
||||||
|
#include "aare/defs.hpp"
|
||||||
|
#include "aare/logger.hpp"
|
||||||
|
|
||||||
|
#include <filesystem>
|
||||||
|
#include <fstream>
|
||||||
|
#include <optional>
|
||||||
|
|
||||||
|
namespace aare {
|
||||||
|
|
||||||
|
/*
|
||||||
|
Binary cluster file. Expects data to be laid out as:
|
||||||
|
int32_t frame_number
|
||||||
|
uint32_t number_of_clusters
|
||||||
|
int16_t x, int16_t y, int32_t data[9] x number_of_clusters
|
||||||
|
int32_t frame_number
|
||||||
|
uint32_t number_of_clusters
|
||||||
|
....
|
||||||
|
*/
|
||||||
|
|
||||||
|
// TODO: change to support any type of clusters, e.g. header line with
|
||||||
|
// cluster_size_x, cluster_size_y,
|
||||||
|
/**
|
||||||
|
* @brief Class to read and write cluster files
|
||||||
|
* Expects data to be laid out as:
|
||||||
|
*
|
||||||
|
*
|
||||||
|
* int32_t frame_number
|
||||||
|
* uint32_t number_of_clusters
|
||||||
|
* int16_t x, int16_t y, int32_t data[9] * number_of_clusters
|
||||||
|
* int32_t frame_number
|
||||||
|
* uint32_t number_of_clusters
|
||||||
|
* etc.
|
||||||
|
*/
|
||||||
|
template <typename ClusterType,
|
||||||
|
typename Enable = std::enable_if_t<is_cluster_v<ClusterType>>>
|
||||||
|
class ClusterFile {
|
||||||
|
FILE *fp{};
|
||||||
|
const std::string m_filename{};
|
||||||
|
uint32_t m_num_left{}; /*Number of photons left in frame*/
|
||||||
|
size_t m_chunk_size{}; /*Number of clusters to read at a time*/
|
||||||
|
std::string m_mode; /*Mode to open the file in*/
|
||||||
|
std::optional<ROI> m_roi; /*Region of interest, will be applied if set*/
|
||||||
|
std::optional<NDArray<int32_t, 2>>
|
||||||
|
m_noise_map; /*Noise map to cut photons, will be applied if set*/
|
||||||
|
std::optional<InvertedGainMap> m_gain_map; /*Gain map to apply to the
|
||||||
|
clusters, will be applied if set*/
|
||||||
|
|
||||||
|
public:
|
||||||
|
/**
|
||||||
|
* @brief Construct a new Cluster File object
|
||||||
|
* @param fname path to the file
|
||||||
|
* @param chunk_size number of clusters to read at a time when iterating
|
||||||
|
* over the file
|
||||||
|
* @param mode mode to open the file in. "r" for reading, "w" for writing,
|
||||||
|
* "a" for appending
|
||||||
|
* @throws std::runtime_error if the file could not be opened
|
||||||
|
*/
|
||||||
|
ClusterFile(const std::filesystem::path &fname, size_t chunk_size = 1000,
|
||||||
|
const std::string &mode = "r")
|
||||||
|
|
||||||
|
: m_filename(fname.string()), m_chunk_size(chunk_size), m_mode(mode) {
|
||||||
|
|
||||||
|
if (mode == "r") {
|
||||||
|
fp = fopen(m_filename.c_str(), "rb");
|
||||||
|
if (!fp) {
|
||||||
|
throw std::runtime_error("Could not open file for reading: " +
|
||||||
|
m_filename);
|
||||||
|
}
|
||||||
|
} else if (mode == "w") {
|
||||||
|
fp = fopen(m_filename.c_str(), "wb");
|
||||||
|
if (!fp) {
|
||||||
|
throw std::runtime_error("Could not open file for writing: " +
|
||||||
|
m_filename);
|
||||||
|
}
|
||||||
|
} else if (mode == "a") {
|
||||||
|
fp = fopen(m_filename.c_str(), "ab");
|
||||||
|
if (!fp) {
|
||||||
|
throw std::runtime_error("Could not open file for appending: " +
|
||||||
|
m_filename);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
throw std::runtime_error("Unsupported mode: " + mode);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
~ClusterFile() { close(); }
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief Read n_clusters clusters from the file discarding
|
||||||
|
* frame numbers. If EOF is reached the returned vector will
|
||||||
|
* have less than n_clusters clusters
|
||||||
|
*/
|
||||||
|
ClusterVector<ClusterType> read_clusters(size_t n_clusters) {
|
||||||
|
if (m_mode != "r") {
|
||||||
|
throw std::runtime_error("File not opened for reading");
|
||||||
|
}
|
||||||
|
if (m_noise_map || m_roi) {
|
||||||
|
return read_clusters_with_cut(n_clusters);
|
||||||
|
} else {
|
||||||
|
return read_clusters_without_cut(n_clusters);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief Read a single frame from the file and return the
|
||||||
|
* clusters. The cluster vector will have the frame number
|
||||||
|
* set.
|
||||||
|
* @throws std::runtime_error if the file is not opened for
|
||||||
|
* reading or the file pointer not at the beginning of a
|
||||||
|
* frame
|
||||||
|
*/
|
||||||
|
ClusterVector<ClusterType> read_frame() {
|
||||||
|
if (m_mode != "r") {
|
||||||
|
throw std::runtime_error(LOCATION + "File not opened for reading");
|
||||||
|
}
|
||||||
|
if (m_noise_map || m_roi) {
|
||||||
|
return read_frame_with_cut();
|
||||||
|
} else {
|
||||||
|
return read_frame_without_cut();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void write_frame(const ClusterVector<ClusterType> &clusters) {
|
||||||
|
if (m_mode != "w" && m_mode != "a") {
|
||||||
|
throw std::runtime_error("File not opened for writing");
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t frame_number = clusters.frame_number();
|
||||||
|
fwrite(&frame_number, sizeof(frame_number), 1, fp);
|
||||||
|
uint32_t n_clusters = clusters.size();
|
||||||
|
fwrite(&n_clusters, sizeof(n_clusters), 1, fp);
|
||||||
|
fwrite(clusters.data(), clusters.item_size(), clusters.size(), fp);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief Return the chunk size
|
||||||
|
*/
|
||||||
|
size_t chunk_size() const { return m_chunk_size; }
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief Set the region of interest to use when reading
|
||||||
|
* clusters. If set only clusters within the ROI will be
|
||||||
|
* read.
|
||||||
|
*/
|
||||||
|
void set_roi(ROI roi) { m_roi = roi; }
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief Set the noise map to use when reading clusters. If
|
||||||
|
* set clusters below the noise level will be discarded.
|
||||||
|
* Selection criteria one of: Central pixel above noise,
|
||||||
|
* highest 2x2 sum above 2 * noise, total sum above 3 *
|
||||||
|
* noise.
|
||||||
|
*/
|
||||||
|
void set_noise_map(const NDView<int32_t, 2> noise_map) {
|
||||||
|
m_noise_map = NDArray<int32_t, 2>(noise_map);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief Set the gain map to use when reading clusters. If set the gain map
|
||||||
|
* will be applied to the clusters that pass ROI and noise_map selection.
|
||||||
|
* The gain map is expected to be in ADU/energy.
|
||||||
|
*/
|
||||||
|
void set_gain_map(const NDView<double, 2> gain_map) {
|
||||||
|
m_gain_map = InvertedGainMap(gain_map);
|
||||||
|
}
|
||||||
|
|
||||||
|
void set_gain_map(const InvertedGainMap &gain_map) {
|
||||||
|
m_gain_map = gain_map;
|
||||||
|
}
|
||||||
|
|
||||||
|
void set_gain_map(const InvertedGainMap &&gain_map) {
|
||||||
|
m_gain_map = gain_map;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief Close the file. If not closed the file will be
|
||||||
|
* closed in the destructor
|
||||||
|
*/
|
||||||
|
void close() {
|
||||||
|
if (fp) {
|
||||||
|
fclose(fp);
|
||||||
|
fp = nullptr;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief Return the current position in the file (bytes)
|
||||||
|
*/
|
||||||
|
int64_t tell() {
|
||||||
|
if (!fp) {
|
||||||
|
throw std::runtime_error(LOCATION + "File not opened");
|
||||||
|
}
|
||||||
|
return ftell(fp);
|
||||||
|
}
|
||||||
|
|
||||||
|
/** @brief Open the file in specific mode
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
void open(const std::string &mode) {
|
||||||
|
if (fp) {
|
||||||
|
close();
|
||||||
|
}
|
||||||
|
|
||||||
|
if (mode == "r") {
|
||||||
|
fp = fopen(m_filename.c_str(), "rb");
|
||||||
|
if (!fp) {
|
||||||
|
throw std::runtime_error("Could not open file for reading: " +
|
||||||
|
m_filename);
|
||||||
|
}
|
||||||
|
m_mode = "r";
|
||||||
|
} else if (mode == "w") {
|
||||||
|
fp = fopen(m_filename.c_str(), "wb");
|
||||||
|
if (!fp) {
|
||||||
|
throw std::runtime_error("Could not open file for writing: " +
|
||||||
|
m_filename);
|
||||||
|
}
|
||||||
|
m_mode = "w";
|
||||||
|
} else if (mode == "a") {
|
||||||
|
fp = fopen(m_filename.c_str(), "ab");
|
||||||
|
if (!fp) {
|
||||||
|
throw std::runtime_error("Could not open file for appending: " +
|
||||||
|
m_filename);
|
||||||
|
}
|
||||||
|
m_mode = "a";
|
||||||
|
} else {
|
||||||
|
throw std::runtime_error("Unsupported mode: " + mode);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
ClusterVector<ClusterType> read_clusters_with_cut(size_t n_clusters);
|
||||||
|
ClusterVector<ClusterType> read_clusters_without_cut(size_t n_clusters);
|
||||||
|
ClusterVector<ClusterType> read_frame_with_cut();
|
||||||
|
ClusterVector<ClusterType> read_frame_without_cut();
|
||||||
|
bool is_selected(ClusterType &cl);
|
||||||
|
ClusterType read_one_cluster();
|
||||||
|
};
|
||||||
|
|
||||||
|
template <typename ClusterType, typename Enable>
|
||||||
|
ClusterVector<ClusterType>
|
||||||
|
ClusterFile<ClusterType, Enable>::read_clusters_without_cut(size_t n_clusters) {
|
||||||
|
if (m_mode != "r") {
|
||||||
|
throw std::runtime_error("File not opened for reading");
|
||||||
|
}
|
||||||
|
|
||||||
|
ClusterVector<ClusterType> clusters(n_clusters);
|
||||||
|
clusters.resize(n_clusters);
|
||||||
|
|
||||||
|
int32_t iframe = 0; // frame number needs to be 4 bytes!
|
||||||
|
size_t nph_read = 0;
|
||||||
|
uint32_t nn = m_num_left;
|
||||||
|
uint32_t nph = m_num_left; // number of clusters in frame, needs to be 4 bytes
|
||||||
|
|
||||||
|
auto buf = clusters.data();
|
||||||
|
// if there are photons left from previous frame read them first
|
||||||
|
if (nph) {
|
||||||
|
if (nph > n_clusters) {
|
||||||
|
// if we have more photons left in the frame then photons to
|
||||||
|
// read we read directly the requested number
|
||||||
|
nn = n_clusters;
|
||||||
|
} else {
|
||||||
|
nn = nph;
|
||||||
|
}
|
||||||
|
nph_read += fread((buf + nph_read), clusters.item_size(), nn, fp);
|
||||||
|
m_num_left = nph - nn; // write back the number of photons left
|
||||||
|
}
|
||||||
|
|
||||||
|
if (nph_read < n_clusters) {
|
||||||
|
// keep on reading frames and photons until reaching n_clusters
|
||||||
|
while (fread(&iframe, sizeof(iframe), 1, fp)) {
|
||||||
|
clusters.set_frame_number(iframe);
|
||||||
|
// read number of clusters in frame
|
||||||
|
if (fread(&nph, sizeof(nph), 1, fp)) {
|
||||||
|
if (nph > (n_clusters - nph_read))
|
||||||
|
nn = n_clusters - nph_read;
|
||||||
|
else
|
||||||
|
nn = nph;
|
||||||
|
|
||||||
|
nph_read +=
|
||||||
|
fread((buf + nph_read), clusters.item_size(), nn, fp);
|
||||||
|
m_num_left = nph - nn;
|
||||||
|
}
|
||||||
|
if (nph_read >= n_clusters)
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Resize the vector to the number of clusters.
|
||||||
|
// No new allocation, only change bounds.
|
||||||
|
clusters.resize(nph_read);
|
||||||
|
if (m_gain_map)
|
||||||
|
m_gain_map->apply_gain_map(clusters);
|
||||||
|
return clusters;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename ClusterType, typename Enable>
|
||||||
|
ClusterVector<ClusterType>
|
||||||
|
ClusterFile<ClusterType, Enable>::read_clusters_with_cut(size_t n_clusters) {
|
||||||
|
ClusterVector<ClusterType> clusters;
|
||||||
|
clusters.reserve(n_clusters);
|
||||||
|
|
||||||
|
// if there are photons left from previous frame read them first
|
||||||
|
if (m_num_left) {
|
||||||
|
while (m_num_left && clusters.size() < n_clusters) {
|
||||||
|
ClusterType c = read_one_cluster();
|
||||||
|
if (is_selected(c)) {
|
||||||
|
clusters.push_back(c);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// we did not have enough clusters left in the previous frame
|
||||||
|
// keep on reading frames until reaching n_clusters
|
||||||
|
if (clusters.size() < n_clusters) {
|
||||||
|
// sanity check
|
||||||
|
if (m_num_left) {
|
||||||
|
throw std::runtime_error(
|
||||||
|
LOCATION + "Entered second loop with clusters left\n");
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t frame_number = 0; // frame number needs to be 4 bytes!
|
||||||
|
while (fread(&frame_number, sizeof(frame_number), 1, fp)) {
|
||||||
|
if (fread(&m_num_left, sizeof(m_num_left), 1, fp)) {
|
||||||
|
clusters.set_frame_number(
|
||||||
|
frame_number); // cluster vector will hold the last
|
||||||
|
// frame number
|
||||||
|
while (m_num_left && clusters.size() < n_clusters) {
|
||||||
|
ClusterType c = read_one_cluster();
|
||||||
|
if (is_selected(c)) {
|
||||||
|
clusters.push_back(c);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// we have enough clusters, break out of the outer while loop
|
||||||
|
if (clusters.size() >= n_clusters)
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (m_gain_map)
|
||||||
|
m_gain_map->apply_gain_map(clusters);
|
||||||
|
|
||||||
|
return clusters;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename ClusterType, typename Enable>
|
||||||
|
ClusterType ClusterFile<ClusterType, Enable>::read_one_cluster() {
|
||||||
|
ClusterType c;
|
||||||
|
auto rc = fread(&c, sizeof(c), 1, fp);
|
||||||
|
if (rc != 1) {
|
||||||
|
throw std::runtime_error(LOCATION + "Could not read cluster");
|
||||||
|
}
|
||||||
|
--m_num_left;
|
||||||
|
return c;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename ClusterType, typename Enable>
|
||||||
|
ClusterVector<ClusterType>
|
||||||
|
ClusterFile<ClusterType, Enable>::read_frame_without_cut() {
|
||||||
|
if (m_mode != "r") {
|
||||||
|
throw std::runtime_error(LOCATION + "File not opened for reading");
|
||||||
|
}
|
||||||
|
if (m_num_left) {
|
||||||
|
throw std::runtime_error(
|
||||||
|
LOCATION + "There are still photons left in the last frame");
|
||||||
|
}
|
||||||
|
int32_t frame_number;
|
||||||
|
if (fread(&frame_number, sizeof(frame_number), 1, fp) != 1) {
|
||||||
|
if (feof(fp))
|
||||||
|
throw std::runtime_error(LOCATION + "Unexpected end of file");
|
||||||
|
else if (ferror(fp))
|
||||||
|
throw std::runtime_error(LOCATION + "Error reading from file");
|
||||||
|
|
||||||
|
throw std::runtime_error(LOCATION + "Unexpected error (not feof or ferror) when reading frame number");
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t n_clusters; // Saved as 32bit integer in the cluster file
|
||||||
|
if (fread(&n_clusters, sizeof(n_clusters), 1, fp) != 1) {
|
||||||
|
throw std::runtime_error(LOCATION +
|
||||||
|
"Could not read number of clusters");
|
||||||
|
}
|
||||||
|
|
||||||
|
LOG(logDEBUG1) << "Reading " << n_clusters << " clusters from frame "
|
||||||
|
<< frame_number;
|
||||||
|
|
||||||
|
ClusterVector<ClusterType> clusters(n_clusters);
|
||||||
|
clusters.set_frame_number(frame_number);
|
||||||
|
clusters.resize(n_clusters);
|
||||||
|
|
||||||
|
LOG(logDEBUG1) << "clusters.item_size(): " << clusters.item_size();
|
||||||
|
|
||||||
|
if (fread(clusters.data(), clusters.item_size(), n_clusters, fp) !=
|
||||||
|
static_cast<size_t>(n_clusters)) {
|
||||||
|
throw std::runtime_error(LOCATION + "Could not read clusters");
|
||||||
|
}
|
||||||
|
|
||||||
|
if (m_gain_map)
|
||||||
|
m_gain_map->apply_gain_map(clusters);
|
||||||
|
return clusters;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename ClusterType, typename Enable>
|
||||||
|
ClusterVector<ClusterType>
|
||||||
|
ClusterFile<ClusterType, Enable>::read_frame_with_cut() {
|
||||||
|
if (m_mode != "r") {
|
||||||
|
throw std::runtime_error("File not opened for reading");
|
||||||
|
}
|
||||||
|
if (m_num_left) {
|
||||||
|
throw std::runtime_error(
|
||||||
|
"There are still photons left in the last frame");
|
||||||
|
}
|
||||||
|
int32_t frame_number;
|
||||||
|
if (fread(&frame_number, sizeof(frame_number), 1, fp) != 1) {
|
||||||
|
throw std::runtime_error("Could not read frame number");
|
||||||
|
}
|
||||||
|
|
||||||
|
if (fread(&m_num_left, sizeof(m_num_left), 1, fp) != 1) {
|
||||||
|
throw std::runtime_error("Could not read number of clusters");
|
||||||
|
}
|
||||||
|
|
||||||
|
ClusterVector<ClusterType> clusters;
|
||||||
|
clusters.reserve(m_num_left);
|
||||||
|
clusters.set_frame_number(frame_number);
|
||||||
|
while (m_num_left) {
|
||||||
|
ClusterType c = read_one_cluster();
|
||||||
|
if (is_selected(c)) {
|
||||||
|
clusters.push_back(c);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (m_gain_map)
|
||||||
|
m_gain_map->apply_gain_map(clusters);
|
||||||
|
return clusters;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename ClusterType, typename Enable>
|
||||||
|
bool ClusterFile<ClusterType, Enable>::is_selected(ClusterType &cl) {
|
||||||
|
// Should fail fast
|
||||||
|
if (m_roi) {
|
||||||
|
if (!(m_roi->contains(cl.x, cl.y))) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
size_t cluster_center_index =
|
||||||
|
(ClusterType::cluster_size_x / 2) +
|
||||||
|
(ClusterType::cluster_size_y / 2) * ClusterType::cluster_size_x;
|
||||||
|
|
||||||
|
if (m_noise_map) {
|
||||||
|
auto sum_1x1 = cl.data[cluster_center_index]; // central pixel
|
||||||
|
auto sum_2x2 = cl.max_sum_2x2().sum; // highest sum of 2x2 subclusters
|
||||||
|
auto total_sum = cl.sum(); // sum of all pixels
|
||||||
|
|
||||||
|
auto noise =
|
||||||
|
(*m_noise_map)(cl.y, cl.x); // TODO! check if this is correct
|
||||||
|
if (sum_1x1 <= noise || sum_2x2 <= 2 * noise ||
|
||||||
|
total_sum <= 3 * noise) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// we passed all checks
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace aare
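A sketch of reading clusters back with the ClusterFile class above; the file name and chunk size are illustrative.

// Read sketch; file name and counts are assumptions.
#include "aare/ClusterFile.hpp"
#include <fmt/format.h>

int main() {
    using namespace aare;
    using ClusterType = Cluster<int32_t, 3, 3>;

    ClusterFile<ClusterType> f("clusters.clust", /*chunk_size=*/1000, "r");
    auto clusters = f.read_clusters(1000); // at most 1000 clusters, fewer at EOF
    fmt::print("read {} clusters, last frame number {}\n", clusters.size(),
               clusters.frame_number());
}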
|
||||||
67
include/aare/ClusterFileSink.hpp
Normal file
@@ -0,0 +1,67 @@
|
|||||||
|
// SPDX-License-Identifier: MPL-2.0
|
||||||
|
#pragma once
|
||||||
|
#include <atomic>
|
||||||
|
#include <filesystem>
|
||||||
|
#include <thread>
|
||||||
|
|
||||||
|
#include "aare/ClusterFinderMT.hpp"
|
||||||
|
#include "aare/ClusterVector.hpp"
|
||||||
|
#include "aare/ProducerConsumerQueue.hpp"
|
||||||
|
|
||||||
|
namespace aare {
|
||||||
|
|
||||||
|
template <typename ClusterType,
|
||||||
|
typename = std::enable_if_t<is_cluster_v<ClusterType>>,
|
||||||
|
typename = std::enable_if_t<no_2x2_cluster<ClusterType>::value>>
|
||||||
|
class ClusterFileSink {
|
||||||
|
ProducerConsumerQueue<ClusterVector<ClusterType>> *m_source;
|
||||||
|
std::atomic<bool> m_stop_requested{false};
|
||||||
|
std::atomic<bool> m_stopped{true};
|
||||||
|
std::chrono::milliseconds m_default_wait{1};
|
||||||
|
std::thread m_thread;
|
||||||
|
std::ofstream m_file;
|
||||||
|
|
||||||
|
void process() {
|
||||||
|
m_stopped = false;
|
||||||
|
LOG(logDEBUG) << "ClusterFileSink started";
|
||||||
|
while (!m_stop_requested || !m_source->isEmpty()) {
|
||||||
|
if (ClusterVector<ClusterType> *clusters = m_source->frontPtr();
|
||||||
|
clusters != nullptr) {
|
||||||
|
// Write clusters to file
|
||||||
|
int32_t frame_number =
|
||||||
|
clusters->frame_number(); // TODO! Should we store frame
|
||||||
|
// number already as int?
|
||||||
|
uint32_t num_clusters = clusters->size();
|
||||||
|
m_file.write(reinterpret_cast<const char *>(&frame_number),
|
||||||
|
sizeof(frame_number));
|
||||||
|
m_file.write(reinterpret_cast<const char *>(&num_clusters),
|
||||||
|
sizeof(num_clusters));
|
||||||
|
m_file.write(reinterpret_cast<const char *>(clusters->data()),
|
||||||
|
clusters->size() * clusters->item_size());
|
||||||
|
m_source->popFront();
|
||||||
|
} else {
|
||||||
|
std::this_thread::sleep_for(m_default_wait);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
LOG(logDEBUG) << "ClusterFileSink stopped";
|
||||||
|
m_stopped = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
public:
|
||||||
|
ClusterFileSink(ClusterFinderMT<ClusterType, uint16_t, double> *source,
|
||||||
|
const std::filesystem::path &fname) {
|
||||||
|
LOG(logDEBUG) << "ClusterFileSink: "
|
||||||
|
<< "source: " << source->sink()
|
||||||
|
<< ", file: " << fname.string();
|
||||||
|
m_source = source->sink();
|
||||||
|
m_thread = std::thread(&ClusterFileSink::process, this);
|
||||||
|
m_file.open(fname, std::ios::binary);
|
||||||
|
}
|
||||||
|
void stop() {
|
||||||
|
m_stop_requested = true;
|
||||||
|
m_thread.join();
|
||||||
|
m_file.close();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace aare
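A sketch of streaming found clusters straight to disk with the ClusterFileSink above; the output file name, image shape and the NDArray constructor are assumptions.

// Streaming sketch under stated assumptions.
#include "aare/ClusterFileSink.hpp"
#include "aare/ClusterFinderMT.hpp"

int main() {
    using namespace aare;
    using ClusterType = Cluster<int32_t, 3, 3>;

    ClusterFinderMT<ClusterType> finder({512, 1024});
    ClusterFileSink<ClusterType> sink(&finder, "clusters.clust"); // writes finder.sink() to file

    NDArray<uint16_t, 2> frame({512, 1024}, 0); // assumed shape/fill constructor
    finder.find_clusters(frame.view());

    finder.sync();
    finder.stop();
    sink.stop(); // joins the writer thread and closes the file
}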
|
||||||
170
include/aare/ClusterFinder.hpp
Normal file
@@ -0,0 +1,170 @@
|
|||||||
|
// SPDX-License-Identifier: MPL-2.0
|
||||||
|
#pragma once
|
||||||
|
#include "aare/ClusterFile.hpp"
|
||||||
|
#include "aare/ClusterVector.hpp"
|
||||||
|
#include "aare/Dtype.hpp"
|
||||||
|
#include "aare/NDArray.hpp"
|
||||||
|
#include "aare/NDView.hpp"
|
||||||
|
#include "aare/Pedestal.hpp"
|
||||||
|
#include "aare/defs.hpp"
|
||||||
|
#include <cstddef>
|
||||||
|
|
||||||
|
namespace aare {
|
||||||
|
|
||||||
|
template <typename ClusterType,
|
||||||
|
typename = std::enable_if_t<is_cluster_v<ClusterType>>>
|
||||||
|
struct no_2x2_cluster {
|
||||||
|
constexpr static bool value =
|
||||||
|
ClusterType::cluster_size_x > 2 && ClusterType::cluster_size_y > 2;
|
||||||
|
};
|
||||||
|
|
||||||
|
template <typename ClusterType = Cluster<int32_t, 3, 3>,
|
||||||
|
typename FRAME_TYPE = uint16_t, typename PEDESTAL_TYPE = double,
|
||||||
|
typename = std::enable_if_t<no_2x2_cluster<ClusterType>::value>>
|
||||||
|
class ClusterFinder {
|
||||||
|
Shape<2> m_image_size;
|
||||||
|
const PEDESTAL_TYPE m_nSigma;
|
||||||
|
const PEDESTAL_TYPE c2;
|
||||||
|
const PEDESTAL_TYPE c3;
|
||||||
|
Pedestal<PEDESTAL_TYPE> m_pedestal;
|
||||||
|
ClusterVector<ClusterType> m_clusters;
|
||||||
|
|
||||||
|
static const uint8_t ClusterSizeX = ClusterType::cluster_size_x;
|
||||||
|
static const uint8_t ClusterSizeY = ClusterType::cluster_size_y;
|
||||||
|
using CT = typename ClusterType::value_type;
|
||||||
|
|
||||||
|
public:
|
||||||
|
/**
|
||||||
|
* @brief Construct a new ClusterFinder object
|
||||||
|
* @param image_size size of the image
|
||||||
|
* @param cluster_size size of the cluster (x, y)
|
||||||
|
* @param nSigma number of sigma above the pedestal to consider a photon
|
||||||
|
* @param capacity initial capacity of the cluster vector
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
ClusterFinder(Shape<2> image_size, PEDESTAL_TYPE nSigma = 5.0,
|
||||||
|
size_t capacity = 1000000)
|
||||||
|
: m_image_size(image_size), m_nSigma(nSigma),
|
||||||
|
c2(sqrt((ClusterSizeY + 1) / 2 * (ClusterSizeX + 1) / 2)),
|
||||||
|
c3(sqrt(ClusterSizeX * ClusterSizeY)),
|
||||||
|
m_pedestal(image_size[0], image_size[1]), m_clusters(capacity) {
|
||||||
|
LOG(logDEBUG) << "ClusterFinder: "
|
||||||
|
<< "image_size: " << image_size[0] << "x" << image_size[1]
|
||||||
|
<< ", nSigma: " << nSigma << ", capacity: " << capacity;
|
||||||
|
}
|
||||||
|
|
||||||
|
void push_pedestal_frame(NDView<FRAME_TYPE, 2> frame) {
|
||||||
|
m_pedestal.push(frame);
|
||||||
|
}
|
||||||
|
|
||||||
|
NDArray<PEDESTAL_TYPE, 2> pedestal() { return m_pedestal.mean(); }
|
||||||
|
NDArray<PEDESTAL_TYPE, 2> noise() { return m_pedestal.std(); }
|
||||||
|
void clear_pedestal() { m_pedestal.clear(); }
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief Move the clusters from the ClusterVector in the ClusterFinder to a
|
||||||
|
* new ClusterVector and return it.
|
||||||
|
* @param realloc_same_capacity if true the new ClusterVector will have the
|
||||||
|
* same capacity as the old one
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
ClusterVector<ClusterType>
|
||||||
|
steal_clusters(bool realloc_same_capacity = false) {
|
||||||
|
ClusterVector<ClusterType> tmp = std::move(m_clusters);
|
||||||
|
if (realloc_same_capacity)
|
||||||
|
m_clusters = ClusterVector<ClusterType>(tmp.capacity());
|
||||||
|
else
|
||||||
|
m_clusters = ClusterVector<ClusterType>{};
|
||||||
|
return tmp;
|
||||||
|
}
|
||||||
|
void find_clusters(NDView<FRAME_TYPE, 2> frame, uint64_t frame_number = 0) {
|
||||||
|
// // TODO! deal with even size clusters
|
||||||
|
// // currently 3,3 -> +/- 1
|
||||||
|
// // 4,4 -> +/- 2
|
||||||
|
int dy = ClusterSizeY / 2;
|
||||||
|
int dx = ClusterSizeX / 2;
|
||||||
|
int has_center_pixel_x =
|
||||||
|
ClusterSizeX %
|
||||||
|
2; // for even sized clusters there is no proper cluster center and
   // an even amount of pixels around the center
|
||||||
|
int has_center_pixel_y = ClusterSizeY % 2;
|
||||||
|
|
||||||
|
m_clusters.set_frame_number(frame_number);
|
||||||
|
for (int iy = 0; iy < frame.shape(0); iy++) {
|
||||||
|
for (int ix = 0; ix < frame.shape(1); ix++) {
|
||||||
|
|
||||||
|
PEDESTAL_TYPE max = std::numeric_limits<FRAME_TYPE>::min();
|
||||||
|
PEDESTAL_TYPE total = 0;
|
||||||
|
|
||||||
|
// What can we short circuit here?
|
||||||
|
PEDESTAL_TYPE rms = m_pedestal.std(iy, ix);
|
||||||
|
PEDESTAL_TYPE value = (frame(iy, ix) - m_pedestal.mean(iy, ix));
|
||||||
|
|
||||||
|
if (value < -m_nSigma * rms)
|
||||||
|
continue; // NEGATIVE_PEDESTAL go to next pixel
|
||||||
|
// TODO! No pedestal update???
|
||||||
|
|
||||||
|
for (int ir = -dy; ir < dy + has_center_pixel_y; ir++) {
|
||||||
|
for (int ic = -dx; ic < dx + has_center_pixel_x; ic++) {
|
||||||
|
if (ix + ic >= 0 && ix + ic < frame.shape(1) &&
|
||||||
|
iy + ir >= 0 && iy + ir < frame.shape(0)) {
|
||||||
|
PEDESTAL_TYPE val =
|
||||||
|
frame(iy + ir, ix + ic) -
|
||||||
|
m_pedestal.mean(iy + ir, ix + ic);
|
||||||
|
|
||||||
|
total += val;
|
||||||
|
max = std::max(max, val);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if ((max > m_nSigma * rms)) {
|
||||||
|
if (value < max)
|
||||||
|
continue; // Not max go to the next pixel
|
||||||
|
// but also no pedestal update
|
||||||
|
} else if (total > c3 * m_nSigma * rms) {
|
||||||
|
// pass
|
||||||
|
} else {
|
||||||
|
// m_pedestal.push(iy, ix, frame(iy, ix)); // Safe option
|
||||||
|
m_pedestal.push_fast(
|
||||||
|
iy, ix,
|
||||||
|
frame(iy,
|
||||||
|
ix)); // Assume we have reached n_samples in the
|
||||||
|
// pedestal, slight performance improvement
|
||||||
|
continue; // It was a pedestal value nothing to store
|
||||||
|
}
|
||||||
|
|
||||||
|
// Store cluster
|
||||||
|
if (value == max) {
|
||||||
|
ClusterType cluster{};
|
||||||
|
cluster.x = ix;
|
||||||
|
cluster.y = iy;
|
||||||
|
|
||||||
|
// Fill the cluster data since we have a photon to store
|
||||||
|
// It's worth redoing the loop since most of the time we
|
||||||
|
// don't have a photon
|
||||||
|
int i = 0;
|
||||||
|
for (int ir = -dy; ir < dy + has_center_pixel_y; ir++) {
|
||||||
|
for (int ic = -dx; ic < dx + has_center_pixel_x; ic++) {
|
||||||
|
if (ix + ic >= 0 && ix + ic < frame.shape(1) &&
|
||||||
|
iy + ir >= 0 && iy + ir < frame.shape(0)) {
|
||||||
|
CT tmp =
|
||||||
|
static_cast<CT>(frame(iy + ir, ix + ic)) -
|
||||||
|
static_cast<CT>(
|
||||||
|
m_pedestal.mean(iy + ir, ix + ic));
|
||||||
|
cluster.data[i] =
|
||||||
|
tmp; // Watch for out of bounds access
|
||||||
|
}
|
||||||
|
i++;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add the cluster to the output ClusterVector
|
||||||
|
m_clusters.push_back(cluster);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace aare
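A single-threaded sketch of the ClusterFinder above: accumulate a pedestal from dark frames, then search one data frame. Shapes and frame counts are illustrative, as is the NDArray shape/fill constructor.

// Single-threaded sketch under stated assumptions.
#include "aare/ClusterFinder.hpp"

int main() {
    using namespace aare;
    ClusterFinder<Cluster<int32_t, 3, 3>, uint16_t> finder({512, 1024}, /*nSigma=*/5.0);

    NDArray<uint16_t, 2> dark({512, 1024}, 0); // assumed shape/fill constructor
    for (int i = 0; i < 1000; ++i)
        finder.push_pedestal_frame(dark.view()); // pedestal only, no photon finding

    NDArray<uint16_t, 2> frame({512, 1024}, 0);
    finder.find_clusters(frame.view(), /*frame_number=*/7);
    auto clusters = finder.steal_clusters(); // moves the found clusters out
}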
|
||||||
286
include/aare/ClusterFinderMT.hpp
Normal file
@@ -0,0 +1,286 @@
|
|||||||
|
// SPDX-License-Identifier: MPL-2.0
|
||||||
|
#pragma once
|
||||||
|
#include <atomic>
|
||||||
|
#include <cstdint>
|
||||||
|
#include <memory>
|
||||||
|
#include <thread>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
|
#include "aare/ClusterFinder.hpp"
|
||||||
|
#include "aare/NDArray.hpp"
|
||||||
|
#include "aare/ProducerConsumerQueue.hpp"
|
||||||
|
#include "aare/logger.hpp"
|
||||||
|
|
||||||
|
namespace aare {
|
||||||
|
|
||||||
|
enum class FrameType {
|
||||||
|
DATA,
|
||||||
|
PEDESTAL,
|
||||||
|
};
|
||||||
|
|
||||||
|
struct FrameWrapper {
|
||||||
|
FrameType type;
|
||||||
|
uint64_t frame_number;
|
||||||
|
NDArray<uint16_t, 2> data;
|
||||||
|
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief ClusterFinderMT is a multi-threaded version of ClusterFinder. It uses
|
||||||
|
* a producer-consumer queue to distribute the frames to the threads. The
|
||||||
|
* clusters are collected in a single output queue.
|
||||||
|
* @tparam FRAME_TYPE type of the frame data
|
||||||
|
* @tparam PEDESTAL_TYPE type of the pedestal data
|
||||||
|
* @tparam CT type of the cluster data
|
||||||
|
*/
|
||||||
|
template <typename ClusterType = Cluster<int32_t, 3, 3>,
|
||||||
|
typename FRAME_TYPE = uint16_t, typename PEDESTAL_TYPE = double,
|
||||||
|
typename = std::enable_if_t<no_2x2_cluster<ClusterType>::value>>
|
||||||
|
class ClusterFinderMT {
|
||||||
|
|
||||||
|
protected:
|
||||||
|
using CT = typename ClusterType::value_type;
|
||||||
|
size_t m_current_thread{0};
|
||||||
|
size_t m_n_threads{0};
|
||||||
|
using Finder = ClusterFinder<ClusterType, FRAME_TYPE, PEDESTAL_TYPE>;
|
||||||
|
using InputQueue = ProducerConsumerQueue<FrameWrapper>;
|
||||||
|
using OutputQueue = ProducerConsumerQueue<ClusterVector<ClusterType>>;
|
||||||
|
std::vector<std::unique_ptr<InputQueue>> m_input_queues;
|
||||||
|
std::vector<std::unique_ptr<OutputQueue>> m_output_queues;
|
||||||
|
|
||||||
|
OutputQueue m_sink{1000}; // All clusters go into this queue
|
||||||
|
|
||||||
|
std::vector<std::unique_ptr<Finder>> m_cluster_finders;
|
||||||
|
std::vector<std::thread> m_threads;
|
||||||
|
std::thread m_collect_thread;
|
||||||
|
std::chrono::milliseconds m_default_wait{1};
|
||||||
|
|
||||||
|
private:
|
||||||
|
std::atomic<bool> m_stop_requested{false};
|
||||||
|
std::atomic<bool> m_processing_threads_stopped{true};
|
||||||
|
|
||||||
|
/**
|
||||||
|
     * @brief Function called by the processing threads. It reads the frames
     * from the input queue and processes them.
     */
    void process(int thread_id) {
        auto cf = m_cluster_finders[thread_id].get();
        auto q = m_input_queues[thread_id].get();
        bool realloc_same_capacity = true;

        while (!m_stop_requested || !q->isEmpty()) {
            if (FrameWrapper *frame = q->frontPtr(); frame != nullptr) {

                switch (frame->type) {
                case FrameType::DATA:
                    cf->find_clusters(frame->data.view(), frame->frame_number);
                    m_output_queues[thread_id]->write(
                        cf->steal_clusters(realloc_same_capacity));
                    break;

                case FrameType::PEDESTAL:
                    m_cluster_finders[thread_id]->push_pedestal_frame(
                        frame->data.view());
                    break;
                }

                // frame is processed now discard it
                m_input_queues[thread_id]->popFront();
            } else {
                std::this_thread::sleep_for(m_default_wait);
            }
        }
    }

    /**
     * @brief Collect all the clusters from the output queues and write them to
     * the sink
     */
    void collect() {
        bool empty = true;
        while (!m_stop_requested || !empty || !m_processing_threads_stopped) {
            empty = true;
            for (auto &queue : m_output_queues) {
                if (!queue->isEmpty()) {

                    while (!m_sink.write(std::move(*queue->frontPtr()))) {
                        std::this_thread::sleep_for(m_default_wait);
                    }
                    queue->popFront();
                    empty = false;
                }
            }
        }
    }

  public:
    /**
     * @brief Construct a new ClusterFinderMT object
     * @param image_size size of the image
     * @param nSigma number of sigma above the pedestal to consider a photon
     * @param capacity initial capacity of the cluster vector. Should match the
     * expected number of clusters per frame.
     * @param n_threads number of threads to use
     */
    ClusterFinderMT(Shape<2> image_size, PEDESTAL_TYPE nSigma = 5.0,
                    size_t capacity = 2000, size_t n_threads = 3)
        : m_n_threads(n_threads) {

        LOG(logDEBUG1) << "ClusterFinderMT: "
                       << "image_size: " << image_size[0] << "x"
                       << image_size[1] << ", nSigma: " << nSigma
                       << ", capacity: " << capacity
                       << ", n_threads: " << n_threads;

        for (size_t i = 0; i < n_threads; i++) {
            m_cluster_finders.push_back(
                std::make_unique<
                    ClusterFinder<ClusterType, FRAME_TYPE, PEDESTAL_TYPE>>(
                    image_size, nSigma, capacity));
        }
        for (size_t i = 0; i < n_threads; i++) {
            m_input_queues.emplace_back(std::make_unique<InputQueue>(200));
            m_output_queues.emplace_back(std::make_unique<OutputQueue>(200));
        }
        // TODO! Should we start automatically?
        start();
    }

    /**
     * @brief Return the sink queue where all the clusters are collected
     * @warning You need to empty this queue otherwise the cluster finder will
     * wait forever
     */
    ProducerConsumerQueue<ClusterVector<ClusterType>> *sink() {
        return &m_sink;
    }

    /**
     * @brief Start all processing threads
     */
    void start() {
        m_processing_threads_stopped = false;
        m_stop_requested = false;

        for (size_t i = 0; i < m_n_threads; i++) {
            m_threads.push_back(
                std::thread(&ClusterFinderMT::process, this, i));
        }

        m_collect_thread = std::thread(&ClusterFinderMT::collect, this);
    }

    /**
     * @brief Stop all processing threads
     */
    void stop() {
        m_stop_requested = true;

        for (auto &thread : m_threads) {
            thread.join();
        }
        m_threads.clear();

        m_processing_threads_stopped = true;
        m_collect_thread.join();
    }

    /**
     * @brief Wait for all the queues to be empty. Mostly used for timing
     * tests.
     */
    void sync() {
        for (auto &q : m_input_queues) {
            while (!q->isEmpty()) {
                std::this_thread::sleep_for(m_default_wait);
            }
        }
        for (auto &q : m_output_queues) {
            while (!q->isEmpty()) {
                std::this_thread::sleep_for(m_default_wait);
            }
        }
        while (!m_sink.isEmpty()) {
            std::this_thread::sleep_for(m_default_wait);
        }
    }

    /**
     * @brief Push a pedestal frame to all the cluster finders. The frame is
     * expected to be dark. No photon finding is done, only the pedestal is
     * updated.
     */
    void push_pedestal_frame(NDView<FRAME_TYPE, 2> frame) {
        FrameWrapper fw{FrameType::PEDESTAL, 0,
                        NDArray(frame)}; // TODO! copies the data!

        for (auto &queue : m_input_queues) {
            while (!queue->write(fw)) {
                std::this_thread::sleep_for(m_default_wait);
            }
        }
    }

    /**
     * @brief Push the frame to the queue of the next available thread. The
     * function returns once the frame is in a queue.
     * @note Spin locks with a default wait if the queue is full.
     */
    void find_clusters(NDView<FRAME_TYPE, 2> frame, uint64_t frame_number = 0) {
        FrameWrapper fw{FrameType::DATA, frame_number,
                        NDArray(frame)}; // TODO! copies the data!
        while (!m_input_queues[m_current_thread % m_n_threads]->write(fw)) {
            std::this_thread::sleep_for(m_default_wait);
        }
        m_current_thread++;
    }

    void clear_pedestal() {
        if (!m_processing_threads_stopped) {
            throw std::runtime_error("ClusterFinderMT is still running");
        }
        for (auto &cf : m_cluster_finders) {
            cf->clear_pedestal();
        }
    }

    /**
     * @brief Return the pedestal currently used by the cluster finder
     * @param thread_index index of the thread
     */
    auto pedestal(size_t thread_index = 0) {
        if (m_cluster_finders.empty()) {
            throw std::runtime_error("No cluster finders available");
        }
        if (!m_processing_threads_stopped) {
            throw std::runtime_error("ClusterFinderMT is still running");
        }
        if (thread_index >= m_cluster_finders.size()) {
            throw std::runtime_error("Thread index out of range");
        }
        return m_cluster_finders[thread_index]->pedestal();
    }

    /**
     * @brief Return the noise currently used by the cluster finder
     * @param thread_index index of the thread
     */
    auto noise(size_t thread_index = 0) {
        if (m_cluster_finders.empty()) {
            throw std::runtime_error("No cluster finders available");
        }
        if (!m_processing_threads_stopped) {
            throw std::runtime_error("ClusterFinderMT is still running");
        }
        if (thread_index >= m_cluster_finders.size()) {
            throw std::runtime_error("Thread index out of range");
        }
        return m_cluster_finders[thread_index]->noise();
    }

    // void push(FrameWrapper&& frame) {
    //     // TODO! need to loop until we are successful
    //     auto rc = m_input_queue.write(std::move(frame));
    //     fmt::print("pushed frame {}\n", rc);
    // }
};

} // namespace aare
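For orientation, here is a minimal usage sketch of ClusterFinderMT. The cluster type, frame type, image shape and template arguments are assumptions made for illustration; the sink-draining pattern follows the warning on sink() above.

// Sketch only: assumes a 3x3 int32_t Cluster type and uint16_t frames.
#include "aare/ClusterFinderMT.hpp"

using MyCluster = aare::Cluster<int32_t, 3, 3, uint16_t>;

void run_example(aare::NDView<uint16_t, 2> dark_frame,
                 aare::NDView<uint16_t, 2> data_frame) {
    aare::ClusterFinderMT<MyCluster, uint16_t> finder({512, 1024}, 5.0,
                                                      2000, 3);

    finder.push_pedestal_frame(dark_frame); // dark frame -> pedestal update
    finder.find_clusters(data_frame, 1);    // queued to one of the threads

    finder.stop(); // join processing and collect threads

    // The sink must be drained, otherwise the collect thread blocks.
    auto *sink = finder.sink();
    while (!sink->isEmpty()) {
        aare::ClusterVector<MyCluster> clusters = std::move(*sink->frontPtr());
        sink->popFront();
        // ... analyse clusters ...
    }
}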
213 include/aare/ClusterVector.hpp Normal file
@@ -0,0 +1,213 @@
// SPDX-License-Identifier: MPL-2.0
#pragma once
#include "aare/Cluster.hpp" // TODO maybe store in separate file!
#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>
#include <numeric>
#include <vector>

#include <fmt/core.h>

#include "aare/NDView.hpp"

namespace aare {

template <typename ClusterType,
          typename = std::enable_if_t<is_cluster_v<ClusterType>>>
class ClusterVector; // Forward declaration

/**
 * @brief ClusterVector is a container for clusters of various sizes. It
 * uses a contiguous memory buffer to store the clusters. It is templated on
 * the data type and the coordinate type of the clusters.
 * @note push_back can invalidate pointers to elements in the container
 * @warning ClusterVector is currently move only to catch unintended copies,
 * but this might change since there are probably use cases where copying is
 * needed.
 * @tparam T data type of the pixels in the cluster
 * @tparam CoordType data type of the x and y coordinates of the cluster
 * (normally uint16_t)
 */
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
          typename CoordType>
class ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> {

    std::vector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>> m_data{};
    int32_t m_frame_number{0}; // TODO! Check frame number size and type

  public:
    using value_type = T;
    using ClusterType = Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>;

    /**
     * @brief Construct a new ClusterVector object
     * @param capacity initial capacity of the buffer in number of clusters
     * @param frame_number frame number of the clusters. Default is 0, which is
     * also used to indicate that the clusters come from many frames
     */
    ClusterVector(size_t capacity = 1024, uint64_t frame_number = 0)
        : m_frame_number(frame_number) {
        m_data.reserve(capacity);
    }

    // Move constructor
    ClusterVector(ClusterVector &&other) noexcept
        : m_data(std::move(other.m_data)),
          m_frame_number(other.m_frame_number) {
        other.m_data.clear();
    }

    // Move assignment operator
    ClusterVector &operator=(ClusterVector &&other) noexcept {
        if (this != &other) {
            m_data = std::move(other.m_data);
            m_frame_number = other.m_frame_number;
            other.m_data.clear();
            other.m_frame_number = 0;
        }
        return *this;
    }

    /**
     * @brief Sum the pixels in each cluster
     * @return std::vector<T> vector of sums for each cluster
     */
    std::vector<T> sum() {
        std::vector<T> sums(m_data.size());

        std::transform(
            m_data.begin(), m_data.end(), sums.begin(),
            [](const ClusterType &cluster) { return cluster.sum(); });

        return sums;
    }

    /**
     * @brief Sum the pixels in the 2x2 subcluster with the biggest pixel sum
     * in each cluster
     * @return vector of sum/index pairs, one for each cluster
     */
    std::vector<Sum_index_pair<T, corner>> sum_2x2() {
        std::vector<Sum_index_pair<T, corner>> sums_2x2(m_data.size());

        std::transform(
            m_data.begin(), m_data.end(), sums_2x2.begin(),
            [](const ClusterType &cluster) { return cluster.max_sum_2x2(); });

        return sums_2x2;
    }

    /**
     * @brief Reserve space for at least capacity clusters
     * @param capacity number of clusters to reserve space for
     * @note If capacity is less than the current capacity, the function does
     * nothing.
     */
    void reserve(size_t capacity) { m_data.reserve(capacity); }

    void resize(size_t size) { m_data.resize(size); }

    void push_back(const ClusterType &cluster) { m_data.push_back(cluster); }

    ClusterVector &operator+=(const ClusterVector &other) {
        m_data.insert(m_data.end(), other.begin(), other.end());

        return *this;
    }

    /**
     * @brief Return the number of clusters in the vector
     */
    size_t size() const { return m_data.size(); }

    /**
     * @brief Check if the vector is empty
     */
    bool empty() const { return m_data.empty(); }

    uint8_t cluster_size_x() const { return ClusterSizeX; }

    uint8_t cluster_size_y() const { return ClusterSizeY; }

    /**
     * @brief Return the capacity of the buffer in number of clusters. This is
     * the number of clusters that can be stored in the current buffer without
     * reallocation.
     */
    size_t capacity() const { return m_data.capacity(); }

    auto begin() const { return m_data.begin(); }

    auto end() const { return m_data.end(); }

    /**
     * @brief Return the size in bytes of a single cluster
     */
    size_t item_size() const {
        return sizeof(ClusterType); // 2 * sizeof(CoordType) + ClusterSizeX *
                                    // ClusterSizeY * sizeof(T);
    }

    ClusterType *data() { return m_data.data(); }
    ClusterType const *data() const { return m_data.data(); }

    /**
     * @brief Return a reference to the i-th cluster
     */
    ClusterType &operator[](size_t i) { return m_data[i]; }

    const ClusterType &operator[](size_t i) const { return m_data[i]; }

    /**
     * @brief Return the frame number of the clusters. 0 is used to indicate
     * that the clusters come from many frames
     */
    int32_t frame_number() const { return m_frame_number; }

    void set_frame_number(int32_t frame_number) {
        m_frame_number = frame_number;
    }
};

/**
 * @brief Reduce each cluster to a 2x2 cluster by selecting the 2x2 block with
 * the highest sum.
 * @param cv ClusterVector containing the clusters to reduce
 * @return ClusterVector with the reduced clusters
 * @note The cluster is filled using row-major ordering starting at the
 * top-left (thus for a max subcluster in the top-left corner the photon hit
 * is at the fourth position)
 */
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
          typename CoordType>
ClusterVector<Cluster<T, 2, 2, CoordType>> reduce_to_2x2(
    const ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>>
        &cv) {
    ClusterVector<Cluster<T, 2, 2, CoordType>> result;
    for (const auto &c : cv) {
        result.push_back(reduce_to_2x2(c));
    }
    return result;
}

/**
 * @brief Reduce each cluster to a 3x3 cluster
 * @param cv ClusterVector containing the clusters to reduce
 * @return ClusterVector with the reduced clusters
 */
template <typename T, uint8_t ClusterSizeX, uint8_t ClusterSizeY,
          typename CoordType>
ClusterVector<Cluster<T, 3, 3, CoordType>> reduce_to_3x3(
    const ClusterVector<Cluster<T, ClusterSizeX, ClusterSizeY, CoordType>>
        &cv) {
    ClusterVector<Cluster<T, 3, 3, CoordType>> result;
    for (const auto &c : cv) {
        result.push_back(reduce_to_3x3(c));
    }
    return result;
}

} // namespace aare
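A short sketch of how the ClusterVector specialization above can be used; the cluster coordinates, pixel values and aggregate initialization of Cluster are invented for illustration.

// Sketch only: fill a ClusterVector by hand and reduce it to 2x2 clusters.
#include "aare/ClusterVector.hpp"

void cluster_vector_example() {
    using C33 = aare::Cluster<int32_t, 3, 3, uint16_t>;
    aare::ClusterVector<C33> cv(/*capacity=*/128, /*frame_number=*/7);

    C33 c{10, 20, {0, 1, 2, 3, 4, 5, 6, 7, 8}}; // x, y, 3x3 pixel data
    cv.push_back(c);

    auto sums = cv.sum();           // one sum per cluster
    auto small = reduce_to_2x2(cv); // keep the brightest 2x2 block
    fmt::print("{} clusters, first sum {}\n", cv.size(), sums.front());
}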
42 include/aare/CtbRawFile.hpp Normal file
@@ -0,0 +1,42 @@
// SPDX-License-Identifier: MPL-2.0
#pragma once

#include "aare/FileInterface.hpp"
#include "aare/Frame.hpp"
#include "aare/RawMasterFile.hpp"

#include <filesystem>
#include <fstream>

namespace aare {

class CtbRawFile {
    RawMasterFile m_master;
    std::ifstream m_file;
    size_t m_current_frame{0};
    size_t m_current_subfile{0};
    size_t m_num_subfiles{0};

  public:
    CtbRawFile(const std::filesystem::path &fname);

    void read_into(std::byte *image_buf, DetectorHeader *header = nullptr);
    void seek(size_t frame_index); //!< seek to the given frame index
    size_t tell() const;           //!< get the frame index of the file pointer

    // in the specific class we can expose more functionality

    size_t image_size_in_bytes() const;
    size_t frames_in_file() const;

    RawMasterFile master() const;

  private:
    void find_subfiles();
    size_t sub_file_index(size_t frame_index) const {
        return frame_index / m_master.max_frames_per_file();
    }
    void open_data_file(size_t subfile_index);
};

} // namespace aare
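A minimal read loop, assuming the master file exists and that image_size_in_bytes() matches the buffer; the path is hypothetical.

// Sketch only: read every frame of a CTB raw file into a reusable buffer.
#include "aare/CtbRawFile.hpp"
#include <vector>

void read_ctb_example() {
    aare::CtbRawFile f("/data/run_master_0.json"); // hypothetical path
    std::vector<std::byte> buf(f.image_size_in_bytes());

    for (size_t i = 0; i < f.frames_in_file(); ++i) {
        aare::DetectorHeader header{};
        f.read_into(buf.data(), &header); // advances the current frame
        // ... decode buf ...
    }
}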
82 include/aare/DetectorGeometry.hpp Normal file
@@ -0,0 +1,82 @@
// SPDX-License-Identifier: MPL-2.0
#pragma once
#include "aare/RawMasterFile.hpp" // ROI, refactor away
#include "aare/defs.hpp"
namespace aare {

struct ModuleConfig {
    int module_gap_row{};
    int module_gap_col{};

    bool operator==(const ModuleConfig &other) const {
        if (module_gap_col != other.module_gap_col)
            return false;
        if (module_gap_row != other.module_gap_row)
            return false;
        return true;
    }
};

/**
 * @brief Class to hold the geometry of a module: where pixel 0 is located and
 * the size of the module
 */
struct ModuleGeometry {
    int origin_x{};
    int origin_y{};
    int height{};
    int width{};
    int row_index{};
    int col_index{};
};

/**
 * @brief Class to hold the geometry of a detector: the number of modules,
 * their size and where pixel 0 of each module is located
 */
class DetectorGeometry {
  public:
    DetectorGeometry(const xy &geometry, const ssize_t module_pixels_x,
                     const ssize_t module_pixels_y,
                     const xy udp_interfaces_per_module = xy{1, 1},
                     const bool quad = false);

    ~DetectorGeometry() = default;

    /**
     * @brief Update the detector geometry given a region of interest
     *
     * @param roi
     */
    void update_geometry_with_roi(ROI roi);

    size_t n_modules() const;

    size_t n_modules_in_roi() const;

    size_t pixels_x() const;
    size_t pixels_y() const;

    size_t modules_x() const;
    size_t modules_y() const;

    const std::vector<ssize_t> &get_modules_in_roi() const;

    ssize_t get_modules_in_roi(const size_t index) const;

    const std::vector<ModuleGeometry> &get_module_geometries() const;

    const ModuleGeometry &get_module_geometries(const size_t index) const;

  private:
    size_t m_modules_x{};
    size_t m_modules_y{};
    size_t m_pixels_x{};
    size_t m_pixels_y{};
    static constexpr ModuleConfig cfg{0, 0};
    std::vector<ModuleGeometry> module_geometries{};
    std::vector<ssize_t> modules_in_roi{};
};

} // namespace aare
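A small sketch of building a geometry and restricting it to an ROI; the module layout, pixel counts and the ROI field order are assumptions made for illustration.

// Sketch only: a 2x2 module layout of 1024x512 pixel modules.
#include "aare/DetectorGeometry.hpp"

void geometry_example() {
    aare::DetectorGeometry geo(aare::xy{2, 2}, 1024, 512);

    aare::ROI roi{0, 300, 0, 200}; // hypothetical region of interest
    geo.update_geometry_with_roi(roi);

    auto n = geo.n_modules_in_roi(); // modules that intersect the ROI
    auto w = geo.pixels_x();         // geometry now reflects the ROI
    (void)n;
    (void)w;
}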
107 include/aare/Dtype.hpp Normal file
@@ -0,0 +1,107 @@
// SPDX-License-Identifier: MPL-2.0
#pragma once
#include <cstdint>
#include <map>
#include <string>
#include <string_view>
#include <typeinfo>

namespace aare {

// The format descriptor is a single character that specifies the type of the
// data
// - python documentation: https://docs.python.org/3/c-api/arg.html#numbers
// - py::format_descriptor<T>::format() (in pybind11) does not return the same
//   format as written in the python.org documentation.
// - numpy also doesn't use the same format, and numpy associates the format
//   with variable bitdepth types (e.g. long is int64 on linux64 and int32 on
//   win64) https://numpy.org/doc/stable/reference/arrays.scalars.html
//
// github issue discussing this:
// https://github.com/pybind/pybind11/issues/1908#issuecomment-658358767
//
// [IN LINUX] the difference is for int64 (long) and uint64 (unsigned long).
// The format descriptor is 'q' and 'Q' respectively and in the documentation
// it is 'l' and 'k'.

// In practice numpy doesn't seem to care when reading buffer info: the library
// interprets 'q' or 'l' as int64 and 'Q' or 'L' as uint64. For this reason we
// decided to use the same format descriptor as pybind to avoid any further
// discrepancies.

// in the following order:
// int8, uint8, int16, uint16, int32, uint32, int64, uint64, float, double
const char DTYPE_FORMAT_DSC[] = {'b', 'B', 'h', 'H', 'i',
                                 'I', 'q', 'Q', 'f', 'd'};

// on linux64 & apple
const char NUMPY_FORMAT_DSC[] = {'b', 'B', 'h', 'H', 'i',
                                 'I', 'l', 'L', 'f', 'd'};
/**
 * @brief enum class to define the endianness of the system
 */
enum class endian {
#ifdef _WIN32
    little = 0,
    big = 1,
    native = little
#else
    little = __ORDER_LITTLE_ENDIAN__,
    big = __ORDER_BIG_ENDIAN__,
    native = __BYTE_ORDER__
#endif
};

/**
 * @brief class to define the data type of the pixels
 * @note only native endianness is supported
 */
class Dtype {
  public:
    enum TypeIndex {
        INT8,
        UINT8,
        INT16,
        UINT16,
        INT32,
        UINT32,
        INT64,
        UINT64,
        FLOAT,
        DOUBLE,
        ERROR,
        NONE
    };

    uint8_t bitdepth() const;
    size_t bytes() const;
    std::string format_descr() const {
        return std::string(1, DTYPE_FORMAT_DSC[static_cast<int>(m_type)]);
    }
    std::string numpy_descr() const {
        return std::string(1, NUMPY_FORMAT_DSC[static_cast<int>(m_type)]);
    }

    explicit Dtype(const std::type_info &t);
    explicit Dtype(std::string_view sv);
    static Dtype from_bitdepth(uint8_t bitdepth);

    // not explicit to allow conversions from enum to Dtype
    Dtype(Dtype::TypeIndex ti); // NOLINT

    bool operator==(const Dtype &other) const noexcept;
    bool operator!=(const Dtype &other) const noexcept;
    bool operator==(const std::type_info &t) const;
    bool operator!=(const std::type_info &t) const;

    // bool operator==(Dtype::TypeIndex ti) const;
    // bool operator!=(Dtype::TypeIndex ti) const;
    std::string to_string() const;
    void set_type(Dtype::TypeIndex ti) { m_type = ti; }

  private:
    TypeIndex m_type{TypeIndex::ERROR};
};

} // namespace aare
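A short sketch of the two descriptor flavours discussed in the comment block above; the printed values follow directly from the descriptor tables.

// Sketch only: construct a Dtype from a C++ type and inspect its descriptors.
#include "aare/Dtype.hpp"
#include <fmt/core.h>

void dtype_example() {
    aare::Dtype dt(typeid(uint32_t));
    fmt::print("bitdepth: {}\n", dt.bitdepth());               // 32
    fmt::print("pybind descriptor: {}\n", dt.format_descr());  // "I"
    fmt::print("numpy descriptor: {}\n", dt.numpy_descr());    // "I"

    auto dt64 = aare::Dtype::from_bitdepth(64);
    fmt::print("{}\n", dt64.to_string());
}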
71 include/aare/File.hpp Normal file
@@ -0,0 +1,71 @@
// SPDX-License-Identifier: MPL-2.0
#pragma once
#include "aare/FileInterface.hpp"
#include <memory>

namespace aare {

/**
 * @brief RAII File class for reading, and in the future potentially writing,
 * image files in various formats. Minimal generic interface. For special
 * functions please use the RawFile or NumpyFile classes directly. Wraps
 * FileInterface to abstract the underlying file format.
 * @note **frame_number** refers to the frame number sent by the detector while
 * **frame_index** is the position of the frame in the file
 */
class File {
    std::unique_ptr<FileInterface> file_impl;

  public:
    /**
     * @brief Construct a new File object
     * @param fname path to the file
     * @param mode file mode (r, w, a)
     * @param cfg file configuration
     * @throws std::runtime_error if the file cannot be opened
     * @throws std::invalid_argument if the file mode is not supported
     *
     */
    File(const std::filesystem::path &fname, const std::string &mode = "r",
         const FileConfig &cfg = {});

    /** Since the object is responsible for managing the file we disable copy
     * construction */
    File(File const &other) = delete;

    /** The same goes for copy assignment */
    File &operator=(File const &other) = delete;

    File(File &&other) noexcept;
    File &operator=(File &&other) noexcept;
    ~File() = default;

    // void close(); //!< close the file

    Frame
    read_frame(); //!< read one frame from the file at the current position
    Frame read_frame(size_t frame_index); //!< read one frame at the position
                                          //!< given by frame_index
    std::vector<Frame> read_n(size_t n_frames); //!< read n_frames from the file
                                                //!< at the current position

    void read_into(std::byte *image_buf);
    void read_into(std::byte *image_buf, size_t n_frames);

    size_t frame_number(); //!< get the frame number at the current position
    size_t frame_number(
        size_t frame_index); //!< get the frame number at the given frame index
    size_t bytes_per_frame() const;
    size_t pixels_per_frame() const;
    size_t bytes_per_pixel() const;
    size_t bitdepth() const;
    void seek(size_t frame_index); //!< seek to the given frame index
    size_t tell() const;           //!< get the frame index of the file pointer
    size_t total_frames() const;
    size_t rows() const;
    size_t cols() const;

    DetectorType detector_type() const;
};

} // namespace aare
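A minimal read example with the generic File wrapper; the file name and the 16-bit pixel type are hypothetical.

// Sketch only: open a file read-only and iterate over its frames.
#include "aare/File.hpp"

void file_example() {
    aare::File f("/data/run_0.npy", "r"); // hypothetical path

    for (size_t i = 0; i < f.total_frames(); ++i) {
        aare::Frame frame = f.read_frame();  // advances the file pointer
        auto view = frame.view<uint16_t>();  // assuming a 16-bit file
        // f.frame_number(i) would give the detector frame number at index i
        // ... process view ...
    }
}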
170 include/aare/FileInterface.hpp Normal file
@@ -0,0 +1,170 @@
// SPDX-License-Identifier: MPL-2.0
#pragma once
#include "aare/Dtype.hpp"
#include "aare/Frame.hpp"
#include "aare/defs.hpp"

#include <filesystem>
#include <vector>

namespace aare {

/**
 * @brief FileConfig structure to store the configuration of a file
 * dtype: data type of the file
 * rows: number of rows in the file
 * cols: number of columns in the file
 * geometry: geometry of the file
 */
struct FileConfig {
    aare::Dtype dtype{typeid(uint16_t)};
    uint64_t rows{};
    uint64_t cols{};
    bool operator==(const FileConfig &other) const {
        return dtype == other.dtype && rows == other.rows &&
               cols == other.cols && geometry == other.geometry &&
               detector_type == other.detector_type &&
               max_frames_per_file == other.max_frames_per_file;
    }
    bool operator!=(const FileConfig &other) const { return !(*this == other); }

    // rawfile specific
    std::string version{};
    xy geometry{1, 1};
    DetectorType detector_type{DetectorType::Unknown};
    int max_frames_per_file{};
    size_t total_frames{};
    // std::string to_string() const {
    //     return "{ dtype: " + dtype.to_string() +
    //            ", rows: " + std::to_string(rows) +
    //            ", cols: " + std::to_string(cols) +
    //            ", geometry: " + geometry.to_string() +
    //            ", detector_type: " + ToString(detector_type) +
    //            ", max_frames_per_file: " + std::to_string(max_frames_per_file) +
    //            ", total_frames: " + std::to_string(total_frames) + " }";
    // }
};

/**
 * @brief FileInterface class to define the interface for file operations
 * @note parent class for NumpyFile and RawFile
 * @note all functions are pure virtual and must be implemented by the derived
 * classes
 */
class FileInterface {
  public:
    /**
     * @brief read one frame from the file at the current position
     * @return Frame
     */
    virtual Frame read_frame() = 0;

    /**
     * @brief read one frame from the file at the given frame number
     * @param frame_number frame number to read
     * @return frame
     */
    virtual Frame read_frame(size_t frame_number) = 0;

    /**
     * @brief read n_frames from the file at the current position
     * @param n_frames number of frames to read
     * @return vector of frames
     */
    virtual std::vector<Frame>
    read_n(size_t n_frames) = 0; // Is this the right interface?

    /**
     * @brief read one frame from the file at the current position and store it
     * in the provided buffer
     * @param image_buf buffer to store the frame
     * @return void
     */
    virtual void read_into(std::byte *image_buf) = 0;

    /**
     * @brief read n_frames from the file at the current position and store
     * them in the provided buffer
     * @param image_buf buffer to store the frames
     * @param n_frames number of frames to read
     * @return void
     */
    virtual void read_into(std::byte *image_buf, size_t n_frames) = 0;

    /**
     * @brief get the frame number at the given frame index
     * @param frame_index index of the frame
     * @return frame number
     */
    virtual size_t frame_number(size_t frame_index) = 0;

    /**
     * @brief get the size of one frame in bytes
     * @return size of one frame
     */
    virtual size_t bytes_per_frame() = 0;

    /**
     * @brief get the number of pixels in one frame
     * @return number of pixels in one frame
     */
    virtual size_t pixels_per_frame() = 0;

    /**
     * @brief seek to the given frame number
     * @param frame_number frame number to seek to
     * @return void
     */
    virtual void seek(size_t frame_number) = 0;

    /**
     * @brief get the current position of the file pointer
     * @return current position of the file pointer
     */
    virtual size_t tell() = 0;

    /**
     * @brief get the total number of frames in the file
     * @return total number of frames in the file
     */
    virtual size_t total_frames() const = 0;
    /**
     * @brief get the number of rows in the file
     * @return number of rows in the file
     */
    virtual size_t rows() const = 0;
    /**
     * @brief get the number of columns in the file
     * @return number of columns in the file
     */
    virtual size_t cols() const = 0;
    /**
     * @brief get the bitdepth of the file
     * @return bitdepth of the file
     */
    virtual size_t bitdepth() const = 0;

    virtual DetectorType detector_type() const = 0;

    // function to query the data type of the file
    /*virtual DataType dtype = 0; */

    virtual ~FileInterface() = default;

  protected:
    std::string m_mode{};
    // std::filesystem::path m_fname{};
    // std::filesystem::path m_base_path{};
    // std::string m_base_name{}, m_ext{};
    // int m_findex{};
    // size_t m_total_frames{};
    // size_t max_frames_per_file{};
    // std::string version{};
    // DetectorType m_type{DetectorType::Unknown};
    // size_t m_rows{};
    // size_t m_cols{};
    // size_t m_bitdepth{};
    // size_t current_frame{};
};

} // namespace aare
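Since all member functions are pure virtual, client code can be written against the interface itself rather than a concrete file type; a minimal sketch:

// Sketch only: works for RawFile, NumpyFile or any future implementation.
#include "aare/FileInterface.hpp"

size_t total_bytes(aare::FileInterface &f) {
    // Total buffer size needed to hold every frame of the file.
    return f.total_frames() * f.bytes_per_frame();
}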
31 include/aare/FilePtr.hpp Normal file
@@ -0,0 +1,31 @@
// SPDX-License-Identifier: MPL-2.0
#pragma once
#include <cstdio>
#include <filesystem>
#include <stdexcept>
#include <string>

namespace aare {

/**
 * \brief RAII wrapper for a FILE pointer
 */
class FilePtr {
    FILE *fp_{nullptr};

  public:
    FilePtr() = default;
    FilePtr(const std::filesystem::path &fname, const std::string &mode);
    FilePtr(const FilePtr &) = delete;            // we don't want a copy
    FilePtr &operator=(const FilePtr &) = delete; // since we handle a resource
    FilePtr(FilePtr &&other);
    FilePtr &operator=(FilePtr &&other);
    FILE *get();
    ssize_t tell();
    void seek(ssize_t offset, int whence = SEEK_SET) {
        if (fseek(fp_, offset, whence) != 0)
            throw std::runtime_error("Error seeking in file");
    }
    std::string error_msg();
    ~FilePtr();
};

} // namespace aare
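A short sketch of the RAII wrapper; the path and header size are hypothetical, and the FILE is released automatically when the wrapper goes out of scope.

// Sketch only: open a raw data file and read a fixed-size block.
#include "aare/FilePtr.hpp"
#include <array>

void fileptr_example() {
    aare::FilePtr fp("/data/frame_d0_f0_0.raw", "rb"); // hypothetical path
    fp.seek(0);                                        // SEEK_SET by default

    std::array<std::byte, 112> header{}; // assumed header size
    if (fread(header.data(), 1, header.size(), fp.get()) != header.size())
        throw std::runtime_error(fp.error_msg());
} // FILE* closed here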
121 include/aare/Fit.hpp Normal file
@@ -0,0 +1,121 @@
// SPDX-License-Identifier: MPL-2.0
#pragma once

#include <array>
#include <cmath>
#include <fmt/core.h>
#include <vector>

#include "aare/NDArray.hpp"

namespace aare {

namespace func {
double gaus(const double x, const double *par);
NDArray<double, 1> gaus(NDView<double, 1> x, NDView<double, 1> par);

double pol1(const double x, const double *par);
NDArray<double, 1> pol1(NDView<double, 1> x, NDView<double, 1> par);

double scurve(const double x, const double *par);
NDArray<double, 1> scurve(NDView<double, 1> x, NDView<double, 1> par);

double scurve2(const double x, const double *par);
NDArray<double, 1> scurve2(NDView<double, 1> x, NDView<double, 1> par);

} // namespace func

/**
 * @brief Estimate the initial parameters for a Gaussian fit
 */
std::array<double, 3> gaus_init_par(const NDView<double, 1> x,
                                    const NDView<double, 1> y);

std::array<double, 2> pol1_init_par(const NDView<double, 1> x,
                                    const NDView<double, 1> y);

std::array<double, 6> scurve_init_par(const NDView<double, 1> x,
                                      const NDView<double, 1> y);
std::array<double, 6> scurve2_init_par(const NDView<double, 1> x,
                                       const NDView<double, 1> y);

static constexpr int DEFAULT_NUM_THREADS = 4;

/**
 * @brief Fit a 1D Gaussian to data.
 * @param x x values
 * @param y y values
 */
NDArray<double, 1> fit_gaus(NDView<double, 1> x, NDView<double, 1> y);

/**
 * @brief Fit a 1D Gaussian to each pixel. Data layout [row, col, values]
 * @param x x values
 * @param y y values, layout [row, col, values]
 * @param n_threads number of threads to use
 */

NDArray<double, 3> fit_gaus(NDView<double, 1> x, NDView<double, 3> y,
                            int n_threads = DEFAULT_NUM_THREADS);

/**
 * @brief Fit a 1D Gaussian with error estimates
 * @param x x values
 * @param y y values, layout [row, col, values]
 * @param y_err error in y, layout [row, col, values]
 * @param par_out output parameters
 * @param par_err_out output error parameters
 */
void fit_gaus(NDView<double, 1> x, NDView<double, 1> y, NDView<double, 1> y_err,
              NDView<double, 1> par_out, NDView<double, 1> par_err_out,
              double &chi2);

/**
 * @brief Fit a 1D Gaussian to each pixel with error estimates. Data layout
 * [row, col, values]
 * @param x x values
 * @param y y values, layout [row, col, values]
 * @param y_err error in y, layout [row, col, values]
 * @param par_out output parameters, layout [row, col, values]
 * @param par_err_out output parameter errors, layout [row, col, values]
 * @param n_threads number of threads to use
 */
void fit_gaus(NDView<double, 1> x, NDView<double, 3> y, NDView<double, 3> y_err,
              NDView<double, 3> par_out, NDView<double, 3> par_err_out,
              NDView<double, 2> chi2_out, int n_threads = DEFAULT_NUM_THREADS);

NDArray<double, 1> fit_pol1(NDView<double, 1> x, NDView<double, 1> y);

NDArray<double, 3> fit_pol1(NDView<double, 1> x, NDView<double, 3> y,
                            int n_threads = DEFAULT_NUM_THREADS);

void fit_pol1(NDView<double, 1> x, NDView<double, 1> y, NDView<double, 1> y_err,
              NDView<double, 1> par_out, NDView<double, 1> par_err_out,
              double &chi2);

// TODO! not sure we need to offer the different version in C++
void fit_pol1(NDView<double, 1> x, NDView<double, 3> y, NDView<double, 3> y_err,
              NDView<double, 3> par_out, NDView<double, 3> par_err_out,
              NDView<double, 2> chi2_out, int n_threads = DEFAULT_NUM_THREADS);

NDArray<double, 1> fit_scurve(NDView<double, 1> x, NDView<double, 1> y);
NDArray<double, 3> fit_scurve(NDView<double, 1> x, NDView<double, 3> y,
                              int n_threads);
void fit_scurve(NDView<double, 1> x, NDView<double, 1> y,
                NDView<double, 1> y_err, NDView<double, 1> par_out,
                NDView<double, 1> par_err_out, double &chi2);
void fit_scurve(NDView<double, 1> x, NDView<double, 3> y,
                NDView<double, 3> y_err, NDView<double, 3> par_out,
                NDView<double, 3> par_err_out, NDView<double, 2> chi2_out,
                int n_threads);

NDArray<double, 1> fit_scurve2(NDView<double, 1> x, NDView<double, 1> y);
NDArray<double, 3> fit_scurve2(NDView<double, 1> x, NDView<double, 3> y,
                               int n_threads);
void fit_scurve2(NDView<double, 1> x, NDView<double, 1> y,
                 NDView<double, 1> y_err, NDView<double, 1> par_out,
                 NDView<double, 1> par_err_out, double &chi2);
void fit_scurve2(NDView<double, 1> x, NDView<double, 3> y,
                 NDView<double, 3> y_err, NDView<double, 3> par_out,
                 NDView<double, 3> par_err_out, NDView<double, 2> chi2_out,
                 int n_threads);
} // namespace aare
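A minimal 1D fit, assuming x and y already hold a histogrammed peak; the parameter ordering of the returned array should be checked against the implementation.

// Sketch only: fit a Gaussian to a 1D spectrum held in NDViews.
#include "aare/Fit.hpp"
#include <fmt/core.h>

void fit_example(aare::NDView<double, 1> x, aare::NDView<double, 1> y) {
    auto init = aare::gaus_init_par(x, y); // rough starting values
    auto par = aare::fit_gaus(x, y);       // fitted parameters
    fmt::print("init: {} {} {}\n", init[0], init[1], init[2]);
    (void)par; // parameter order depends on the implementation
}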
124 include/aare/Frame.hpp Normal file
@@ -0,0 +1,124 @@
// SPDX-License-Identifier: MPL-2.0
#pragma once
#include "aare/Dtype.hpp"
#include "aare/NDArray.hpp"
#include "aare/defs.hpp"

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <memory>
#include <vector>

namespace aare {

/**
 * @brief Frame class to represent a single frame of data. Not much more than a
 * pointer and some info. Limited interface to accept frames from many sources.
 */
class Frame {
    uint32_t m_rows;
    uint32_t m_cols;
    Dtype m_dtype;
    std::byte *m_data;
    // TODO! Add frame number?

  public:
    /**
     * @brief Construct a new Frame
     * @param rows number of rows
     * @param cols number of columns
     * @param dtype data type of the pixels
     * @note the data is initialized to zero
     */
    Frame(uint32_t rows, uint32_t cols, Dtype dtype);

    /**
     * @brief Construct a new Frame
     * @param bytes pointer to the data to be copied into the frame
     * @param rows number of rows
     * @param cols number of columns
     * @param dtype data type of the pixels
     */
    Frame(const std::byte *bytes, uint32_t rows, uint32_t cols, Dtype dtype);
    ~Frame() { delete[] m_data; }

    /** @warning Copy is disabled to ensure performance when passing
     * frames around. Can discuss enabling it.
     *
     */
    Frame &operator=(const Frame &other) = delete;
    Frame(const Frame &other) = delete;

    // enable move
    Frame &operator=(Frame &&other) noexcept;
    Frame(Frame &&other) noexcept;

    Frame clone() const; //<- Explicit copy

    uint32_t rows() const;
    uint32_t cols() const;
    size_t bitdepth() const;
    Dtype dtype() const;
    uint64_t size() const;
    size_t bytes() const;
    std::byte *data() const;

    /**
     * @brief Get the pointer to the pixel at the given row and column
     * @param row row index
     * @param col column index
     * @return pointer to the pixel
     * @warning The user should cast the pointer to the appropriate type. Think
     * twice if this is the function you want to use.
     */
    std::byte *pixel_ptr(uint32_t row, uint32_t col) const;

    /**
     * @brief Set the pixel at the given row and column to the given value
     * @tparam T type of the value
     * @param row row index
     * @param col column index
     * @param data value to set
     */
    template <typename T> void set(uint32_t row, uint32_t col, T data) {
        assert(sizeof(T) == m_dtype.bytes());
        if (row >= m_rows || col >= m_cols) {
            throw std::out_of_range("Invalid row or column index");
        }
        std::memcpy(m_data + (row * m_cols + col) * m_dtype.bytes(), &data,
                    m_dtype.bytes());
    }
    template <typename T> T get(uint32_t row, uint32_t col) {
        assert(sizeof(T) == m_dtype.bytes());
        if (row >= m_rows || col >= m_cols) {
            throw std::out_of_range("Invalid row or column index");
        }
        // TODO! add tests then reimplement using pixel_ptr
        T data;
        std::memcpy(&data, m_data + (row * m_cols + col) * m_dtype.bytes(),
                    m_dtype.bytes());
        return data;
    }
    /**
     * @brief Return an NDView of the frame. This is the preferred way to
     * access data in the frame.
     *
     * @tparam T type of the pixels
     * @return NDView<T, 2>
     */
    template <typename T> NDView<T, 2> view() & {
        std::array<ssize_t, 2> shape = {static_cast<ssize_t>(m_rows),
                                        static_cast<ssize_t>(m_cols)};
        T *data = reinterpret_cast<T *>(m_data);
        return NDView<T, 2>(data, shape);
    }

    /**
     * @brief Copy the frame data into a new NDArray. This is a deep copy.
     */
    template <typename T> NDArray<T> image() {
        return NDArray<T>(this->view<T>());
    }
};

} // namespace aare
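A short sketch of the typed access paths; the frame size is arbitrary and the template type must match the Dtype used to construct the frame.

// Sketch only: build a small uint16_t frame and access it three ways.
#include "aare/Frame.hpp"

void frame_example() {
    aare::Frame f(256, 256, aare::Dtype(typeid(uint16_t)));

    f.set<uint16_t>(10, 20, 999);         // bounds-checked write
    uint16_t v = f.get<uint16_t>(10, 20); // bounds-checked read

    auto view = f.view<uint16_t>(); // zero-copy NDView
    auto img = f.image<uint16_t>(); // deep copy into an NDArray
    (void)v;
    (void)view;
    (void)img;
}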
69 include/aare/GainMap.hpp Normal file
@@ -0,0 +1,69 @@
// SPDX-License-Identifier: MPL-2.0
/************************************************
 * @file GainMap.hpp
 * @short function to apply a gain map of image size to a vector of clusters -
 * note: the stored gain map is inverted for efficient application to images
 ***********************************************/

#pragma once
#include "aare/Cluster.hpp"
#include "aare/ClusterVector.hpp"
#include "aare/NDArray.hpp"
#include "aare/NDView.hpp"
#include <memory>

namespace aare {

class InvertedGainMap {

  public:
    explicit InvertedGainMap(const NDArray<double, 2> &gain_map)
        : m_gain_map(gain_map) {
        for (auto &item : m_gain_map) {
            item = 1.0 / item;
        }
    }

    explicit InvertedGainMap(const NDView<double, 2> gain_map) {
        m_gain_map = NDArray<double, 2>(gain_map);
        for (auto &item : m_gain_map) {
            item = 1.0 / item;
        }
    }

    template <typename ClusterType,
              typename = std::enable_if_t<is_cluster_v<ClusterType>>>
    void apply_gain_map(ClusterVector<ClusterType> &clustervec) {
        // in principle we need to know the size of the image for this lookup
        size_t ClusterSizeX = clustervec.cluster_size_x();
        size_t ClusterSizeY = clustervec.cluster_size_y();

        using T = typename ClusterVector<ClusterType>::value_type;

        int64_t index_cluster_center_x = ClusterSizeX / 2;
        int64_t index_cluster_center_y = ClusterSizeY / 2;
        for (size_t i = 0; i < clustervec.size(); i++) {
            auto &cl = clustervec[i];

            if (cl.x > 0 && cl.y > 0 && cl.x < m_gain_map.shape(1) - 1 &&
                cl.y < m_gain_map.shape(0) - 1) {
                for (size_t j = 0; j < ClusterSizeX * ClusterSizeY; j++) {
                    size_t x = cl.x + j % ClusterSizeX - index_cluster_center_x;
                    size_t y = cl.y + j / ClusterSizeX - index_cluster_center_y;
                    cl.data[j] = static_cast<T>(
                        static_cast<double>(cl.data[j]) *
                        m_gain_map(
                            y, x)); // cast after conversion to keep precision
                }
            } else {
                // clear edge clusters
                cl.data.fill(0);
            }
        }
    }

  private:
    NDArray<double, 2> m_gain_map{};
};

} // end of namespace aare
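A sketch of applying the inverted gain map to a cluster vector; the gain values, image shape, cluster type and the NDArray fill-constructor are assumptions made for illustration.

// Sketch only: build a flat gain map and apply it to 3x3 clusters.
#include "aare/GainMap.hpp"

void gain_map_example(
    aare::ClusterVector<aare::Cluster<int32_t, 3, 3, uint16_t>> &clusters) {
    aare::NDArray<double, 2> gain({512, 1024}, 1.0); // uniform gain of 1.0
    aare::InvertedGainMap inverted(gain);            // stores 1/gain

    inverted.apply_gain_map(clusters); // edge clusters are zeroed
}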