CI/CD: Integrate pre-commit hooks and GitHub Actions workflow (#303)
Build on RHEL8 / build (push) Successful in 2m48s
Build on RHEL9 / build (push) Successful in 3m8s
Run tests using data on local RHEL8 / build (push) Successful in 3m34s
Build on local RHEL8 / build (push) Successful in 2m24s

To improve codebase quality and reduce human error, this PR introduces
the pre-commit framework. This ensures that all code adheres to project
standards before it is even committed, maintaining a consistent style
and catching common mistakes early.

Key Changes:

- Code Formatting: Automated C++ formatting using clang-format (based on
the project's .clang-format file).
- Syntax Validation: Basic checks for file integrity and syntax.
- Spell Check: Automated scanning for typos in source code and comments.
- CMake Formatting: Standardization of CMakeLists.txt and .cmake
configuration files.
- GitHub Workflow: Added a CI action that validates every Pull Request
against the pre-commit configuration to ensure compliance.

The configuration includes a [ci] block to handle automated fixes within
the PR. Currently, this is disabled. If we want the CI to automatically
commit formatting fixes back to the PR branch, this can be toggled to
true in .pre-commit-config.yaml.

```yaml
ci:
  autofix_commit_msg: "[pre-commit] auto fixes from pre-commit hooks"
  autofix_prs: false
  autoupdate_schedule: monthly
```

The last large commit with the fit functions, for example, was not
formatted according to the clang-format rules. This PR would help
avoid similar mistakes in the future.

Python formatting with `ruff` for tests and a sanitiser for `.ipynb` notebooks
can be added as well.
This commit is contained in:
2026-04-14 11:52:23 +02:00
committed by GitHub
parent 85098d2008
commit 8f8173feb6
36 changed files with 4104 additions and 3853 deletions
+12
View File
@@ -0,0 +1,12 @@
name: pre-commit
on:
pull_request:
jobs:
pre-commit:
name: pre-commit
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
- uses: pre-commit/action@v3.0.1
+186 -16
View File
@@ -1,25 +1,195 @@
install/
.cproject
.project
bin/
.settings
*.aux
*.log
*.out
*.toc
# C++ .gitignore template (https://github.com/github/gitignore/blob/main/C%2B%2B.gitignore)
# Prerequisites
*.d
# Compiled Object files
*.slo
*.lo
*.o
*.obj
# Precompiled Headers
*.gch
*.pch
# Linker files
*.ilk
# Debugger Files
*.pdb
# Compiled Dynamic libraries
*.so
.*
*.dylib
*.dll
*.so.*
# Fortran module files
*.mod
*.smod
# Compiled Static libraries
*.lai
*.la
*.a
*.lib
# Executables
*.exe
*.out
*.app
# Build directories
build/
RELEASE.txt
Build/
build-*/
# CMake generated files
CMakeFiles/
CMakeCache.txt
cmake_install.cmake
Makefile
install_manifest.txt
compile_commands.json
# Temporary files
*.tmp
*.log
*.bak
*.swp
# vcpkg
vcpkg_installed/
# debug information files
*.dwo
# test output & cache
Testing/
.cache/
ctbDict.cpp
ctbDict.h
# Python .gitignore template (https://github.com/github/gitignore/blob/main/Python.gitignore)
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[codz]
*$py.class
wheelhouse/
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
*.pyc
*/__pycache__/*
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py.cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Environments
.env
.envrc
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# Visual Studio Code
# Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
# that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
# and can be added to the global gitignore or merged into this file. However, if you prefer,
# you could uncomment the following to ignore the entire vscode folder
.vscode/
# Ruff stuff:
.ruff_cache/
# user defined
wheelhouse/
+29
View File
@@ -0,0 +1,29 @@
fail_fast: true
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v6.0.0
hooks:
- id: check-added-large-files
- id: check-yaml
exclude: conda-recipe/
- id: check-toml
- id: check-json
- id: mixed-line-ending
- repo: https://github.com/crate-ci/typos
rev: v1.45.0
hooks:
- id: typos
files: docs
args: [] # empty, to remove write-changes from the default arguments.
- repo: https://github.com/pre-commit/mirrors-clang-format
rev: v22.1.3
hooks:
- id: clang-format
name: clang-format .cpp and .hpp files
- repo: https://github.com/zultron/cmake-format-precommit
rev: v0.6.14
hooks:
- id: cmake-format
+339 -331
View File
@@ -1,11 +1,11 @@
# SPDX-License-Identifier: MPL-2.0
cmake_minimum_required(VERSION 3.15)
project(aare
DESCRIPTION "Data processing library for PSI detectors"
HOMEPAGE_URL "https://github.com/slsdetectorgroup/aare"
LANGUAGES C CXX
)
project(
aare
DESCRIPTION "Data processing library for PSI detectors"
HOMEPAGE_URL "https://github.com/slsdetectorgroup/aare"
LANGUAGES C CXX)
# Read VERSION file into project version
set(VERSION_FILE "${CMAKE_CURRENT_SOURCE_DIR}/VERSION")
@@ -13,37 +13,34 @@ file(READ "${VERSION_FILE}" VERSION_CONTENT)
string(STRIP "${VERSION_CONTENT}" PROJECT_VERSION_STRING)
set(PROJECT_VERSION ${PROJECT_VERSION_STRING})
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF)
execute_process(
COMMAND git log -1 --format=%h
WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
OUTPUT_VARIABLE GIT_HASH
OUTPUT_STRIP_TRAILING_WHITESPACE
)
COMMAND git log -1 --format=%h
WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
OUTPUT_VARIABLE GIT_HASH
OUTPUT_STRIP_TRAILING_WHITESPACE)
message(STATUS "Building from git hash: ${GIT_HASH}")
if (${CMAKE_VERSION} VERSION_GREATER "3.24")
cmake_policy(SET CMP0135 NEW) #Fetch content download timestamp
if(${CMAKE_VERSION} VERSION_GREATER "3.24")
cmake_policy(SET CMP0135 NEW) # Fetch content download timestamp
endif()
cmake_policy(SET CMP0079 NEW)
include(GNUInstallDirs)
include(FetchContent)
#Set default build type if none was specified
# Set default build type if none was specified
include(cmake/helpers.cmake)
default_build_type("Release")
set_std_fs_lib()
message(STATUS "Extra linking to fs lib:${STD_FS_LIB}")
set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake" ${CMAKE_MODULE_PATH})
# General options
option(AARE_PYTHON_BINDINGS "Build python bindings" OFF)
option(AARE_TESTS "Build tests" OFF)
@@ -53,7 +50,10 @@ option(AARE_IN_GITHUB_ACTIONS "Running in Github Actions" OFF)
option(AARE_DOCS "Build documentation" OFF)
option(AARE_VERBOSE "Verbose output" OFF)
option(AARE_CUSTOM_ASSERT "Use custom assert" OFF)
option(AARE_INSTALL_PYTHONEXT "Install the python extension in the install tree under CMAKE_INSTALL_PREFIX/aare/" OFF)
option(
AARE_INSTALL_PYTHONEXT
"Install the python extension in the install tree under CMAKE_INSTALL_PREFIX/aare/"
OFF)
option(AARE_ASAN "Enable AddressSanitizer" OFF)
# Configure which of the dependencies to use FetchContent for
@@ -65,173 +65,207 @@ option(AARE_FETCH_ZMQ "Use FetchContent to download libzmq" ON)
option(AARE_FETCH_LMFIT "Use FetchContent to download lmfit" ON)
option(AARE_FETCH_MINUIT2 "Use FetchContent to download Minuit2" ON)
#Convenience option to use system libraries only (no FetchContent)
# Convenience option to use system libraries only (no FetchContent)
option(AARE_SYSTEM_LIBRARIES "Use system libraries" OFF)
if(AARE_SYSTEM_LIBRARIES)
message(STATUS "Build using system libraries")
set(AARE_FETCH_FMT OFF CACHE BOOL "Disabled FetchContent for FMT" FORCE)
set(AARE_FETCH_PYBIND11 OFF CACHE BOOL "Disabled FetchContent for pybind11" FORCE)
set(AARE_FETCH_CATCH OFF CACHE BOOL "Disabled FetchContent for catch2" FORCE)
set(AARE_FETCH_JSON OFF CACHE BOOL "Disabled FetchContent for nlohmann::json" FORCE)
set(AARE_FETCH_ZMQ OFF CACHE BOOL "Disabled FetchContent for libzmq" FORCE)
# Still fetch lmfit and Minuit2 when setting AARE_SYSTEM_LIBRARIES
# since these are not available on conda-forge
message(STATUS "Build using system libraries")
set(AARE_FETCH_FMT
OFF
CACHE BOOL "Disabled FetchContent for FMT" FORCE)
set(AARE_FETCH_PYBIND11
OFF
CACHE BOOL "Disabled FetchContent for pybind11" FORCE)
set(AARE_FETCH_CATCH
OFF
CACHE BOOL "Disabled FetchContent for catch2" FORCE)
set(AARE_FETCH_JSON
OFF
CACHE BOOL "Disabled FetchContent for nlohmann::json" FORCE)
set(AARE_FETCH_ZMQ
OFF
CACHE BOOL "Disabled FetchContent for libzmq" FORCE)
# Still fetch lmfit and Minuit2 when setting AARE_SYSTEM_LIBRARIES since these
# are not available on conda-forge
endif()
if(AARE_BENCHMARKS)
add_subdirectory(benchmarks)
add_subdirectory(benchmarks)
endif()
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
if(AARE_FETCH_LMFIT)
#TODO! Should we fetch lmfit from the web or include a tar.gz in the repo?
set(LMFIT_PATCH_COMMAND git apply ${CMAKE_CURRENT_SOURCE_DIR}/patches/lmfit.patch)
# TODO! Should we fetch lmfit from the web or include a tar.gz in the repo?
set(LMFIT_PATCH_COMMAND git apply
${CMAKE_CURRENT_SOURCE_DIR}/patches/lmfit.patch)
# For cmake < 3.28 we can't supply EXCLUDE_FROM_ALL to FetchContent_Declare
# so we need this workaround
if (${CMAKE_VERSION} VERSION_LESS "3.28")
FetchContent_Declare(
lmfit
GIT_REPOSITORY https://jugit.fz-juelich.de/mlz/lmfit.git
GIT_TAG main
PATCH_COMMAND ${LMFIT_PATCH_COMMAND}
UPDATE_DISCONNECTED 1
)
else()
FetchContent_Declare(
lmfit
GIT_REPOSITORY https://jugit.fz-juelich.de/mlz/lmfit.git
GIT_TAG main
PATCH_COMMAND ${LMFIT_PATCH_COMMAND}
UPDATE_DISCONNECTED 1
EXCLUDE_FROM_ALL 1
)
# For cmake < 3.28 we can't supply EXCLUDE_FROM_ALL to FetchContent_Declare so
# we need this workaround
if(${CMAKE_VERSION} VERSION_LESS "3.28")
FetchContent_Declare(
lmfit
GIT_REPOSITORY https://jugit.fz-juelich.de/mlz/lmfit.git
GIT_TAG main
PATCH_COMMAND ${LMFIT_PATCH_COMMAND}
UPDATE_DISCONNECTED 1)
else()
FetchContent_Declare(
lmfit
GIT_REPOSITORY https://jugit.fz-juelich.de/mlz/lmfit.git
GIT_TAG main
PATCH_COMMAND ${LMFIT_PATCH_COMMAND}
UPDATE_DISCONNECTED 1
EXCLUDE_FROM_ALL 1)
endif()
# Disable what we don't need from lmfit
set(BUILD_TESTING
OFF
CACHE BOOL "")
set(LMFIT_CPPTEST
OFF
CACHE BOOL "")
set(LIB_MAN
OFF
CACHE BOOL "")
set(LMFIT_CPPTEST
OFF
CACHE BOOL "")
set(BUILD_SHARED_LIBS
OFF
CACHE BOOL "")
if(${CMAKE_VERSION} VERSION_LESS "3.28")
if(NOT lmfit_POPULATED)
FetchContent_Populate(lmfit)
add_subdirectory(${lmfit_SOURCE_DIR} ${lmfit_BINARY_DIR} EXCLUDE_FROM_ALL)
endif()
else()
FetchContent_MakeAvailable(lmfit)
endif()
#Disable what we don't need from lmfit
set(BUILD_TESTING OFF CACHE BOOL "")
set(LMFIT_CPPTEST OFF CACHE BOOL "")
set(LIB_MAN OFF CACHE BOOL "")
set(LMFIT_CPPTEST OFF CACHE BOOL "")
set(BUILD_SHARED_LIBS OFF CACHE BOOL "")
if (${CMAKE_VERSION} VERSION_LESS "3.28")
if(NOT lmfit_POPULATED)
FetchContent_Populate(lmfit)
add_subdirectory(${lmfit_SOURCE_DIR} ${lmfit_BINARY_DIR} EXCLUDE_FROM_ALL)
endif()
else()
FetchContent_MakeAvailable(lmfit)
endif()
set_property(TARGET lmfit PROPERTY POSITION_INDEPENDENT_CODE ON)
set_property(TARGET lmfit PROPERTY POSITION_INDEPENDENT_CODE ON)
else()
find_package(lmfit REQUIRED)
find_package(lmfit REQUIRED)
endif()
if(AARE_FETCH_MINUIT2)
FetchContent_Declare(
Minuit2
GIT_REPOSITORY https://github.com/GooFit/Minuit2.git
GIT_TAG master
)
# Disable Minuit2 extras we don't need
set(minuit2_mpi OFF CACHE BOOL "")
set(minuit2_omp OFF CACHE BOOL "")
set(BUILD_TESTING OFF CACHE BOOL "")
set(CMAKE_POLICY_VERSION_MINIMUM 3.5 CACHE STRING "Allow older CMake compat for Minuit2")
FetchContent_MakeAvailable(Minuit2)
unset(CMAKE_POLICY_VERSION_MINIMUM CACHE)
set_property(TARGET Minuit2Math PROPERTY POSITION_INDEPENDENT_CODE ON)
set_property(TARGET Minuit2 PROPERTY POSITION_INDEPENDENT_CODE ON)
FetchContent_Declare(
Minuit2
GIT_REPOSITORY https://github.com/GooFit/Minuit2.git
GIT_TAG master)
# Disable Minuit2 extras we don't need
set(minuit2_mpi
OFF
CACHE BOOL "")
set(minuit2_omp
OFF
CACHE BOOL "")
set(BUILD_TESTING
OFF
CACHE BOOL "")
set(CMAKE_POLICY_VERSION_MINIMUM
3.5
CACHE STRING "Allow older CMake compat for Minuit2")
FetchContent_MakeAvailable(Minuit2)
unset(CMAKE_POLICY_VERSION_MINIMUM CACHE)
set_property(TARGET Minuit2Math PROPERTY POSITION_INDEPENDENT_CODE ON)
set_property(TARGET Minuit2 PROPERTY POSITION_INDEPENDENT_CODE ON)
else()
find_package(Minuit2 REQUIRED)
find_package(Minuit2 REQUIRED)
endif()
if(AARE_FETCH_ZMQ)
# Fetchcontent_Declare is deprecated need to find a way to update this
# for now setting the policy to old is enough
if (${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.30")
cmake_policy(SET CMP0169 OLD)
endif()
set(ZMQ_PATCH_COMMAND git apply ${CMAKE_CURRENT_SOURCE_DIR}/patches/libzmq_cmake_version.patch)
FetchContent_Declare(
libzmq
GIT_REPOSITORY https://github.com/zeromq/libzmq.git
GIT_TAG v4.3.4
PATCH_COMMAND ${ZMQ_PATCH_COMMAND}
UPDATE_DISCONNECTED 1
)
# Disable unwanted options from libzmq
set(BUILD_TESTS OFF CACHE BOOL "Switch off libzmq test build")
set(BUILD_SHARED OFF CACHE BOOL "Switch off libzmq shared libs")
set(WITH_PERF_TOOL OFF CACHE BOOL "")
set(ENABLE_CPACK OFF CACHE BOOL "")
set(ENABLE_CLANG OFF CACHE BOOL "")
set(ENABLE_CURVE OFF CACHE BOOL "")
set(ENABLE_DRAFTS OFF CACHE BOOL "")
# FetchContent_Declare is deprecated; we need to find a way to update this. For
# now, setting the policy to OLD is enough.
if(${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.30")
cmake_policy(SET CMP0169 OLD)
endif()
set(ZMQ_PATCH_COMMAND
git apply ${CMAKE_CURRENT_SOURCE_DIR}/patches/libzmq_cmake_version.patch)
FetchContent_Declare(
libzmq
GIT_REPOSITORY https://github.com/zeromq/libzmq.git
GIT_TAG v4.3.4
PATCH_COMMAND ${ZMQ_PATCH_COMMAND}
UPDATE_DISCONNECTED 1)
# Disable unwanted options from libzmq
set(BUILD_TESTS
OFF
CACHE BOOL "Switch off libzmq test build")
set(BUILD_SHARED
OFF
CACHE BOOL "Switch off libzmq shared libs")
set(WITH_PERF_TOOL
OFF
CACHE BOOL "")
set(ENABLE_CPACK
OFF
CACHE BOOL "")
set(ENABLE_CLANG
OFF
CACHE BOOL "")
set(ENABLE_CURVE
OFF
CACHE BOOL "")
set(ENABLE_DRAFTS
OFF
CACHE BOOL "")
# TODO! Verify that this is what we want to do in aare
# Using GetProperties and Populate to be able to exclude zmq
# from install (not possible with FetchContent_MakeAvailable(libzmq))
FetchContent_GetProperties(libzmq)
if(NOT libzmq_POPULATED)
FetchContent_Populate(libzmq)
add_subdirectory(${libzmq_SOURCE_DIR} ${libzmq_BINARY_DIR} EXCLUDE_FROM_ALL)
endif()
# TODO! Verify that this is what we want to do in aare Using GetProperties and
# Populate to be able to exclude zmq from install (not possible with
# FetchContent_MakeAvailable(libzmq))
FetchContent_GetProperties(libzmq)
if(NOT libzmq_POPULATED)
FetchContent_Populate(libzmq)
add_subdirectory(${libzmq_SOURCE_DIR} ${libzmq_BINARY_DIR} EXCLUDE_FROM_ALL)
endif()
else()
find_package(ZeroMQ 4 REQUIRED)
find_package(ZeroMQ 4 REQUIRED)
endif()
if (AARE_FETCH_FMT)
set(FMT_TEST OFF CACHE INTERNAL "disabling fmt tests")
FetchContent_Declare(
fmt
GIT_REPOSITORY https://github.com/fmtlib/fmt.git
GIT_TAG 10.2.1
GIT_PROGRESS TRUE
USES_TERMINAL_DOWNLOAD TRUE
)
set(FMT_INSTALL ON CACHE BOOL "")
# set(FMT_CMAKE_DIR "")
FetchContent_MakeAvailable(fmt)
set_property(TARGET fmt PROPERTY POSITION_INDEPENDENT_CODE ON)
install(TARGETS fmt
EXPORT ${project}-targets
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
INCLUDES DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}
)
if(AARE_FETCH_FMT)
set(FMT_TEST
OFF
CACHE INTERNAL "disabling fmt tests")
FetchContent_Declare(
fmt
GIT_REPOSITORY https://github.com/fmtlib/fmt.git
GIT_TAG 10.2.1
GIT_PROGRESS TRUE
USES_TERMINAL_DOWNLOAD TRUE)
set(FMT_INSTALL
ON
CACHE BOOL "")
# set(FMT_CMAKE_DIR "")
FetchContent_MakeAvailable(fmt)
set_property(TARGET fmt PROPERTY POSITION_INDEPENDENT_CODE ON)
install(
TARGETS fmt
EXPORT ${project}-targets
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
INCLUDES
DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})
else()
find_package(fmt 6 REQUIRED)
find_package(fmt 6 REQUIRED)
endif()
if(AARE_FETCH_JSON)
FetchContent_Declare(
json
URL https://github.com/nlohmann/json/releases/download/v3.11.3/json.tar.xz)
set(JSON_Install
ON
CACHE BOOL "")
FetchContent_MakeAvailable(json)
set(NLOHMANN_JSON_TARGET_NAME nlohmann_json)
if (AARE_FETCH_JSON)
FetchContent_Declare(
json
URL https://github.com/nlohmann/json/releases/download/v3.11.3/json.tar.xz
)
set(JSON_Install ON CACHE BOOL "")
FetchContent_MakeAvailable(json)
set(NLOHMANN_JSON_TARGET_NAME nlohmann_json)
install(
TARGETS nlohmann_json
EXPORT "${TARGETS_EXPORT_NAME}"
)
message(STATUS "target: ${NLOHMANN_JSON_TARGET_NAME}")
install(TARGETS nlohmann_json EXPORT "${TARGETS_EXPORT_NAME}")
message(STATUS "target: ${NLOHMANN_JSON_TARGET_NAME}")
else()
find_package(nlohmann_json 3.11.3 REQUIRED)
find_package(nlohmann_json 3.11.3 REQUIRED)
endif()
include(GNUInstallDirs)
@@ -245,7 +279,6 @@ endif()
string(TOUPPER "${PROJECT_NAME}" PROJECT_NAME_UPPER)
string(TOLOWER "${PROJECT_NAME}" PROJECT_NAME_LOWER)
# Set targets export name (used by slsDetectorPackage and dependencies)
set(TARGETS_EXPORT_NAME "${PROJECT_NAME_LOWER}-targets")
set(namespace "aare::")
@@ -254,107 +287,86 @@ set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake" ${CMAKE_MODULE_PATH})
# Check if project is being used directly or via add_subdirectory
set(AARE_MASTER_PROJECT OFF)
if (CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_SOURCE_DIR)
set(AARE_MASTER_PROJECT ON)
if(CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_SOURCE_DIR)
set(AARE_MASTER_PROJECT ON)
endif()
add_library(aare_compiler_flags INTERFACE)
target_compile_features(aare_compiler_flags INTERFACE cxx_std_17)
#################
# ##############################################################################
# MSVC specific #
#################
# ##############################################################################
if(MSVC)
add_compile_definitions(AARE_MSVC)
if(CMAKE_BUILD_TYPE STREQUAL "Release")
add_compile_definitions(AARE_MSVC)
if(CMAKE_BUILD_TYPE STREQUAL "Release")
message(STATUS "Release build")
target_compile_options(aare_compiler_flags INTERFACE /O2)
else()
else()
message(STATUS "Debug build")
target_compile_options(
aare_compiler_flags
INTERFACE
/Od
/Zi
/MDd
/D_ITERATOR_DEBUG_LEVEL=2
)
target_link_options(
aare_compiler_flags
INTERFACE
/DEBUG:FULL
)
endif()
target_compile_options(
aare_compiler_flags
INTERFACE
/w # disable warnings
)
target_compile_options(aare_compiler_flags
INTERFACE /Od /Zi /MDd /D_ITERATOR_DEBUG_LEVEL=2)
target_link_options(aare_compiler_flags INTERFACE /DEBUG:FULL)
endif()
target_compile_options(aare_compiler_flags INTERFACE /w # disable warnings
)
else()
######################
# GCC/Clang specific #
######################
# ############################################################################
# GCC/Clang specific #
# ############################################################################
if(CMAKE_BUILD_TYPE STREQUAL "Release")
if(CMAKE_BUILD_TYPE STREQUAL "Release")
message(STATUS "Release build")
target_compile_options(aare_compiler_flags INTERFACE -O3)
else()
else()
message(STATUS "Debug build")
endif()
endif()
# Common flags for GCC and Clang
target_compile_options(
aare_compiler_flags
INTERFACE
-Wall
-Wextra
-pedantic
-Wshadow
-Wold-style-cast
-Wnon-virtual-dtor
-Woverloaded-virtual
-Wdouble-promotion
-Wformat=2
-Wredundant-decls
-Wvla
-Wdouble-promotion
-Werror=return-type #important can cause segfault in optimized builds
)
# Common flags for GCC and Clang
target_compile_options(
aare_compiler_flags
INTERFACE -Wall
-Wextra
-pedantic
-Wshadow
-Wold-style-cast
-Wnon-virtual-dtor
-Woverloaded-virtual
-Wdouble-promotion
-Wformat=2
-Wredundant-decls
-Wvla
-Wdouble-promotion
-Werror=return-type # important can cause segfault in optimized
# builds
)
endif() #GCC/Clang specific
endif() # GCC/Clang specific
if(AARE_PYTHON_BINDINGS)
add_subdirectory(python)
add_subdirectory(python)
endif()
if(AARE_ASAN)
message(STATUS "AddressSanitizer enabled")
target_compile_options(
aare_compiler_flags
INTERFACE
-fsanitize=address,undefined,pointer-compare
-fno-omit-frame-pointer
)
target_link_libraries(
aare_compiler_flags
INTERFACE
-fsanitize=address,undefined,pointer-compare
-fno-omit-frame-pointer
)
message(STATUS "AddressSanitizer enabled")
target_compile_options(
aare_compiler_flags INTERFACE -fsanitize=address,undefined,pointer-compare
-fno-omit-frame-pointer)
target_link_libraries(
aare_compiler_flags INTERFACE -fsanitize=address,undefined,pointer-compare
-fno-omit-frame-pointer)
endif()
if(AARE_TESTS)
enable_testing()
add_subdirectory(tests)
target_compile_definitions(tests PRIVATE AARE_TESTS)
enable_testing()
add_subdirectory(tests)
target_compile_definitions(tests PRIVATE AARE_TESTS)
endif()
###------------------------------------------------------------------------------MAIN LIBRARY
###------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------MAIN
# LIBRARY
# ------------------------------------------------------------------------------------------
set(PUBLICHEADERS
include/aare/ArrayExpr.hpp
@@ -390,9 +402,7 @@ set(PUBLICHEADERS
include/aare/RawMasterFile.hpp
include/aare/RawSubFile.hpp
include/aare/VarClusterFinder.hpp
include/aare/utils/task.hpp
)
include/aare/utils/task.hpp)
set(SourceFiles
${CMAKE_CURRENT_SOURCE_DIR}/src/calibration.cpp
@@ -416,152 +426,150 @@ set(SourceFiles
${CMAKE_CURRENT_SOURCE_DIR}/src/RawSubFile.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/to_string.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/utils/task.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/utils/ifstream_helpers.cpp
)
${CMAKE_CURRENT_SOURCE_DIR}/src/utils/ifstream_helpers.cpp)
add_library(aare_core STATIC ${SourceFiles})
target_include_directories(aare_core PUBLIC
"$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>"
"$<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>"
)
target_include_directories(
aare_core PUBLIC "$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>"
"$<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>")
set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads REQUIRED)
target_link_libraries(
aare_core
PUBLIC
fmt::fmt
nlohmann_json::nlohmann_json
${STD_FS_LIB} # from helpers.cmake
Minuit2::Minuit2
PRIVATE
aare_compiler_flags
Threads::Threads
$<BUILD_INTERFACE:lmfit>
)
aare_core
PUBLIC fmt::fmt nlohmann_json::nlohmann_json ${STD_FS_LIB} # from
# helpers.cmake
Minuit2::Minuit2
PRIVATE aare_compiler_flags Threads::Threads $<BUILD_INTERFACE:lmfit>)
target_include_directories(aare_core SYSTEM PRIVATE
$<TARGET_PROPERTY:Minuit2::Minuit2,INTERFACE_INCLUDE_DIRECTORIES>
)
target_include_directories(
aare_core SYSTEM
PRIVATE $<TARGET_PROPERTY:Minuit2::Minuit2,INTERFACE_INCLUDE_DIRECTORIES>)
set_property(TARGET aare_core PROPERTY POSITION_INDEPENDENT_CODE ON)
if(AARE_TESTS)
target_compile_definitions(aare_core PRIVATE AARE_TESTS)
target_compile_definitions(aare_core PRIVATE AARE_TESTS)
endif()
if(AARE_VERBOSE)
target_compile_definitions(aare_core PUBLIC AARE_VERBOSE)
target_compile_definitions(aare_core PUBLIC AARE_LOG_LEVEL=aare::logDEBUG5)
target_compile_definitions(aare_core PUBLIC AARE_VERBOSE)
target_compile_definitions(aare_core PUBLIC AARE_LOG_LEVEL=aare::logDEBUG5)
else()
target_compile_definitions(aare_core PUBLIC AARE_LOG_LEVEL=aare::logERROR)
target_compile_definitions(aare_core PUBLIC AARE_LOG_LEVEL=aare::logERROR)
endif()
if(AARE_CUSTOM_ASSERT)
target_compile_definitions(aare_core PUBLIC AARE_CUSTOM_ASSERT)
target_compile_definitions(aare_core PUBLIC AARE_CUSTOM_ASSERT)
endif()
set_target_properties(aare_core PROPERTIES
ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}
PUBLIC_HEADER "${PUBLICHEADERS}"
)
set_target_properties(
aare_core PROPERTIES ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}
PUBLIC_HEADER "${PUBLICHEADERS}")
if(AARE_TESTS)
set(TestSources
${CMAKE_CURRENT_SOURCE_DIR}/src/algorithm.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/calibration.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/defs.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/decode.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/Dtype.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/Frame.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/DetectorGeometry.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/Interpolation.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/RawMasterFile.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/NDArray.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/NDView.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFinder.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterVector.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/Cluster.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/CalculateEta.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFile.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFinderMT.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/Pedestal.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/JungfrauDataFile.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyHelpers.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/RawFile.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/RawSubFile.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/utils/task.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/to_string.test.cpp
)
target_sources(tests PRIVATE ${TestSources} )
set(TestSources
${CMAKE_CURRENT_SOURCE_DIR}/src/algorithm.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/calibration.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/defs.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/decode.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/Dtype.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/Frame.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/DetectorGeometry.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/Interpolation.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/RawMasterFile.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/NDArray.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/NDView.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFinder.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterVector.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/Cluster.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/CalculateEta.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFile.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFinderMT.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/Pedestal.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/JungfrauDataFile.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyHelpers.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/RawFile.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/RawSubFile.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/utils/task.test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/to_string.test.cpp)
target_sources(tests PRIVATE ${TestSources})
endif()
if(AARE_MASTER_PROJECT)
install(TARGETS aare_core aare_compiler_flags
EXPORT "${TARGETS_EXPORT_NAME}"
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
PUBLIC_HEADER DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/aare
)
install(
TARGETS aare_core aare_compiler_flags
EXPORT "${TARGETS_EXPORT_NAME}"
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
PUBLIC_HEADER DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/aare)
endif()
set(CMAKE_INSTALL_RPATH $ORIGIN)
set(CMAKE_BUILD_WITH_INSTALL_RPATH FALSE)
# #Overall target to link to when using the library
# add_library(aare INTERFACE)
# #Overall target to link to when using the library add_library(aare INTERFACE)
# target_link_libraries(aare INTERFACE aare_core aare_compiler_flags)
# target_include_directories(aare INTERFACE
# $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
# $<INSTALL_INTERFACE:include>
# )
# target_include_directories(aare INTERFACE
# $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
# $<INSTALL_INTERFACE:include> )
# add_subdirectory(examples)
if(AARE_DOCS)
add_subdirectory(docs)
add_subdirectory(docs)
endif()
# custom target to run check formatting with clang-format
add_custom_target(
check-format
COMMAND find \( -name "*.cpp" -o -name "*.hpp" \) -not -path "./build/*" | xargs -I {} -n 1 -P 10 bash -c "clang-format -Werror -style=\"file:.clang-format\" {} | diff {} -"
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
COMMENT "Checking code formatting with clang-format"
VERBATIM
)
check-format
COMMAND
find \\ (-name "*.cpp" -o -name "*.hpp" \\) -not -path "./build/*" | xargs
-I {} -n 1 -P 10 bash -c
"clang-format -Werror -style=\"file:.clang-format\" {} | diff {} -"
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
COMMENT "Checking code formatting with clang-format"
VERBATIM)
add_custom_target(
format-files
COMMAND find \( -name "*.cpp" -o -name "*.hpp" \) -not -path "./build/*" | xargs -I {} -n 1 -P 10 bash -c "clang-format -i -style=\"file:.clang-format\" {}"
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
COMMENT "Formatting with clang-format"
VERBATIM
)
format-files
COMMAND
find \\ (-name "*.cpp" -o -name "*.hpp" \\) -not -path "./build/*" | xargs
-I {} -n 1 -P 10 bash -c "clang-format -i -style=\"file:.clang-format\" {}"
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
COMMENT "Formatting with clang-format"
VERBATIM)
if (AARE_IN_GITHUB_ACTIONS)
message(STATUS "Running in Github Actions")
set(CLANG_TIDY_COMMAND "clang-tidy-17")
if(AARE_IN_GITHUB_ACTIONS)
message(STATUS "Running in Github Actions")
set(CLANG_TIDY_COMMAND "clang-tidy-17")
else()
set(CLANG_TIDY_COMMAND "clang-tidy")
set(CLANG_TIDY_COMMAND "clang-tidy")
endif()
add_custom_target(
clang-tidy
COMMAND find \( -path "./src/*" -a -not -path "./src/python/*" -a \( -name "*.cpp" -not -name "*.test.cpp" \) \) -not -name "CircularFifo.hpp" -not -name "ProducerConsumerQueue.hpp" -not -name "VariableSizeClusterFinder.hpp" | xargs -I {} -n 1 -P 10 bash -c "${CLANG_TIDY_COMMAND} --config-file=.clang-tidy -p build {}"
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
COMMENT "linting with clang-tidy"
VERBATIM
)
clang-tidy
COMMAND
find \\ (-path
"./src/*"
-a
-not
-path
"./src/python/*"
-a
\\
(-name "*.cpp" -not -name "*.test.cpp" \\)
\\) -not -name "CircularFifo.hpp" -not -name
"ProducerConsumerQueue.hpp" -not -name "VariableSizeClusterFinder.hpp" |
xargs -I {} -n 1 -P 10 bash -c
"${CLANG_TIDY_COMMAND} --config-file=.clang-tidy -p build {}"
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
COMMENT "linting with clang-tidy"
VERBATIM)
if(AARE_MASTER_PROJECT)
set(CMAKE_INSTALL_DIR "share/cmake/${PROJECT_NAME}")
set(PROJECT_LIBRARIES aare-core aare-compiler-flags )
include(cmake/package_config.cmake)
set(CMAKE_INSTALL_DIR "share/cmake/${PROJECT_NAME}")
set(PROJECT_LIBRARIES aare-core aare-compiler-flags)
include(cmake/package_config.cmake)
endif()
+12 -1
View File
@@ -79,4 +79,15 @@ make install
```bash
conda build . --variants="{python: [3.11, 3.12, 3.13]}"
```
```
## Developer's guide
We are looking forward to your contributions via pull requests!
If you want to fix an existing bug or propose a new feature:
1. Install the `pre-commit` Python package and set it up with `pre-commit install`
2. Create a new branch with `git branch branch_name`
3. Implement your changes and make a commit (`pre-commit` will check your code automatically)
4. Push your commit and open a pull request if needed
+21 -16
View File
@@ -2,34 +2,39 @@
include(FetchContent)
FetchContent_Declare(
benchmark
GIT_REPOSITORY https://github.com/google/benchmark.git
GIT_TAG v1.8.3 # Change to the latest version if needed
benchmark
GIT_REPOSITORY https://github.com/google/benchmark.git
GIT_TAG v1.8.3 # Change to the latest version if needed
)
# Ensure Google Benchmark is built correctly
set(BENCHMARK_ENABLE_TESTING OFF CACHE BOOL "" FORCE)
set(BENCHMARK_ENABLE_TESTING
OFF
CACHE BOOL "" FORCE)
FetchContent_MakeAvailable(benchmark)
add_executable(benchmarks)
target_sources(benchmarks PRIVATE ndarray_benchmark.cpp calculateeta_benchmark.cpp reduce_benchmark.cpp)
target_sources(
benchmarks PRIVATE ndarray_benchmark.cpp calculateeta_benchmark.cpp
reduce_benchmark.cpp)
# Link Google Benchmark and other necessary libraries
target_link_libraries(benchmarks PRIVATE benchmark::benchmark aare_core aare_compiler_flags)
target_link_libraries(benchmarks PRIVATE benchmark::benchmark aare_core
aare_compiler_flags)
# Set output properties
set_target_properties(benchmarks PROPERTIES
RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}
OUTPUT_NAME run_benchmarks
)
set_target_properties(
benchmarks PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}
OUTPUT_NAME run_benchmarks)
add_executable(fit_benchmark fit_benchmark.cpp)
target_link_libraries(fit_benchmark PRIVATE benchmark::benchmark aare_core aare_compiler_flags)
target_include_directories(fit_benchmark SYSTEM PRIVATE
$<TARGET_PROPERTY:Minuit2::Minuit2,INTERFACE_INCLUDE_DIRECTORIES>
)
set_target_properties(fit_benchmark PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR})
target_link_libraries(fit_benchmark PRIVATE benchmark::benchmark aare_core
aare_compiler_flags)
target_include_directories(
fit_benchmark SYSTEM
PRIVATE $<TARGET_PROPERTY:Minuit2::Minuit2,INTERFACE_INCLUDE_DIRECTORIES>)
set_target_properties(fit_benchmark PROPERTIES RUNTIME_OUTPUT_DIRECTORY
${CMAKE_BINARY_DIR})
+30 -29
View File
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: MPL-2.0
#include "aare/Fit.hpp"
#include "aare/Chi2.hpp"
#include "aare/Models.hpp"
#include "aare/Fit.hpp"
#include "aare/FitModel.hpp"
#include "aare/Models.hpp"
#include <benchmark/benchmark.h>
#include <cmath>
@@ -10,7 +10,6 @@
#include <string>
#include <vector>
struct TestCase {
std::string name;
double true_A;
@@ -21,12 +20,12 @@ struct TestCase {
static const std::vector<TestCase> &get_test_cases() {
static const std::vector<TestCase> cases = {
{"Clean_signal", 1000.0, 50.0, 5.0, 0.02},
{"Moderate_noise", 1000.0, 50.0, 5.0, 0.10},
{"High_noise", 1000.0, 50.0, 5.0, 0.30},
{"Narrow_peak", 500.0, 25.0, 1.0, 0.05},
{"Wide_peak", 200.0, 100.0, 20.0, 0.05},
{"Off_center_peak", 800.0, -15.0, 3.0, 0.05},
{"Clean_signal", 1000.0, 50.0, 5.0, 0.02},
{"Moderate_noise", 1000.0, 50.0, 5.0, 0.10},
{"High_noise", 1000.0, 50.0, 5.0, 0.30},
{"Narrow_peak", 500.0, 25.0, 1.0, 0.05},
{"Wide_peak", 200.0, 100.0, 20.0, 0.05},
{"Off_center_peak", 800.0, -15.0, 3.0, 0.05},
};
return cases;
}
@@ -58,21 +57,18 @@ static GeneratedData generate_gaussian_data(const TestCase &tc) {
for (ssize_t i = 0; i < N_POINTS; ++i) {
d.x[i] = x_min + i * dx;
double clean = tc.true_A *
std::exp(-std::pow(d.x[i] - tc.true_mu, 2) /
(2.0 * std::pow(tc.true_sig, 2)));
double clean = tc.true_A * std::exp(-std::pow(d.x[i] - tc.true_mu, 2) /
(2.0 * std::pow(tc.true_sig, 2)));
d.y[i] = clean + noise(rng);
d.y_err[i] = noise_sigma;
}
return d;
}
static void report_accuracy(benchmark::State &state,
const TestCase &tc,
static void report_accuracy(benchmark::State &state, const TestCase &tc,
const aare::NDArray<double, 1> &result) {
state.counters["dA"] = result(0) - tc.true_A;
state.counters["dMu"] = result(1) - tc.true_mu;
state.counters["dA"] = result(0) - tc.true_A;
state.counters["dMu"] = result(1) - tc.true_mu;
state.counters["dSig"] = result(2) - tc.true_sig;
}
@@ -104,14 +100,17 @@ static void BM_FitGausMinuitGrad(benchmark::State &state) {
auto xv = data.x.view();
auto yv = data.y.view();
const auto model = aare::FitModel<aare::model::Gaussian>(/*strategy = */0,
/*max_calls = */500, // increase for noisy signals
/*tolerance = */0.5,
/*compute_errors = */false);
const auto model = aare::FitModel<aare::model::Gaussian>(
/*strategy = */ 0,
/*max_calls = */ 500, // increase for noisy signals
/*tolerance = */ 0.5,
/*compute_errors = */ false);
aare::NDArray<double, 1> result;
for (auto _ : state) {
result = aare::fit_pixel<aare::model::Gaussian, aare::func::Chi2Gaussian>(model, xv, yv);
result =
aare::fit_pixel<aare::model::Gaussian, aare::func::Chi2Gaussian>(
model, xv, yv);
benchmark::DoNotOptimize(result.data());
}
@@ -127,11 +126,15 @@ static void BM_FitGausMinuitGradHesse(benchmark::State &state) {
auto yv = data.y.view();
auto ev = data.y_err.view();
const auto model = aare::FitModel<aare::model::Gaussian>(0, 500, 0.5, true); // compute_errors = true -> Runs Hesse and provides errors on fitted params
const auto model = aare::FitModel<aare::model::Gaussian>(
0, 500, 0.5, true); // compute_errors = true -> Runs Hesse and provides
// errors on fitted params
aare::NDArray<double, 1> result;
for (auto _ : state) {
result = aare::fit_pixel<aare::model::Gaussian, aare::func::Chi2Gaussian>(model, xv, yv, ev);
result =
aare::fit_pixel<aare::model::Gaussian, aare::func::Chi2Gaussian>(
model, xv, yv, ev);
benchmark::DoNotOptimize(result.data());
}
@@ -140,16 +143,14 @@ static void BM_FitGausMinuitGradHesse(benchmark::State &state) {
// Also report Hesse uncertainties
if (result.size() >= 6) {
state.counters["errA"] = result(3);
state.counters["errMu"] = result(4);
state.counters["errA"] = result(3);
state.counters["errMu"] = result(4);
state.counters["errSig"] = result(5);
}
state.SetLabel(tc.name);
}
BENCHMARK(BM_FitGausLm)
->DenseRange(0, 5)
->Unit(benchmark::kMicrosecond);
BENCHMARK(BM_FitGausLm)->DenseRange(0, 5)->Unit(benchmark::kMicrosecond);
BENCHMARK(BM_FitGausMinuitGrad)
->DenseRange(0, 5)
+10 -10
View File
@@ -1,11 +1,11 @@
#Look for an executable called sphinx-build
find_program(SPHINX_EXECUTABLE
NAMES sphinx-build sphinx-build-3.6
DOC "Path to sphinx-build executable")
# Look for an executable called sphinx-build
find_program(
SPHINX_EXECUTABLE
NAMES sphinx-build sphinx-build-3.6
DOC "Path to sphinx-build executable")
include(FindPackageHandleStandardArgs)
#Handle standard arguments to find_package like REQUIRED and QUIET
find_package_handle_standard_args(Sphinx
"Failed to find sphinx-build executable"
SPHINX_EXECUTABLE)
# Handle standard arguments to find_package like REQUIRED and QUIET
find_package_handle_standard_args(
Sphinx "Failed to find sphinx-build executable" SPHINX_EXECUTABLE)
+48 -41
View File
@@ -1,46 +1,53 @@
function(default_build_type val)
if (NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES)
message(STATUS "No build type selected, default to Release")
set(CMAKE_BUILD_TYPE ${val} CACHE STRING "Build type (default ${val})" FORCE)
endif()
if(NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES)
message(STATUS "No build type selected, default to Release")
set(CMAKE_BUILD_TYPE
${val}
CACHE STRING "Build type (default ${val})" FORCE)
endif()
endfunction()
function(set_std_fs_lib)
# from pybind11
# Check if we need to add -lstdc++fs or -lc++fs or nothing
if(DEFINED CMAKE_CXX_STANDARD AND CMAKE_CXX_STANDARD LESS 17)
set(STD_FS_NO_LIB_NEEDED TRUE)
elseif(MSVC)
set(STD_FS_NO_LIB_NEEDED TRUE)
else()
file(
WRITE ${CMAKE_CURRENT_BINARY_DIR}/main.cpp
"#include <filesystem>\nint main(int argc, char ** argv) {\n std::filesystem::path p(argv[0]);\n return p.string().length();\n}"
)
try_compile(
STD_FS_NO_LIB_NEEDED ${CMAKE_CURRENT_BINARY_DIR}
SOURCES ${CMAKE_CURRENT_BINARY_DIR}/main.cpp
COMPILE_DEFINITIONS -std=c++17)
try_compile(
STD_FS_NEEDS_STDCXXFS ${CMAKE_CURRENT_BINARY_DIR}
SOURCES ${CMAKE_CURRENT_BINARY_DIR}/main.cpp
COMPILE_DEFINITIONS -std=c++17
LINK_LIBRARIES stdc++fs)
try_compile(
STD_FS_NEEDS_CXXFS ${CMAKE_CURRENT_BINARY_DIR}
SOURCES ${CMAKE_CURRENT_BINARY_DIR}/main.cpp
COMPILE_DEFINITIONS -std=c++17
LINK_LIBRARIES c++fs)
endif()
# from pybind11 Check if we need to add -lstdc++fs or -lc++fs or nothing
if(DEFINED CMAKE_CXX_STANDARD AND CMAKE_CXX_STANDARD LESS 17)
set(STD_FS_NO_LIB_NEEDED TRUE)
elseif(MSVC)
set(STD_FS_NO_LIB_NEEDED TRUE)
else()
file(
WRITE ${CMAKE_CURRENT_BINARY_DIR}/main.cpp
"#include <filesystem>\nint main(int argc, char ** argv) {\n std::filesystem::path p(argv[0]);\n return p.string().length();\n}"
)
try_compile(
STD_FS_NO_LIB_NEEDED ${CMAKE_CURRENT_BINARY_DIR}
SOURCES ${CMAKE_CURRENT_BINARY_DIR}/main.cpp
COMPILE_DEFINITIONS -std=c++17)
try_compile(
STD_FS_NEEDS_STDCXXFS ${CMAKE_CURRENT_BINARY_DIR}
SOURCES ${CMAKE_CURRENT_BINARY_DIR}/main.cpp
COMPILE_DEFINITIONS -std=c++17
LINK_LIBRARIES stdc++fs)
try_compile(
STD_FS_NEEDS_CXXFS ${CMAKE_CURRENT_BINARY_DIR}
SOURCES ${CMAKE_CURRENT_BINARY_DIR}/main.cpp
COMPILE_DEFINITIONS -std=c++17
LINK_LIBRARIES c++fs)
endif()
if(${STD_FS_NEEDS_STDCXXFS})
set(STD_FS_LIB stdc++fs PARENT_SCOPE)
elseif(${STD_FS_NEEDS_CXXFS})
set(STD_FS_LIB c++fs PARENT_SCOPE)
elseif(${STD_FS_NO_LIB_NEEDED})
set(STD_FS_LIB "" PARENT_SCOPE)
else()
message(WARNING "Unknown C++17 compiler - not passing -lstdc++fs")
set(STD_FS_LIB "")
endif()
endfunction()
if(${STD_FS_NEEDS_STDCXXFS})
set(STD_FS_LIB
stdc++fs
PARENT_SCOPE)
elseif(${STD_FS_NEEDS_CXXFS})
set(STD_FS_LIB
c++fs
PARENT_SCOPE)
elseif(${STD_FS_NO_LIB_NEEDED})
set(STD_FS_LIB
""
PARENT_SCOPE)
else()
message(WARNING "Unknown C++17 compiler - not passing -lstdc++fs")
set(STD_FS_LIB "")
endif()
endfunction()
+8 -12
View File
@@ -15,21 +15,17 @@ configure_package_config_file(
write_basic_package_version_file(
"${PROJECT_BINARY_DIR}/${PROJECT_NAME_LOWER}-config-version.cmake"
VERSION ${PROJECT_VERSION}
COMPATIBILITY SameMajorVersion
)
COMPATIBILITY SameMajorVersion)
install(FILES
"${PROJECT_BINARY_DIR}/${PROJECT_NAME_LOWER}-config.cmake"
"${PROJECT_BINARY_DIR}/${PROJECT_NAME_LOWER}-config-version.cmake"
install(
FILES "${PROJECT_BINARY_DIR}/${PROJECT_NAME_LOWER}-config.cmake"
"${PROJECT_BINARY_DIR}/${PROJECT_NAME_LOWER}-config-version.cmake"
COMPONENT devel
DESTINATION ${CMAKE_INSTALL_DIR}
)
DESTINATION ${CMAKE_INSTALL_DIR})
if (PROJECT_LIBRARIES OR PROJECT_STATIC_LIBRARIES)
if(PROJECT_LIBRARIES OR PROJECT_STATIC_LIBRARIES)
install(
EXPORT "${TARGETS_EXPORT_NAME}"
FILE ${PROJECT_NAME_LOWER}-targets.cmake
DESTINATION ${CMAKE_INSTALL_DIR}
)
endif ()
DESTINATION ${CMAKE_INSTALL_DIR})
endif()
+22 -34
View File
@@ -2,53 +2,41 @@
find_package(Doxygen REQUIRED)
find_package(Sphinx REQUIRED)
#Doxygen
# Doxygen
set(DOXYGEN_IN ${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile.in)
set(DOXYGEN_OUT ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile)
configure_file(${DOXYGEN_IN} ${DOXYGEN_OUT} @ONLY)
#Sphinx
# Sphinx
set(SPHINX_SOURCE ${CMAKE_CURRENT_SOURCE_DIR}/src)
set(SPHINX_BUILD ${CMAKE_CURRENT_BINARY_DIR})
file(GLOB_RECURSE SPHINX_SOURCE_FILES
CONFIGURE_DEPENDS
RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}/src"
"${CMAKE_CURRENT_SOURCE_DIR}/src/*.rst"
)
file(
GLOB_RECURSE SPHINX_SOURCE_FILES CONFIGURE_DEPENDS
RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}/src"
"${CMAKE_CURRENT_SOURCE_DIR}/src/*.rst")
foreach(relpath IN LISTS SPHINX_SOURCE_FILES)
set(src "${CMAKE_CURRENT_SOURCE_DIR}/src/${relpath}")
set(dst "${SPHINX_BUILD}/src/${relpath}")
set(src "${CMAKE_CURRENT_SOURCE_DIR}/src/${relpath}")
set(dst "${SPHINX_BUILD}/src/${relpath}")
message(STATUS "Copying ${src} to ${dst}")
configure_file("${src}" "${dst}" COPYONLY)
message(STATUS "Copying ${src} to ${dst}")
configure_file("${src}" "${dst}" COPYONLY)
endforeach()
configure_file(
"${CMAKE_CURRENT_SOURCE_DIR}/conf.py.in"
"${SPHINX_BUILD}/conf.py"
@ONLY
)
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/conf.py.in"
"${SPHINX_BUILD}/conf.py" @ONLY)
file(COPY "${CMAKE_CURRENT_SOURCE_DIR}/figures"
DESTINATION "${SPHINX_BUILD}")
file(COPY "${CMAKE_CURRENT_SOURCE_DIR}/figures" DESTINATION "${SPHINX_BUILD}")
configure_file(
"${CMAKE_CURRENT_SOURCE_DIR}/static/extra.css"
"${SPHINX_BUILD}/static/css/extra.css"
@ONLY
)
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/static/extra.css"
"${SPHINX_BUILD}/static/css/extra.css" @ONLY)
add_custom_target(
docs
COMMAND ${DOXYGEN_EXECUTABLE} ${DOXYGEN_OUT}
COMMAND ${SPHINX_EXECUTABLE} -a -b html
-Dbreathe_projects.aare=${CMAKE_CURRENT_BINARY_DIR}/xml
-c "${SPHINX_BUILD}"
${SPHINX_BUILD}/src
${SPHINX_BUILD}/html
COMMENT "Generating documentation with Sphinx"
)
docs
COMMAND ${DOXYGEN_EXECUTABLE} ${DOXYGEN_OUT}
COMMAND
${SPHINX_EXECUTABLE} -a -b html
-Dbreathe_projects.aare=${CMAKE_CURRENT_BINARY_DIR}/xml -c "${SPHINX_BUILD}"
${SPHINX_BUILD}/src ${SPHINX_BUILD}/html
COMMENT "Generating documentation with Sphinx")
+2 -2
View File
@@ -70,7 +70,7 @@ Supported are the following :math:`\eta`-functions:
The :math:`\eta` values can range between 0,1. Note they only range between 0,1 because the position of the center pixel (red) can change.
If the center pixel is in the bottom left pixel :math:`\eta_x` will be close to zero. If the center pixel is in the bottom right pixel :math:`\eta_y` will be close to 1.
One can apply this :math:`\eta` not only on 2x2 clusters but on clusters with any size. Then the 2x2 subcluster with maximum energy is choosen and the :math:`\eta` function applied on the subcluster.
One can apply this :math:`\eta` not only on 2x2 clusters but on clusters with any size. Then the 2x2 subcluster with maximum energy is chosen and the :math:`\eta` function applied on the subcluster.
.. doxygenfunction:: aare::calculate_eta2(const ClusterVector<ClusterType>&)
@@ -150,7 +150,7 @@ Interpolation class:
Make sure to use the same :math:`\eta`-function during interpolation as given by the joint :math:`\eta`-distribution passed to the constructor.
.. Note::
Make sure to use resonable energy bins, when constructing the joint distribution. If data is too sparse for a given energy the interpolation will lead to erreneous results.
Make sure to use reasonable energy bins, when constructing the joint distribution. If data is too sparse for a given energy the interpolation will lead to erroneous results.
.. doxygenclass:: aare::Interpolator
:members:
+1 -1
View File
@@ -12,7 +12,7 @@ The structure of the file is:
* ...
There is no metadata indicating number of frames or the size of the image, but this
will be infered by this reader.
will be inferred by this reader.
.. doxygenstruct:: aare::JungfrauDataHeader
:members:
+1 -1
View File
@@ -111,7 +111,7 @@ Interpolation class for :math:`\eta`-Interpolation
The interpolation might lead to erroneous photon positions for clusters at the boarders of a frame. Make sure to filter out such cases.
.. Note::
Make sure to use resonable energy bins, when constructing the joint distribution. If data is too sparse for a given energy the interpolation will lead to erreneous results.
Make sure to use reasonable energy bins, when constructing the joint distribution. If data is too sparse for a given energy the interpolation will lead to erreneous results.
.. py:currentmodule:: aare
+36 -30
View File
@@ -1,12 +1,12 @@
// SPDX-License-Identifier: MPL-2.0
#pragma once
#include <stdexcept>
#include "aare/Models.hpp"
#include "aare/NDView.hpp"
#include <Minuit2/FCNGradientBase.h>
#include <algorithm>
#include <cmath>
#include <stdexcept>
#include <vector>
#include <Minuit2/FCNGradientBase.h>
#include "aare/NDView.hpp"
#include "aare/Models.hpp"
namespace aare {
@@ -29,7 +29,7 @@ namespace func {
*
* By providing analytic gradients we avoid 2*npar extra function evaluations
* per Minuit step that would otherwise be spent on finite differences.
*
*
* @throws std::invalid_argument if par.size() != Model::npar.
*
* Invalid model parameters do not throw; they return a large penalty
@@ -37,31 +37,32 @@ namespace func {
*/
template <class Model>
class Chi2Model1DGrad : public ROOT::Minuit2::FCNGradientBase {
public:
Chi2Model1DGrad(NDView<double, 1> x,
NDView<double, 1> y)
public:
Chi2Model1DGrad(NDView<double, 1> x, NDView<double, 1> y)
: x_(x), y_(y), s_(), weighted_(false) {}
Chi2Model1DGrad(NDView<double, 1> x,
NDView<double, 1> y,
Chi2Model1DGrad(NDView<double, 1> x, NDView<double, 1> y,
NDView<double, 1> y_err)
: x_(x), y_(y), s_(y_err), weighted_(true) {}
~Chi2Model1DGrad() override = default;
double operator()(const std::vector<double>& par) const override {
double operator()(const std::vector<double> &par) const override {
if (par.size() != Model::npar) {
throw std::invalid_argument("Chi2Model1DGrad: wrong parameter vector size.");
throw std::invalid_argument(
"Chi2Model1DGrad: wrong parameter vector size.");
}
if (!Model::is_valid(par)) return 1e20;
if (!Model::is_valid(par))
return 1e20;
double chi2 = 0.0;
if (weighted_) {
for (ssize_t i = 0; i < x_.size(); ++i) {
const double si = s_[i];
if (si == 0.0) continue;
if (si == 0.0)
continue;
const double f_i = Model::eval(x_[i], par);
const double r_i = y_[i] - f_i;
@@ -78,13 +79,16 @@ public:
return chi2;
}
std::vector<double> Gradient(const std::vector<double>& par) const override {
std::vector<double>
Gradient(const std::vector<double> &par) const override {
if (par.size() != Model::npar) {
throw std::invalid_argument("Chi2Model1DGrad: wrong parameter vector size.");
throw std::invalid_argument(
"Chi2Model1DGrad: wrong parameter vector size.");
}
std::vector<double> grad(Model::npar, 0.0);
if (!Model::is_valid(par)) return grad;
if (!Model::is_valid(par))
return grad;
std::array<double, Model::npar> df{};
double f_i = 0.0;
@@ -92,12 +96,13 @@ public:
if (weighted_) {
for (ssize_t i = 0; i < x_.size(); ++i) {
const double si = s_[i];
if (si == 0.0) continue;
if (si == 0.0)
continue;
Model::eval_and_grad(x_[i], par, f_i, df);
const double r_i = y_[i] - f_i;
const double c = -2.0 * r_i / (si * si);
const double c = -2.0 * r_i / (si * si);
for (std::size_t k = 0; k < Model::npar; ++k) {
grad[k] += c * df[k];
@@ -108,7 +113,7 @@ public:
Model::eval_and_grad(x_[i], par, f_i, df);
const double r_i = y_[i] - f_i;
const double c = -2.0 * r_i;
const double c = -2.0 * r_i;
for (std::size_t k = 0; k < Model::npar; ++k) {
grad[k] += c * df[k];
@@ -119,10 +124,11 @@ public:
return grad;
}
/** @brief Error definition: 1.0 for chi-squared (delta_chi2 = 1 -> 1-sigma). */
/** @brief Error definition: 1.0 for chi-squared (delta_chi2 = 1 ->
* 1-sigma). */
double Up() const override { return 1.0; }
private:
private:
NDView<double, 1> x_;
NDView<double, 1> y_;
NDView<double, 1> s_;
@@ -131,12 +137,12 @@ private:
// ── Convenient aliases ──────────────────────────────────────────────
using Chi2Gaussian = Chi2Model1DGrad<aare::model::Gaussian>;
using Chi2RisingScurve = Chi2Model1DGrad<aare::model::RisingScurve>;
using Chi2Gaussian = Chi2Model1DGrad<aare::model::Gaussian>;
using Chi2RisingScurve = Chi2Model1DGrad<aare::model::RisingScurve>;
using Chi2FallingScurve = Chi2Model1DGrad<aare::model::FallingScurve>;
using Chi2Pol1 = Chi2Model1DGrad<aare::model::Pol1>;
using Chi2Pol2 = Chi2Model1DGrad<aare::model::Pol2>;
using Chi2Pol1 = Chi2Model1DGrad<aare::model::Pol1>;
using Chi2Pol2 = Chi2Model1DGrad<aare::model::Pol2>;
} // namespace aare::func
} // namespace func
} // aare
} // namespace aare
+1 -1
View File
@@ -17,7 +17,7 @@ template <class ItemType> class CircularFifo {
aare::ProducerConsumerQueue<ItemType> filled_slots;
public:
CircularFifo() : CircularFifo(100){};
CircularFifo() : CircularFifo(100) {};
CircularFifo(uint32_t size)
: fifo_size(size), free_slots(size + 1), filled_slots(size + 1) {
+66 -66
View File
@@ -5,17 +5,17 @@
#include <fmt/core.h>
#include <vector>
#include "aare/utils/par.hpp"
#include "aare/utils/task.hpp"
#include "aare/NDArray.hpp"
#include "aare/Chi2.hpp"
#include "aare/FitModel.hpp"
#include "aare/NDArray.hpp"
#include "aare/utils/par.hpp"
#include "aare/utils/task.hpp"
#include "Minuit2/FunctionMinimum.h"
#include "Minuit2/MnMigrad.h"
#include "Minuit2/MnHesse.h"
#include "Minuit2/MnUserParameters.h"
#include "Minuit2/MnMigrad.h"
#include "Minuit2/MnPrint.h"
#include "Minuit2/MnUserParameters.h"
namespace aare {
@@ -34,7 +34,6 @@ NDArray<double, 1> scurve2(NDView<double, 1> x, NDView<double, 1> par);
} // namespace func
static constexpr int DEFAULT_NUM_THREADS = 4;
/**
@@ -80,7 +79,6 @@ void fit_gaus(NDView<double, 1> x, NDView<double, 3> y, NDView<double, 3> y_err,
NDView<double, 3> par_out, NDView<double, 3> par_err_out,
NDView<double, 2> chi2_out, int n_threads = DEFAULT_NUM_THREADS);
NDArray<double, 1> fit_pol1(NDView<double, 1> x, NDView<double, 1> y);
NDArray<double, 3> fit_pol1(NDView<double, 1> x, NDView<double, 3> y,
@@ -136,7 +134,8 @@ void fit_scurve2(NDView<double, 1> x, NDView<double, 3> y,
* - Neither: both value and step size auto-filled from data.
*
* @tparam Model Model struct (Gaussian, RisingScurve, …).
* @tparam FCN Chi2 functor type (Chi2Model1D or Chi2Model1DGrad instantiation).
* @tparam FCN Chi2 functor type (Chi2Model1D or Chi2Model1DGrad
* instantiation).
*
* @param model The FitModel configuration (read-only).
* @param upar_local Thread-local clone of model.upar(). Modified in place.
@@ -148,20 +147,19 @@ void fit_scurve2(NDView<double, 1> x, NDView<double, 3> y,
* - compute_errors: [p0..pN, err0..errN, chi2] -> 2*npar + 1
* - otherwise: [p0..pN, chi2] -> npar + 1
*/
template<typename Model, typename FCN>
NDArray<double, 1> fit_pixel(const FitModel<Model>& model,
ROOT::Minuit2::MnUserParameters& upar_local,
NDView<double, 1> x,
NDView<double, 1> y,
NDView<double, 1> y_err) {
template <typename Model, typename FCN>
NDArray<double, 1> fit_pixel(const FitModel<Model> &model,
ROOT::Minuit2::MnUserParameters &upar_local,
NDView<double, 1> x, NDView<double, 1> y,
NDView<double, 1> y_err) {
constexpr std::size_t npar = Model::npar;
constexpr std::size_t npar = Model::npar;
const bool want_errors = model.compute_errors();
const ssize_t result_size = want_errors ? (2 * npar + 1) : (npar + 1);
// ──── automatic parameter estimation ─────────────
auto start = Model::estimate_par(x,y);
auto start = Model::estimate_par(x, y);
// dead / degenerate pixel guard
if (!Model::is_valid(std::vector<double>(start.begin(), start.end()))) {
return NDArray<double, 1>({result_size}, 0.0);
@@ -170,18 +168,18 @@ NDArray<double, 1> fit_pixel(const FitModel<Model>& model,
// ──── data-range statistics for step sizes ─────────────
double x_range, y_range, slope_scale;
model::compute_ranges(x, y, x_range, y_range, slope_scale);
std::array<double, npar> steps{};
Model::compute_steps(start, x_range, y_range, slope_scale, steps);
// ── apply auto-estimates respecting user precedence ─────────────
for(std::size_t i = 0; i < npar; ++i){
for (std::size_t i = 0; i < npar; ++i) {
// fixed: do not touch at all
if(model.is_user_fixed(i)){
if (model.is_user_fixed(i)) {
continue;
}
if(!model.is_user_start(i)){
if (!model.is_user_start(i)) {
upar_local.SetValue(i, start[i]);
}
@@ -193,7 +191,8 @@ NDArray<double, 1> fit_pixel(const FitModel<Model>& model,
// ──── run minimizer ────────
ROOT::Minuit2::MnMigrad migrad(chi2, upar_local, model.strategy());
ROOT::Minuit2::FunctionMinimum min = migrad(model.max_calls(), model.tolerance());
ROOT::Minuit2::FunctionMinimum min =
migrad(model.max_calls(), model.tolerance());
if (!min.IsValid())
return NDArray<double, 1>({result_size}, 0.0);
@@ -202,20 +201,20 @@ NDArray<double, 1> fit_pixel(const FitModel<Model>& model,
if (want_errors) {
ROOT::Minuit2::MnHesse hesse;
hesse(chi2, min);
const auto& values = min.UserState().Params();
const auto& errors = min.UserState().Errors();
const auto &values = min.UserState().Params();
const auto &errors = min.UserState().Errors();
NDArray<double, 1> result({result_size});
for (std::size_t k = 0; k < npar; ++k) {
result[k] = values[k];
result[k] = values[k];
result[npar + k] = errors[k];
}
result[2 * npar] = min.Fval();
return result;
}
const auto& values = min.UserState().Params();
const auto &values = min.UserState().Params();
NDArray<double, 1> result({result_size});
for (std::size_t k = 0; k < npar; ++k)
result[k] = values[k];
@@ -225,21 +224,16 @@ NDArray<double, 1> fit_pixel(const FitModel<Model>& model,
// ── self-contained for 1D / standalone use ─────────
template <typename Model, typename FCN>
NDArray<double, 1> fit_pixel(const FitModel<Model>& model,
NDView<double, 1> x,
NDView<double, 1> y,
NDView<double, 1> y_err)
{
NDArray<double, 1> fit_pixel(const FitModel<Model> &model, NDView<double, 1> x,
NDView<double, 1> y, NDView<double, 1> y_err) {
auto upar_local = model.upar();
return fit_pixel<Model, FCN>(model, upar_local, x, y, y_err);
}
// Overload: uncertainties not provided
template <typename Model, typename FCN>
NDArray<double, 1> fit_pixel(const FitModel<Model>& model,
NDView<double, 1> x,
NDView<double, 1> y)
{
NDArray<double, 1> fit_pixel(const FitModel<Model> &model, NDView<double, 1> x,
NDView<double, 1> y) {
auto upar_local = model.upar();
return fit_pixel<Model, FCN>(model, upar_local, x, y, NDView<double, 1>{});
}
@@ -257,68 +251,74 @@ NDArray<double, 1> fit_pixel(const FitModel<Model>& model,
* @param model Fit configuration shared by all pixels.
* @param x Scan points, shape `(n_scan)`.
* @param y Measured values, shape `(rows, cols, n_scan)`.
* @param y_err Uncertainties, same shape as y, or empty for unweighted fits.
* @param y_err Uncertainties, same shape as y, or empty for unweighted
* fits.
* @param par_out Output parameters, shape `(rows, cols, npar)`.
* @param err_out Output parameter errors, shape `(rows, cols, npar)`, if used.
* @param chi2_out Output chi-squared / objective values, shape `(rows, cols)`.
* @param err_out Output parameter errors, shape `(rows, cols, npar)`, if
* used.
* @param chi2_out Output chi-squared / objective values, shape `(rows,
* cols)`.
* @param n_threads Number of threads used to split rows.
*
*/
template <typename Model, typename FCN>
void fit_3d(const FitModel<Model>& model,
NDView<double, 1> x, // (n_scan)
NDView<double, 3> y, // (rows, cols, n_scan)
NDView<double, 3> y_err, // (rows, cols, n_scan) or empty for unweighted fit
NDView<double, 3> par_out,
NDView<double, 3> err_out,
NDView<double, 2> chi2_out,
int n_threads)
{
void fit_3d(
const FitModel<Model> &model, NDView<double, 1> x, // (n_scan)
NDView<double, 3> y, // (rows, cols, n_scan)
NDView<double, 3> y_err, // (rows, cols, n_scan) or empty for unweighted fit
NDView<double, 3> par_out, NDView<double, 3> err_out,
NDView<double, 2> chi2_out, int n_threads) {
const std::size_t npar = Model::npar;
// ──── checks ───────
if (x.size() != y.shape(2))
throw std::runtime_error("fit_3d: x.size() must match y.shape(2).");
if (par_out.shape(0) != y.shape(0) || par_out.shape(1) != y.shape(1) || par_out.shape(2) != npar)
if (par_out.shape(0) != y.shape(0) || par_out.shape(1) != y.shape(1) ||
par_out.shape(2) != npar)
throw std::runtime_error("par_out must have shape [rows, cols, npar].");
if (chi2_out.shape(0) != y.shape(0) || chi2_out.shape(1) != y.shape(1))
throw std::runtime_error("chi2_out must have shape [rows, cols].");
const bool has_errors = (y_err.size() > 0);
const bool want_par_errors = (err_out.size() > 0) && model.compute_errors();
if (has_errors) {
if (y.shape(0) != y_err.shape(0) || y.shape(1) != y_err.shape(1) || y.shape(2) != y_err.shape(2))
throw std::runtime_error("fit_3d: y and y_err must have identical shape.");
if (y.shape(0) != y_err.shape(0) || y.shape(1) != y_err.shape(1) ||
y.shape(2) != y_err.shape(2))
throw std::runtime_error(
"fit_3d: y and y_err must have identical shape.");
if (err_out.shape(0) != y.shape(0) || err_out.shape(1) != y.shape(1) || err_out.shape(2) != npar)
throw std::runtime_error("err_out must have shape [rows, cols, npar].");
if (err_out.shape(0) != y.shape(0) || err_out.shape(1) != y.shape(1) ||
err_out.shape(2) != npar)
throw std::runtime_error(
"err_out must have shape [rows, cols, npar].");
}
// ──── parallel dispatch ───────
auto process = [&](ssize_t first_row, ssize_t last_row) {
// one clone per thread
auto upar_local = model.upar();
// one clone per thread
auto upar_local = model.upar();
for (ssize_t row = first_row; row < last_row; row++) {
for (ssize_t col = 0; col < y.shape(1); col++) {
NDView<double, 1> values(&y(row, col, 0), {y.shape(2)});
NDView<double, 1> errors = has_errors
? NDView<double, 1>(&y_err(row, col, 0), {y_err.shape(2)})
: NDView<double, 1>{};
NDView<double, 1> errors =
has_errors ? NDView<double, 1>(&y_err(row, col, 0),
{y_err.shape(2)})
: NDView<double, 1>{};
auto res = fit_pixel<Model, FCN>(model, upar_local, x, values, errors);
for(std::size_t k = 0; k < npar; ++k) {
auto res =
fit_pixel<Model, FCN>(model, upar_local, x, values, errors);
for (std::size_t k = 0; k < npar; ++k) {
par_out(row, col, k) = res(k);
}
if (want_par_errors) {
for(std::size_t k = 0; k < npar; ++k){
for (std::size_t k = 0; k < npar; ++k) {
err_out(row, col, k) = res(npar + k);
}
chi2_out(row, col) = res(2 * npar);
+40 -45
View File
@@ -1,17 +1,15 @@
// SPDX-License-Identifier: MPL-2.0
#pragma once
#include <type_traits>
#include "aare/Models.hpp"
#include <type_traits>
#include "Minuit2/MnUserParameters.h"
#include "Minuit2/MnStrategy.h"
#include "Minuit2/MnUserParameters.h"
namespace aare {
template <typename Model>
class FitModel {
template <typename Model> class FitModel {
ROOT::Minuit2::MnUserParameters upar_;
ROOT::Minuit2::MnStrategy strategy_;
unsigned int max_calls_;
@@ -22,54 +20,50 @@ class FitModel {
std::array<bool, Model::npar> user_start_{};
/** @brief Safely resolve a parameter name to its index. */
unsigned int checked_index(const std::string& name) const {
unsigned int checked_index(const std::string &name) const {
for (std::size_t i = 0; i < npar; ++i) {
if (upar_.Name(i) == name)
return static_cast<unsigned int>(i);
}
throw std::runtime_error(
"FitModel: unknown parameter name '" + name + "'");
throw std::runtime_error("FitModel: unknown parameter name '" + name +
"'");
}
public:
public:
static constexpr std::size_t npar = Model::npar;
/**
* @brief Construct a fit model with sensible defaults.
*
* @param strategy Minuit2 strategy level (0 = fast/gradient, 1 = default).
* @param strategy Minuit2 strategy level (0 = fast/gradient, 1 =
* default).
* @param max_calls Maximum FCN calls per pixel minimisation.
* @param tolerance Minuit2 EDM tolerance.
* @param compute_errors If true, run MnHesse after minimisation.
*/
FitModel(unsigned int strategy = 0,
unsigned int max_calls = 100,
double tolerance = 0.5,
bool compute_errors = false)
: strategy_(strategy),
max_calls_(max_calls),
tolerance_(tolerance),
compute_errors_(compute_errors)
{
for(std::size_t i = 0; i < npar; ++i){
FitModel(unsigned int strategy = 0, unsigned int max_calls = 100,
double tolerance = 0.5, bool compute_errors = false)
: strategy_(strategy), max_calls_(max_calls), tolerance_(tolerance),
compute_errors_(compute_errors) {
for (std::size_t i = 0; i < npar; ++i) {
const auto pi = Model::param_info[i];
const bool has_lo = std::isfinite(pi.default_lo);
const bool has_hi = std::isfinite(pi.default_hi);
// Add parameters and valid bounds
if (has_lo && has_hi){
if (has_lo && has_hi) {
upar_.Add(pi.name, 0.0, 1.0, pi.default_lo, pi.default_hi);
} else if (has_lo) {
upar_.Add(pi.name, 0.0, 1.0, pi.default_lo, 1e6);
} else {
upar_.Add(pi.name, 0.0, 1.0);
}
}
}
}
/** @brief Set lower and upper bounds for parameter idx.*/
void SetParLimits(unsigned int idx, double lo, double hi) {
upar_.SetLimits(idx, lo, hi);
void SetParLimits(unsigned int idx, double lo, double hi) {
upar_.SetLimits(idx, lo, hi);
}
/**
@@ -77,19 +71,20 @@ public:
*
* Excluded from minimisation. Automatic estimates will not touch it.
*/
void FixParameter(unsigned int idx, double val) {
void FixParameter(unsigned int idx, double val) {
SetParameter(idx, val);
upar_.Fix(idx);
user_fixed_[idx] = true;
}
/** @brief Release a previously fixed parameter, re-enabling auto estimates. */
/** @brief Release a previously fixed parameter, re-enabling auto estimates.
*/
void ReleaseParameter(unsigned int idx) {
upar_.Release(idx);
user_fixed_[idx] = false;
}
void ReleaseParameter(const std::string& name) {
void ReleaseParameter(const std::string &name) {
ReleaseParameter(checked_index(name));
}
@@ -99,22 +94,23 @@ public:
user_start_[idx] = true;
}
void SetParameter(const std::string& name, double val) {
void SetParameter(const std::string &name, double val) {
// go through index to maintain user_start_ bookkeeping
SetParameter(checked_index(name), val);
}
void FixParameter(const std::string& name, double val) {
void FixParameter(const std::string &name, double val) {
// go through index to maintain user_fixed_ bookkeeping
FixParameter(checked_index(name), val);
}
void SetParLimits(const std::string& name, double lo, double hi) {
void SetParLimits(const std::string &name, double lo, double hi) {
SetParLimits(checked_index(name), lo, hi);
}
std::string GetParName(unsigned int idx) const { return upar_.GetName(idx); }
std::string GetParName(unsigned int idx) const {
return upar_.GetName(idx);
}
std::vector<std::string> GetParNames() const {
std::vector<std::string> names;
@@ -123,20 +119,19 @@ public:
return names;
}
static constexpr std::size_t GetNpar() noexcept { return npar; }
void SetMaxCalls(unsigned int n) { max_calls_ = n; }
void SetTolerance(double t) { tolerance_ = t; }
void SetComputeErrors(bool b) { compute_errors_ = b; }
void SetMaxCalls(unsigned int n) { max_calls_ = n; }
void SetTolerance(double t) { tolerance_ = t; }
void SetComputeErrors(bool b) { compute_errors_ = b; }
// accessors
const ROOT::Minuit2::MnUserParameters& upar() const { return upar_; }
const ROOT::Minuit2::MnStrategy& strategy() const { return strategy_; }
unsigned int max_calls() const { return max_calls_; }
double tolerance() const { return tolerance_; }
bool compute_errors() const { return compute_errors_; }
bool is_user_fixed(unsigned int idx) const { return user_fixed_[idx]; }
bool is_user_start(unsigned int idx) const { return user_start_[idx]; }
const ROOT::Minuit2::MnUserParameters &upar() const { return upar_; }
const ROOT::Minuit2::MnStrategy &strategy() const { return strategy_; }
unsigned int max_calls() const { return max_calls_; }
double tolerance() const { return tolerance_; }
bool compute_errors() const { return compute_errors_; }
bool is_user_fixed(unsigned int idx) const { return user_fixed_[idx]; }
bool is_user_start(unsigned int idx) const { return user_start_[idx]; }
};
} // namespace aare
+164 -168
View File
@@ -1,35 +1,37 @@
// SPDX-License-Identifier: MPL-2.0
#pragma once
#include "aare/NDView.hpp"
#include <algorithm>
#include <array>
#include <cmath>
#include <vector>
#include <limits>
#include <algorithm>
#include "aare/NDView.hpp"
#include <vector>
namespace aare::model {
inline constexpr double inv_sqrt2 = 0.70710678118654752440;
inline constexpr double inv_sqrt2 = 0.70710678118654752440;
inline constexpr double inv_sqrt_2pi = 0.39894228040143267794;
inline double fast_erf(double x) {
//AbramowitzStegun Handbook of Mathematical Functions
//erf approximation with max error ~1.5e-7, faster than std::erf.
// AbramowitzStegun Handbook of Mathematical Functions
// erf approximation with max error ~1.5e-7, faster than std::erf.
const double a1 = 0.254829592;
const double a2 = -0.284496736;
const double a3 = 1.421413741;
const double a4 = -1.453152027;
const double a5 = 1.061405429;
const double p = 0.3275911;
const double p = 0.3275911;
const int sign = x < 0 ? -1 : 1;
x = std::abs(x);
// 7.1.26
const double t = 1.0 / (1.0 + p * x);
const double y = 1.0 - (((((a5 * t + a4) * t + a3) * t + a2) * t + a1) * t) * std::exp(-x * x);
const double y =
1.0 -
(((((a5 * t + a4) * t + a3) * t + a2) * t + a1) * t) * std::exp(-x * x);
return sign * y;
}
@@ -41,11 +43,11 @@ inline double fast_erf(double x) {
* Unbounded directions use ±no_bound as sentinels.
*/
struct ParamInfo {
const char* name; // name of parameter
double default_lo; // lower bound value
double default_hi; // upper bound value
const char *name; // name of parameter
double default_lo; // lower bound value
double default_hi; // upper bound value
};
inline constexpr double no_bound = std::numeric_limits<double>::infinity();
/**
@@ -53,18 +55,15 @@ inline constexpr double no_bound = std::numeric_limits<double>::infinity();
*
* Model-independent, called once per pixel.
*/
inline void compute_ranges(NDView<double, 1> x,
NDView<double, 1> y,
double& x_range,
double& y_range,
double& slope_scale)
{
inline void compute_ranges(NDView<double, 1> x, NDView<double, 1> y,
double &x_range, double &y_range,
double &slope_scale) {
const auto [x_min, x_max] = std::minmax_element(x.begin(), x.end());
const auto [y_min, y_max] = std::minmax_element(y.begin(), y.end());
x_range = std::max(*x_max - *x_min, 1e-9);
y_range = std::max(*y_max - *y_min, 1e-9);
slope_scale = std::max(y_range / x_range, 1e-9);
}
@@ -90,50 +89,45 @@ struct Pol1 {
static constexpr std::size_t npar = 2;
static constexpr std::array<ParamInfo, npar> param_info = {{
{"p0", -no_bound, no_bound},
{"p1", -no_bound, no_bound},
{"p0", -no_bound, no_bound},
{"p1", -no_bound, no_bound},
}};
static double eval(double x, const std::vector<double>& par) {
static double eval(double x, const std::vector<double> &par) {
return par[0] + par[1] * x;
}
static void eval_and_grad(double x,
const std::vector<double>& par,
double& f,
std::array<double, npar>& g)
{
static void eval_and_grad(double x, const std::vector<double> &par,
double &f, std::array<double, npar> &g) {
f = par[0] + par[1] * x;
g[0] = 1.0 ; // df/dp0
g[1] = x; // df/dp1
g[0] = 1.0; // df/dp0
g[1] = x; // df/dp1
}
static bool is_valid([[maybe_unused]] const std::vector<double>& par) {
return true; // always valid
static bool is_valid([[maybe_unused]] const std::vector<double> &par) {
return true; // always valid
}
/** @brief Estimate from endpoints: slope = dy/dx, intercept from first point. */
/** @brief Estimate from endpoints: slope = dy/dx, intercept from first
* point. */
static std::array<double, npar> estimate_par(NDView<double, 1> x,
NDView<double, 1> y)
{
const double dx = x[x.size()-1] - x[0];
const double dy = y[y.size()-1] - y[0];
const double slope = (std::abs(dx) > 1e-12) ? dy/dx : 0.0;
NDView<double, 1> y) {
const double dx = x[x.size() - 1] - x[0];
const double dy = y[y.size() - 1] - y[0];
const double slope = (std::abs(dx) > 1e-12) ? dy / dx : 0.0;
const double intercept = y[0] - slope * x[0];
return {intercept, slope};
}
static void compute_steps(const std::array<double, npar>& start,
[[maybe_unused]] double x_range, double y_range, double slope_scale,
std::array<double, npar>& steps)
{
static void compute_steps(const std::array<double, npar> &start,
[[maybe_unused]] double x_range, double y_range,
double slope_scale,
std::array<double, npar> &steps) {
steps[0] = std::max(0.1 * std::abs(start[0]), 0.1 * y_range);
steps[1] = 0.1 * slope_scale;
}
};
// _____________________________________________________________________
@@ -144,7 +138,7 @@ struct Pol1 {
/**
* @brief Polynomial fuction of degree 2
*
* f(x) = p0 + p1 * x + p2 * x * x
* f(x) = p0 + p1 * x + p2 * x * x
*
* Parameters:
* par[0] = p0 (constant term)
@@ -160,29 +154,26 @@ struct Pol2 {
static constexpr std::size_t npar = 3;
static constexpr std::array<ParamInfo, npar> param_info = {{
{"p0", -no_bound, no_bound},
{"p1", -no_bound, no_bound},
{"p2", -no_bound, no_bound},
{"p0", -no_bound, no_bound},
{"p1", -no_bound, no_bound},
{"p2", -no_bound, no_bound},
}};
static double eval(double x, const std::vector<double>& par) {
static double eval(double x, const std::vector<double> &par) {
return par[0] + par[1] * x + par[2] * x * x;
}
static void eval_and_grad(double x,
const std::vector<double>& par,
double& f,
std::array<double, npar>& g)
{
static void eval_and_grad(double x, const std::vector<double> &par,
double &f, std::array<double, npar> &g) {
f = par[0] + par[1] * x + par[2] * x * x;
g[0] = 1.0 ; // df/dp0
g[1] = x; // df/dp1
g[2] = x * x; // df/dp2
g[0] = 1.0; // df/dp0
g[1] = x; // df/dp1
g[2] = x * x; // df/dp2
}
static bool is_valid([[maybe_unused]]const std::vector<double>& par) {
return true; // always valid
static bool is_valid([[maybe_unused]] const std::vector<double> &par) {
return true; // always valid
}
/**
@@ -204,23 +195,22 @@ struct Pol2 {
* when curvature is negligible i.e. slope_e ≈ slope_s -> p2 ≈ 0.
*/
static std::array<double, npar> estimate_par(NDView<double, 1> x,
NDView<double, 1> y)
{
NDView<double, 1> y) {
const ssize_t n = y.size();
const ssize_t tail = std::max<ssize_t>(n / 10, 2);
// start: slope from first 10%
const double x_s = (x[0] + x[tail-1]) * 0.5;
const double slope_s = (y[tail-1] - y[0]) / (x[tail-1] - x[0]);
const double x_s = (x[0] + x[tail - 1]) * 0.5;
const double slope_s = (y[tail - 1] - y[0]) / (x[tail - 1] - x[0]);
// end: slope from last 10%
const double x_e = (x[n-tail] + x[n-1]) * 0.5;
const double slope_e = (y[n-1] - y[n-tail]) / (x[n-1] - x[n-tail]);
const double x_e = (x[n - tail] + x[n - 1]) * 0.5;
const double slope_e =
(y[n - 1] - y[n - tail]) / (x[n - 1] - x[n - tail]);
const double dx = x_e - x_s;
const double p2 = (std::abs(dx) > 1e-12)
? (slope_e - slope_s) / (2.0 * dx)
: 0.0;
const double p2 =
(std::abs(dx) > 1e-12) ? (slope_e - slope_s) / (2.0 * dx) : 0.0;
// p1 from slope_s = p1 + 2*p2*x_s
const double p1 = slope_s - 2.0 * p2 * x_s;
@@ -231,11 +221,10 @@ struct Pol2 {
return {p0, p1, p2};
}
static void compute_steps(const std::array<double, npar>& start,
double x_range, double y_range, double slope_scale,
std::array<double, npar>& steps)
{
static void compute_steps(const std::array<double, npar> &start,
double x_range, double y_range,
double slope_scale,
std::array<double, npar> &steps) {
steps[0] = std::max(0.1 * std::abs(start[0]), 0.1 * y_range);
steps[1] = 0.1 * slope_scale;
steps[2] = 0.1 * slope_scale / std::max(x_range, 1e-12);
@@ -266,43 +255,41 @@ struct Gaussian {
static constexpr std::size_t npar = 3;
static constexpr std::array<ParamInfo, npar> param_info = {{
{"A", -no_bound, no_bound},
{"mu", -no_bound, no_bound},
{"sigma", 1e-12, no_bound},
{"A", -no_bound, no_bound},
{"mu", -no_bound, no_bound},
{"sigma", 1e-12, no_bound},
}};
static double eval(double x, const std::vector<double>& par) {
const double A = par[0];
const double mu = par[1];
static double eval(double x, const std::vector<double> &par) {
const double A = par[0];
const double mu = par[1];
const double sig = par[2];
const double dx = x - mu;
const double dx = x - mu;
const double inv_2sig2 = 1.0 / (2.0 * sig * sig);
return A * std::exp(-dx * dx * inv_2sig2);
}
static void eval_and_grad(double x,
const std::vector<double>& par,
double& f,
std::array<double, npar>& g)
{
const double A = par[0];
const double mu = par[1];
static void eval_and_grad(double x, const std::vector<double> &par,
double &f, std::array<double, npar> &g) {
const double A = par[0];
const double mu = par[1];
const double sig = par[2];
const double dx = x - mu;
const double dx = x - mu;
const double inv_2sig2 = 1.0 / (2.0 * sig * sig);
const double e = std::exp(-dx * dx * inv_2sig2);
const double e = std::exp(-dx * dx * inv_2sig2);
f = A * e;
g[0] = e; // df/dA
g[1] = 2.0 * A * e * dx * inv_2sig2; // df/dmu = A*e*(x-mu)/sig^2
g[2] = 2.0 * A * e * dx * dx * inv_2sig2 / sig; // df/dsigma = A*e*(x-mu)^2/sig^3
g[0] = e; // df/dA
g[1] = 2.0 * A * e * dx * inv_2sig2; // df/dmu = A*e*(x-mu)/sig^2
g[2] = 2.0 * A * e * dx * dx * inv_2sig2 /
sig; // df/dsigma = A*e*(x-mu)^2/sig^3
}
/** @brief Reject degenerate sigma (zero width). */
static bool is_valid(const std::vector<double>& par) {
static bool is_valid(const std::vector<double> &par) {
return par[2] != 0.0;
}
@@ -314,41 +301,45 @@ struct Gaussian {
* sigma = FWHM / 2.35
*/
static std::array<double, npar> estimate_par(NDView<double, 1> x,
NDView<double, 1> y)
{
NDView<double, 1> y) {
// find peak
const auto max_it = std::max_element(y.begin(), y.end());
const ssize_t i_max = std::distance(y.begin(), max_it);
const double A = *max_it;
const double A = *max_it;
const double mu = x[i_max];
// FWHM estimate
const double half = A * 0.5;
double x_lo = mu, x_hi = mu;
for (ssize_t i = i_max; i >= 0; --i)
if (y[i] < half) { x_lo = x[i]; break; }
if (y[i] < half) {
x_lo = x[i];
break;
}
for (ssize_t i = i_max; i < y.size(); ++i)
if (y[i] < half) { x_hi = x[i]; break; }
if (y[i] < half) {
x_hi = x[i];
break;
}
const double sig = std::max((x_hi - x_lo) / 2.35, 1e-6);
return {A, mu, sig};
}
/**
* @brief Data-driven Minuit step sizes.
*/
static void compute_steps(const std::array<double, npar>& start,
double x_range, double y_range, double /*slope_scale*/,
std::array<double, npar>& steps)
{
static void compute_steps(const std::array<double, npar> &start,
double x_range, double y_range,
double /*slope_scale*/,
std::array<double, npar> &steps) {
steps[0] = std::max(0.1 * std::abs(start[0]), 0.1 * y_range);
steps[1] = 0.05 * x_range;
steps[2] = 0.05 * x_range;
}
};
// _____________________________________________________________________
//
// RisingScurve
@@ -358,7 +349,8 @@ struct Gaussian {
* @brief Rising S-curve (error-function step) with linear baseline and
* post-step slope.
*
* f(x) = (p0 + p1*x) + 0.5*(1 + erf((x - mu) / (sqrt(2)*sigma))) * (A + C*(x - mu)))
* f(x) = (p0 + p1*x) + 0.5*(1 + erf((x - mu) / (sqrt(2)*sigma))) * (A + C*(x -
* mu)))
*
* Parameters:
* par[0] = p0 (baseline offset)
@@ -375,12 +367,12 @@ struct RisingScurve {
{"p0", -no_bound, no_bound},
{"p1", -no_bound, no_bound},
{"mu", -no_bound, no_bound},
{"sigma", 1e-12, no_bound},
{"sigma", 1e-12, no_bound},
{"A", -no_bound, no_bound},
{"C", -no_bound, no_bound},
}};
static double eval(double x, const std::vector<double>& par) {
static double eval(double x, const std::vector<double> &par) {
const double p0 = par[0];
const double p1 = par[1];
const double p2 = par[2];
@@ -388,7 +380,7 @@ struct RisingScurve {
const double p4 = par[4];
const double p5 = par[5];
const double dx = x - p2;
const double dx = x - p2;
const double step = 0.5 * (1.0 + fast_erf(dx * inv_sqrt2 / p3));
return (p0 + p1 * x) + step * (p4 + p5 * dx);
}
@@ -401,11 +393,8 @@ struct RisingScurve {
* dS/dp2 = -(1/sqrt(2*pi)) * exp(-z^2) / p3
* dS/dp3 = -(1/sqrt(2*pi)) * exp(-z^2) * (x-p2) / p3^2
*/
static void eval_and_grad(double x,
const std::vector<double>& par,
double& f,
std::array<double, npar>& g)
{
static void eval_and_grad(double x, const std::vector<double> &par,
double &f, std::array<double, npar> &g) {
const double p0 = par[0];
const double p1 = par[1];
const double p2 = par[2];
@@ -413,50 +402,48 @@ struct RisingScurve {
const double p4 = par[4];
const double p5 = par[5];
const double dx = x - p2;
const double z = dx * inv_sqrt2 / p3;
const double dx = x - p2;
const double z = dx * inv_sqrt2 / p3;
const double step = 0.5 * (1.0 + fast_erf(z));
const double amp = p4 + p5 * dx;
const double amp = p4 + p5 * dx;
f = (p0 + p1 * x) + step * amp;
const double e = std::exp(-z * z);
const double e = std::exp(-z * z);
const double dSdp2 = -inv_sqrt_2pi * e / p3;
const double dSdp3 = -inv_sqrt_2pi * e * dx / (p3 * p3);
g[0] = 1.0; // df/dp0
g[1] = x; // df/dp1
g[2] = dSdp2 * amp - step * p5; // df/dp2
g[3] = dSdp3 * amp; // df/dp3
g[4] = step; // df/dp4
g[5] = step * dx; // df/dp5
g[0] = 1.0; // df/dp0
g[1] = x; // df/dp1
g[2] = dSdp2 * amp - step * p5; // df/dp2
g[3] = dSdp3 * amp; // df/dp3
g[4] = step; // df/dp4
g[5] = step * dx; // df/dp5
}
/** @brief Reject degenerate width (zero transition width). */
static bool is_valid(const std::vector<double>& par) {
static bool is_valid(const std::vector<double> &par) {
return par[3] != 0.0;
}
/** @brief Data-driven initial parameter estimates for a rising S-curve. */
static std::array<double, npar> estimate_par(NDView<double, 1> x,
NDView<double, 1> y)
{
NDView<double, 1> y) {
const ssize_t n = y.size();
// baseline: average of first ~10% of points (before turn-on)
ssize_t n_base = std::max<ssize_t>(n / 10, 2);
double sum_y = 0, sum_xy = 0, sum_x = 0, sum_x2 = 0;
for (ssize_t i = 0; i < n_base; ++i) {
sum_y += y[i];
sum_x += x[i];
sum_y += y[i];
sum_x += x[i];
sum_xy += x[i] * y[i];
sum_x2 += x[i] * x[i];
}
double denom = n_base * sum_x2 - sum_x * sum_x;
double p1 = (std::abs(denom) > 1e-30)
? (n_base * sum_xy - sum_x * sum_y) / denom
: 0.0;
? (n_base * sum_xy - sum_x * sum_y) / denom
: 0.0;
double p0 = (sum_y - p1 * sum_x) / n_base;
// plateau: average of last ~10%
@@ -486,10 +473,16 @@ struct RisingScurve {
double y_90 = baseline_at_mid + 0.9 * p4;
double x_10 = x[0], x_90 = x[n - 1];
for (ssize_t i = 0; i < n; ++i) {
if (y[i] >= y_10) { x_10 = x[i]; break; }
if (y[i] >= y_10) {
x_10 = x[i];
break;
}
}
for (ssize_t i = 0; i < n; ++i) {
if (y[i] >= y_90) { x_90 = x[i]; break; }
if (y[i] >= y_90) {
x_90 = x[i];
break;
}
}
// for a Gaussian CDF: 10%-90% width = 2 * 1.2816 * sigma
double p3 = std::max((x_90 - x_10) / 2.5631, 1.0);
@@ -498,11 +491,11 @@ struct RisingScurve {
return {p0, p1, p2, p3, p4, p5};
}
static void compute_steps(const std::array<double, npar>& start,
double x_range, double y_range, double slope_scale,
std::array<double, npar>& steps)
{
static void compute_steps(const std::array<double, npar> &start,
double x_range, double y_range,
double slope_scale,
std::array<double, npar> &steps) {
steps[0] = std::max(0.1 * std::abs(start[0]), 0.1 * y_range);
steps[1] = 0.1 * slope_scale;
steps[2] = 0.05 * x_range;
@@ -512,7 +505,6 @@ struct RisingScurve {
}
};
// _____________________________________________________________________
//
// FallingScurve
@@ -522,7 +514,8 @@ struct RisingScurve {
* @brief Falling S-curve (complementary error-function step) with linear
* baseline and post-step slope.
*
* f(x) = (p0 + p1*x) + 0.5*(1 - erf((x - mu) / (sqrt(2)*sigma))) * (A + C*(x - mu))
* f(x) = (p0 + p1*x) + 0.5*(1 - erf((x - mu) / (sqrt(2)*sigma))) * (A + C*(x -
* mu))
*
* Parameters are identical to RisingScurve. The only difference is the
* sign of the erf term, which flips the step direction (and the signs of
@@ -535,12 +528,12 @@ struct FallingScurve {
{"p0", -no_bound, no_bound},
{"p1", -no_bound, no_bound},
{"mu", -no_bound, no_bound},
{"sigma", 1e-12, no_bound},
{"sigma", 1e-12, no_bound},
{"A", -no_bound, no_bound},
{"C", -no_bound, no_bound},
}};
static double eval(double x, const std::vector<double>& par) {
static double eval(double x, const std::vector<double> &par) {
const double p0 = par[0];
const double p1 = par[1];
const double p2 = par[2];
@@ -548,16 +541,13 @@ struct FallingScurve {
const double p4 = par[4];
const double p5 = par[5];
const double dx = x - p2;
const double dx = x - p2;
const double step = 0.5 * (1.0 - fast_erf(dx * inv_sqrt2 / p3));
return (p0 + p1 * x) + step * (p4 + p5 * dx);
}
static void eval_and_grad(double x,
const std::vector<double>& par,
double& f,
std::array<double, npar>& g)
{
static void eval_and_grad(double x, const std::vector<double> &par,
double &f, std::array<double, npar> &g) {
const double p0 = par[0];
const double p1 = par[1];
const double p2 = par[2];
@@ -565,15 +555,15 @@ struct FallingScurve {
const double p4 = par[4];
const double p5 = par[5];
const double dx = x - p2;
const double z = dx * inv_sqrt2 / p3;
const double dx = x - p2;
const double z = dx * inv_sqrt2 / p3;
const double step = 0.5 * (1.0 - fast_erf(z));
const double amp = p4 + p5 * dx;
const double amp = p4 + p5 * dx;
f = (p0 + p1 * x) + step * amp;
const double e = std::exp(-z * z);
const double dSdp2 = +inv_sqrt_2pi * e / p3; // sign flipped vs rising
const double e = std::exp(-z * z);
const double dSdp2 = +inv_sqrt_2pi * e / p3; // sign flipped vs rising
const double dSdp3 = +inv_sqrt_2pi * e * dx / (p3 * p3);
g[0] = 1.0;
@@ -585,29 +575,28 @@ struct FallingScurve {
}
/** @brief Reject degenerate width (zero transition width). */
static bool is_valid(const std::vector<double>& par) {
static bool is_valid(const std::vector<double> &par) {
return par[3] != 0.0;
}
/** @brief Data-driven initial parameter estimates for a falling S-curve. */
static std::array<double, npar> estimate_par(NDView<double, 1> x,
NDView<double, 1> y)
{
NDView<double, 1> y) {
const ssize_t n = y.size();
// baseline: last ~10% of points (after turn-off)
ssize_t n_base = std::max<ssize_t>(n / 10, 2);
double sum_y = 0, sum_xy = 0, sum_x = 0, sum_x2 = 0;
for (ssize_t i = n - n_base; i < n; ++i) {
sum_y += y[i];
sum_x += x[i];
sum_y += y[i];
sum_x += x[i];
sum_xy += x[i] * y[i];
sum_x2 += x[i] * x[i];
}
double denom = n_base * sum_x2 - sum_x * sum_x;
double p1 = (std::abs(denom) > 1e-30)
? (n_base * sum_xy - sum_x * sum_y) / denom
: 0.0;
? (n_base * sum_xy - sum_x * sum_y) / denom
: 0.0;
double p0 = (sum_y - p1 * sum_x) / n_base;
// plateau: average of first ~10%
@@ -637,10 +626,16 @@ struct FallingScurve {
double y_10 = baseline_at_mid + 0.1 * p4;
double x_90 = x[0], x_10 = x[n - 1];
for (ssize_t i = 0; i < n; ++i) {
if (y[i] <= y_90) { x_90 = x[i]; break; }
if (y[i] <= y_90) {
x_90 = x[i];
break;
}
}
for (ssize_t i = 0; i < n; ++i) {
if (y[i] <= y_10) { x_10 = x[i]; break; }
if (y[i] <= y_10) {
x_10 = x[i];
break;
}
}
// same CDF relationship: 10%-90% width = 2 * 1.2816 * sigma
double p3 = std::max((x_10 - x_90) / 2.5631, 1.0);
@@ -649,12 +644,13 @@ struct FallingScurve {
return {p0, p1, p2, p3, p4, p5};
}
static void compute_steps(const std::array<double, npar>& start,
double x_range, double y_range, double slope_scale,
std::array<double, npar>& steps)
{
RisingScurve::compute_steps(start, x_range, y_range, slope_scale, steps);
static void compute_steps(const std::array<double, npar> &start,
double x_range, double y_range,
double slope_scale,
std::array<double, npar> &steps) {
RisingScurve::compute_steps(start, x_range, y_range, slope_scale,
steps);
}
};
+1 -1
View File
@@ -36,7 +36,7 @@ class NDArray : public ArrayExpr<NDArray<T, Ndim>, Ndim> {
* @brief Default constructor. Constructs an empty NDArray.
*
*/
NDArray() : shape_(), strides_(c_strides<Ndim>(shape_)), data_(nullptr){};
NDArray() : shape_(), strides_(c_strides<Ndim>(shape_)), data_(nullptr) {};
/**
* @brief Construct a new NDArray object with a given shape.
+1 -1
View File
@@ -63,7 +63,7 @@ template <class T> struct ProducerConsumerQueue {
return *this;
}
ProducerConsumerQueue() : ProducerConsumerQueue(2){};
ProducerConsumerQueue() : ProducerConsumerQueue(2) {};
// size must be >= 2.
//
// Also, note that the number of usable slots in the queue at any
+1 -1
View File
@@ -67,7 +67,7 @@ class Logger {
public:
Logger() = default;
explicit Logger(TLogLevel level) : m_level(level){};
explicit Logger(TLogLevel level) : m_level(level) {};
~Logger() {
// output in the destructor to allow for << syntax
os << RESET << '\n';
+2 -2
View File
@@ -1,10 +1,10 @@
// SPDX-License-Identifier: MPL-2.0
#pragma once
#include "aare/NDView.hpp"
#include "aare/utils/task.hpp"
#include <thread>
#include <utility>
#include <vector>
#include "aare/NDView.hpp"
#include "aare/utils/task.hpp"
namespace aare {
+1 -1
View File
@@ -41,4 +41,4 @@ AARE_INSTALL_PYTHONEXT = "ON"
[tool.pytest.ini_options]
markers = [
"files: marks tests that need additional data (deselect with '-m \"not files\"')",
]
]
+35 -45
View File
@@ -1,37 +1,38 @@
# SPDX-License-Identifier: MPL-2.0
find_package (Python 3.10 COMPONENTS Interpreter Development.Module REQUIRED)
find_package(
Python 3.10
COMPONENTS Interpreter Development.Module
REQUIRED)
set(PYBIND11_FINDPYTHON ON) # Needed for RH8
# Download or find pybind11 depending on configuration
if(AARE_FETCH_PYBIND11)
FetchContent_Declare(
pybind11
GIT_REPOSITORY https://github.com/pybind/pybind11
GIT_TAG v2.13.6
)
FetchContent_MakeAvailable(pybind11)
FetchContent_Declare(
pybind11
GIT_REPOSITORY https://github.com/pybind/pybind11
GIT_TAG v2.13.6)
FetchContent_MakeAvailable(pybind11)
else()
find_package(pybind11 2.13 REQUIRED)
find_package(pybind11 2.13 REQUIRED)
endif()
# Add the compiled python extension
pybind11_add_module(
_aare # name of the module
src/module.cpp # source file
)
set_target_properties(_aare PROPERTIES
LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}
_aare # name of the module
src/module.cpp # source file
)
set_target_properties(_aare PROPERTIES LIBRARY_OUTPUT_DIRECTORY
${CMAKE_BINARY_DIR})
target_link_libraries(_aare PRIVATE aare_core aare_compiler_flags)
target_include_directories(_aare SYSTEM PRIVATE
$<TARGET_PROPERTY:Minuit2::Minuit2,INTERFACE_INCLUDE_DIRECTORIES>
)
target_include_directories(
_aare SYSTEM
PRIVATE $<TARGET_PROPERTY:Minuit2::Minuit2,INTERFACE_INCLUDE_DIRECTORIES>)
# List of python files to be copied to the build directory
set( PYTHON_FILES
set(PYTHON_FILES
aare/__init__.py
aare/CtbRawFile.py
aare/ClusterFinder.py
@@ -42,43 +43,32 @@ set( PYTHON_FILES
aare/RawFile.py
aare/transform.py
aare/ScanParameters.py
aare/utils.py
)
aare/utils.py)
# Copy the python files to the build directory
foreach(FILE ${PYTHON_FILES})
configure_file(${FILE} ${CMAKE_BINARY_DIR}/${FILE} )
configure_file(${FILE} ${CMAKE_BINARY_DIR}/${FILE})
endforeach(FILE ${PYTHON_FILES})
set_target_properties(_aare PROPERTIES
LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/aare
)
set(PYTHON_EXAMPLES
examples/play.py
examples/fits.py
)
set_target_properties(_aare PROPERTIES LIBRARY_OUTPUT_DIRECTORY
${CMAKE_BINARY_DIR}/aare)
set(PYTHON_EXAMPLES examples/play.py examples/fits.py)
# Copy the python examples to the build directory
foreach(FILE ${PYTHON_EXAMPLES})
configure_file(${FILE} ${CMAKE_BINARY_DIR}/${FILE} )
message(STATUS "Copying ${FILE} to ${CMAKE_BINARY_DIR}/${FILE}")
configure_file(${FILE} ${CMAKE_BINARY_DIR}/${FILE})
message(STATUS "Copying ${FILE} to ${CMAKE_BINARY_DIR}/${FILE}")
endforeach(FILE ${PYTHON_EXAMPLES})
if(AARE_INSTALL_PYTHONEXT)
install(
TARGETS _aare
EXPORT "${TARGETS_EXPORT_NAME}"
LIBRARY DESTINATION aare
COMPONENT python
)
install(
TARGETS _aare
EXPORT "${TARGETS_EXPORT_NAME}"
LIBRARY DESTINATION aare COMPONENT python)
install(
FILES ${PYTHON_FILES}
DESTINATION aare
COMPONENT python
)
endif()
install(
FILES ${PYTHON_FILES}
DESTINATION aare
COMPONENT python)
endif()
+132 -112
View File
@@ -5,72 +5,88 @@
#include <pybind11/stl.h>
#include <pybind11/stl_bind.h>
#include "aare/Chi2.hpp"
#include "aare/Fit.hpp"
#include "aare/FitModel.hpp"
#include "aare/Models.hpp"
#include "aare/Chi2.hpp"
namespace py = pybind11;
using namespace pybind11::literals;
template <typename Model, typename FCN>
py::object fit_dispatch(
const aare::FitModel<Model>& model,
py::array_t<double, py::array::c_style | py::array::forcecast> x,
py::array_t<double, py::array::c_style | py::array::forcecast> y,
py::object y_err_obj,
int n_threads);
py::object
fit_dispatch(const aare::FitModel<Model> &model,
py::array_t<double, py::array::c_style | py::array::forcecast> x,
py::array_t<double, py::array::c_style | py::array::forcecast> y,
py::object y_err_obj, int n_threads);
template <typename Model>
void bind_fit_model(py::module& m, const char* name) {
template <typename Model> void bind_fit_model(py::module &m, const char *name) {
using FM = aare::FitModel<Model>;
using FCN = aare::func::Chi2Model1DGrad<Model>;
py::class_<FM>(m, name)
.def(py::init<unsigned int, unsigned int, double, bool>(),
py::arg("strategy") = 0,
py::arg("max_calls") = 100,
py::arg("tolerance") = 0.5,
py::arg("compute_errors") = false)
.def("SetParLimits", py::overload_cast<unsigned int, double, double>(&FM::SetParLimits), py::arg("idx"), py::arg("lo"), py::arg("hi"))
.def("SetParLimits", py::overload_cast<const std::string&, double, double>(&FM::SetParLimits), py::arg("idx"), py::arg("lo"), py::arg("hi"))
.def("FixParameter", py::overload_cast<unsigned int, double>(&FM::FixParameter), py::arg("idx"), py::arg("val"))
.def("FixParameter", py::overload_cast<const std::string&, double>(&FM::FixParameter), py::arg("idx"), py::arg("val"))
.def("ReleaseParameter", py::overload_cast<unsigned int>(&FM::ReleaseParameter), py::arg("idx"))
.def("ReleaseParameter", py::overload_cast<const std::string&>(&FM::ReleaseParameter), py::arg("idx"))
.def("SetParameter", py::overload_cast<unsigned int, double>(&FM::SetParameter), py::arg("idx"), py::arg("val"))
.def("SetParameter", py::overload_cast<const std::string&, double>(&FM::SetParameter), py::arg("idx"), py::arg("val"))
py::arg("strategy") = 0, py::arg("max_calls") = 100,
py::arg("tolerance") = 0.5, py::arg("compute_errors") = false)
.def("SetParLimits",
py::overload_cast<unsigned int, double, double>(&FM::SetParLimits),
py::arg("idx"), py::arg("lo"), py::arg("hi"))
.def("SetParLimits",
py::overload_cast<const std::string &, double, double>(
&FM::SetParLimits),
py::arg("idx"), py::arg("lo"), py::arg("hi"))
.def("FixParameter",
py::overload_cast<unsigned int, double>(&FM::FixParameter),
py::arg("idx"), py::arg("val"))
.def("FixParameter",
py::overload_cast<const std::string &, double>(&FM::FixParameter),
py::arg("idx"), py::arg("val"))
.def("ReleaseParameter",
py::overload_cast<unsigned int>(&FM::ReleaseParameter),
py::arg("idx"))
.def("ReleaseParameter",
py::overload_cast<const std::string &>(&FM::ReleaseParameter),
py::arg("idx"))
.def("SetParameter",
py::overload_cast<unsigned int, double>(&FM::SetParameter),
py::arg("idx"), py::arg("val"))
.def("SetParameter",
py::overload_cast<const std::string &, double>(&FM::SetParameter),
py::arg("idx"), py::arg("val"))
.def("GetParName", &FM::GetParName, py::arg("idx"))
.def("GetParNames", &FM::GetParNames)
.def_property_readonly("par_names", &FM::GetParNames)
.def_property_readonly("n_par", [](py::object /*cls*/) { return Model::npar; })
.def_property_readonly("n_par",
[](py::object /*cls*/) { return Model::npar; })
.def_property("max_calls", &FM::max_calls, &FM::SetMaxCalls)
.def_property("tolerance", &FM::tolerance, &FM::SetTolerance)
.def_property("compute_errors", &FM::compute_errors, &FM::SetComputeErrors)
.def("__call__",
[](const FM& /*self*/,
py::array_t<double, py::array::c_style | py::array::forcecast> x,
py::array_t<double, py::array::c_style | py::array::forcecast> par)
{
.def_property("compute_errors", &FM::compute_errors,
&FM::SetComputeErrors)
.def(
"__call__",
[](const FM & /*self*/,
py::array_t<double, py::array::c_style | py::array::forcecast> x,
py::array_t<double, py::array::c_style | py::array::forcecast>
par) {
auto x_view = make_view_1d(x);
auto p_view = make_view_1d(par);
std::vector<double> pvec(p_view.begin(), p_view.end());
auto* result = new aare::NDArray<double, 1>({x_view.size()});
auto *result = new aare::NDArray<double, 1>({x_view.size()});
for (ssize_t i = 0; i < x_view.size(); ++i)
(*result)(i) = Model::eval(x_view[i], pvec);
return return_image_data(result);
},
py::arg("x"), py::arg("par"))
.def("fit",
[](const FM& self,
py::array_t<double, py::array::c_style | py::array::forcecast> x,
py::array_t<double, py::array::c_style | py::array::forcecast> y,
py::object y_err_obj,
int n_threads) -> py::object
{
return fit_dispatch<Model, FCN>(self, x, y, y_err_obj, n_threads);
.def(
"fit",
[](const FM &self,
py::array_t<double, py::array::c_style | py::array::forcecast> x,
py::array_t<double, py::array::c_style | py::array::forcecast> y,
py::object y_err_obj, int n_threads) -> py::object {
return fit_dispatch<Model, FCN>(self, x, y, y_err_obj,
n_threads);
},
R"doc(
Fit this model to 1D or 3D data using Minuit2.
@@ -86,24 +102,21 @@ void bind_fit_model(py::module& m, const char* name) {
n_threads : int
Number of threads for 3D parallel loop.
)doc",
py::arg("x"),
py::arg("y"),
py::arg("y_err") = py::none(),
py::arg("x"), py::arg("y"), py::arg("y_err") = py::none(),
py::arg("n_threads") = 4);
}
template <typename Model>
py::dict pack_1d_result_dict(const aare::NDArray<double, 1>& result,
bool compute_errors)
{
py::dict pack_1d_result_dict(const aare::NDArray<double, 1> &result,
bool compute_errors) {
constexpr std::size_t npar = Model::npar;
auto res = result.view();
auto par_out = new NDArray<double, 1>({npar}, 0.0);
auto chi2_out = new NDArray<double, 1>({1}, 0.0);
auto par_out = new NDArray<double, 1>({npar}, 0.0);
auto chi2_out = new NDArray<double, 1>({1}, 0.0);
auto par_view = par_out->view();
auto par_view = par_out->view();
auto chi2_view = chi2_out->view();
for (std::size_t i = 0; i < npar; ++i) {
@@ -111,7 +124,7 @@ py::dict pack_1d_result_dict(const aare::NDArray<double, 1>& result,
}
if (compute_errors) {
auto err_out = new NDArray<double, 1>({npar}, 0.0);
auto err_out = new NDArray<double, 1>({npar}, 0.0);
auto err_view = err_out->view();
for (std::size_t i = 0; i < npar; ++i) {
@@ -120,58 +133,59 @@ py::dict pack_1d_result_dict(const aare::NDArray<double, 1>& result,
chi2_view(0) = res(2 * npar);
return py::dict(
"par"_a = return_image_data(par_out),
"par_err"_a = return_image_data(err_out),
"chi2"_a = return_image_data(chi2_out));
return py::dict("par"_a = return_image_data(par_out),
"par_err"_a = return_image_data(err_out),
"chi2"_a = return_image_data(chi2_out));
} else {
chi2_view(0) = res(npar);
return py::dict(
"par"_a = return_image_data(par_out),
"chi2"_a = return_image_data(chi2_out));
return py::dict("par"_a = return_image_data(par_out),
"chi2"_a = return_image_data(chi2_out));
}
}
// Helper: typed dispatch for one Model, handles 1D/3D + y_err logic
template <typename Model, typename FCN>
py::object fit_dispatch(
const aare::FitModel<Model>& model,
py::array_t<double, py::array::c_style | py::array::forcecast> x,
py::array_t<double, py::array::c_style | py::array::forcecast> y,
py::object y_err_obj,
int n_threads)
{
py::object
fit_dispatch(const aare::FitModel<Model> &model,
py::array_t<double, py::array::c_style | py::array::forcecast> x,
py::array_t<double, py::array::c_style | py::array::forcecast> y,
py::object y_err_obj, int n_threads) {
constexpr std::size_t npar = Model::npar;
if (y.ndim() == 3) {
auto par_out = new NDArray<double, 3>({y.shape(0), y.shape(1), npar}, 0.0);
auto chi2_out= new NDArray<double, 2>({y.shape(0), y.shape(1)}, 0.0);
auto par_out =
new NDArray<double, 3>({y.shape(0), y.shape(1), npar}, 0.0);
auto chi2_out = new NDArray<double, 2>({y.shape(0), y.shape(1)}, 0.0);
auto x_view = make_view_1d(x);
auto y_view = make_view_3d(y);
if (!y_err_obj.is_none()) {
auto y_err = py::cast<py::array_t<double,
py::array::c_style | py::array::forcecast>>(y_err_obj);
auto y_err = py::cast<
py::array_t<double, py::array::c_style | py::array::forcecast>>(
y_err_obj);
if (y_err.ndim() != 3) {
throw std::runtime_error("For 3D input y, y_err must also be 3D.");
throw std::runtime_error(
"For 3D input y, y_err must also be 3D.");
}
auto err_out = new NDArray<double, 3>({y.shape(0), y.shape(1), npar}, 0.0);
auto err_out =
new NDArray<double, 3>({y.shape(0), y.shape(1), npar}, 0.0);
auto y_view_err = make_view_3d(y_err);
aare::fit_3d<Model, FCN>(model, x_view, y_view, y_view_err,
par_out->view(), err_out->view(), chi2_out->view(), n_threads);
aare::fit_3d<Model, FCN>(model, x_view, y_view, y_view_err,
par_out->view(), err_out->view(),
chi2_out->view(), n_threads);
if (model.compute_errors()) {
return py::dict("par"_a = return_image_data(par_out),
return py::dict("par"_a = return_image_data(par_out),
"par_err"_a = return_image_data(err_out),
"chi2"_a = return_image_data(chi2_out));
"chi2"_a = return_image_data(chi2_out));
} else {
delete err_out;
return py::dict("par"_a = return_image_data(par_out),
return py::dict("par"_a = return_image_data(par_out),
"chi2"_a = return_image_data(chi2_out));
}
} else {
@@ -179,9 +193,10 @@ py::object fit_dispatch(
NDView<double, 3> dummy_err{};
NDView<double, 3> dummy_err_out{};
aare::fit_3d<Model, FCN>(model, x_view, y_view, dummy_err,
par_out->view(), dummy_err_out, chi2_out->view(), n_threads);
aare::fit_3d<Model, FCN>(model, x_view, y_view, dummy_err,
par_out->view(), dummy_err_out,
chi2_out->view(), n_threads);
return py::dict("par"_a = return_image_data(par_out),
"chi2"_a = return_image_data(chi2_out));
}
@@ -192,15 +207,18 @@ py::object fit_dispatch(
auto y_view = make_view_1d(y);
if (!y_err_obj.is_none()) {
auto y_err = py::cast<py::array_t<double,
py::array::c_style | py::array::forcecast>>(y_err_obj);
auto y_err = py::cast<
py::array_t<double, py::array::c_style | py::array::forcecast>>(
y_err_obj);
if (y_err.ndim() != 1) {
throw std::runtime_error("For 1D input y, y_err must also be 1D.");
throw std::runtime_error(
"For 1D input y, y_err must also be 1D.");
}
auto y_view_err = make_view_1d(y_err);
result = aare::fit_pixel<Model, FCN>(model, x_view, y_view, y_view_err);
result =
aare::fit_pixel<Model, FCN>(model, x_view, y_view, y_view_err);
} else {
result = aare::fit_pixel<Model, FCN>(model, x_view, y_view);
}
@@ -403,7 +421,6 @@ void define_fit_bindings(py::module &m) {
)",
py::arg("x"), py::arg("y"), py::arg("y_err"), py::arg("n_threads") = 4);
m.def(
"fit_pol1",
[](py::array_t<double, py::array::c_style | py::array::forcecast> x,
@@ -575,7 +592,6 @@ void define_fit_bindings(py::module &m) {
)",
py::arg("x"), py::arg("y"), py::arg("y_err"), py::arg("n_threads") = 4);
m.def(
"fit_scurve2",
[](py::array_t<double, py::array::c_style | py::array::forcecast> x,
@@ -661,7 +677,6 @@ void define_fit_bindings(py::module &m) {
)",
py::arg("x"), py::arg("y"), py::arg("y_err"), py::arg("n_threads") = 4);
// ── Bind model classes ──────────────────────────────────────────
bind_fit_model<aare::model::Gaussian>(m, "Gaussian");
bind_fit_model<aare::model::RisingScurve>(m, "RisingScurve");
@@ -669,48 +684,57 @@ void define_fit_bindings(py::module &m) {
bind_fit_model<aare::model::Pol1>(m, "Pol1");
bind_fit_model<aare::model::Pol2>(m, "Pol2");
m.def("fit",
m.def(
"fit",
[](py::object model_obj,
py::array_t<double, py::array::c_style | py::array::forcecast> x,
py::array_t<double, py::array::c_style | py::array::forcecast> y,
py::object y_err_obj,
int n_threads) -> py::object
{
py::object y_err_obj, int n_threads) -> py::object {
using namespace aare::model;
using namespace aare::func;
// ── Polynomial of degree 1 ───────
if(py::isinstance< aare::FitModel<Pol1> >(model_obj)) {
const auto& mdl = model_obj.cast< const aare::FitModel<Pol1>& >();
return fit_dispatch<Pol1, Chi2Pol1>(mdl, x, y, y_err_obj, n_threads);
if (py::isinstance<aare::FitModel<Pol1>>(model_obj)) {
const auto &mdl =
model_obj.cast<const aare::FitModel<Pol1> &>();
return fit_dispatch<Pol1, Chi2Pol1>(mdl, x, y, y_err_obj,
n_threads);
}
// ── Polynomial of degree 2 ───────
if(py::isinstance< aare::FitModel<Pol2> >(model_obj)) {
const auto& mdl = model_obj.cast< const aare::FitModel<Pol2>& >();
return fit_dispatch<Pol2, Chi2Pol2>(mdl, x, y, y_err_obj, n_threads);
if (py::isinstance<aare::FitModel<Pol2>>(model_obj)) {
const auto &mdl =
model_obj.cast<const aare::FitModel<Pol2> &>();
return fit_dispatch<Pol2, Chi2Pol2>(mdl, x, y, y_err_obj,
n_threads);
}
// ── Gaussian ───────
if(py::isinstance< aare::FitModel<Gaussian> >(model_obj)) {
const auto& mdl = model_obj.cast< const aare::FitModel<Gaussian>& >();
return fit_dispatch<Gaussian, Chi2Gaussian>(mdl, x, y, y_err_obj, n_threads);
if (py::isinstance<aare::FitModel<Gaussian>>(model_obj)) {
const auto &mdl =
model_obj.cast<const aare::FitModel<Gaussian> &>();
return fit_dispatch<Gaussian, Chi2Gaussian>(
mdl, x, y, y_err_obj, n_threads);
}
// ── Rising Scurve ───────
if(py::isinstance< aare::FitModel<RisingScurve> >(model_obj)) {
const auto& mdl = model_obj.cast< const aare::FitModel<RisingScurve>& >();
return fit_dispatch<RisingScurve, Chi2RisingScurve>(mdl, x, y, y_err_obj, n_threads);
if (py::isinstance<aare::FitModel<RisingScurve>>(model_obj)) {
const auto &mdl =
model_obj.cast<const aare::FitModel<RisingScurve> &>();
return fit_dispatch<RisingScurve, Chi2RisingScurve>(
mdl, x, y, y_err_obj, n_threads);
}
// ── Falling Scurve ───────
if(py::isinstance< aare::FitModel<FallingScurve> >(model_obj)) {
const auto& mdl = model_obj.cast< const aare::FitModel<FallingScurve>& >();
return fit_dispatch<FallingScurve, Chi2FallingScurve>(mdl, x, y, y_err_obj, n_threads);
if (py::isinstance<aare::FitModel<FallingScurve>>(model_obj)) {
const auto &mdl =
model_obj.cast<const aare::FitModel<FallingScurve> &>();
return fit_dispatch<FallingScurve, Chi2FallingScurve>(
mdl, x, y, y_err_obj, n_threads);
}
throw std::runtime_error(
"Unknown model type. Expected Pol1, Pol2, Gaussian, RisingScurve or FallingScurve."
);
"Unknown model type. Expected Pol1, Pol2, Gaussian, "
"RisingScurve or FallingScurve.");
},
R"(
Fit a model to 1D or 3D data using Minuit2.
@@ -741,10 +765,6 @@ void define_fit_bindings(py::module &m) {
"par_err" : (rows, cols, npar) parameter errors (if compute_errors).
"chi2" : (rows, cols) chi-squared per pixel.
)",
py::arg("model"),
py::arg("x"),
py::arg("y"),
py::arg("y_err") = py::none(),
py::arg("n_threads") = 4
);
py::arg("model"), py::arg("x"), py::arg("y"),
py::arg("y_err") = py::none(), py::arg("n_threads") = 4);
}
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
+1 -7
View File
@@ -4,11 +4,11 @@
#include "aare/Models.hpp"
#include "aare/utils/par.hpp"
#include "aare/utils/task.hpp"
#include <array>
#include <lmcurve2.h>
#include <lmfit.hpp>
#include <thread>
#include <type_traits>
#include <array>
namespace aare {
@@ -97,8 +97,6 @@ NDArray<double, 3> fit_gaus(NDView<double, 1> x, NDView<double, 3> y,
return result;
}
void fit_gaus(NDView<double, 1> x, NDView<double, 1> y, NDView<double, 1> y_err,
NDView<double, 1> par_out, NDView<double, 1> par_err_out,
double &chi2) {
@@ -278,7 +276,6 @@ NDArray<double, 3> fit_pol1(NDView<double, 1> x, NDView<double, 3> y,
// ~~ S-CURVES ~~
// - No error
NDArray<double, 1> fit_scurve(NDView<double, 1> x, NDView<double, 1> y) {
NDArray<double, 1> result = model::RisingScurve::estimate_par(x, y);
@@ -371,8 +368,6 @@ void fit_scurve(NDView<double, 1> x, NDView<double, 3> y,
// SCURVE2 ---
// - No error
NDArray<double, 1> fit_scurve2(NDView<double, 1> x, NDView<double, 1> y) {
NDArray<double, 1> result = model::FallingScurve::estimate_par(x, y);
@@ -463,5 +458,4 @@ void fit_scurve2(NDView<double, 1> x, NDView<double, 3> y,
RunInParallel(process, tasks);
}
} // namespace aare
+1 -2
View File
@@ -199,8 +199,7 @@ std::optional<ROI> RawMasterFile::roi() const {
}
if (m_rois->empty()) {
throw std::runtime_error(LOCATION +
"Zero ROIs in metadata.");
throw std::runtime_error(LOCATION + "Zero ROIs in metadata.");
}
if (m_rois.value().size() > 1) {
+22 -29
View File
@@ -1,48 +1,41 @@
# SPDX-License-Identifier: MPL-2.0
# Download catch2 if configured to do so
if (AARE_FETCH_CATCH)
FetchContent_Declare(
Catch2
GIT_SHALLOW TRUE
GIT_REPOSITORY https://github.com/catchorg/Catch2.git
GIT_TAG v3.5.3
)
FetchContent_MakeAvailable(Catch2)
if(AARE_FETCH_CATCH)
FetchContent_Declare(
Catch2
GIT_SHALLOW TRUE
GIT_REPOSITORY https://github.com/catchorg/Catch2.git
GIT_TAG v3.5.3)
FetchContent_MakeAvailable(Catch2)
else()
# Otherwise look for installed catch2
find_package(Catch2 3 REQUIRED)
# Otherwise look for installed catch2
find_package(Catch2 3 REQUIRED)
endif()
list(APPEND CMAKE_MODULE_PATH ${Catch2_SOURCE_DIR}/extras)
add_executable(tests test.cpp)
target_link_libraries(tests PRIVATE Catch2::Catch2WithMain aare_core aare_compiler_flags)
# target_compile_options(tests PRIVATE -fno-omit-frame-pointer -fsanitize=address)
set_target_properties(tests PROPERTIES
RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}
OUTPUT_NAME run_tests
)
target_link_libraries(tests PRIVATE Catch2::Catch2WithMain aare_core
aare_compiler_flags)
# target_compile_options(tests PRIVATE -fno-omit-frame-pointer
# -fsanitize=address)
set_target_properties(
tests PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR} OUTPUT_NAME
run_tests)
include(CTest)
include(Catch)
catch_discover_tests(tests)
set(TestSources
${CMAKE_CURRENT_SOURCE_DIR}/test.cpp
)
target_sources(tests PRIVATE ${TestSources} )
set(TestSources ${CMAKE_CURRENT_SOURCE_DIR}/test.cpp)
target_sources(tests PRIVATE ${TestSources})
#Work around to remove, this is not the way to do it =)
# Work around to remove, this is not the way to do it =)
# target_link_libraries(tests PRIVATE aare_core aare_compiler_flags)
#configure a header to pass test file paths
# configure a header to pass test file paths
get_filename_component(TEST_FILE_PATH ${PROJECT_SOURCE_DIR}/data ABSOLUTE)
configure_file(test_config.hpp.in test_config.hpp)
target_include_directories(tests PRIVATE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
target_include_directories(tests PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
${CMAKE_CURRENT_BINARY_DIR})
+4
View File
@@ -0,0 +1,4 @@
[default.extend-words]
aare = "aare"
gaus = "gaus"
ND = "ND"