Compare commits
149 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 29d406a290 | |||
| 5d9c0879b4 | |||
| 7f0e6f1026 | |||
| 30574cdf7e | |||
| 6298487bf3 | |||
| 3a7f3cde53 | |||
| dafff07e68 | |||
| 9afd15bcb4 | |||
| 6a4f1c6205 | |||
| 6aacbd5f22 | |||
| dc7dd2a6f2 | |||
| a22c23658f | |||
| 246c179481 | |||
| 30f616d3ab | |||
| 20d3bf45c9 | |||
| f07a06370f | |||
| 7c951a2f14 | |||
| 493095331f | |||
| c78d5412d5 | |||
| 389d485476 | |||
| 9db1d399dc | |||
| 9facb0e04f | |||
| 198fc07421 | |||
| 80ec3da039 | |||
| 09ea513aad | |||
| 4ecf6252a9 | |||
| 0cb7f5ceb2 | |||
| 2df7fd9e7c | |||
| 42234d86fd | |||
| bc84b2e841 | |||
| 26b6057941 | |||
| 1d8fea7498 | |||
| d2fff51787 | |||
| 8347942c15 | |||
| f55d840b8e | |||
| 327d6a0ff8 | |||
| 22943bc513 | |||
|
|
01873d60db | ||
| 654251a194 | |||
| 9e30970d9b | |||
| 7b6f2045cc | |||
| e3d15a1142 | |||
|
|
98244327eb | ||
| 61ba1cf084 | |||
| 54f2169888 | |||
| 24cc0a4287 | |||
| a3d342e5d7 | |||
| c92e4e4d17 | |||
| 4d74237669 | |||
| 6144200551 | |||
| f2c681ffba | |||
| a56f29191d | |||
| 6597ca22d1 | |||
| ce31dcb190 | |||
| 41c5218413 | |||
| 8abd977656 | |||
| 432d85c9b3 | |||
| c222f42a89 | |||
| fbced41b9f | |||
| dfb5aa319f | |||
| c073fae269 | |||
| 447b81d09d | |||
| 003a8c04ce | |||
| 4cd25dec3d | |||
| b38eeb8eb3 | |||
| d722228079 | |||
| 493c2bf802 | |||
| c429429dd2 | |||
| 0550e725e8 | |||
| 5c8b9a8cd6 | |||
| 4e5d085ad7 | |||
| 2d8d1c66c6 | |||
| 14ea89e243 | |||
| 322c00ca54 | |||
| 0b7a56628f | |||
| 77641412ab | |||
| fc1bd66c3d | |||
| 95a1ffade4 | |||
| 4ee1cf7ea7 | |||
| c90bdd3316 | |||
| ac9babb9ad | |||
| aa5b540aed | |||
| 86d676ccc8 | |||
| f8daa93c48 | |||
| 4c2e344d78 | |||
| 469e1c0ab0 | |||
| a947dfd398 | |||
| 4e2269bdae | |||
| 3f93ec2017 | |||
| 9d788da2f1 | |||
| f16feac748 | |||
| 709a0c5392 | |||
| b71b72c6a3 | |||
| 2972ffd5d8 | |||
| 31201795b0 | |||
| 99af021b3e | |||
| aacbe3ed6f | |||
| 6c0c2fcab8 | |||
| db5713ab56 | |||
| 10e9398f29 | |||
| 2d2f0ec5e4 | |||
| 3b3f0a1823 | |||
| 20122eb32e | |||
| c68aa29141 | |||
| b638237f66 | |||
| 9ce902d82c | |||
| a3dccb8429 | |||
| 51cb3087cb | |||
| e39d8b108a | |||
| 2985d10883 | |||
| cec4fc2965 | |||
| 2467ba38b8 | |||
| 3a21a8505a | |||
| e9b860f7cd | |||
| 8f2695f8a7 | |||
| f6bda1160e | |||
| 2a10597dc3 | |||
| 9f0de521ee | |||
| 650730ce12 | |||
| 93405c880d | |||
| 68f671f447 | |||
| cb4415ad3d | |||
| 2a89e58698 | |||
| 5ecdecfe24 | |||
| ae6aa9d24d | |||
| 39b267e96c | |||
| fe2975c71d | |||
| 1e78325663 | |||
| 1d74d947de | |||
| c540fa16fe | |||
| 7cf7ff8e61 | |||
| 753b32203a | |||
| d936f7ae7d | |||
| 429bd47c59 | |||
| 1a3c637b39 | |||
| f46eefb0cb | |||
| 0697e5ce80 | |||
| f88cea417c | |||
| ae5b3e789e | |||
| 082d96510b | |||
| e24ce01419 | |||
| 68e592cfe4 | |||
| 03e2f461cb | |||
| 03539eaea6 | |||
| f5b4f8bba2 | |||
| 319cbb4c2c | |||
| d1e075cca4 | |||
| 7f01f89f2b | |||
| b6e3cb2eef |
1
.gitattributes
vendored
Normal file
1
.gitattributes
vendored
Normal file
@@ -0,0 +1 @@
|
|||||||
|
*.hdf filter=lfs diff=lfs merge=lfs -text
|
||||||
60
.github/workflows/release.yml
vendored
60
.github/workflows/release.yml
vendored
@@ -22,7 +22,35 @@ on:
|
|||||||
- all_incl_release
|
- all_incl_release
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
|
test:
|
||||||
|
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
python-version: ['3.8', '3.9', '3.10', '3.12']
|
||||||
|
fail-fast: false
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Checkout LFS objects
|
||||||
|
run: git clone https://${{secrets.GITHUB_TOKEN}}@gitea.psi.ch/${{ github.repository }}.git .
|
||||||
|
- name: Set up Python ${{ matrix.python-version }}
|
||||||
|
uses: actions/setup-python@v5
|
||||||
|
with:
|
||||||
|
python-version: ${{ matrix.python-version }}
|
||||||
|
|
||||||
|
- name: Install dependencies
|
||||||
|
run: |
|
||||||
|
python -m pip install --upgrade pip
|
||||||
|
pip install pytest
|
||||||
|
pip install -r requirements.txt
|
||||||
|
|
||||||
|
- name: Test with pytest
|
||||||
|
run: |
|
||||||
|
python -m pytest tests
|
||||||
|
|
||||||
|
|
||||||
build-ubuntu-latest:
|
build-ubuntu-latest:
|
||||||
|
needs: [test]
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
if: ${{ (github.event_name != 'workflow_dispatch') || (contains(fromJson('["all", "linux", "all_incl_release"]'), github.event.inputs.build-items)) }}
|
if: ${{ (github.event_name != 'workflow_dispatch') || (contains(fromJson('["all", "linux", "all_incl_release"]'), github.event.inputs.build-items)) }}
|
||||||
permissions:
|
permissions:
|
||||||
@@ -31,30 +59,28 @@ jobs:
|
|||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
- uses: actions/setup-python@v5
|
- uses: actions/setup-python@v5
|
||||||
with:
|
with:
|
||||||
python-version: '3.11'
|
python-version: '3.12'
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m pip install --upgrade pip
|
python -m pip install --upgrade pip
|
||||||
pip install build
|
pip install build
|
||||||
pip install -r requirements.txt
|
pip install -r requirements.txt
|
||||||
|
pip install wheel build twine
|
||||||
- name: Build PyPI package
|
- name: Build PyPI package
|
||||||
run: |
|
run: |
|
||||||
python3 -m build
|
python3 -m build
|
||||||
- name: Archive distribution
|
# - name: Archive distribution
|
||||||
uses: actions/upload-artifact@v4
|
# uses: actions/upload-artifact@v3
|
||||||
with:
|
# with:
|
||||||
name: linux-dist
|
# name: linux-dist
|
||||||
path: |
|
# path: |
|
||||||
dist/*.tar.gz
|
# dist/*.tar.gz
|
||||||
- name: Upload to PyPI
|
- name: Upload to PyPI
|
||||||
#if: github.event_name != 'workflow_dispatch'
|
run: |
|
||||||
uses: pypa/gh-action-pypi-publish@release/v1
|
twine upload dist/* -u __token__ -p ${{ secrets.PYPI_TOKEN }} --skip-existing
|
||||||
with:
|
|
||||||
# user: __token__
|
|
||||||
# password: ${{ secrets.PYPI_TOKEN }}
|
|
||||||
skip-existing: true
|
|
||||||
|
|
||||||
build-windows:
|
build-windows:
|
||||||
|
needs: [test]
|
||||||
runs-on: windows-latest
|
runs-on: windows-latest
|
||||||
if: ${{ (github.event_name != 'workflow_dispatch') || (contains(fromJson('["all", "windows", "all_incl_release"]'), github.event.inputs.build-items)) }}
|
if: ${{ (github.event_name != 'workflow_dispatch') || (contains(fromJson('["all", "windows", "all_incl_release"]'), github.event.inputs.build-items)) }}
|
||||||
|
|
||||||
@@ -70,11 +96,11 @@ jobs:
|
|||||||
C:\Miniconda\condabin\conda.bat init powershell
|
C:\Miniconda\condabin\conda.bat init powershell
|
||||||
- name: Build with pyinstaller
|
- name: Build with pyinstaller
|
||||||
run: |
|
run: |
|
||||||
pyinstaller windows_folder.spec
|
pyinstaller windows_build.spec
|
||||||
cd dist\eos
|
cd dist\eos
|
||||||
Compress-Archive -Path .\* -Destination ..\..\eos.zip
|
Compress-Archive -Path .\* -Destination ..\..\eos.zip
|
||||||
- name: Archive distribution
|
- name: Archive distribution
|
||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v3
|
||||||
with:
|
with:
|
||||||
name: windows-dist
|
name: windows-dist
|
||||||
path: |
|
path: |
|
||||||
@@ -89,10 +115,10 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
fetch-tags: true
|
fetch-tags: true
|
||||||
- uses: actions/download-artifact@v4
|
- uses: actions/download-artifact@v3
|
||||||
with:
|
with:
|
||||||
name: linux-dist
|
name: linux-dist
|
||||||
- uses: actions/download-artifact@v4
|
- uses: actions/download-artifact@v3
|
||||||
with:
|
with:
|
||||||
name: windows-dist
|
name: windows-dist
|
||||||
- name: get latest version tag
|
- name: get latest version tag
|
||||||
|
|||||||
18
.github/workflows/unit_tests.yml
vendored
18
.github/workflows/unit_tests.yml
vendored
@@ -10,16 +10,18 @@ on:
|
|||||||
workflow_dispatch:
|
workflow_dispatch:
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
build:
|
test:
|
||||||
|
|
||||||
runs-on: ubuntu-22.04
|
runs-on: ubuntu-latest
|
||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
python-version: ['3.8', '3.9', '3.10', '3.11', '3.12']
|
python-version: ['3.8', '3.9', '3.10', '3.12']
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- name: Checkout LFS objects
|
||||||
|
run: git clone https://${{secrets.GITHUB_TOKEN}}@gitea.psi.ch/${{ github.repository }}.git .
|
||||||
|
|
||||||
- name: Set up Python ${{ matrix.python-version }}
|
- name: Set up Python ${{ matrix.python-version }}
|
||||||
uses: actions/setup-python@v5
|
uses: actions/setup-python@v5
|
||||||
with:
|
with:
|
||||||
@@ -31,12 +33,6 @@ jobs:
|
|||||||
pip install pytest
|
pip install pytest
|
||||||
pip install -r requirements.txt
|
pip install -r requirements.txt
|
||||||
|
|
||||||
- name: Backport to 3.8
|
|
||||||
if: matrix.python-version == '3.8'
|
|
||||||
run: |
|
|
||||||
pip install backports.zoneinfo
|
|
||||||
|
|
||||||
- name: Test with pytest
|
- name: Test with pytest
|
||||||
run: |
|
run: |
|
||||||
cd tests
|
python -m pytest tests
|
||||||
python -m pytest --pyargs .
|
|
||||||
|
|||||||
2
.gitignore
vendored
2
.gitignore
vendored
@@ -8,3 +8,5 @@ raw
|
|||||||
test_results
|
test_results
|
||||||
build
|
build
|
||||||
dist
|
dist
|
||||||
|
profile_test.prof
|
||||||
|
amor_eos.log
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ Software repository for the neutron reflectometer Amor at the Paul Scherrer Inst
|
|||||||
|
|
||||||
Reduction of the raw files (.hdf) to reflectivity files in one of the representations of the **ORSO reflectivity file format**:
|
Reduction of the raw files (.hdf) to reflectivity files in one of the representations of the **ORSO reflectivity file format**:
|
||||||
|
|
||||||
eos.py --help
|
eos --help
|
||||||
|
|
||||||
visualisation of the content of a raw file (.hdf):
|
visualisation of the content of a raw file (.hdf):
|
||||||
|
|
||||||
|
|||||||
1082
e2h_new.py
1082
e2h_new.py
File diff suppressed because it is too large
Load Diff
46
eos.py
46
eos.py
@@ -1,46 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
"""
|
|
||||||
eos reduces measurements performed on Amor@SINQ, PSI
|
|
||||||
|
|
||||||
Author: Jochen Stahn (algorithms, python draft),
|
|
||||||
Artur Glavic (structuring and optimisation of code)
|
|
||||||
|
|
||||||
conventions (not strictly followed, yet):
|
|
||||||
- array names end with the suffix '_x[y]' with the meaning
|
|
||||||
_e = events
|
|
||||||
_tof
|
|
||||||
_l = lambda
|
|
||||||
_t = theta
|
|
||||||
_z = detector z
|
|
||||||
_lz = (lambda, detector z)
|
|
||||||
_q = q_z
|
|
||||||
"""
|
|
||||||
|
|
||||||
import logging
|
|
||||||
|
|
||||||
from libeos.command_line import command_line_options
|
|
||||||
from libeos.logconfig import setup_logging
|
|
||||||
from libeos.reduction import AmorReduction
|
|
||||||
|
|
||||||
#=====================================================================================================
|
|
||||||
# TODO:
|
|
||||||
# - calculate resolution using the chopperPhase
|
|
||||||
# - deal with background correction
|
|
||||||
# - format of 'call' + add '-Y' if not supplied
|
|
||||||
#=====================================================================================================
|
|
||||||
|
|
||||||
def main():
|
|
||||||
setup_logging()
|
|
||||||
logging.warning('######## eos - data reduction for Amor ########')
|
|
||||||
|
|
||||||
# read command line arguments and generate classes holding configuration parameters
|
|
||||||
config = command_line_options()
|
|
||||||
# Create reducer with these arguments
|
|
||||||
reducer = AmorReduction(config)
|
|
||||||
# Perform actual reduction
|
|
||||||
reducer.reduce()
|
|
||||||
|
|
||||||
logging.info('######## eos - finished ########')
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
main()
|
|
||||||
@@ -1,7 +1,6 @@
|
|||||||
"""
|
"""
|
||||||
Package to handle data redction at AMOR instrument to be used by eos.py script.
|
Package to handle data redction at AMOR instrument to be used by __main__.py script.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
__version__ = '2.2.0'
|
__version__ = '3.2.2'
|
||||||
__date__ = '2025-09-16'
|
__date__ = '2026-02-27'
|
||||||
|
|
||||||
42
eos/__main__.py
Normal file
42
eos/__main__.py
Normal file
@@ -0,0 +1,42 @@
|
|||||||
|
"""
|
||||||
|
eos reduces measurements performed on Amor@SINQ, PSI
|
||||||
|
|
||||||
|
Author: Jochen Stahn (algorithms, python draft),
|
||||||
|
Artur Glavic (structuring and optimisation of code)
|
||||||
|
"""
|
||||||
|
|
||||||
|
import logging
|
||||||
|
|
||||||
|
# need to do absolute import here as pyinstaller requires it
|
||||||
|
from eos.options import ReflectivityConfig, ReaderConfig, ExperimentConfig, ReflectivityReductionConfig, ReflectivityOutputConfig
|
||||||
|
from eos.command_line import commandLineArgs
|
||||||
|
from eos.logconfig import setup_logging, update_loglevel
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
setup_logging()
|
||||||
|
|
||||||
|
# read command line arguments and generate classes holding configuration parameters
|
||||||
|
clas = commandLineArgs([ReaderConfig, ExperimentConfig, ReflectivityReductionConfig, ReflectivityOutputConfig],
|
||||||
|
'eos')
|
||||||
|
update_loglevel(clas.verbose)
|
||||||
|
|
||||||
|
reader_config = ReaderConfig.from_args(clas)
|
||||||
|
experiment_config = ExperimentConfig.from_args(clas)
|
||||||
|
reduction_config = ReflectivityReductionConfig.from_args(clas)
|
||||||
|
output_config = ReflectivityOutputConfig.from_args(clas)
|
||||||
|
config = ReflectivityConfig(reader_config, experiment_config, reduction_config, output_config)
|
||||||
|
|
||||||
|
logging.warning('######## eos - data reduction for Amor ########')
|
||||||
|
|
||||||
|
# only import heavy module if sufficient command line parameters were provided
|
||||||
|
from eos.reduction_reflectivity import ReflectivityReduction
|
||||||
|
# Create reducer with these arguments
|
||||||
|
reducer = ReflectivityReduction(config)
|
||||||
|
# Perform actual reduction
|
||||||
|
reducer.reduce()
|
||||||
|
|
||||||
|
logging.info('######## eos - finished ########')
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
main()
|
||||||
42
eos/command_line.py
Normal file
42
eos/command_line.py
Normal file
@@ -0,0 +1,42 @@
|
|||||||
|
import argparse
|
||||||
|
|
||||||
|
from typing import List, Type
|
||||||
|
from .options import ArgParsable
|
||||||
|
|
||||||
|
|
||||||
|
def commandLineArgs(config_items: List[Type[ArgParsable]], program_name=None, extra_args=[]):
|
||||||
|
"""
|
||||||
|
Process command line argument.
|
||||||
|
The type of the default values is used for conversion and validation.
|
||||||
|
"""
|
||||||
|
msg = "eos reads data from (one or several) raw file(s) of the .hdf format, \
|
||||||
|
performs various corrections, conversations and projections and exports\
|
||||||
|
the resulting reflectivity in an orso-compatible format."
|
||||||
|
clas = argparse.ArgumentParser(prog=program_name,
|
||||||
|
description = msg, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
|
||||||
|
|
||||||
|
clas.add_argument('-v', '--verbose', action='count', default=0)
|
||||||
|
|
||||||
|
clas_groups = {}
|
||||||
|
|
||||||
|
all_arguments = []
|
||||||
|
for cls in config_items:
|
||||||
|
all_arguments += cls.get_commandline_parameters()
|
||||||
|
|
||||||
|
all_arguments.sort() # parameters are sorted alphabetically, unless they have higher priority
|
||||||
|
for cpc in all_arguments:
|
||||||
|
if not cpc.group in clas_groups:
|
||||||
|
clas_groups[cpc.group] = clas.add_argument_group(cpc.group)
|
||||||
|
if cpc.short_form:
|
||||||
|
clas_groups[cpc.group].add_argument(
|
||||||
|
f'-{cpc.short_form}', f'--{cpc.argument}', **cpc.add_argument_args
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
clas_groups[cpc.group].add_argument(
|
||||||
|
f'--{cpc.argument}', **cpc.add_argument_args
|
||||||
|
)
|
||||||
|
|
||||||
|
for ma in extra_args:
|
||||||
|
clas.add_argument(**ma)
|
||||||
|
|
||||||
|
return clas.parse_args()
|
||||||
11
eos/const.py
Normal file
11
eos/const.py
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
"""
|
||||||
|
Constants used in data reduction.
|
||||||
|
"""
|
||||||
|
|
||||||
|
hdm = 6.626176e-34/1.674928e-27 # h / m
|
||||||
|
lamdaCut = 2.5 # Aa
|
||||||
|
lamdaMax = 15.0 # Aa
|
||||||
|
|
||||||
|
polarizationConfigs = ['unpolarized', 'unpolarized', 'po', 'mo', 'op', 'pp', 'mp', 'om', 'pm', 'mm']
|
||||||
|
polarizationLabels = ['undetermined', 'unpolarized', 'spin-up', 'spin-down', 'op',
|
||||||
|
'up-up', 'down-up', 'om', 'up-down', 'down-down']
|
||||||
42
eos/e2h.py
Normal file
42
eos/e2h.py
Normal file
@@ -0,0 +1,42 @@
|
|||||||
|
"""
|
||||||
|
events2histogram vizualising data from Amor@SINQ, PSI
|
||||||
|
|
||||||
|
Author: Jochen Stahn (algorithms, python draft),
|
||||||
|
Artur Glavic (structuring and optimisation of code)
|
||||||
|
"""
|
||||||
|
import logging
|
||||||
|
|
||||||
|
# need to do absolute import here as pyinstaller requires it
|
||||||
|
from eos.options import E2HConfig, ReaderConfig, ExperimentConfig, E2HReductionConfig
|
||||||
|
from eos.command_line import commandLineArgs
|
||||||
|
from eos.logconfig import setup_logging, update_loglevel
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
setup_logging()
|
||||||
|
logging.getLogger('matplotlib').setLevel(logging.WARNING)
|
||||||
|
|
||||||
|
# read command line arguments and generate classes holding configuration parameters
|
||||||
|
clas = commandLineArgs([ReaderConfig, ExperimentConfig, E2HReductionConfig],
|
||||||
|
'events2histogram')
|
||||||
|
update_loglevel(clas.verbose)
|
||||||
|
|
||||||
|
reader_config = ReaderConfig.from_args(clas)
|
||||||
|
experiment_config = ExperimentConfig.from_args(clas)
|
||||||
|
reduction_config = E2HReductionConfig.from_args(clas)
|
||||||
|
config = E2HConfig(reader_config, experiment_config, reduction_config)
|
||||||
|
|
||||||
|
logging.warning('######## events2histogram - data vizualization for Amor ########')
|
||||||
|
from eos.reduction_e2h import E2HReduction
|
||||||
|
|
||||||
|
# only import heavy module if sufficient command line parameters were provided
|
||||||
|
from eos.reduction_reflectivity import ReflectivityReduction
|
||||||
|
# Create reducer with these arguments
|
||||||
|
reducer = E2HReduction(config)
|
||||||
|
# Perform actual reduction
|
||||||
|
reducer.reduce()
|
||||||
|
|
||||||
|
logging.info('######## events2histogram - finished ########')
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
main()
|
||||||
163
eos/event_analysis.py
Normal file
163
eos/event_analysis.py
Normal file
@@ -0,0 +1,163 @@
|
|||||||
|
"""
|
||||||
|
Define an event dataformat that performs reduction actions like wavelength calculation on per-event basis.
|
||||||
|
With large number of events these actions can be time consuming so they use numba based functions.
|
||||||
|
"""
|
||||||
|
import numpy as np
|
||||||
|
import logging
|
||||||
|
|
||||||
|
from typing import Tuple
|
||||||
|
|
||||||
|
from . import const
|
||||||
|
from .event_data_types import EventDataAction, EventDatasetProtocol, append_fields, EVENT_BITMASKS
|
||||||
|
from .helpers import filter_project_x, merge_frames, extract_walltime, add_log_to_pulses
|
||||||
|
from .instrument import Detector
|
||||||
|
from .options import IncidentAngle
|
||||||
|
from .header import Header
|
||||||
|
|
||||||
|
class ExtractWalltime(EventDataAction):
|
||||||
|
def perform_action(self, dataset: EventDatasetProtocol) ->None:
|
||||||
|
wallTime = extract_walltime(dataset.data.events.tof,
|
||||||
|
dataset.data.packets.start_index,
|
||||||
|
dataset.data.packets.time)
|
||||||
|
logging.debug(f' expending event stream by wallTime')
|
||||||
|
new_events = append_fields(dataset.data.events, [('wallTime', wallTime.dtype)])
|
||||||
|
new_events.wallTime = wallTime
|
||||||
|
dataset.data.events = new_events
|
||||||
|
|
||||||
|
class MergeFrames(EventDataAction):
|
||||||
|
def __init__(self, lamdaCut=None):
|
||||||
|
self.lamdaCut=lamdaCut
|
||||||
|
|
||||||
|
def perform_action(self, dataset: EventDatasetProtocol)->None:
|
||||||
|
if self.lamdaCut is None:
|
||||||
|
lamdaCut = const.lamdaCut
|
||||||
|
else:
|
||||||
|
lamdaCut = self.lamdaCut
|
||||||
|
tofCut = lamdaCut*dataset.geometry.chopperDetectorDistance/const.hdm*1e-13
|
||||||
|
total_offset = (tofCut +
|
||||||
|
dataset.timing.tau * (dataset.timing.ch1TriggerPhase + dataset.timing.chopperPhase/2)/180)
|
||||||
|
dataset.data.events.tof = merge_frames(dataset.data.events.tof, tofCut, dataset.timing.tau, total_offset)
|
||||||
|
|
||||||
|
|
||||||
|
class AnalyzePixelIDs(EventDataAction):
|
||||||
|
def __init__(self, yRange: Tuple[int, int]):
|
||||||
|
self.yRange = yRange
|
||||||
|
|
||||||
|
def perform_action(self, dataset: EventDatasetProtocol) ->None:
|
||||||
|
d = dataset.data
|
||||||
|
(detZ, detXdist, delta, mask) = filter_project_x(
|
||||||
|
Detector.pixelLookUp, d.events.pixelID, self.yRange[0], self.yRange[1]
|
||||||
|
)
|
||||||
|
ana_events = append_fields(d.events, [
|
||||||
|
('detZ', detZ.dtype), ('detXdist', detXdist.dtype), ('delta', delta.dtype)])
|
||||||
|
# add analysis per event
|
||||||
|
ana_events.detZ = detZ
|
||||||
|
ana_events.detXdist = detXdist
|
||||||
|
ana_events.delta = delta
|
||||||
|
ana_events.mask += np.logical_not(mask)*EVENT_BITMASKS['yRange']
|
||||||
|
d.events = ana_events
|
||||||
|
|
||||||
|
class CalculateWavelength(EventDataAction):
|
||||||
|
def __init__(self, lambdaRange: Tuple[float, float]):
|
||||||
|
self.lambdaRange = lambdaRange
|
||||||
|
|
||||||
|
def perform_action(self, dataset: EventDatasetProtocol) ->None:
|
||||||
|
d = dataset.data
|
||||||
|
if not 'detXdist' in dataset.data.events.dtype.names:
|
||||||
|
raise ValueError("CalculateWavelength requires dataset with analyzed pixels, perform AnalyzePixelIDs first")
|
||||||
|
|
||||||
|
#lamdaMax = const.lamdaCut+1.e13*dataset.timing.tau*const.hdm/(dataset.geometry.chopperDetectorDistance+124.)
|
||||||
|
|
||||||
|
# lambda
|
||||||
|
# TODO: one of the most time consuming actions, could be implemented in numba, instead?
|
||||||
|
lamda = (1.e13*const.hdm)*d.events.tof/(dataset.geometry.chopperDetectorDistance+d.events.detXdist)
|
||||||
|
|
||||||
|
final_events = append_fields(d.events, [('lamda', np.float64)])
|
||||||
|
# add analysis per event
|
||||||
|
final_events.lamda = lamda
|
||||||
|
final_events.mask += EVENT_BITMASKS["LamdaRange"]*(
|
||||||
|
(self.lambdaRange[0]>lamda) | (lamda>self.lambdaRange[1]))
|
||||||
|
d.events = final_events
|
||||||
|
|
||||||
|
class CalculateQ(EventDataAction):
|
||||||
|
def __init__(self, incidentAngle: IncidentAngle):
|
||||||
|
self.incidentAngle = incidentAngle
|
||||||
|
|
||||||
|
def perform_action(self, dataset: EventDatasetProtocol) ->None:
|
||||||
|
d = dataset.data
|
||||||
|
if not 'lamda' in dataset.data.events.dtype.names:
|
||||||
|
raise ValueError("CalculateQ requires dataset with analyzed wavelength, perform CalculateWavelength first")
|
||||||
|
|
||||||
|
lamda = d.events.lamda
|
||||||
|
|
||||||
|
final_events = append_fields(d.events, [('qz', np.float64)])
|
||||||
|
|
||||||
|
# alpha_f
|
||||||
|
# q_z
|
||||||
|
if self.incidentAngle == IncidentAngle.alphaF:
|
||||||
|
alphaF_e = dataset.geometry.nu - dataset.geometry.mu + d.events.delta
|
||||||
|
final_events.qz = 4*np.pi*(np.sin(np.deg2rad(alphaF_e))/lamda)
|
||||||
|
elif self.incidentAngle == IncidentAngle.nu:
|
||||||
|
alphaF_e = (dataset.geometry.nu + d.events.delta + dataset.geometry.kap + dataset.geometry.kad) / 2.
|
||||||
|
final_events.qz = 4*np.pi*(np.sin(np.deg2rad(alphaF_e))/lamda)
|
||||||
|
else:
|
||||||
|
alphaF_e = dataset.geometry.nu - dataset.geometry.mu + d.events.delta
|
||||||
|
alphaI = dataset.geometry.kap + dataset.geometry.kad + dataset.geometry.mu
|
||||||
|
final_events.qz = 2*np.pi * ((np.sin(np.deg2rad(alphaF_e)) + np.sin(np.deg2rad(alphaI)))/lamda)
|
||||||
|
final_events = append_fields(final_events, [('qx', np.float64)])
|
||||||
|
final_events.qx = 2*np.pi * ((np.cos(np.deg2rad(alphaF_e)) - np.cos(np.deg2rad(alphaI)))/lamda)
|
||||||
|
|
||||||
|
dataset.data.events = final_events
|
||||||
|
|
||||||
|
def update_header(self, header: Header):
|
||||||
|
if self.incidentAngle == IncidentAngle.alphaF:
|
||||||
|
header.measurement_scheme = 'angle- and energy-dispersive'
|
||||||
|
else:
|
||||||
|
header.measurement_scheme = 'energy-dispersive'
|
||||||
|
|
||||||
|
class FilterQzRange(EventDataAction):
|
||||||
|
def __init__(self, qzRange: Tuple[float, float]):
|
||||||
|
self.qzRange = qzRange
|
||||||
|
|
||||||
|
def perform_action(self, dataset: EventDatasetProtocol) ->None:
|
||||||
|
d = dataset.data
|
||||||
|
if not 'qz' in dataset.data.events.dtype.names:
|
||||||
|
raise ValueError("FilterQzRange requires dataset with qz values per events, perform WavelengthAndQ first")
|
||||||
|
|
||||||
|
d.events.mask += EVENT_BITMASKS["qRange"]*((self.qzRange[0]>d.events.qz) | (d.events.qz>self.qzRange[1]))
|
||||||
|
|
||||||
|
class FilterByLog(EventDataAction):
|
||||||
|
|
||||||
|
def __init__(self, filter_string, remove_switchpulse=False):
|
||||||
|
if filter_string.startswith('!'):
|
||||||
|
filter_string = filter_string[1:]
|
||||||
|
remove_switchpulse = True
|
||||||
|
self.filter_string = filter_string
|
||||||
|
self.remove_switchpulse = remove_switchpulse
|
||||||
|
|
||||||
|
def perform_action(self, dataset: EventDatasetProtocol) -> None:
|
||||||
|
filter_variable = None
|
||||||
|
# go through existing keys in reverse order of their length to insure a name containing another is used
|
||||||
|
existing_keys = list(dataset.data.device_logs.keys())
|
||||||
|
existing_keys.sort(key=lambda x: -len(x))
|
||||||
|
for key in existing_keys:
|
||||||
|
if key in self.filter_string:
|
||||||
|
filter_variable = key
|
||||||
|
break
|
||||||
|
if filter_variable is None:
|
||||||
|
logging.warning(f' could not find suitable parameter to filter on in {self.filter_string}, '
|
||||||
|
f'available parameters are: {list(sorted(existing_keys))}')
|
||||||
|
return
|
||||||
|
logging.debug(f' using parameter {filter_variable} to apply filter {self.filter_string}')
|
||||||
|
if not filter_variable in EVENT_BITMASKS:
|
||||||
|
EVENT_BITMASKS[filter_variable] = max(EVENT_BITMASKS.values())*2
|
||||||
|
if not filter_variable in dataset.data.pulses.dtype.names:
|
||||||
|
# interpolate the parameter values for all existing pulses
|
||||||
|
add_log_to_pulses(filter_variable, dataset)
|
||||||
|
fltr_pulses = eval(self.filter_string, {filter_variable: dataset.data.pulses[filter_variable]})
|
||||||
|
if self.remove_switchpulse:
|
||||||
|
switched = fltr_pulses[:-1] & ~fltr_pulses[1:]
|
||||||
|
fltr_pulses[:-1] &= ~switched
|
||||||
|
goodTimeS = dataset.data.pulses.time[fltr_pulses]
|
||||||
|
filter_e = np.logical_not(np.isin(dataset.data.events.wallTime, goodTimeS))
|
||||||
|
dataset.data.events.mask += EVENT_BITMASKS[filter_variable]*filter_e
|
||||||
146
eos/event_data_types.py
Normal file
146
eos/event_data_types.py
Normal file
@@ -0,0 +1,146 @@
|
|||||||
|
"""
|
||||||
|
Specify the data type and protocol used for event datasets.
|
||||||
|
"""
|
||||||
|
from typing import Dict, List, Optional, Protocol, Tuple
|
||||||
|
from dataclasses import dataclass, field
|
||||||
|
from .header import Header
|
||||||
|
from abc import ABC, abstractmethod
|
||||||
|
from hashlib import sha256
|
||||||
|
import numpy as np
|
||||||
|
import logging
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class AmorGeometry:
|
||||||
|
mu:float
|
||||||
|
nu:float
|
||||||
|
kap:float
|
||||||
|
kad:float
|
||||||
|
div:float
|
||||||
|
|
||||||
|
chopperSeparation: float
|
||||||
|
detectorDistance: float
|
||||||
|
chopperDetectorDistance: float
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class AmorTiming:
|
||||||
|
ch1TriggerPhase: float
|
||||||
|
ch2TriggerPhase: float
|
||||||
|
chopperSpeed: float
|
||||||
|
chopperPhase: float
|
||||||
|
tau: float
|
||||||
|
|
||||||
|
# Structured datatypes used for event streams
|
||||||
|
EVENT_TYPE = np.dtype([('tof', np.float64), ('pixelID', np.uint32), ('mask', np.int32)])
|
||||||
|
PACKET_TYPE = np.dtype([('start_index', np.uint32), ('time', np.int64)])
|
||||||
|
PULSE_TYPE = np.dtype([('time', np.int64), ('monitor', np.float32)])
|
||||||
|
PC_TYPE = np.dtype([('current', np.float32), ('time', np.int64)])
|
||||||
|
LOG_TYPE = np.dtype([('value', np.float32), ('time', np.int64)])
|
||||||
|
|
||||||
|
# define the bitmask for individual event filters
|
||||||
|
EVENT_BITMASKS = {
|
||||||
|
'MonitorThreshold': 1,
|
||||||
|
'StrangeTimes': 2,
|
||||||
|
'yRange': 4,
|
||||||
|
'LamdaRange': 8,
|
||||||
|
'qRange': 16,
|
||||||
|
}
|
||||||
|
|
||||||
|
def append_fields(input: np.recarray, new_fields: List[Tuple[str, np.dtype]]):
|
||||||
|
# add one ore more fields to a recarray, numpy functions seems to fail
|
||||||
|
flds = [(name, dtypei[0]) for name, dtypei in input.dtype.fields.items()]
|
||||||
|
flds += new_fields
|
||||||
|
output = np.recarray(len(input), dtype=flds)
|
||||||
|
|
||||||
|
for field in input.dtype.fields.keys():
|
||||||
|
output[field] = input[field]
|
||||||
|
return output
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class AmorEventStream:
|
||||||
|
events: np.recarray # EVENT_TYPE
|
||||||
|
packets: np.recarray # PACKET_TYPE
|
||||||
|
pulses: np.recarray # PULSE_TYPE
|
||||||
|
proton_current: np.recarray # PC_TYPE
|
||||||
|
device_logs: Dict[str, np.recarray] = field(default_factory=dict) # LOG_TYPE
|
||||||
|
|
||||||
|
class EventDatasetProtocol(Protocol):
|
||||||
|
"""
|
||||||
|
Minimal attributes a dataset needs to provide to work with EventDataAction
|
||||||
|
"""
|
||||||
|
geometry: AmorGeometry
|
||||||
|
timing: AmorTiming
|
||||||
|
data: AmorEventStream
|
||||||
|
|
||||||
|
def append(self, other):
|
||||||
|
# Should define a way to add events from other to own
|
||||||
|
...
|
||||||
|
|
||||||
|
def update_header(self, header:Header):
|
||||||
|
# update a header with the information read from file
|
||||||
|
...
|
||||||
|
|
||||||
|
class EventDataAction(ABC):
|
||||||
|
"""
|
||||||
|
Abstract base class used for actions applied to an EventDatasetProtocol based objects.
|
||||||
|
Each action can optionally modify the header information.
|
||||||
|
Actions can be combined using the pipe operator | (OR).
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __call__(self, dataset: EventDatasetProtocol)->None:
|
||||||
|
logging.debug(f" Enter action {self.__class__.__name__} on {dataset!r}")
|
||||||
|
self.perform_action(dataset)
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def perform_action(self, dataset: EventDatasetProtocol)->None: ...
|
||||||
|
|
||||||
|
def update_header(self, header:Header)->None:
|
||||||
|
if hasattr(self, 'action_name'):
|
||||||
|
header.reduction.corrections.append(getattr(self, 'action_name'))
|
||||||
|
|
||||||
|
def __or__(self, other:'EventDataAction')->'CombinedAction':
|
||||||
|
return CombinedAction([self, other])
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
output = self.__class__.__name__+'('
|
||||||
|
for key,value in self.__dict__.items():
|
||||||
|
output += f'{key}={value}, '
|
||||||
|
return output.rstrip(', ')+')'
|
||||||
|
|
||||||
|
def action_hash(self)->bytes:
|
||||||
|
# generate a unique hash that encodes this action with its configuration parameters
|
||||||
|
mh = sha256()
|
||||||
|
mh.update(self.__class__.__name__.encode())
|
||||||
|
for key,value in sorted(self.__dict__.items()):
|
||||||
|
mh.update(repr(value).encode())
|
||||||
|
return mh.hexdigest()
|
||||||
|
|
||||||
|
class CombinedAction(EventDataAction):
    """
    Used to perform multiple actions in one call. Stores a sequence of actions
    that are then performed individually one after the other.
    """

    def __init__(self, actions: List[EventDataAction]) -> None:
        self._actions = actions

    def perform_action(self, dataset: EventDatasetProtocol) -> None:
        # Run each stored action in insertion order.
        for action in self._actions:
            action(dataset)

    def update_header(self, header: Header) -> None:
        # Let every contained action contribute its own header updates.
        for action in self._actions:
            action.update_header(header)

    def __or__(self, other: 'EventDataAction') -> 'CombinedAction':
        # Keep the chain flat instead of nesting CombinedAction objects.
        return CombinedAction(self._actions+[other])

    def __repr__(self):
        # Render as 'ActionA(...) | ActionB(...)', mirroring how it was built.
        output = repr(self._actions[0])
        for ai in self._actions[1:]:
            output += ' | '+repr(ai)
        return output

    def action_hash(self) -> str:
        """
        Combine the hashes of all contained actions, in order, into one digest.

        Returns the sha256 hex digest as a ``str`` (the previous ``-> bytes``
        annotation was incorrect: ``hexdigest()`` returns ``str``).
        """
        mh = sha256()
        for action in self._actions:
            mh.update(action.action_hash().encode())
        return mh.hexdigest()
|
||||||
202
eos/event_handling.py
Normal file
202
eos/event_handling.py
Normal file
@@ -0,0 +1,202 @@
|
|||||||
|
"""
|
||||||
|
Calculations performed on AmorEventData.
|
||||||
|
This module contains actions that do not need the numba base helper functions. Other actions are in event_analysis
|
||||||
|
"""
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import numpy as np
|
||||||
|
|
||||||
|
from .header import Header
|
||||||
|
from .options import ExperimentConfig, MonitorType
|
||||||
|
from .event_data_types import EventDatasetProtocol, EventDataAction, EVENT_BITMASKS
|
||||||
|
|
||||||
|
class ApplyPhaseOffset(EventDataAction):
    """Overwrite the chopper-1 trigger phase of a dataset with a configured value."""

    def __init__(self, chopperPhaseOffset: float):
        self.chopperPhaseOffset = chopperPhaseOffset

    def perform_action(self, dataset: EventDatasetProtocol) -> None:
        # remember the old value only for the debug message
        previous = dataset.timing.ch1TriggerPhase
        logging.debug(
                f'        replaced ch1TriggerPhase = {previous} '
                f'with {self.chopperPhaseOffset}')
        dataset.timing.ch1TriggerPhase = self.chopperPhaseOffset
|
||||||
|
|
||||||
|
class ApplyParameterOverwrites(EventDataAction):
    """
    Apply user-supplied overrides from an ExperimentConfig to a dataset's
    geometry (mu/nu) and, via update_header, to the sample model.
    """

    def __init__(self, config: ExperimentConfig):
        self.config=config

    def perform_action(self, dataset: EventDatasetProtocol) ->None:
        # NOTE(review): these are truthiness checks, so an override of exactly
        # 0.0 (or None) is treated as "not set" — confirm this is intended.
        if self.config.muOffset:
            logging.debug(f'        set muOffset = {self.config.muOffset}')
            dataset.geometry.mu += self.config.muOffset
        if self.config.mu:
            logging.debug(f'        replaced mu = {dataset.geometry.mu} with {self.config.mu}')
            dataset.geometry.mu = self.config.mu
        if self.config.nu:
            logging.debug(f'        replaced nu = {dataset.geometry.nu} with {self.config.nu}')
            dataset.geometry.nu = self.config.nu
        # report the effective geometry after all overrides were applied
        logging.info(f'    mu = {dataset.geometry.mu:6.3f}, '
                     f'nu = {dataset.geometry.nu:6.3f}, '
                     f'kap = {dataset.geometry.kap:6.3f}, '
                     f'kad = {dataset.geometry.kad:6.3f}')

    def update_header(self, header:Header) ->None:
        # Replace the sample model in the header when configured. The value can
        # either be a path to a yaml file or an inline model 'stack' string.
        if self.config.sampleModel:
            import yaml
            from orsopy.fileio.model_language import SampleModel
            if self.config.sampleModel.endswith('.yml') or self.config.sampleModel.endswith('.yaml'):
                if os.path.isfile(self.config.sampleModel):
                    with open(self.config.sampleModel, 'r') as model_yml:
                        model = yaml.safe_load(model_yml)
                else:
                    # best-effort: a missing model file is reported, not fatal
                    logging.warning(f'    ! the file {self.config.sampleModel} does not exist. Ignored!')
                    return
            else:
                # inline model string becomes the 'stack' entry
                model = dict(stack=self.config.sampleModel)
            logging.debug(f'        set sample.model = {self.config.sampleModel}')
            header.sample.model = SampleModel.from_dict(model)
|
||||||
|
|
||||||
|
|
||||||
|
class CorrectChopperPhase(EventDataAction):
    """Shift all event TOF values by the chopper trigger-phase correction."""

    def perform_action(self, dataset: EventDatasetProtocol) -> None:
        timing = dataset.timing
        # phase correction expressed as a fraction of the chopper period tau
        correction = timing.tau*(timing.ch1TriggerPhase-timing.chopperPhase/2)/180
        dataset.data.events.tof += correction
|
||||||
|
|
||||||
|
|
||||||
|
class CorrectSeriesTime(EventDataAction):
    """
    Shift all time stamps (pulses, events, proton current, device logs) so
    they become relative to the start of the measurement series.
    """

    def __init__(self, seriesStartTime):
        # store as int64 nanoseconds so the subtractions below stay exact
        self.seriesStartTime = np.int64(seriesStartTime)

    def perform_action(self, dataset: EventDatasetProtocol)->None:
        if 'wallTime' not in dataset.data.events.dtype.names:
            # fixed message: correct class name (was 'CorrectTimeSeries') and
            # 'wallTime' spelling (was 'walltTime')
            raise ValueError("CorrectSeriesTime requires wallTime to be extracted, please run ExtractWalltime first")
        dataset.data.pulses.time -= self.seriesStartTime
        dataset.data.events.wallTime -= self.seriesStartTime
        dataset.data.proton_current.time -= self.seriesStartTime
        # device logs carry their own time axis and must be shifted as well
        for value in dataset.data.device_logs.values():
            value.time -= self.seriesStartTime
        start, stop = dataset.data.events.wallTime[0], dataset.data.events.wallTime[-1]
        logging.debug(f'        wall time from {start/1e9:6.1f} s to {stop/1e9:6.1f} s, '
                      f'series time = {self.seriesStartTime/1e9:6.1f}')
|
||||||
|
|
||||||
|
|
||||||
|
class AssociatePulseWithMonitor(EventDataAction):
    """
    Assign a monitor value to each pulse for later normalization.

    Supported monitors: integrated proton charge per pulse
    (proton_charge/debug), counting time per pulse (time), or a constant 1
    per pulse (default).
    """

    def __init__(self, monitorType:MonitorType):
        self.monitorType = monitorType

    def perform_action(self, dataset: EventDatasetProtocol)->None:
        logging.debug(f'        using monitor type {self.monitorType}')
        # bugfix: the original condition was
        #   `in [MonitorType.proton_charge or MonitorType.debug]`
        # which evaluates to `[MonitorType.proton_charge]`, so the debug type
        # fell through to the constant-monitor branch.
        if self.monitorType in (MonitorType.proton_charge, MonitorType.debug):
            # charge per pulse = current * pulse length (2*tau), scaled to mC
            dataset.data.pulses.monitor = self.get_current_per_pulse(dataset.data.pulses.time,
                                                                     dataset.data.proton_current.time,
                                                                     dataset.data.proton_current.current)\
                                          * 2*dataset.timing.tau * 1e-3
        elif self.monitorType==MonitorType.time:
            dataset.data.pulses.monitor = 2*dataset.timing.tau
        else: # pulses
            dataset.data.pulses.monitor = 1

        if self.monitorType == MonitorType.debug:
            if 'wallTime' not in dataset.data.events.dtype.names:
                # fixed message typo: 'walltTime' -> 'wallTime'
                raise ValueError(
                        "AssociatePulseWithMonitor requires wallTime for debugging, please run ExtractWalltime first")
            # write counts-per-pulse vs. monitor to a debug histogram file
            cpp, t_bins = np.histogram(dataset.data.events.wallTime, dataset.data.pulses.time)
            np.savetxt('tme.hst', np.vstack((dataset.data.pulses.time[:-1], cpp, dataset.data.pulses.monitor[:-1])).T)

    @staticmethod
    def get_current_per_pulse(pulseTimeS, currentTimeS, currents):
        """
        Return, for each pulse time, the last current reading recorded at or
        before that pulse (0 before the first reading). Assumes both time
        arrays are sorted ascending.
        """
        # add currents for early pulses and current time value after last pulse (j+1)
        currentTimeS = np.hstack([[0], currentTimeS, [pulseTimeS[-1]+1]])
        currents = np.hstack([[0], currents])
        pulseCurrentS = np.zeros(pulseTimeS.shape[0], dtype=float)
        j = 0
        for i, ti in enumerate(pulseTimeS):
            # find the last current item that was before this pulse
            while ti >= currentTimeS[j+1]:
                j += 1
            pulseCurrentS[i] = currents[j]
        return pulseCurrentS
|
||||||
|
|
||||||
|
class FilterMonitorThreshold(EventDataAction):
    """
    Mask all events belonging to pulses whose monitor (proton charge) is below
    a threshold, and zero the monitor value of the rejected pulses.
    """

    def __init__(self, lowCurrentThreshold:float):
        self.lowCurrentThreshold = lowCurrentThreshold

    def perform_action(self, dataset: EventDatasetProtocol) ->None:
        if 'wallTime' not in dataset.data.events.dtype.names:
            # fixed message typo: 'walltTime' -> 'wallTime'
            raise ValueError(
                    "FilterMonitorThreshold requires wallTime to be extracted, please run ExtractWalltime first")
        # a pulse passes if its charge exceeds the threshold scaled by the
        # pulse length (2*tau) and the mC conversion used when filling monitor
        low_current_filter = dataset.data.pulses.monitor>2*dataset.timing.tau*self.lowCurrentThreshold*1e-3
        dataset.data.pulses.monitor[np.logical_not(low_current_filter)] = 0.
        goodTimeS = dataset.data.pulses.time[low_current_filter]
        # events are rejected when their wallTime does not belong to a good pulse
        filter_e = np.logical_not(np.isin(dataset.data.events.wallTime, goodTimeS))

        dataset.data.events.mask += EVENT_BITMASKS['MonitorThreshold']*filter_e
        logging.info(f'    low-beam (<{self.lowCurrentThreshold} mC) rejected pulses: '
                     f'{dataset.data.pulses.monitor.shape[0]-goodTimeS.shape[0]} '
                     f'out of {dataset.data.pulses.monitor.shape[0]}')
        logging.info(f'        with {filter_e.sum()} events')
        if goodTimeS.shape[0]:
            logging.info(f'    average counts per pulse = {dataset.data.events.shape[0]/goodTimeS.shape[0]:7.1f}')
        else:
            # removed pointless f-prefix on placeholder-free string
            logging.info('    average counts per pulse = undefined')
|
||||||
|
|
||||||
|
class FilterStrangeTimes(EventDataAction):
    """Mask events whose time-of-flight exceeds the chopper period (2*tau)."""

    def perform_action(self, dataset: EventDatasetProtocol)->None:
        events = dataset.data.events
        # events with tof beyond one full period cannot be assigned to a pulse
        strange = np.logical_not(events.tof<=2*dataset.timing.tau)
        events.mask += EVENT_BITMASKS['StrangeTimes']*strange
        if strange.any():
            logging.warning(f'    strange times: {strange.sum()}')
|
||||||
|
|
||||||
|
|
||||||
|
class TofTimeCorrection(EventDataAction):
    """
    Correct event TOF for the angular offset of the chopper opening, either
    per event (delta) or globally (kad).
    """

    def __init__(self, correct_chopper_opening: bool = True):
        self.correct_chopper_opening = correct_chopper_opening

    def perform_action(self, dataset: EventDatasetProtocol) -> None:
        if 'delta' not in dataset.data.events.dtype.names:
            raise ValueError(
                    "TofTimeCorrection requires delta to be extracted, please run AnalyzePixelIDs first")
        d = dataset.data
        # pick the angular offset source, then apply the same tau scaling
        if self.correct_chopper_opening:
            angle = d.events.delta
        else:
            angle = dataset.geometry.kad
        d.events.tof -= ( angle / 180. ) * dataset.timing.tau
|
||||||
|
|
||||||
|
class ApplyMask(EventDataAction):
    """
    Remove all events whose mask bits indicate rejection, then shrink the
    pulse and device-log records to the remaining events and refresh the
    log-derived header info.
    """

    def __init__(self, bitmask_filter=None):
        # bits set in bitmask_filter are IGNORED when filtering (None = use all bits)
        self.bitmask_filter = bitmask_filter

    def perform_action(self, dataset: EventDatasetProtocol) ->None:
        # TODO: why is this action time consuming?
        d = dataset.data
        pre_filter = d.events.shape[0]
        if logging.getLogger().level <= logging.DEBUG:
            # only run this calculation if debug level is actually active
            filtered_by_mask = {}
            for key, value in EVENT_BITMASKS.items():
                # per-filter count of events carrying that mask bit
                filtered_by_mask[key] = ((d.events.mask & value)!=0).sum()
            logging.debug(f"    Removed by filters: {filtered_by_mask}")
        if self.bitmask_filter is None:
            d.events = d.events[d.events.mask==0]
        else:
            # remove the provided bitmask_filter bits from the events
            # this means that all bits that are set in bitmask_filter will NOT be used to filter events
            fltr = (d.events.mask & (~self.bitmask_filter)) == 0
            d.events = d.events[fltr]
        post_filter = d.events.shape[0]
        logging.info(f'    number of events: total = {pre_filter:7d}, filtered = {post_filter:7d}')
        if d.device_logs == {} or not hasattr(dataset, 'update_info_from_logs'):
            # nothing to re-derive; dataset type does not support log updates
            return
        # filter pulses and logs to allow update of header information
        from .helpers import add_log_to_pulses
        times = np.unique(d.events.wallTime)
        # make sure all log variables are associated with pulses
        for key, log in d.device_logs.items():
            if not key in d.pulses.dtype.names:
                # interpolate the parameter values for all existing pulses
                add_log_to_pulses(key, dataset)
        # remove all pulses that have no more events
        d.pulses = d.pulses[np.isin(d.pulses.time, times)]
        for key, log in d.device_logs.items():
            # rebuild each log aligned 1:1 with the surviving pulses
            d.device_logs[key] = np.recarray(d.pulses.shape, dtype = log.dtype)
            d.device_logs[key].time = d.pulses.time
            d.device_logs[key].value = d.pulses[key]
        dataset.update_info_from_logs()
|
||||||
517
eos/file_reader.py
Normal file
517
eos/file_reader.py
Normal file
@@ -0,0 +1,517 @@
|
|||||||
|
"""
|
||||||
|
Reading of Amor NeXus data files to extract metadata and event stream.
|
||||||
|
"""
|
||||||
|
from typing import BinaryIO, List, Union
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import h5py
|
||||||
|
import numpy as np
|
||||||
|
import logging
|
||||||
|
import subprocess
|
||||||
|
|
||||||
|
from datetime import datetime
|
||||||
|
|
||||||
|
from orsopy import fileio
|
||||||
|
from orsopy.fileio.model_language import SampleModel
|
||||||
|
|
||||||
|
from . import const
|
||||||
|
from .header import Header
|
||||||
|
from .event_data_types import AmorGeometry, AmorTiming, AmorEventStream, LOG_TYPE, PACKET_TYPE, EVENT_TYPE, PULSE_TYPE, \
|
||||||
|
PC_TYPE
|
||||||
|
|
||||||
|
try:
|
||||||
|
import zoneinfo
|
||||||
|
except ImportError:
|
||||||
|
# for python versions < 3.9 try to use the backports version
|
||||||
|
from backports import zoneinfo
|
||||||
|
|
||||||
|
|
||||||
|
# Time zone used to interpret time strings
|
||||||
|
AMOR_LOCAL_TIMEZONE = zoneinfo.ZoneInfo(key='Europe/Zurich')
|
||||||
|
UTC = zoneinfo.ZoneInfo(key='UTC')
|
||||||
|
|
||||||
|
class AmorHeader:
    """
    Collects header information from an Amor NeXus file without reading event data.
    """
    # mapping of names to (hdf_path, dtype, nicos_key[, suffix]);
    # entries with a nicos_key can be recovered from the NICOS cache when the
    # path is missing from the file (see rv()/_replace_if_missing())
    hdf_paths = dict(
            title=('entry1/title', str),
            proposal_id=('entry1/proposal_id', str),
            user_name=('entry1/user/name', str),
            user_email=('entry1/user/email', str),
            sample_name=('entry1/sample/name', str),
            source_name=('entry1/Amor/source/name', str),
            sample_model=('entry1/sample/model', str),
            start_time=('entry1/start_time', str),
            start_time_fallback=('entry1/Amor/instrument_control_parameters/start_time', str),

            chopper_separation=('entry1/Amor/chopper/pair_separation', float),
            detector_distance=('entry1/Amor/detector/transformation/distance', float),
            chopper_distance=('entry1/Amor/chopper/distance', float),
            sample_temperature=('entry1/sample/temperature', float),
            sample_magnetic_field=('entry1/sample/magnetic_field', float),

            mu=('entry1/Amor/instrument_control_parameters/mu', float, 'mu'),
            nu=('entry1/Amor/instrument_control_parameters/nu', float, 'nu'),
            kap=('entry1/Amor/instrument_control_parameters/kappa', float, 'kappa'),
            kad=('entry1/Amor/instrument_control_parameters/kappa_offset', float, 'kappa_offset'),
            div=('entry1/Amor/instrument_control_parameters/div', float, 'div'),
            ch1_trigger_phase=('entry1/Amor/chopper/ch1_trigger_phase', float, 'ch1_trigger_phase'),
            ch2_trigger_phase=('entry1/Amor/chopper/ch2_trigger_phase', float, 'ch2_trigger_phase'),
            # NOTE(review): the NICOS fallback key for chopper_speed is
            # 'chopper_phase' — looks like a copy-paste slip, confirm.
            chopper_speed=('entry1/Amor/chopper/rotation_speed', float, 'chopper_phase'),
            chopper_phase=('entry1/Amor/chopper/phase', float, 'chopper_phase'),
            polarization_config_label=('entry1/Amor/polarization/configuration', int, 'polarization_config_label', '/*'),
            )
|
||||||
|
|
||||||
|
def __init__(self, fileName:Union[str, h5py.File, BinaryIO]):
    # Accept a file path, an already-open h5py.File, or a binary stream.
    if type(fileName) is str:
        logging.warning(f'    {fileName.split("/")[-1]}')
        # SWMR mode so a file still being written by the instrument can be read
        self.hdf = h5py.File(fileName, 'r', swmr=True)
    elif type(fileName) is h5py.File:
        self.hdf = fileName
    else:
        self.hdf = h5py.File(fileName, 'r')

    # keys from hdf_paths that turned out to be NXlog groups (filled by rv())
    self._log_keys = []

    self.read_header_info()
    self.read_instrument_configuration()

    if type(fileName) is str:
        # close the input file to free memory, only if the file was opened in this object
        self.hdf.close()
        del(self.hdf)
|
||||||
|
|
||||||
|
def _replace_if_missing(self, key, nicos_key, dtype=float, suffix=''):
    """Fall back to the NICOS cache for a value missing from the data file."""
    from .nicos_interface import lookup_nicos_value
    # the cache lookup is organized by calendar year of the data file
    year = self.fileDate.strftime('%Y')
    return lookup_nicos_value(key, nicos_key, dtype, suffix, year)
|
||||||
|
|
||||||
|
def rv(self, key):
    """
    Generic read-value method based on a key in the hdf_paths dictionary.

    NXlog groups are resolved to a single value (and the key is remembered in
    _log_keys), string datasets go through read_string, other datasets are
    converted with the configured dtype. On KeyError/IndexError a NICOS cache
    lookup is attempted when a nicos_key is configured; otherwise the
    exception propagates.
    """
    hdf_path, dtype, *nicos = self.hdf_paths[key]
    try:
        hdfgrp = self.hdf[hdf_path]
        if hdfgrp.attrs.get('NX_class', None) == 'NXlog':
            # use the last value that was recorded before the count started
            # NOTE(review): [0][0] selects the FIRST index with
            # time <= start — that is the "last before start" only if the
            # time column is sorted descending; confirm the intended order.
            time_column = hdfgrp['time'][:]
            try:
                start_index = np.where(time_column<=self._start_time_ns)[0][0]
            except IndexError:
                start_index = 0
            if hdfgrp['value'].ndim==1:
                output = dtype(hdfgrp['value'][start_index])
            else:
                # 2-D log values: take the first column
                output = dtype(hdfgrp['value'][start_index, 0])
            # make sure key is only appended if no exception was raised
            self._log_keys.append(key)
            return output
        elif dtype is str:
            return self.read_string(hdf_path)
        else:
            if len(hdfgrp.shape)==1:
                return dtype(hdfgrp[0])
            else:
                return dtype(hdfgrp[()])
    except (KeyError, IndexError):
        if nicos:
            nicos_key = nicos[0]
            suffix = nicos[1] if len(nicos)>1 else ''
            return self._replace_if_missing(key, nicos_key, dtype, suffix)
        else:
            raise
|
||||||
|
|
||||||
|
|
||||||
|
def read_string(self, path):
    """Read a string dataset, handling both scalar and legacy array layouts."""
    node = self.hdf[path]
    if np.shape(node):
        # format until 2025: strings stored as one-element arrays
        return node[0].decode('utf-8')
    return node[()].decode('utf-8')
|
||||||
|
|
||||||
|
def read_header_info(self):
    """
    Read general metadata (start time, owner, experiment, sample) from the
    file and build the corresponding orsopy fileio objects.
    """
    # default so that rv() on NXlog groups works before the start time is known
    self._start_time_ns = np.uint64(0)
    try:
        start_time = self.rv('start_time')
    except KeyError:
        start_time = self.rv('start_time_fallback')

    # extract start time as unix time, adding UTC offset of 1h to time string
    # NOTE(review): minor-version check assumes Python 3; fromisoformat
    # accepts the trailing 'Z' only from 3.11 on.
    if start_time.endswith('Z') and sys.version_info.minor<11:
        # older python versions did not support Z format
        start_time = start_time[:-1]
        TZ = UTC
    else:
        TZ = AMOR_LOCAL_TIMEZONE
    start_date = datetime.fromisoformat(start_time)
    self.fileDate = start_date.replace(tzinfo=TZ)
    self._start_time_ns = np.uint64(self.fileDate.timestamp()*1e9)

    # read general information and first data set
    title = self.rv('title')
    proposal_id = self.rv('proposal_id')
    user_name = self.rv('user_name')
    user_affiliation = 'unknown'
    user_email = self.rv('user_email')
    user_orcid = None
    sampleName = self.rv('sample_name')
    instrumentName = 'Amor'
    source = self.rv('source_name')
    sourceProbe = 'neutron'
    model = self.rv('sample_model')
    # a serialized dict contains 'stack'; otherwise treat the string as the stack itself
    if 'stack' in model:
        import yaml
        model = yaml.safe_load(model)
    else:
        model = dict(stack=model)

    self.owner = fileio.Person(
            name=user_name,
            affiliation=user_affiliation,
            contact=user_email,
            )
    if user_orcid:
        self.owner.orcid = user_orcid

    self.experiment = fileio.Experiment(
            title=title,
            instrument=instrumentName,
            start_date=start_date,
            probe=sourceProbe,
            facility=source,
            proposalID=proposal_id
            )
    # empty stack means no model was supplied
    if model['stack'] == '':
        om = None
    else:
        om = SampleModel.from_dict(model)
    self.sample = fileio.Sample(
            name=sampleName,
            model=om,
            sample_parameters={},
            )
    # while event times are not evaluated, use average_value reported in file for SEE
    if self.hdf['entry1/sample'].get('temperature', None) is not None:
        try:
            sample_temperature = self.rv('sample_temperature')
        except IndexError: pass
        else:
            self.sample.sample_parameters['temperature'] = fileio.Value(sample_temperature, unit='K')
    if self.hdf['entry1/sample'].get('magnetic_field', None) is not None:
        try:
            sample_magnetic_field = self.rv('sample_magnetic_field')
        except IndexError: pass
        else:
            self.sample.sample_parameters['magnetic_field'] = fileio.Value(sample_magnetic_field, unit='T')
|
||||||
|
|
||||||
|
def read_instrument_configuration(self):
    """
    Read geometry, chopper timing and polarization settings from the file and
    populate self.geometry, self.timing and self.instrument_settings.
    """
    chopperSeparation = self.rv('chopper_separation')
    detectorDistance = self.rv('detector_distance')
    chopperDistance = self.rv('chopper_distance')
    chopperDetectorDistance = detectorDistance - chopperDistance

    mu = self.rv('mu')
    nu = self.rv('nu')
    kap = self.rv('kap')
    kad = self.rv('kad')
    div = self.rv('div')
    ch1TriggerPhase = self.rv('ch1_trigger_phase')
    ch2TriggerPhase = self.rv('ch2_trigger_phase')
    try:
        # derive chopper period from the spacing of recorded trigger events
        chopperTriggerTime = (float(self.hdf['entry1/Amor/chopper/ch2_trigger/event_time_zero'][7]) \
                              -float(self.hdf['entry1/Amor/chopper/ch2_trigger/event_time_zero'][0])) \
                             /7
        chopperTriggerTimeDiff = float(self.hdf['entry1/Amor/chopper/ch2_trigger/event_time_offset'][2])
    except (KeyError, IndexError):
        logging.debug('    chopper speed and phase taken from .hdf file')
        chopperSpeed = self.rv('chopper_speed')
        chopperPhase = self.rv('chopper_phase')
        tau = 30/chopperSpeed
    else:
        # round the measured period to full milliseconds, then derive the rest
        tau = int(1e-6*chopperTriggerTime/2+0.5)*(1e-3)
        chopperTriggerPhase = 180e-9*chopperTriggerTimeDiff/tau
        chopperSpeed = 30/tau
        chopperPhase = chopperTriggerPhase+ch1TriggerPhase-ch2TriggerPhase

    self.geometry = AmorGeometry(mu, nu, kap, kad, div,
                                 chopperSeparation, detectorDistance, chopperDetectorDistance)
    self.timing = AmorTiming(ch1TriggerPhase, ch2TriggerPhase, chopperSpeed, chopperPhase, tau)

    polarizationConfigLabel = self.rv('polarization_config_label')
    polarizationConfig = fileio.Polarization(const.polarizationConfigs[polarizationConfigLabel])
    logging.debug(f'    polarization configuration: {polarizationConfig} (index {polarizationConfigLabel})')

    # incident angle range follows from sample angle, beam inclination and divergence
    self.instrument_settings = fileio.InstrumentSettings(
            incident_angle = fileio.ValueRange(round(mu+kap+kad-0.5*div, 3),
                                               round(mu+kap+kad+0.5*div, 3),
                                               'deg'),
            wavelength = fileio.ValueRange(const.lamdaCut, const.lamdaMax, 'angstrom'),
            # NOTE(review): polarizationConfig is already a Polarization, so it
            # is wrapped twice here — confirm this is harmless/intended.
            polarization = fileio.Polarization(polarizationConfig)
            )
    self.instrument_settings.qz = fileio.ValueRange(round(4*np.pi*np.sin(np.deg2rad(mu+kap+kad-0.5*div))/const.lamdaMax, 3),
                                                    round(4*np.pi*np.sin(np.deg2rad(mu+kap+kad+0.5*div))/const.lamdaCut, 3),
                                                    '1/angstrom')
    self.instrument_settings.mu = fileio.Value(
            round(mu, 3),
            'deg',
            comment='sample angle to horizon')
    self.instrument_settings.nu = fileio.Value(
            round(nu, 3),
            'deg',
            comment='detector angle to horizon')
    self.instrument_settings.div = fileio.Value(
            round(div, 3),
            'deg',
            comment='incoming beam divergence')
    self.instrument_settings.kap = fileio.Value(
            round(kap, 3),
            'deg',
            comment='incoming beam inclination')
    # only report the angular offset when it is significant
    if abs(kad)>0.02:
        self.instrument_settings.kad = fileio.Value(
                round(kad, 3),
                'deg',
                comment='incoming beam angular offset')
|
||||||
|
|
||||||
|
|
||||||
|
def update_header(self, header:Header):
    """
    Add dataset information into an existing header.
    """
    # NOTE(review): file_list is declared on the AmorEventData subclass;
    # calling this on a plain AmorHeader would raise AttributeError — confirm.
    logging.info(f'    meta data from: {self.file_list[0]}')
    header.owner = self.owner
    header.experiment = self.experiment
    header.sample = self.sample
    header.measurement_instrument_settings = self.instrument_settings
|
||||||
|
|
||||||
|
|
||||||
|
class AmorEventData(AmorHeader):
    """
    Read one amor NeXus datafile and extract relevant header information.

    Implements EventDatasetProtocol
    """
    # names of the source file(s), possibly annotated with an event-index range
    file_list: List[str]
    # first event index read from the file
    first_index: int
    # last event index read; negative values count append() modifications
    last_index: int = -1
    # True once all events of the file have been consumed
    EOF: bool = False
    # upper limit of events read in one go (file splitting)
    max_events: int
    owner: fileio.Person
    experiment: fileio.Experiment
    sample: fileio.Sample
    instrument_settings: fileio.InstrumentSettings
    geometry: AmorGeometry
    timing: AmorTiming
    data: AmorEventStream

    # wall time (ns) of the first pulse, set by read_chopper_trigger_stream
    eventStartTime: np.int64
|
def __init__(self, fileName:Union[str, h5py.File, BinaryIO], first_index:int=0, max_events:int=100_000_000):
    # Accept a path, an open h5py.File, or a binary stream; remember a
    # printable name for the file_list in each case.
    if type(fileName) is str:
        logging.warning(f'    {fileName.split("/")[-1]}')
        self.file_list = [fileName]
        hdf = h5py.File(fileName, 'r', swmr=True)
    elif type(fileName) is h5py.File:
        self.file_list = [fileName.filename]
        hdf = fileName
    else:
        self.file_list = [repr(fileName)]
        hdf = h5py.File(fileName, 'r')
    self.first_index = first_index
    self.max_events = max_events

    # AmorHeader reads all metadata (and closes nothing, as it gets a File object)
    super().__init__(hdf)
    self.hdf = hdf
    try:
        self.read_event_stream()
    except EOFError:
        # release the file handle before propagating the error
        self.hdf.close()
        del(self.hdf)
        raise
    self.read_log_streams()

    if type(fileName) is str:
        # close the input file to free memory, only if the file was opened in this object
        self.hdf.close()
        del(self.hdf)
|
||||||
|
|
||||||
|
|
||||||
|
def read_event_stream(self):
    """
    Read the actual event data from file. If file is too large, find event index from packets
    that allow splitting of file smaller than self.max_events.

    Raises EOFError when no packet matches first_index or no packets remain
    after filtering.
    """
    packets = np.recarray(self.hdf['/entry1/Amor/detector/data/event_index'].shape, dtype=PACKET_TYPE)
    packets.start_index = self.hdf['/entry1/Amor/detector/data/event_index'][:]
    packets.time = self.hdf['/entry1/Amor/detector/data/event_time_zero'][:]
    try:
        # packet index that matches first event index
        start_packet = int(np.where(packets.start_index==self.first_index)[0][0])
    except IndexError:
        raise EOFError(f'No event packet found starting at event #{self.first_index}, '
                       f'number of events is {self.hdf["/entry1/Amor/detector/data/event_time_offset"].shape[0]}')
    packets = packets[start_packet:]
    if packets.shape[0]==0:
        raise EOFError(f'No more packets left after start_packet filter')

    nevts = self.hdf['/entry1/Amor/detector/data/event_time_offset'].shape[0]
    if (nevts-self.first_index)>self.max_events:
        # more events than allowed: cut at the last packet boundary that keeps
        # the slice within max_events
        end_packet = np.where(packets.start_index<=(self.first_index+self.max_events))[0][-1]
        end_packet = max(1, end_packet)
        if len(packets)==1:
            self.last_index = nevts-1
        else:
            self.last_index = packets.start_index[end_packet]-1
        packets = packets[:end_packet]
    else:
        self.last_index = nevts-1
        self.EOF = True

    if packets.shape[0]==0:
        raise EOFError(f'No more packets left after end_packet filter, first_index={self.first_index}, '
                       f'max_events={self.max_events}, nevts={nevts}')

    nevts = self.last_index+1-self.first_index

    # adapt packet to event index relation
    packets.start_index -= self.first_index

    events = np.recarray(nevts, dtype=EVENT_TYPE)
    # event_time_offset is in ns, converted here to seconds
    events.tof = np.array(self.hdf['/entry1/Amor/detector/data/event_time_offset'][self.first_index:self.last_index+1])/1.e9
    events.pixelID = self.hdf['/entry1/Amor/detector/data/event_id'][self.first_index:self.last_index+1]
    events.mask = 0

    pulses = self.read_chopper_trigger_stream(packets)
    current = self.read_proton_current_stream(packets)
    self.data = AmorEventStream(events, packets, pulses, current)

    if self.first_index>0 and not self.EOF:
        # label the file name if not all events were used
        self.file_list[0] += f'[{self.first_index}:{self.last_index}]'
|
||||||
|
|
||||||
|
def read_log_streams(self):
    """
    Read the individual NXlog datasets that can later be used for filtering etc.
    """
    # _log_keys was filled by rv() with every hdf_paths entry that is an NXlog
    for key in self._log_keys:
        hdf_path, value_dtype, *_ = self.hdf_paths[key]
        group = self.hdf[hdf_path]
        record_dtype = np.dtype([('value', value_dtype), ('time', np.int64)])
        log = np.recarray(group['time'].shape, dtype=record_dtype)
        log.time = group['time'][:]
        values = group['value']
        # 2-D log values: keep only the first column
        log.value = values[:] if len(values.shape)==1 else values[:, 0]
        self.data.device_logs[key] = log
|
||||||
|
|
||||||
|
def update_info_from_logs(self):
    """
    Refresh header-relevant metadata from the (possibly filtered) device logs.

    Integer-valued logs (flags) use the most common entry; float logs use the
    mean. Only a fixed set of header-relevant keys is considered.
    """
    RELEVANT_ITEMS = ['sample_temperature', 'sample_magnetic_field', 'polarization_config_label']
    for key, log in self.data.device_logs.items():
        if key not in RELEVANT_ITEMS:
            continue
        if log.value.dtype in [np.int8, np.int16, np.int32, np.int64]:
            # for integer items (flags) report the most common one
            value = np.bincount(log.value).argmax()
            if logging.getLogger().getEffectiveLevel() <= logging.DEBUG \
                    and np.unique(log.value).shape[0]>1:
                logging.debug(f'    filtered values for {key} not unique, '
                              f'has {np.unique(log.value).shape[0]} values')
        else:
            value = log.value.mean()
        if key == 'polarization_config_label':
            self.instrument_settings.polarization = fileio.Polarization(const.polarizationConfigs[value])
        elif key == 'sample_temperature':
            # bugfix: attribute was misspelled 'magnitue', which silently
            # created a dead attribute instead of updating the fileio.Value
            self.sample.sample_parameters['temperature'].magnitude = value
        elif key == 'sample_magnetic_field':
            # bugfix: same 'magnitue' -> 'magnitude' misspelling
            self.sample.sample_parameters['magnetic_field'].magnitude = value
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def read_chopper_trigger_stream(self, packets):
    """
    Build the pulse record array from the chopper-2 trigger stream.

    Falls back to a synthetic pulse grid derived from the event stream when no
    trigger data is available (pulse filtering will then be unreliable).
    Sets self.eventStartTime to the first pulse/event time and restricts the
    pulses to the packet time window when only part of the file was read.
    """
    chopper1TriggerTime = np.array(self.hdf['entry1/Amor/chopper/ch2_trigger/event_time_zero'][:-2], dtype=np.int64)
    if np.shape(chopper1TriggerTime)[0] > 2:
        startTime = chopper1TriggerTime[0]
        pulseTimeS = chopper1TriggerTime
    else:
        # fixed log-message typo: 'steram' -> 'stream'
        logging.critical('    No chopper trigger data available, using event stream instead, pulse filtering will fail!')
        startTime = np.array(self.hdf['/entry1/Amor/detector/data/event_time_zero'][0], dtype=np.int64)
        stopTime = np.array(self.hdf['/entry1/Amor/detector/data/event_time_zero'][-2], dtype=np.int64)
        # synthesize one pulse per chopper period tau (converted to ns)
        pulseTimeS = np.arange(startTime, stopTime, self.timing.tau*1e9, dtype=np.int64)
    pulses = np.recarray(pulseTimeS.shape, dtype=PULSE_TYPE)
    pulses.time = pulseTimeS
    pulses.monitor = 1. # default is monitor pulses as it requires no calculation
    # apply filter in case the events were filtered
    if (self.first_index>0 or not self.EOF):
        pulses = pulses[(pulses.time>=packets.time[0])&(pulses.time<=packets.time[-1])]
    self.eventStartTime = startTime
    return pulses
|
||||||
|
|
||||||
|
def read_proton_current_stream(self, packets):
|
||||||
|
proton_current = np.recarray(self.hdf['entry1/Amor/detector/proton_current/time'].shape, dtype=PC_TYPE)
|
||||||
|
proton_current.time = self.hdf['entry1/Amor/detector/proton_current/time'][:]
|
||||||
|
if self.hdf['entry1/Amor/detector/proton_current/value'].ndim==1:
|
||||||
|
proton_current.current = self.hdf['entry1/Amor/detector/proton_current/value'][:]
|
||||||
|
else:
|
||||||
|
proton_current.current = self.hdf['entry1/Amor/detector/proton_current/value'][:,0]
|
||||||
|
|
||||||
|
if self.first_index>0 or not self.EOF:
|
||||||
|
proton_current = proton_current[(proton_current.time>=packets.time[0])&
|
||||||
|
(proton_current.time<=packets.time[-1])]
|
||||||
|
return proton_current
|
||||||
|
|
||||||
|
def info(self):
|
||||||
|
output = ""
|
||||||
|
for key in ['owner', 'experiment', 'sample', 'instrument_settings']:
|
||||||
|
value = repr(getattr(self, key)).replace("\n","\n ")
|
||||||
|
output += f'\n{key}={value},'
|
||||||
|
output += '\n'
|
||||||
|
return output
|
||||||
|
|
||||||
|
def append(self, other):
|
||||||
|
"""
|
||||||
|
Append event streams from another file to this one. Adjusts the event indices in the
|
||||||
|
packets to stay valid.
|
||||||
|
"""
|
||||||
|
new_events = np.concatenate([self.data.events, other.data.events]).view(np.recarray)
|
||||||
|
new_pulses = np.concatenate([self.data.pulses, other.data.pulses]).view(np.recarray)
|
||||||
|
new_proton_current = np.concatenate([self.data.proton_current, other.data.proton_current]).view(np.recarray)
|
||||||
|
new_packets = np.concatenate([self.data.packets, other.data.packets]).view(np.recarray)
|
||||||
|
new_packets.start_index[self.data.packets.shape[0]:] += self.data.events.shape[0]
|
||||||
|
self.data = AmorEventStream(new_events, new_packets, new_pulses, new_proton_current)
|
||||||
|
# Indicate that this is amodified dataset, basically counts number of appends as negative indices
|
||||||
|
self.last_index = min(self.last_index-1, -1)
|
||||||
|
self.file_list += other.file_list
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
output = (f"AmorEventData({self.file_list!r}) # {self.data.events.shape[0]} events, "
|
||||||
|
f"{self.data.pulses.shape[0]} pulses")
|
||||||
|
|
||||||
|
return output
|
||||||
|
|
||||||
|
def get_timeslice(self, start, end)->'AmorEventData':
|
||||||
|
# return a new dataset with just events that occured in given time slice
|
||||||
|
if not 'wallTime' in self.data.events.dtype.names:
|
||||||
|
raise ValueError("This dataset is missing a wallTime that is required for time slicing")
|
||||||
|
# convert from seconds to epoch integer values
|
||||||
|
start , end = start*1e9, end*1e9
|
||||||
|
event_filter = self.data.events.wallTime>=start
|
||||||
|
event_filter &= self.data.events.wallTime<end
|
||||||
|
pulse_filter = self.data.pulses.time>=start
|
||||||
|
pulse_filter &= self.data.pulses.time<end
|
||||||
|
output = super().__new__(AmorEventData)
|
||||||
|
for key, value in self.__dict__.items():
|
||||||
|
if key == 'data':
|
||||||
|
continue
|
||||||
|
else:
|
||||||
|
setattr(output, key, value)
|
||||||
|
# TODO: this is not strictly correct, as the packet/event relationship is lost
|
||||||
|
output.data = AmorEventStream(self.data.events[event_filter], self.data.packets,
|
||||||
|
self.data.pulses[pulse_filter], self.data.proton_current)
|
||||||
|
return output
|
||||||
@@ -5,6 +5,7 @@ Class to handle Orso header information that changes gradually during the reduct
|
|||||||
import platform
|
import platform
|
||||||
import sys
|
import sys
|
||||||
from datetime import datetime
|
from datetime import datetime
|
||||||
|
from typing import List, Literal
|
||||||
|
|
||||||
from orsopy import fileio
|
from orsopy import fileio
|
||||||
|
|
||||||
@@ -13,13 +14,16 @@ from . import __version__
|
|||||||
|
|
||||||
class Header:
|
class Header:
|
||||||
"""orso compatible output file header content"""
|
"""orso compatible output file header content"""
|
||||||
|
owner: fileio.Person
|
||||||
|
experiment: fileio.Experiment
|
||||||
|
sample: fileio.Sample
|
||||||
|
measurement_instrument_settings: fileio.InstrumentSettings
|
||||||
|
measurement_scheme: Literal["angle- and energy-dispersive", "angle-dispersive", "energy-dispersive"]
|
||||||
|
measurement_data_files: List[fileio.File]
|
||||||
|
measurement_additional_files: List[fileio.File]
|
||||||
|
|
||||||
|
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
self.owner = None
|
|
||||||
self.experiment = None
|
|
||||||
self.sample = None
|
|
||||||
self.measurement_instrument_settings = None
|
|
||||||
self.measurement_scheme = None
|
|
||||||
self.measurement_data_files = []
|
self.measurement_data_files = []
|
||||||
self.measurement_additional_files = []
|
self.measurement_additional_files = []
|
||||||
|
|
||||||
@@ -64,10 +68,3 @@ class Header:
|
|||||||
if columns is None:
|
if columns is None:
|
||||||
columns = self.columns()
|
columns = self.columns()
|
||||||
return fileio.Orso(ds, red, columns+extra_columns)
|
return fileio.Orso(ds, red, columns+extra_columns)
|
||||||
#-------------------------------------------------------------------------------------------------
|
|
||||||
def create_call_string(self):
|
|
||||||
callString = ' '.join(sys.argv)
|
|
||||||
if '-Y' not in callString:
|
|
||||||
callString += f' -Y {datetime.now().year}'
|
|
||||||
return callString
|
|
||||||
#-------------------------------------------------------------------------------------------------
|
|
||||||
35
eos/helpers.py
Normal file
35
eos/helpers.py
Normal file
@@ -0,0 +1,35 @@
|
|||||||
|
"""
|
||||||
|
Helper functions used during calculations. Uses numba enhanced functions if available, otherwise numpy based
|
||||||
|
fallback is imported.
|
||||||
|
"""
|
||||||
|
import numpy as np
|
||||||
|
from .event_data_types import EventDatasetProtocol, append_fields
|
||||||
|
|
||||||
|
try:
|
||||||
|
from .helpers_numba import merge_frames, extract_walltime, filter_project_x, calculate_derived_properties_focussing
|
||||||
|
except ImportError:
|
||||||
|
from .helpers_fallback import merge_frames, extract_walltime, filter_project_x, calculate_derived_properties_focussing
|
||||||
|
|
||||||
|
def add_log_to_pulses(key, dataset: EventDatasetProtocol):
|
||||||
|
"""
|
||||||
|
Add a log value for each pulse to the pulses array.
|
||||||
|
"""
|
||||||
|
pulses = dataset.data.pulses
|
||||||
|
log_data = dataset.data.device_logs[key]
|
||||||
|
if log_data.time[0]>0:
|
||||||
|
logTimeS = np.hstack([[0], log_data.time, [pulses.time[-1]+1]])
|
||||||
|
logValues = np.hstack([[log_data.value[0]], log_data.value])
|
||||||
|
else:
|
||||||
|
logTimeS = np.hstack([log_data.time, [pulses.time[-1]+1]])
|
||||||
|
logValues = log_data.value
|
||||||
|
pulseLogS = np.zeros(pulses.time.shape[0], dtype=log_data.value.dtype)
|
||||||
|
j = 0
|
||||||
|
for i, ti in enumerate(pulses.time):
|
||||||
|
# find the last current item that was before this pulse
|
||||||
|
while ti>=logTimeS[j+1]:
|
||||||
|
j += 1
|
||||||
|
pulseLogS[i] = logValues[j]
|
||||||
|
pulses = append_fields(pulses, [(key, pulseLogS.dtype)])
|
||||||
|
pulses[key] = pulseLogS
|
||||||
|
dataset.data.pulses = pulses
|
||||||
|
|
||||||
26
eos/helpers_fallback.py
Normal file
26
eos/helpers_fallback.py
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
"""
|
||||||
|
Equivalent function as in helpers_numba.py but using just numpy functionality.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
|
||||||
|
def merge_frames(tof_e, tofCut, tau, total_offset):
|
||||||
|
# tof shifted to 1 frame
|
||||||
|
return np.remainder(tof_e-(tofCut-tau), tau)+total_offset
|
||||||
|
|
||||||
|
def extract_walltime(tof_e, dataPacket_p, dataPacketTime_p):
|
||||||
|
output = np.empty(np.shape(tof_e)[0], dtype=np.int64)
|
||||||
|
for i in range(len(dataPacket_p)-1):
|
||||||
|
output[dataPacket_p[i]:dataPacket_p[i+1]] = dataPacketTime_p[i]
|
||||||
|
output[dataPacket_p[-1]:] = dataPacketTime_p[-1]
|
||||||
|
return output
|
||||||
|
|
||||||
|
def filter_project_x(pixelLookUp, pixelID_e, ymin, ymax):
|
||||||
|
(detY_e, detZ_e, detXdist_e, delta_e) = pixelLookUp[np.int_(pixelID_e)-1, :].T
|
||||||
|
# define mask and filter y range
|
||||||
|
mask_e = (ymin<=detY_e) & (detY_e<=ymax)
|
||||||
|
return (detZ_e, detXdist_e, delta_e, mask_e)
|
||||||
|
|
||||||
|
def calculate_derived_properties_focussing(tof_e, detXdist_e, delta_e, mask_e,
|
||||||
|
lmin, lmax, nu, mu, chopperDetectorDistance, hdm):
|
||||||
|
raise NotImplementedError("Only exists in numba implementation so far.")
|
||||||
@@ -11,7 +11,7 @@ def merge_frames(tof_e, tofCut, tau, total_offset):
|
|||||||
tof_e_out[ti] = ((tof_e[ti]-dt)%tau)+total_offset # tof shifted to 1 frame
|
tof_e_out[ti] = ((tof_e[ti]-dt)%tau)+total_offset # tof shifted to 1 frame
|
||||||
return tof_e_out
|
return tof_e_out
|
||||||
|
|
||||||
@nb.jit(nb.float64[:](nb.float64[:], nb.uint64[:], nb.int64[:]),
|
@nb.jit(nb.int64[:](nb.float64[:], nb.uint32[:], nb.int64[:]),
|
||||||
nopython=True, parallel=True, cache=True)
|
nopython=True, parallel=True, cache=True)
|
||||||
def extract_walltime(tof_e, dataPacket_p, dataPacketTime_p):
|
def extract_walltime(tof_e, dataPacket_p, dataPacketTime_p):
|
||||||
# assigning every event the wall time of the event packet (absolute time of pulse ?start?)
|
# assigning every event the wall time of the event packet (absolute time of pulse ?start?)
|
||||||
@@ -25,7 +25,7 @@ def extract_walltime(tof_e, dataPacket_p, dataPacketTime_p):
|
|||||||
return wallTime_e
|
return wallTime_e
|
||||||
|
|
||||||
@nb.jit(nb.types.Tuple((nb.int64[:], nb.float64[:], nb.float64[:], nb.boolean[:]))
|
@nb.jit(nb.types.Tuple((nb.int64[:], nb.float64[:], nb.float64[:], nb.boolean[:]))
|
||||||
(nb.float64[:, :], nb.int64[:], nb.int64, nb.int64),
|
(nb.float64[:, :], nb.uint32[:], nb.int64, nb.int64),
|
||||||
nopython=True, parallel=True, cache=True)
|
nopython=True, parallel=True, cache=True)
|
||||||
def filter_project_x(pixelLookUp, pixelID_e, ymin, ymax):
|
def filter_project_x(pixelLookUp, pixelID_e, ymin, ymax):
|
||||||
# project events on z-axis and create filter for events outside of y-range
|
# project events on z-axis and create filter for events outside of y-range
|
||||||
130
eos/instrument.py
Normal file
130
eos/instrument.py
Normal file
@@ -0,0 +1,130 @@
|
|||||||
|
"""
|
||||||
|
Classes describing the AMOR instrument configuration used during reduction.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import numpy as np
|
||||||
|
|
||||||
|
from . import const
|
||||||
|
|
||||||
|
try:
|
||||||
|
from functools import cache
|
||||||
|
except ImportError:
|
||||||
|
# python <3.9
|
||||||
|
def cache(func): return func
|
||||||
|
|
||||||
|
|
||||||
|
class Detector:
|
||||||
|
nBlades = 14 # number of active blades in the detector
|
||||||
|
nWires = 32 # number of wires per blade
|
||||||
|
nStripes = 64 # number of stipes per blade
|
||||||
|
angle = np.deg2rad(5.1) # deg angle of incidence of the beam on the blades (def: 5.1)
|
||||||
|
dZ = 4.0*np.sin(angle) # mm height-distance of neighboring pixels on one blade
|
||||||
|
dX = 4.0*np.cos(angle) # mm depth-distance of neighboring pixels on one blace
|
||||||
|
bladeZ = 10.455 # mm distance between detector blades
|
||||||
|
zero = 0.5*nBlades*bladeZ # mm vertical center of the detector
|
||||||
|
distance = 4000. # mm distance from focal point to leading blade edge
|
||||||
|
|
||||||
|
delta_z: np.ndarray
|
||||||
|
pixelLookUp: np.ndarray
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def resolve_pixels():
|
||||||
|
"""
|
||||||
|
Determine spatial coordinats and angles from pixel number,
|
||||||
|
does only have to be computed once for the detector
|
||||||
|
"""
|
||||||
|
if hasattr(Detector, 'pixelLookUp'):
|
||||||
|
return
|
||||||
|
nPixel = Detector.nWires * Detector.nStripes * Detector.nBlades
|
||||||
|
pixelID = np.arange(nPixel)
|
||||||
|
(bladeNr, bPixel) = np.divmod(pixelID, Detector.nWires * Detector.nStripes)
|
||||||
|
(bZi, detYi) = np.divmod(bPixel, Detector.nStripes) # z index on blade, y index on detector
|
||||||
|
detZi = bladeNr * Detector.nWires + bZi # z index on detector
|
||||||
|
detX = bZi * Detector.dX # x position in detector
|
||||||
|
# detZ = Detector.zero - bladeNr * Detector.bladeZ - bZi * Detector.dZ # z position on detector
|
||||||
|
bladeAngle = np.rad2deg( 2. * np.arcsin(0.5*Detector.bladeZ / Detector.distance) )
|
||||||
|
delta = (Detector.nBlades/2. - bladeNr) * bladeAngle \
|
||||||
|
- np.rad2deg( np.arctan(bZi*Detector.dZ / ( Detector.distance + bZi * Detector.dX) ) )
|
||||||
|
delta_z = delta[detYi==1]
|
||||||
|
pixel_lookup=np.vstack((detYi.T, detZi.T, detX.T, delta.T)).T
|
||||||
|
Detector.delta_z = delta_z
|
||||||
|
Detector.pixelLookUp = pixel_lookup
|
||||||
|
|
||||||
|
# guarantee that pixelLookUp has been computed
|
||||||
|
Detector.resolve_pixels()
|
||||||
|
|
||||||
|
class LZGrid:
|
||||||
|
dldl = 0.005 # Delta lambda / lambda
|
||||||
|
|
||||||
|
# as using cahced results, make sure the object is not modified
|
||||||
|
@property
|
||||||
|
def qResolution(self):
|
||||||
|
return self._qResolution
|
||||||
|
@property
|
||||||
|
def qzRange(self):
|
||||||
|
return self._qzRange
|
||||||
|
|
||||||
|
def __init__(self, qResolution, qzRange, lambda_overwrite=None):
|
||||||
|
self._qResolution = qResolution
|
||||||
|
self._qzRange = qzRange
|
||||||
|
if lambda_overwrite is None:
|
||||||
|
self.lamdaMax = const.lamdaMax
|
||||||
|
self.lamdaCut = const.lamdaCut
|
||||||
|
else:
|
||||||
|
self.lamdaCut, self.lamdaMax = lambda_overwrite
|
||||||
|
|
||||||
|
@property
|
||||||
|
@cache
|
||||||
|
def shape(self):
|
||||||
|
# gives the shape of the grid, not of the bin-edges
|
||||||
|
return (self.lamda().shape[0]-1, self.z().shape[0]-1)
|
||||||
|
|
||||||
|
@cache
|
||||||
|
def q(self):
|
||||||
|
resolutions = [0.005, 0.01, 0.02, 0.025, 0.04, 0.05, 0.1, 1]
|
||||||
|
a, b = np.histogram([self.qResolution], bins = resolutions)
|
||||||
|
dqdq = np.matmul(b[:-1],a)
|
||||||
|
if dqdq != self.qResolution:
|
||||||
|
logging.info(f'# changed resolution to {dqdq}')
|
||||||
|
qq = 0.01
|
||||||
|
# linear up to qq
|
||||||
|
q_grid = np.arange(0, qq, qq*dqdq)
|
||||||
|
# exponential from qq on
|
||||||
|
q_grid = np.append(q_grid, qq*(1.+dqdq)**np.arange(int(np.log(self.qzRange[1]/qq)/np.log(1+dqdq))))
|
||||||
|
q_grid = q_grid[q_grid>=self.qzRange[0]]
|
||||||
|
return q_grid
|
||||||
|
|
||||||
|
@cache
|
||||||
|
def lamda(self):
|
||||||
|
lamdaMax = self.lamdaMax
|
||||||
|
lamdaMin = self.lamdaCut
|
||||||
|
lamda_grid = lamdaMin*(1+self.dldl)**np.arange(int(np.log(lamdaMax/lamdaMin)/np.log(1+self.dldl)+1))
|
||||||
|
return lamda_grid
|
||||||
|
|
||||||
|
@cache
|
||||||
|
def z(self):
|
||||||
|
# TODO: shouldn't this be -0.5 to be the edges of each pixel?
|
||||||
|
return np.arange(Detector.nBlades*Detector.nWires+1)
|
||||||
|
|
||||||
|
@cache
|
||||||
|
def lz(self):
|
||||||
|
return np.ones(( self.lamda().shape[0]-1, self.z().shape[0]-1))
|
||||||
|
|
||||||
|
@cache
|
||||||
|
def delta(self, detectorDistance):
|
||||||
|
# unused for now
|
||||||
|
bladeAngle = np.rad2deg( 2. * np.arcsin(0.5*Detector.bladeZ / detectorDistance) )
|
||||||
|
blade_grid = np.arctan( np.arange(33) * Detector.dZ / ( detectorDistance + np.arange(33) * Detector.dX) )
|
||||||
|
blade_grid = np.rad2deg(blade_grid)
|
||||||
|
stepWidth = blade_grid[1] - blade_grid[0]
|
||||||
|
blade_grid = blade_grid - 0.2 * stepWidth
|
||||||
|
|
||||||
|
delta_grid = []
|
||||||
|
for b in np.arange(Detector.nBlades-1):
|
||||||
|
delta_grid = np.concatenate((delta_grid, blade_grid), axis=None)
|
||||||
|
blade_grid = blade_grid + bladeAngle
|
||||||
|
delta_grid = delta_grid[delta_grid<blade_grid[0]-0.5*stepWidth]
|
||||||
|
delta_grid = np.concatenate((delta_grid, blade_grid), axis=None)
|
||||||
|
|
||||||
|
return -np.flip(delta_grid) + 0.5*Detector.nBlades * bladeAngle
|
||||||
165
eos/kafka_events.py
Normal file
165
eos/kafka_events.py
Normal file
@@ -0,0 +1,165 @@
|
|||||||
|
"""
|
||||||
|
Collect AMOR detector events send via Kafka.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import numpy as np
|
||||||
|
from threading import Thread, Event
|
||||||
|
from time import time
|
||||||
|
|
||||||
|
from .event_data_types import AmorGeometry, AmorTiming, AmorEventStream, PACKET_TYPE, EVENT_TYPE, PULSE_TYPE, PC_TYPE
|
||||||
|
|
||||||
|
from uuid import uuid4
|
||||||
|
from streaming_data_types.eventdata_ev44 import EventData
|
||||||
|
from streaming_data_types.logdata_f144 import ExtractedLogData
|
||||||
|
from streaming_data_types import deserialise_f144, deserialise_ev44
|
||||||
|
from confluent_kafka import Consumer
|
||||||
|
|
||||||
|
from .header import Header
|
||||||
|
|
||||||
|
|
||||||
|
try:
|
||||||
|
from streaming_data_types.utils import get_schema
|
||||||
|
except ImportError:
|
||||||
|
from streaming_data_types.utils import _get_schema as get_schema
|
||||||
|
|
||||||
|
|
||||||
|
KAFKA_BROKER = 'linkafka01.psi.ch:9092'
|
||||||
|
AMOR_EVENTS = 'amor_detector'
|
||||||
|
AMOR_NICOS = 'amor_nicosForwarder'
|
||||||
|
|
||||||
|
class KafkaFrozenData:
|
||||||
|
"""
|
||||||
|
Represents event stream data from Kafka at a given time.
|
||||||
|
Will be returned by KafkaEventData to be use in conjunction
|
||||||
|
with data processing and projections.
|
||||||
|
|
||||||
|
Implements EventDatasetProtocol
|
||||||
|
"""
|
||||||
|
geometry: AmorGeometry
|
||||||
|
timing: AmorTiming
|
||||||
|
data: AmorEventStream
|
||||||
|
|
||||||
|
def __init__(self, geometry, timing, data, monitor=1.):
|
||||||
|
self.geometry = geometry
|
||||||
|
self.timing = timing
|
||||||
|
self.data = data
|
||||||
|
self.monitor = monitor
|
||||||
|
|
||||||
|
def append(self, other):
|
||||||
|
raise NotImplementedError("can't append live datastream to other event data")
|
||||||
|
|
||||||
|
def update_header(self, header:Header):
|
||||||
|
# maybe makes sense later, but for now just used for live vizualization
|
||||||
|
...
|
||||||
|
|
||||||
|
class KafkaEventData(Thread):
|
||||||
|
"""
|
||||||
|
Read Nicos information and events from Kafka. Creates a background
|
||||||
|
thread that listens to Kafka events and converts them to eos compatible information.
|
||||||
|
"""
|
||||||
|
geometry: AmorGeometry
|
||||||
|
timing: AmorTiming
|
||||||
|
events: np.recarray
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
self.stop_event = Event()
|
||||||
|
self.stop_counting = Event()
|
||||||
|
self.new_events = Event()
|
||||||
|
self.last_read = 0
|
||||||
|
self.last_read_time = 0.
|
||||||
|
self.start_time = time()
|
||||||
|
self.consumer = Consumer(
|
||||||
|
{'bootstrap.servers': 'linkafka01.psi.ch:9092',
|
||||||
|
'group.id': uuid4()})
|
||||||
|
self.consumer.subscribe([AMOR_EVENTS, AMOR_NICOS])
|
||||||
|
self.geometry = AmorGeometry(1.0, 2.0, 0., 0., 1.5, 10.0, 4.0, 10.0)
|
||||||
|
self.timing = AmorTiming(0., 0., 500., 0., 30./500.)
|
||||||
|
# create empty dataset
|
||||||
|
self.events = np.recarray(0, dtype=EVENT_TYPE)
|
||||||
|
super().__init__()
|
||||||
|
|
||||||
|
def run(self):
|
||||||
|
while not self.stop_event.is_set():
|
||||||
|
messages = self.consumer.consume(10, timeout=1)
|
||||||
|
for message in messages:
|
||||||
|
self.process_message(message)
|
||||||
|
|
||||||
|
def process_message(self, message):
|
||||||
|
if message.error():
|
||||||
|
logging.info(f" received Kafka message with error: {message.error()}")
|
||||||
|
return
|
||||||
|
schema = get_schema(message.value())
|
||||||
|
if message.topic()==AMOR_EVENTS and schema=='ev44':
|
||||||
|
events:EventData = deserialise_ev44(message.value())
|
||||||
|
self.add_events(events)
|
||||||
|
self.new_events.set()
|
||||||
|
logging.debug(f' new events {events}')
|
||||||
|
elif message.topic()==AMOR_NICOS and schema=='f144':
|
||||||
|
nicos_data:ExtractedLogData = deserialise_f144(message.value())
|
||||||
|
if nicos_data.source_name in self.nicos_mapping.keys():
|
||||||
|
logging.debug(f' {nicos_data.source_name} = {nicos_data.value}')
|
||||||
|
self.update_instrument(nicos_data)
|
||||||
|
|
||||||
|
def add_events(self, events:EventData):
|
||||||
|
"""
|
||||||
|
Add new events to the Dataset. The object keeps raw events
|
||||||
|
and only copies the latest set to the self.data object,
|
||||||
|
this allows to run the event processing to be performed on a "clean"
|
||||||
|
evnet stream each time.
|
||||||
|
"""
|
||||||
|
if self.stop_counting.is_set():
|
||||||
|
return
|
||||||
|
prev_size = self.events.shape[0]
|
||||||
|
new_events = events.pixel_id.shape[0]
|
||||||
|
self.events.resize(prev_size+new_events, refcheck=False)
|
||||||
|
self.events.pixelID[prev_size:] = events.pixel_id
|
||||||
|
self.events.mask[prev_size:] = 0
|
||||||
|
self.events.tof[prev_size:] = events.time_of_flight/1.e9
|
||||||
|
|
||||||
|
nicos_mapping = {
|
||||||
|
'mu': ('geometry', 'mu'),
|
||||||
|
'nu': ('geometry', 'nu'),
|
||||||
|
'kappa': ('geometry', 'kap'),
|
||||||
|
'kappa_offset': ('geometry', 'kad'),
|
||||||
|
'ch1_trigger_phase': ('timing', 'ch1TriggerPhase'),
|
||||||
|
'ch2_trigger_phase': ('timing', 'ch2TriggerPhase'),
|
||||||
|
'ch2_speed': ('timing', 'chopperSpeed'),
|
||||||
|
'chopper_phase': ('timing', 'chopperPhase'),
|
||||||
|
}
|
||||||
|
|
||||||
|
def update_instrument(self, nicos_data:ExtractedLogData):
|
||||||
|
if nicos_data.source_name in self.nicos_mapping:
|
||||||
|
attr, subattr = self.nicos_mapping[nicos_data.source_name]
|
||||||
|
setattr(getattr(self, attr), subattr, nicos_data.value)
|
||||||
|
if nicos_data.source_name=='ch2_speed':
|
||||||
|
self.timing.tau = 30./self.timing.chopperSpeed
|
||||||
|
|
||||||
|
def monitor(self):
|
||||||
|
return time()-self.start_time
|
||||||
|
|
||||||
|
def restart(self):
|
||||||
|
# empty event buffer
|
||||||
|
self.events = np.recarray(0, dtype=EVENT_TYPE)
|
||||||
|
self.stop_counting.clear()
|
||||||
|
self.last_read = 0
|
||||||
|
self.start_time = time()
|
||||||
|
self.new_events.clear()
|
||||||
|
|
||||||
|
def get_events(self, total_counts=False):
|
||||||
|
packets = np.recarray(0, dtype=PACKET_TYPE)
|
||||||
|
pulses = np.recarray(0, dtype=PULSE_TYPE)
|
||||||
|
pc = np.recarray(0, dtype=PC_TYPE)
|
||||||
|
if total_counts:
|
||||||
|
last_read = 0
|
||||||
|
else:
|
||||||
|
last_read = self.last_read
|
||||||
|
if last_read>=self.events.shape[0]:
|
||||||
|
raise EOFError("No new events arrived")
|
||||||
|
data = AmorEventStream(self.events[last_read:].copy(), packets, pulses, pc)
|
||||||
|
self.last_read = self.events.shape[0]
|
||||||
|
self.new_events.clear()
|
||||||
|
t_now = time()
|
||||||
|
monitor = t_now-self.last_read_time
|
||||||
|
self.last_read_time = t_now
|
||||||
|
return KafkaFrozenData(self.geometry, self.timing, data, monitor=monitor)
|
||||||
283
eos/kafka_serializer.py
Normal file
283
eos/kafka_serializer.py
Normal file
@@ -0,0 +1,283 @@
|
|||||||
|
"""
|
||||||
|
Allows to send eos projections to Kafka using ESS histogram serialization.
|
||||||
|
|
||||||
|
For histogram_h01 the message is build using:
|
||||||
|
|
||||||
|
hist = {
|
||||||
|
"source": "some_source",
|
||||||
|
"timestamp": 123456,
|
||||||
|
"current_shape": [2, 5],
|
||||||
|
"dim_metadata": [
|
||||||
|
{
|
||||||
|
"length": 2,
|
||||||
|
"unit": "a",
|
||||||
|
"label": "x",
|
||||||
|
"bin_boundaries": np.array([10, 11, 12]),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"length": 5,
|
||||||
|
"unit": "b",
|
||||||
|
"label": "y",
|
||||||
|
"bin_boundaries": np.array([0, 1, 2, 3, 4, 5]),
|
||||||
|
},
|
||||||
|
],
|
||||||
|
"last_metadata_timestamp": 123456,
|
||||||
|
"data": np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]),
|
||||||
|
"errors": np.array([[5, 4, 3, 2, 1], [10, 9, 8, 7, 6]]),
|
||||||
|
"info": "info_string",
|
||||||
|
}
|
||||||
|
"""
|
||||||
|
import logging
|
||||||
|
from typing import List, Tuple, Union
|
||||||
|
from threading import Thread, Event
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
import json
|
||||||
|
from time import time
|
||||||
|
from dataclasses import dataclass, asdict
|
||||||
|
from streaming_data_types import histogram_hs01
|
||||||
|
from confluent_kafka import Producer, Consumer
|
||||||
|
|
||||||
|
from uuid import uuid4
|
||||||
|
|
||||||
|
from .projection import TofZProjection, YZProjection
|
||||||
|
|
||||||
|
KAFKA_BROKER = 'linkafka01.psi.ch:9092'
|
||||||
|
KAFKA_TOPICS = {
|
||||||
|
'histogram': 'amor_histograms',
|
||||||
|
'response': 'amor_histResponse',
|
||||||
|
'command': 'amor_histCommands'
|
||||||
|
}
|
||||||
|
|
||||||
|
def ktime():
|
||||||
|
return int(time()*1_000)
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class DimMetadata:
|
||||||
|
length: int
|
||||||
|
unit: str
|
||||||
|
label: str
|
||||||
|
bin_boundaries: np.ndarray
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class HistogramMessage:
|
||||||
|
source: str
|
||||||
|
timestamp: int
|
||||||
|
current_shape: Tuple[int, int]
|
||||||
|
dim_metadata: Tuple[DimMetadata, DimMetadata]
|
||||||
|
last_metadata_timestamp: int
|
||||||
|
data: np.ndarray
|
||||||
|
errors: np.ndarray
|
||||||
|
info: str
|
||||||
|
|
||||||
|
def serialize(self):
|
||||||
|
return histogram_hs01.serialise_hs01(asdict(self))
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class CommandMessage:
|
||||||
|
msg_id: str
|
||||||
|
|
||||||
|
cmd=None
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def get_message(cls, data):
|
||||||
|
"""
|
||||||
|
Uses the sub-class cmd attribute to select which message to retugn
|
||||||
|
"""
|
||||||
|
msg = dict([(ci.cmd, ci) for ci in cls.__subclasses__()])
|
||||||
|
return msg[data['cmd']](**data)
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class Stop(CommandMessage):
|
||||||
|
hist_id: str
|
||||||
|
id: str
|
||||||
|
cmd:str = 'stop'
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class HistogramConfig:
|
||||||
|
id: str
|
||||||
|
type: str
|
||||||
|
data_brokers: List[str]
|
||||||
|
topic: str
|
||||||
|
data_topics: List[str]
|
||||||
|
tof_range: Tuple[float, float]
|
||||||
|
det_range: Tuple[int, int]
|
||||||
|
num_bins: int
|
||||||
|
width: int
|
||||||
|
height: int
|
||||||
|
left_edges: list
|
||||||
|
source: str
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class ConfigureHistogram(CommandMessage):
|
||||||
|
histograms: List[HistogramConfig]
|
||||||
|
start: int
|
||||||
|
cmd:str = 'config'
|
||||||
|
|
||||||
|
def __post_init__(self):
|
||||||
|
self.histograms = [HistogramConfig(**cfg) for cfg in self.histograms]
|
||||||
|
|
||||||
|
|
||||||
|
class ESSSerializer:
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
self.producer = Producer({
|
||||||
|
'bootstrap.servers': KAFKA_BROKER,
|
||||||
|
'message.max.bytes': 4_000_000,
|
||||||
|
})
|
||||||
|
self.consumer = Consumer({
|
||||||
|
'bootstrap.servers': KAFKA_BROKER,
|
||||||
|
"group.id": uuid4(),
|
||||||
|
"default.topic.config": {"auto.offset.reset": "latest"},
|
||||||
|
})
|
||||||
|
self._active_histogram_yz = None
|
||||||
|
self._active_histogram_tofz = None
|
||||||
|
self.new_count_started = Event()
|
||||||
|
self.count_stopped = Event()
|
||||||
|
|
||||||
|
self.consumer.subscribe([KAFKA_TOPICS['command']])
|
||||||
|
|
||||||
|
def process_message(self, message):
|
||||||
|
if message.error():
|
||||||
|
logging.error("Command Consumer Error: %s", message.error())
|
||||||
|
else:
|
||||||
|
command = json.loads(message.value().decode())
|
||||||
|
try:
|
||||||
|
command = CommandMessage.get_message(command)
|
||||||
|
except Exception:
|
||||||
|
logging.error(f'Could not interpret message: \n{command}', exc_info=True)
|
||||||
|
return
|
||||||
|
logging.info(command)
|
||||||
|
resp = json.dumps({
|
||||||
|
"msg_id": getattr(command, "id", None) or command.msg_id,
|
||||||
|
"response": "ACK",
|
||||||
|
"message": ""
|
||||||
|
})
|
||||||
|
self.producer.produce(
|
||||||
|
topic=KAFKA_TOPICS['response'],
|
||||||
|
value=resp
|
||||||
|
)
|
||||||
|
self.producer.flush()
|
||||||
|
if isinstance(command, Stop):
|
||||||
|
if command.hist_id in [self._active_histogram_yz, self._active_histogram_tofz]:
|
||||||
|
self.count_stopped.set()
|
||||||
|
else:
|
||||||
|
return
|
||||||
|
elif isinstance(command, ConfigureHistogram):
|
||||||
|
for hist in command.histograms:
|
||||||
|
if hist.topic == KAFKA_TOPICS['histogram']+'_YZ':
|
||||||
|
self._active_histogram_yz = hist.id
|
||||||
|
logging.debug(f" histogram data_topic: {hist.data_topics}")
|
||||||
|
self._start = command.start
|
||||||
|
self.count_stopped.clear()
|
||||||
|
self.new_count_started.set()
|
||||||
|
if hist.topic == KAFKA_TOPICS['histogram']+'_TofZ':
|
||||||
|
self._active_histogram_tofz = hist.id
|
||||||
|
|
||||||
|
def receive(self, timeout=5):
|
||||||
|
rec = self.consumer.poll(timeout)
|
||||||
|
if rec is not None:
|
||||||
|
self.process_message(rec)
|
||||||
|
return True
|
||||||
|
else:
|
||||||
|
return False
|
||||||
|
|
||||||
|
def receive_loop(self):
|
||||||
|
while not self._stop_receiving.is_set():
|
||||||
|
try:
|
||||||
|
self.receive()
|
||||||
|
except Exception:
|
||||||
|
logging.error("Exception while receiving", exc_info=True)
|
||||||
|
|
||||||
|
def start_command_thread(self):
|
||||||
|
self._stop_receiving = Event()
|
||||||
|
self._command_thread = Thread(target=self.receive_loop)
|
||||||
|
self._command_thread.start()
|
||||||
|
|
||||||
|
def end_command_thread(self, event=None):
|
||||||
|
self._stop_receiving.set()
|
||||||
|
self._command_thread.join()
|
||||||
|
|
||||||
|
def acked(self, err, msg):
|
||||||
|
# We need to have callback to produce-method to catch server errors
|
||||||
|
if err is not None:
|
||||||
|
logging.warning("Failed to deliver message: %s: %s" % (str(msg), str(err)))
|
||||||
|
else:
|
||||||
|
logging.debug("Message produced: %s" % (str(msg)))
|
||||||
|
|
||||||
|
def send(self, proj: Union[YZProjection, TofZProjection], final=False):
|
||||||
|
if final:
|
||||||
|
state = 'FINISHED'
|
||||||
|
else:
|
||||||
|
state = 'COUNTING'
|
||||||
|
if isinstance(proj, YZProjection):
|
||||||
|
if self._active_histogram_yz is None:
|
||||||
|
return
|
||||||
|
suffix = 'YZ'
|
||||||
|
message = HistogramMessage(
|
||||||
|
source='amor-eos',
|
||||||
|
timestamp=ktime(),
|
||||||
|
current_shape=(proj.y.shape[0]-1, proj.z.shape[0]-1),
|
||||||
|
dim_metadata=(
|
||||||
|
DimMetadata(
|
||||||
|
length=proj.y.shape[0]-1,
|
||||||
|
unit="pixel",
|
||||||
|
label="Y",
|
||||||
|
bin_boundaries=proj.y,
|
||||||
|
),
|
||||||
|
DimMetadata(
|
||||||
|
length=proj.z.shape[0]-1,
|
||||||
|
unit="pixel",
|
||||||
|
label="Z",
|
||||||
|
bin_boundaries=proj.z,
|
||||||
|
)
|
||||||
|
),
|
||||||
|
last_metadata_timestamp=0,
|
||||||
|
data=proj.data.cts,
|
||||||
|
errors=np.sqrt(proj.data.cts),
|
||||||
|
info=json.dumps({
|
||||||
|
"start": self._start,
|
||||||
|
"state": state,
|
||||||
|
"num events": proj.data.cts.sum()
|
||||||
|
})
|
||||||
|
)
|
||||||
|
logging.info(f" {state}: Sending {proj.data.cts.sum()} events to Nicos")
|
||||||
|
elif isinstance(proj, TofZProjection):
|
||||||
|
if self._active_histogram_tofz is None:
|
||||||
|
return
|
||||||
|
suffix = 'TofZ'
|
||||||
|
message = HistogramMessage(
|
||||||
|
source='amor-eos',
|
||||||
|
timestamp=ktime(),
|
||||||
|
current_shape=(proj.tof.shape[0]-1, proj.z.shape[0]-1),
|
||||||
|
dim_metadata=(
|
||||||
|
DimMetadata(
|
||||||
|
length=proj.tof.shape[0]-1,
|
||||||
|
unit="ms",
|
||||||
|
label="ToF",
|
||||||
|
bin_boundaries=proj.tof,
|
||||||
|
),
|
||||||
|
DimMetadata(
|
||||||
|
length=proj.z.shape[0]-1,
|
||||||
|
unit="pixel",
|
||||||
|
label="Z",
|
||||||
|
bin_boundaries=proj.z,
|
||||||
|
),
|
||||||
|
),
|
||||||
|
last_metadata_timestamp=0,
|
||||||
|
data=proj.data.cts,
|
||||||
|
errors=np.sqrt(proj.data.cts),
|
||||||
|
info=json.dumps({
|
||||||
|
"start": self._start,
|
||||||
|
"state": state,
|
||||||
|
"num events": proj.data.I.sum()
|
||||||
|
})
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
raise NotImplementedError(f"Histogram for {proj.__class__.__name__} not implemented")
|
||||||
|
|
||||||
|
self.producer.produce(value=message.serialize(),
|
||||||
|
topic=KAFKA_TOPICS['histogram']+'_'+suffix,
|
||||||
|
callback=self.acked)
|
||||||
|
self.producer.flush()
|
||||||
@@ -33,10 +33,10 @@ def setup_logging():
|
|||||||
logfile.setLevel(logging.DEBUG)
|
logfile.setLevel(logging.DEBUG)
|
||||||
logger.addHandler(logfile)
|
logger.addHandler(logfile)
|
||||||
|
|
||||||
def update_loglevel(verbose=False, debug=False):
|
def update_loglevel(verbose=0):
|
||||||
if verbose:
|
if verbose==1:
|
||||||
logging.getLogger().handlers[0].setLevel(logging.INFO)
|
logging.getLogger().handlers[0].setLevel(logging.INFO)
|
||||||
if debug:
|
if verbose>1:
|
||||||
console = logging.getLogger().handlers[0]
|
console = logging.getLogger().handlers[0]
|
||||||
console.setLevel(logging.DEBUG)
|
console.setLevel(logging.DEBUG)
|
||||||
formatter = logging.Formatter('%(levelname).1s %(message)s')
|
formatter = logging.Formatter('%(levelname).1s %(message)s')
|
||||||
54
eos/ls.py
Normal file
54
eos/ls.py
Normal file
@@ -0,0 +1,54 @@
|
|||||||
|
"""
|
||||||
|
eosls executable script to list available datafiles in current folder with some metadata information.
|
||||||
|
|
||||||
|
Author: Jochen Stahn (algorithms, python draft),
|
||||||
|
Artur Glavic (structuring and optimisation of code)
|
||||||
|
"""
|
||||||
|
import os
|
||||||
|
import logging
|
||||||
|
|
||||||
|
from eos.command_line import commandLineArgs
|
||||||
|
|
||||||
|
def main():
|
||||||
|
logging.getLogger().setLevel(logging.CRITICAL)
|
||||||
|
clas = commandLineArgs([], 'eosls', extra_args=[
|
||||||
|
dict(dest='path', nargs='*', default=['.'], help='paths to list file in')])
|
||||||
|
|
||||||
|
from glob import glob
|
||||||
|
import tabulate
|
||||||
|
from eos.file_reader import AmorHeader
|
||||||
|
|
||||||
|
files = []
|
||||||
|
for path in clas.path:
|
||||||
|
files+=glob(os.path.join(path, 'amor*.hdf'))
|
||||||
|
files.sort()
|
||||||
|
|
||||||
|
data = {
|
||||||
|
'File name': [],
|
||||||
|
'Start Time': [],
|
||||||
|
'mu': [],
|
||||||
|
'nu': [],
|
||||||
|
'div': [],
|
||||||
|
'Sample': [],
|
||||||
|
'T [K]': [],
|
||||||
|
'H [T]': [],
|
||||||
|
}
|
||||||
|
for fi in files:
|
||||||
|
data['File name'].append(os.path.basename(fi))
|
||||||
|
ah = AmorHeader(fi)
|
||||||
|
data['Sample'].append(ah.sample.name)
|
||||||
|
data['Start Time'].append(ah.fileDate.strftime('%y %m-%d %H:%M:%S'))
|
||||||
|
data['mu'].append('%.3f' % ah.geometry.mu)
|
||||||
|
data['nu'].append('%.3f' % ah.geometry.nu)
|
||||||
|
data['div'].append('%.3f' % ah.geometry.div)
|
||||||
|
|
||||||
|
T = ah.sample.sample_parameters.get('temperature', None)
|
||||||
|
data['T [K]'].append(T.magnitude if T is not None else '-')
|
||||||
|
|
||||||
|
H = ah.sample.sample_parameters.get('magnetic_field', None)
|
||||||
|
data['H [T]'].append(H.magnitude if H is not None else '-')
|
||||||
|
|
||||||
|
print(tabulate.tabulate(data, headers="keys"))
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
main()
|
||||||
46
eos/nicos.py
Normal file
46
eos/nicos.py
Normal file
@@ -0,0 +1,46 @@
|
|||||||
|
"""
|
||||||
|
amor-nicos vizualising data from Amor@SINQ, PSI
|
||||||
|
|
||||||
|
Author: Jochen Stahn (algorithms, python draft),
|
||||||
|
Artur Glavic (structuring and optimisation of code)
|
||||||
|
"""
|
||||||
|
import logging
|
||||||
|
|
||||||
|
# need to do absolute import here as pyinstaller requires it
|
||||||
|
from eos.options import E2HConfig, ReaderConfig, ExperimentConfig, E2HReductionConfig
|
||||||
|
from eos.command_line import commandLineArgs
|
||||||
|
from eos.logconfig import setup_logging, update_loglevel
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
setup_logging()
|
||||||
|
logging.getLogger('matplotlib').setLevel(logging.WARNING)
|
||||||
|
|
||||||
|
# read command line arguments and generate classes holding configuration parameters
|
||||||
|
clas = commandLineArgs([ReaderConfig, ExperimentConfig, E2HReductionConfig],
|
||||||
|
'amor-nicos')
|
||||||
|
update_loglevel(clas.verbose)
|
||||||
|
if clas.verbose<2:
|
||||||
|
# only log info level in logfile
|
||||||
|
logger = logging.getLogger() # logging.getLogger('quicknxs')
|
||||||
|
logger.setLevel(logging.INFO)
|
||||||
|
|
||||||
|
reader_config = ReaderConfig.from_args(clas)
|
||||||
|
experiment_config = ExperimentConfig.from_args(clas)
|
||||||
|
reduction_config = E2HReductionConfig.from_args(clas)
|
||||||
|
config = E2HConfig(reader_config, experiment_config, reduction_config)
|
||||||
|
|
||||||
|
logging.warning('######## amor-nicos - Nicos histogram for Amor ########')
|
||||||
|
from eos.reduction_kafka import KafkaReduction
|
||||||
|
|
||||||
|
# only import heavy module if sufficient command line parameters were provided
|
||||||
|
from eos.reduction_reflectivity import ReflectivityReduction
|
||||||
|
# Create reducer with these arguments
|
||||||
|
reducer = KafkaReduction(config)
|
||||||
|
# Perform actual reduction
|
||||||
|
reducer.reduce()
|
||||||
|
|
||||||
|
logging.info('######## amor-nicos - finished ########')
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
main()
|
||||||
60
eos/nicos_interface.py
Normal file
60
eos/nicos_interface.py
Normal file
@@ -0,0 +1,60 @@
|
|||||||
|
"""
|
||||||
|
Functions used to directly access information from nicos.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import socket
|
||||||
|
import platform
|
||||||
|
import logging
|
||||||
|
import subprocess
|
||||||
|
|
||||||
|
ON_AMOR = platform.node().startswith('amor')
|
||||||
|
NICOS_CACHE_DIR = '/home/data/nicosdata/cache/'
|
||||||
|
GREP = '/usr/bin/grep "value"'
|
||||||
|
|
||||||
|
|
||||||
|
def lookup_nicos_value(key, nicos_key, dtype=float, suffix='', year="2026"):
|
||||||
|
# TODO: Implement direct communication to nicos to read the cache
|
||||||
|
if nicos_key=='ignore':
|
||||||
|
return dtype(0)
|
||||||
|
try:
|
||||||
|
logging.debug(f' trying socket request for device {nicos_key}')
|
||||||
|
response = nicos_single_request(nicos_key)
|
||||||
|
logging.info(f" using parameter {nicos_key} from nicos cache via socket")
|
||||||
|
return dtype(response)
|
||||||
|
except Exception as e:
|
||||||
|
logging.debug(f' socket request failed with {e!r}')
|
||||||
|
if ON_AMOR:
|
||||||
|
logging.debug(f" trying to extract {nicos_key} from nicos cache files")
|
||||||
|
call = f'{GREP} {NICOS_CACHE_DIR}nicos-{nicos_key}/{year}{suffix}'
|
||||||
|
try:
|
||||||
|
value = str(subprocess.getoutput(call)).split('\t')[-1]
|
||||||
|
logging.info(f" using parameter {nicos_key} from nicos cache file")
|
||||||
|
return dtype(value)
|
||||||
|
except Exception:
|
||||||
|
logging.error(f" couldn't get value from nicos cache {nicos_key}, {call}")
|
||||||
|
return dtype(0)
|
||||||
|
else:
|
||||||
|
logging.warning(f" parameter {key} not found, relpace by zero")
|
||||||
|
return dtype(0)
|
||||||
|
|
||||||
|
def nicos_single_request(device):
|
||||||
|
sentinel = b'\n'
|
||||||
|
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
|
||||||
|
s.settimeout(1.0)
|
||||||
|
s.connect(('amor', 14869))
|
||||||
|
|
||||||
|
tosend = f'@nicos/{device}/value?\n'
|
||||||
|
|
||||||
|
# write request
|
||||||
|
# self.log.debug("get_explicit: sending %r", tosend)
|
||||||
|
s.sendall(tosend.encode())
|
||||||
|
|
||||||
|
# read response
|
||||||
|
data = b''
|
||||||
|
while not data.endswith(sentinel):
|
||||||
|
newdata = s.recv(8192) # blocking read
|
||||||
|
if not newdata:
|
||||||
|
raise IOError('cache closed connection')
|
||||||
|
data += newdata
|
||||||
|
s.shutdown(socket.SHUT_RDWR)
|
||||||
|
return data.decode('utf-8').split('=')[-1]
|
||||||
132
eos/normalization.py
Normal file
132
eos/normalization.py
Normal file
@@ -0,0 +1,132 @@
|
|||||||
|
"""
|
||||||
|
Defines how to normalize a focusing reflectometry dataset by a reference measurement.
|
||||||
|
"""
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import numpy as np
|
||||||
|
from typing import List, Optional
|
||||||
|
|
||||||
|
from orsopy import fileio
|
||||||
|
|
||||||
|
from .event_data_types import EventDatasetProtocol
|
||||||
|
from .header import Header
|
||||||
|
from .options import NormalisationMethod
|
||||||
|
from .instrument import Detector, LZGrid
|
||||||
|
|
||||||
|
|
||||||
|
class LZNormalisation:
|
||||||
|
file_list = List[str]
|
||||||
|
angle: float
|
||||||
|
monitor: float
|
||||||
|
norm: np.ndarray
|
||||||
|
|
||||||
|
def __init__(self, reference:EventDatasetProtocol, normalisationMethod: NormalisationMethod, grid: LZGrid):
|
||||||
|
self.angle = reference.geometry.nu-reference.geometry.mu
|
||||||
|
lamda_e = reference.data.events.lamda
|
||||||
|
detZ_e = reference.data.events.detZ
|
||||||
|
self.monitor = np.sum(reference.data.pulses.monitor)
|
||||||
|
norm_lz, _, _ = np.histogram2d(lamda_e, detZ_e, bins=(grid.lamda(), grid.z()))
|
||||||
|
if normalisationMethod==NormalisationMethod.direct_beam:
|
||||||
|
self.norm = np.flip(norm_lz, 1)
|
||||||
|
else:
|
||||||
|
# correct for reference sm reflectivity
|
||||||
|
lamda_l = grid.lamda()
|
||||||
|
theta_z = self.angle+Detector.delta_z
|
||||||
|
lamda_lz = (grid.lz().T*lamda_l[:-1]).T
|
||||||
|
theta_lz = grid.lz()*theta_z
|
||||||
|
qz_lz = 4.0*np.pi*np.sin(np.deg2rad(theta_lz))/lamda_lz
|
||||||
|
# TODO: introduce variable for `m` and propably for the slope
|
||||||
|
# Correct reflectivity of m=5 supermirror
|
||||||
|
Rsm_lz = np.ones(np.shape(qz_lz))
|
||||||
|
Rsm_lz = np.where(qz_lz>0.0217, 1-(qz_lz-0.0217)*(0.0625/0.0217), Rsm_lz)
|
||||||
|
Rsm_lz = np.where(qz_lz>0.0217*5, np.nan, Rsm_lz)
|
||||||
|
self.norm = norm_lz/Rsm_lz
|
||||||
|
self.file_list = [os.path.basename(entry) for entry in reference.file_list]
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def from_file(cls, filename, check_hash=None) -> Optional['LZNormalisation']:
|
||||||
|
self = super().__new__(cls)
|
||||||
|
with open(filename, 'rb') as fh:
|
||||||
|
hash = str(np.load(fh, allow_pickle=True))
|
||||||
|
self.file_list = np.load(fh, allow_pickle=True).tolist()
|
||||||
|
self.angle = np.load(fh, allow_pickle=True)
|
||||||
|
self.norm = np.load(fh, allow_pickle=True)
|
||||||
|
self.monitor = np.load(fh, allow_pickle=True)
|
||||||
|
if check_hash is not None and hash != check_hash:
|
||||||
|
logging.info(' file hash does not match this reduction configuration')
|
||||||
|
raise ValueError('file hash does not match this reduction configuration')
|
||||||
|
return self
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def unity(cls, grid:LZGrid) -> 'LZNormalisation':
|
||||||
|
logging.warning(f'normalisation is unity')
|
||||||
|
self = super().__new__(cls)
|
||||||
|
self.norm = grid.lz()
|
||||||
|
self.file_list = []
|
||||||
|
self.angle = 1.
|
||||||
|
self.monitor = 1.
|
||||||
|
return self
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def model(cls, grid:LZGrid) -> 'LZNormalisation':
|
||||||
|
# generate a normalization based on angular and wavelength distribution model
|
||||||
|
# TODO: add options for sample size for better absolute normalization
|
||||||
|
logging.warning(f'normalisation is model')
|
||||||
|
self = super().__new__(cls)
|
||||||
|
self.angle = 1.0
|
||||||
|
self.monitor = 4e6
|
||||||
|
|
||||||
|
lamda_l = grid.lamda()
|
||||||
|
lamda_c = (lamda_l[:-1]+lamda_l[1:])/2
|
||||||
|
|
||||||
|
delta = np.rad2deg(np.arctan2(grid.z(), Detector.distance))/2.0
|
||||||
|
delta_c = (delta[:-1]+delta[1:])/2-delta.mean()
|
||||||
|
|
||||||
|
# approximate spectrum by Maxwell-Boltzmann and intensity by linear footprint
|
||||||
|
a = 3.8
|
||||||
|
Ilambda = np.sqrt(2./np.pi)*lamda_c**2/a**3*np.exp(-lamda_c**2/(2.*a**2))
|
||||||
|
Idelta = np.where(abs(delta_c)<0.75, (self.angle-delta_c), np.nan)
|
||||||
|
|
||||||
|
self.norm = 1e6*Ilambda[:, np.newaxis]*Idelta[np.newaxis, :]
|
||||||
|
|
||||||
|
return self
|
||||||
|
|
||||||
|
def safe(self, filename, hash):
|
||||||
|
with open(filename, 'wb') as fh:
|
||||||
|
np.save(fh, hash, allow_pickle=False)
|
||||||
|
np.save(fh, np.array(self.file_list), allow_pickle=False)
|
||||||
|
np.save(fh, np.array(self.angle), allow_pickle=False)
|
||||||
|
np.save(fh, self.norm, allow_pickle=False)
|
||||||
|
np.save(fh, self.monitor, allow_pickle=False)
|
||||||
|
|
||||||
|
def update_header(self, header:Header):
|
||||||
|
header.measurement_additional_files = [fileio.File(file=os.path.basename(entry)) for entry in self.file_list]
|
||||||
|
|
||||||
|
def smooth(self, width):
|
||||||
|
logging.debug(f'apply convolution with gaussian along lambda axis to smooth norm, sigma={width}')
|
||||||
|
try:
|
||||||
|
from scipy.signal import fftconvolve
|
||||||
|
except ImportError:
|
||||||
|
self._smooth_slow(width)
|
||||||
|
kx = np.arange(self.norm.shape[0])-self.norm.shape[0]/2.
|
||||||
|
kernel = np.exp(-0.5*kx**2/width**2)
|
||||||
|
kernel/=kernel.sum()
|
||||||
|
kernel = kernel[:, np.newaxis]*np.ones(self.norm.shape[1])[np.newaxis, :]
|
||||||
|
unorm = np.where(np.isnan(self.norm), 0., self.norm)
|
||||||
|
nnorm = fftconvolve(unorm, kernel, mode='same', axes=0)
|
||||||
|
nnorm[np.isnan(self.norm)] = np.nan
|
||||||
|
self.norm = nnorm
|
||||||
|
|
||||||
|
def _smooth_slow(self, width):
|
||||||
|
# like smooth but using numpy buildin slow convolve
|
||||||
|
nnorm = np.zeros_like(self.norm)
|
||||||
|
|
||||||
|
kx = np.arange(self.norm.shape[0])-self.norm.shape[0]/2.
|
||||||
|
kernel = np.exp(-0.5*kx**2/width**2)
|
||||||
|
kernel/=kernel.sum()
|
||||||
|
unorm = np.where(np.isnan(self.norm), 0., self.norm)
|
||||||
|
|
||||||
|
for row in range(self.norm.shape[1]):
|
||||||
|
nnorm[:, row] = np.convolve(unorm[:, row], kernel, mode='same')
|
||||||
|
nnorm[np.isnan(self.norm)] = np.nan
|
||||||
|
self.norm = nnorm
|
||||||
752
eos/options.py
Normal file
752
eos/options.py
Normal file
@@ -0,0 +1,752 @@
|
|||||||
|
"""
|
||||||
|
Classes for stroing various configurations needed for reduction.
|
||||||
|
"""
|
||||||
|
import argparse
|
||||||
|
from dataclasses import dataclass, field, Field, fields, MISSING
|
||||||
|
from typing import get_args, get_origin, List, Optional, Tuple, Union
|
||||||
|
from datetime import datetime
|
||||||
|
from os import path
|
||||||
|
import numpy as np
|
||||||
|
|
||||||
|
import logging
|
||||||
|
|
||||||
|
|
||||||
|
try:
|
||||||
|
from enum import StrEnum
|
||||||
|
except ImportError:
|
||||||
|
try:
|
||||||
|
# python <3.11 try to use backports
|
||||||
|
from backports.strenum import StrEnum
|
||||||
|
except ImportError:
|
||||||
|
# python <3.10 use Enum instead
|
||||||
|
from enum import Enum as StrEnum
|
||||||
|
|
||||||
|
class InCallString(StrEnum):
|
||||||
|
auto='auto'
|
||||||
|
always='always'
|
||||||
|
never='never'
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class CommandlineParameterConfig:
|
||||||
|
argument: str # default parameter for command line resutign ins "--argument"
|
||||||
|
add_argument_args: dict # all arguments that will be passed to add_argument method
|
||||||
|
short_form: Optional[str] = None
|
||||||
|
group: str = 'misc'
|
||||||
|
priority: int = 0
|
||||||
|
in_call_string: InCallString = InCallString.auto
|
||||||
|
|
||||||
|
def __gt__(self, other):
|
||||||
|
"""
|
||||||
|
Sort required arguments first, then use priority, then name
|
||||||
|
"""
|
||||||
|
return (not self.add_argument_args.get('required', False), -self.priority, self.argument)>(
|
||||||
|
not other.add_argument_args.get('required', False), -other.priority, other.argument)
|
||||||
|
|
||||||
|
class ArgParsable:
|
||||||
|
def __init_subclass__(cls):
|
||||||
|
# create a nice documentation string that takes help into account
|
||||||
|
cls.__doc__ = cls.__name__ + " Parameters:\n"
|
||||||
|
for key, typ in cls.__annotations__.items():
|
||||||
|
if get_origin(typ) is Union and type(None) in get_args(typ):
|
||||||
|
optional = True
|
||||||
|
typ = get_args(typ)[0]
|
||||||
|
else:
|
||||||
|
optional = False
|
||||||
|
|
||||||
|
value = getattr(cls, key, None)
|
||||||
|
try:
|
||||||
|
cls.__doc__ += f" {key} ({typ.__name__})"
|
||||||
|
except AttributeError:
|
||||||
|
cls.__doc__ += f" {key}"
|
||||||
|
if isinstance(value, Field):
|
||||||
|
if value.default is not MISSING:
|
||||||
|
cls.__doc__ += f" = {value.default}"
|
||||||
|
if 'help' in value.metadata:
|
||||||
|
cls.__doc__ += f" - {value.metadata['help']}"
|
||||||
|
elif value is not None:
|
||||||
|
cls.__doc__ += f" = {value}"
|
||||||
|
if optional:
|
||||||
|
cls.__doc__ += " [Optional]"
|
||||||
|
cls.__doc__ += "\n"
|
||||||
|
return cls
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def get_commandline_parameters(cls) -> List[CommandlineParameterConfig]:
|
||||||
|
"""
|
||||||
|
Return a list of arguments used in building the command line parameters.
|
||||||
|
|
||||||
|
Union types besides Optional are not supported.
|
||||||
|
"""
|
||||||
|
output = []
|
||||||
|
for field in fields(cls):
|
||||||
|
args={}
|
||||||
|
if field.default is not MISSING:
|
||||||
|
args['default'] = field.default
|
||||||
|
args['required'] = False
|
||||||
|
elif field.default_factory is not MISSING:
|
||||||
|
args['default'] = field.default_factory()
|
||||||
|
args['required'] = False
|
||||||
|
else:
|
||||||
|
args['required'] = True
|
||||||
|
if get_origin(field.type) is Union and type(None) in get_args(field.type):
|
||||||
|
# optional argument
|
||||||
|
typ = get_args(field.type)[0]
|
||||||
|
del(args['default'])
|
||||||
|
else:
|
||||||
|
typ = field.type
|
||||||
|
if get_origin(typ) is list:
|
||||||
|
args['nargs'] = '+'
|
||||||
|
args['action'] = 'extend'
|
||||||
|
typ = get_args(typ)[0]
|
||||||
|
if get_origin(typ) is tuple:
|
||||||
|
# tuple of items are put together during evaluation
|
||||||
|
typ = get_args(typ)[0]
|
||||||
|
elif get_origin(typ) is tuple:
|
||||||
|
args['nargs'] = len(get_args(typ))
|
||||||
|
typ = get_args(typ)[0]
|
||||||
|
if issubclass(typ, StrEnum):
|
||||||
|
args['choices'] = [ci.value for ci in typ]
|
||||||
|
if field.default is not MISSING:
|
||||||
|
args['default'] = field.default.value
|
||||||
|
typ = str
|
||||||
|
|
||||||
|
if typ is bool:
|
||||||
|
args['action'] = 'store_false' if field.default else 'store_true'
|
||||||
|
else:
|
||||||
|
args['type'] = typ
|
||||||
|
|
||||||
|
if 'help' in field.metadata:
|
||||||
|
args['help'] = field.metadata['help']
|
||||||
|
|
||||||
|
output.append(CommandlineParameterConfig(
|
||||||
|
field.name,
|
||||||
|
add_argument_args=args,
|
||||||
|
group=field.metadata.get('group', 'misc'),
|
||||||
|
short_form=field.metadata.get('short', None),
|
||||||
|
priority=field.metadata.get('priority', 0),
|
||||||
|
in_call_string=field.metadata.get('in_call_string', InCallString.auto),
|
||||||
|
))
|
||||||
|
return output
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def get_default(cls, key):
|
||||||
|
"""
|
||||||
|
Return the default argument for an attribute, None if it doesn't exist.
|
||||||
|
"""
|
||||||
|
for field in fields(cls):
|
||||||
|
if field.name != key:
|
||||||
|
continue
|
||||||
|
if field.default is not MISSING:
|
||||||
|
return field.default
|
||||||
|
elif field.default_factory is not MISSING:
|
||||||
|
return field.default_factory()
|
||||||
|
return None
|
||||||
|
|
||||||
|
def is_default(self, key):
|
||||||
|
value = getattr(self, key)
|
||||||
|
return value == self.get_default(key)
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def from_args(cls, args: argparse.Namespace):
|
||||||
|
"""
|
||||||
|
Create the child class from the command line argument Namespace object.
|
||||||
|
All attributes that are not needed for this class are ignored.
|
||||||
|
"""
|
||||||
|
inpargs = {}
|
||||||
|
for field in fields(cls):
|
||||||
|
value = getattr(args, field.name)
|
||||||
|
typ = field.type
|
||||||
|
if get_origin(field.type) is Union and type(None) in get_args(field.type):
|
||||||
|
# optional argument
|
||||||
|
typ = get_args(field.type)[0]
|
||||||
|
if get_origin(typ) is list:
|
||||||
|
item_typ = get_args(typ)[0]
|
||||||
|
if get_origin(item_typ) is tuple:
|
||||||
|
# tuple of items are put together during evaluation
|
||||||
|
tuple_length = len(get_args(item_typ))
|
||||||
|
value = [tuple(value[i*tuple_length+j] for j in range(tuple_length)) for i in range(len(value)//tuple_length)]
|
||||||
|
if isinstance(typ, type) and issubclass(typ, StrEnum):
|
||||||
|
# convert str to enum
|
||||||
|
try:
|
||||||
|
value = typ(value)
|
||||||
|
except ValueError:
|
||||||
|
choices = [ci.value for ci in typ]
|
||||||
|
raise ValueError(f"Parameter --{field.name} has to be one of {choices}")
|
||||||
|
|
||||||
|
inpargs[field.name] = value
|
||||||
|
return cls(**inpargs)
|
||||||
|
|
||||||
|
def get_call_parameters(self, abbrv=True):
|
||||||
|
"""
|
||||||
|
Return a list of command line arguments that reproduce this config, do not add default parameters.
|
||||||
|
"""
|
||||||
|
output = []
|
||||||
|
for arg in sorted(self.get_commandline_parameters()):
|
||||||
|
if ((arg.in_call_string==InCallString.auto and self.is_default(arg.argument)) or
|
||||||
|
arg.in_call_string==InCallString.never):
|
||||||
|
# skip default arguments or arguments defined to never appear in call string
|
||||||
|
continue
|
||||||
|
if arg.short_form and abbrv:
|
||||||
|
item = '-' + arg.short_form + ' '
|
||||||
|
else:
|
||||||
|
item = '--' + arg.argument + ' '
|
||||||
|
if arg.add_argument_args.get('type', None) in [str, float, int]:
|
||||||
|
nargs = arg.add_argument_args.get('nargs', None)
|
||||||
|
if nargs is None:
|
||||||
|
item += str(getattr(self, arg.argument))
|
||||||
|
elif nargs=='+':
|
||||||
|
# remove the default parameters, only show added ones
|
||||||
|
ignore = len(arg.add_argument_args.get('default', []))
|
||||||
|
item += ' '.join([str(pi) for pi in getattr(self, arg.argument)[ignore:]])
|
||||||
|
else:
|
||||||
|
item += ' '.join([str(pi) for pi in getattr(self, arg.argument)])
|
||||||
|
# boolean flags only reach this point if they are non-default
|
||||||
|
output.append((arg, item))
|
||||||
|
return output
|
||||||
|
|
||||||
|
# definition of command line arguments
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class ReaderConfig(ArgParsable):
|
||||||
|
year: int = field(
|
||||||
|
default=datetime.now().year,
|
||||||
|
metadata={
|
||||||
|
'short': 'Y',
|
||||||
|
'group': 'input data',
|
||||||
|
'help': 'year the measurement was performed',
|
||||||
|
'in_call_string': InCallString.always,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
rawPath: List[str] = field(
|
||||||
|
default_factory=lambda: ['.', path.join('.','raw'), path.join('..','raw'), path.join('..','..','raw')],
|
||||||
|
metadata={
|
||||||
|
'short': 'rp',
|
||||||
|
'group': 'input data',
|
||||||
|
'help': 'search paths for hdf files',
|
||||||
|
},
|
||||||
|
)
|
||||||
|
startTime: Optional[float] = field(
|
||||||
|
default = None,
|
||||||
|
metadata={
|
||||||
|
'short': 'st',
|
||||||
|
'group': 'data manicure',
|
||||||
|
'help': 'set time zero other than the start of the data aquisition',
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
class IncidentAngle(StrEnum):
|
||||||
|
alphaF = 'alphaF'
|
||||||
|
mu = 'mu'
|
||||||
|
nu = 'nu'
|
||||||
|
|
||||||
|
class MonitorType(StrEnum):
|
||||||
|
auto = 'a'
|
||||||
|
proton_charge = 'p'
|
||||||
|
time = 't'
|
||||||
|
neutron_monitor = 'n'
|
||||||
|
debug = 'x'
|
||||||
|
|
||||||
|
MONITOR_UNITS = {
|
||||||
|
MonitorType.neutron_monitor: 'cnts',
|
||||||
|
MonitorType.proton_charge: 'mC',
|
||||||
|
MonitorType.time: 's',
|
||||||
|
MonitorType.auto: 'various',
|
||||||
|
MonitorType.debug: 'mC',
|
||||||
|
}
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class ExperimentConfig(ArgParsable):
|
||||||
|
chopperPhase: float = field(
|
||||||
|
default=0,
|
||||||
|
metadata={
|
||||||
|
'short': 'cp',
|
||||||
|
'group': 'instrument settings',
|
||||||
|
'help': 'phase between opening of chopper 1 and closing of chopper 2 window',
|
||||||
|
},
|
||||||
|
)
|
||||||
|
chopperPhaseOffset: float = field(
|
||||||
|
default=-5,
|
||||||
|
metadata={
|
||||||
|
'short': 'co',
|
||||||
|
'group': 'instrument settings',
|
||||||
|
'help': 'phase between chopper 1 index pulse and closing edge',
|
||||||
|
},
|
||||||
|
)
|
||||||
|
chopperSpeed: float = field(
|
||||||
|
default=500,
|
||||||
|
metadata={
|
||||||
|
'short': 'cs',
|
||||||
|
'group': 'instrument settings',
|
||||||
|
'help': 'rotation speed of the chopper disks in rpm',
|
||||||
|
},
|
||||||
|
)
|
||||||
|
yRange: Tuple[int, int] = field(
|
||||||
|
default=(18, 48),
|
||||||
|
metadata={
|
||||||
|
'short': 'y',
|
||||||
|
'group': 'region of interest',
|
||||||
|
'help': 'horizontal pixel range on the detector to be used',
|
||||||
|
},
|
||||||
|
)
|
||||||
|
lambdaRange: Tuple[float, float] = field(
|
||||||
|
default_factory=lambda: [3, 12.5],
|
||||||
|
metadata={
|
||||||
|
'short': 'l',
|
||||||
|
'group': 'region of interest',
|
||||||
|
'help': 'wavelength range to be used (in angstrom)',
|
||||||
|
},
|
||||||
|
)
|
||||||
|
lowCurrentThreshold: float = field(
|
||||||
|
default=50,
|
||||||
|
metadata={
|
||||||
|
'short': 'pt',
|
||||||
|
'group': 'instrument settings',
|
||||||
|
'help': 'proton current below which the events are ignored (per chopper pulse)',
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
incidentAngle: IncidentAngle = field(
|
||||||
|
default=IncidentAngle.alphaF,
|
||||||
|
metadata={
|
||||||
|
'short': 'ai',
|
||||||
|
'group': 'instrument settings',
|
||||||
|
'help': 'calculate alphaI = [alphaF], [mu]+kappa+delta_kappa or ([nu]+kappa+delta_kappa)/2',
|
||||||
|
},
|
||||||
|
)
|
||||||
|
alphaF = 'alphaF'
|
||||||
|
sampleModel: Optional[str] = field(
|
||||||
|
default=None,
|
||||||
|
metadata={
|
||||||
|
'short': 'sm',
|
||||||
|
'group': 'sample',
|
||||||
|
'help': 'orso type string to describe the sample in one line',
|
||||||
|
},
|
||||||
|
)
|
||||||
|
mu: Optional[float] = field(
|
||||||
|
default=None,
|
||||||
|
metadata={
|
||||||
|
'short': 'mu',
|
||||||
|
'group': 'sample',
|
||||||
|
'help': 'inclination of the sample surface w.r.t. the instrument horizon',
|
||||||
|
},
|
||||||
|
)
|
||||||
|
nu: Optional[float] = field(
|
||||||
|
default=None,
|
||||||
|
metadata={
|
||||||
|
'short': 'nu',
|
||||||
|
'group': 'sample',
|
||||||
|
'help': 'inclination of the detector w.r.t. the instrument horizon',
|
||||||
|
},
|
||||||
|
)
|
||||||
|
muOffset: Optional[float] = field(
|
||||||
|
default=None,
|
||||||
|
metadata={
|
||||||
|
'short': 'm',
|
||||||
|
'group': 'sample',
|
||||||
|
'help': 'correction offset for mu misalignment (mu_real = mu_file + mu_offset)',
|
||||||
|
},
|
||||||
|
)
|
||||||
|
monitorType: MonitorType = field(
|
||||||
|
default=MonitorType.proton_charge,
|
||||||
|
metadata={
|
||||||
|
'short': 'mt',
|
||||||
|
'group': 'instrument settings',
|
||||||
|
'help': 'one of [a]uto, [p]rotonCurrent, [t]ime or [n]eutronMonitor',
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
class NormalisationMethod(StrEnum):
|
||||||
|
direct_beam = 'd'
|
||||||
|
over_illuminated = 'o'
|
||||||
|
under_illuminated = 'u'
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class ReflectivityReductionConfig(ArgParsable):
|
||||||
|
fileIdentifier: List[str] = field(
|
||||||
|
metadata={
|
||||||
|
'short': 'f',
|
||||||
|
'priority': 100,
|
||||||
|
'group': 'input data',
|
||||||
|
'help': 'file number(s) or offset (if < 1)',
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
qResolution: float = field(
|
||||||
|
default=0.01,
|
||||||
|
metadata={
|
||||||
|
'short': 'r',
|
||||||
|
'group': 'data manicure',
|
||||||
|
'help': 'output resolution of q-scale Delta q / q',
|
||||||
|
},
|
||||||
|
)
|
||||||
|
qzRange: Tuple[float, float] = field(
|
||||||
|
default_factory=lambda: [0.005, 0.51],
|
||||||
|
metadata={
|
||||||
|
'short': 'q',
|
||||||
|
'group': 'region of interest',
|
||||||
|
'help': '?',
|
||||||
|
},
|
||||||
|
)
|
||||||
|
thetaRange: Tuple[float, float] = field(
|
||||||
|
default_factory=lambda: [-12., 12.],
|
||||||
|
metadata={
|
||||||
|
'short': 't',
|
||||||
|
'group': 'region of interest',
|
||||||
|
'help': 'absolute theta region of interest',
|
||||||
|
},
|
||||||
|
)
|
||||||
|
thetaRangeR: Tuple[float, float] = field(
|
||||||
|
default_factory=lambda: [-0.75, 0.75],
|
||||||
|
metadata={
|
||||||
|
'short': 'T',
|
||||||
|
'group': 'region of interest',
|
||||||
|
'help': 'theta region of interest w.r.t. beam center',
|
||||||
|
},
|
||||||
|
)
|
||||||
|
thetaFilters: List[Tuple[float, float]] = field(
|
||||||
|
default_factory=lambda: [],
|
||||||
|
metadata={
|
||||||
|
'short': 'TF',
|
||||||
|
'group': 'region of interest',
|
||||||
|
'help': 'add one or more theta ranges that will be filtered in reduction',
|
||||||
|
},
|
||||||
|
)
|
||||||
|
normalisationMethod: NormalisationMethod = field(
|
||||||
|
default=NormalisationMethod.over_illuminated,
|
||||||
|
metadata={
|
||||||
|
'short': 'nm',
|
||||||
|
'priority': 90,
|
||||||
|
'group': 'input data',
|
||||||
|
'help': 'normalisation method: [o]verillumination, [u]nderillumination, [d]irect_beam'})
|
||||||
|
normalizationFilter: float = field(
|
||||||
|
default=-1,
|
||||||
|
metadata={
|
||||||
|
'group': 'input data',
|
||||||
|
'help': 'minimum normalization counts in lambda-theta bin to use, else filter'})
|
||||||
|
normAngleFilter: float = field(
|
||||||
|
default=-1,
|
||||||
|
metadata={
|
||||||
|
'group': 'input data',
|
||||||
|
'help': 'minimum normalization counts total thetat bin to use, else filter'})
|
||||||
|
normalizationSmoothing: float = field(
|
||||||
|
default=0,
|
||||||
|
metadata={
|
||||||
|
'group': 'input data',
|
||||||
|
'help': 'apply convolution on lambda axes to smooth the normalization data, useful for low statistics'})
|
||||||
|
scale: List[float] = field(
|
||||||
|
default_factory=lambda: [1.],
|
||||||
|
metadata={
|
||||||
|
'short': 's',
|
||||||
|
'group': 'data manicure',
|
||||||
|
'help': '(list of) scaling factors, if less elements than files use the last one',
|
||||||
|
},
|
||||||
|
)
|
||||||
|
autoscale: Tuple[float, float] = field(
|
||||||
|
default=None,
|
||||||
|
metadata={
|
||||||
|
'short': 'S',
|
||||||
|
'group': 'data manicure',
|
||||||
|
'help': '',
|
||||||
|
},
|
||||||
|
)
|
||||||
|
subtract: Optional[str] = field(
|
||||||
|
default=None,
|
||||||
|
metadata={
|
||||||
|
'short': 'sub',
|
||||||
|
'group': 'input data',
|
||||||
|
'help': 'File with R(q_z) curve to be subtracted (in .Rqz.ort format)'})
|
||||||
|
normalisationFileIdentifier: Optional[List[str]] = field(
|
||||||
|
default=None,
|
||||||
|
metadata={
|
||||||
|
'short': 'n',
|
||||||
|
'priority': 90,
|
||||||
|
'group': 'input data',
|
||||||
|
'help': 'file number(s) of normalisation measurement'})
|
||||||
|
timeSlize: Optional[List[float]] = field(
|
||||||
|
default= None,
|
||||||
|
metadata={
|
||||||
|
'short': 'ts',
|
||||||
|
'group': 'region of interest',
|
||||||
|
'help': 'time slizing <interval> ,[<start> [,stop]]',
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
logfilter: List[str] = field(
|
||||||
|
default_factory=lambda: [],
|
||||||
|
metadata={
|
||||||
|
'short': 'lf',
|
||||||
|
'group': 'region of interest',
|
||||||
|
'help': 'filter using comparison to a log values, multpiple allowd (example "sample_temperature<150")',
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class OutputFomatOption(StrEnum):
|
||||||
|
Rqz_ort = "Rqz.ort"
|
||||||
|
Rqz_orb = "Rqz.orb"
|
||||||
|
Rlt_ort = "Rlt.ort"
|
||||||
|
Rlt_orb = "Rlt.orb"
|
||||||
|
ort = "ort"
|
||||||
|
orb = "orb"
|
||||||
|
Rqz = "Rqz"
|
||||||
|
Rlt = "Rlt"
|
||||||
|
|
||||||
|
|
||||||
|
class PlotColormaps(StrEnum):
|
||||||
|
gist_ncar = "gist_ncar"
|
||||||
|
viridis = "viridis"
|
||||||
|
inferno = "inferno"
|
||||||
|
gist_rainbow = "gist_rainbow"
|
||||||
|
nipy_spectral = "nipy_spectral"
|
||||||
|
jochen_deluxe = "jochen_deluxe"
|
||||||
|
|
||||||
|
@dataclass
class ReflectivityOutputConfig(ArgParsable):
    """Command line options controlling file output and plotting of reduced reflectivity."""

    outputFormats: List[OutputFomatOption] = field(
            default_factory=lambda: ['Rqz.ort'],
            metadata={
                'short': 'of',
                'group': 'output',
                'help': 'one of "Rqz[.ort]", "Rlt[.ort]" or both with "ort"',
                },
            )
    outputName: str = field(
            default='fromEOS',
            metadata={
                'short': 'o',
                'group': 'output',
                'help': '?',
                },
            )
    outputPath: str = field(
            default='.',
            metadata={
                'short': 'op',
                'group': 'output',
                'help': '?',
                },
            )
    plot: bool = field(
            default=False,
            metadata={
                'group': 'output',
                'help': 'show matplotlib graphs of results',
                },
            )

    plot_colormap: PlotColormaps = field(
            default=PlotColormaps.gist_ncar,
            metadata={
                'short': 'pcmap',
                'group': 'output',
                'help': 'matplotlib colormap used in lambda-theta graphs when plotting',
                },
            )

    append: bool = field(
            default=False,
            metadata={
                'group': 'output',
                'help': 'if file already exists, append result as additional ORSO dataset (only Rqz.ort)',
                },
            )

    def _output_format_list(self, outputFormat):
        """Expand user-supplied format shorthands into concrete output formats.

        'ort'/'orb' trigger both Rqz and Rlt output of that file type,
        'Rqz'/'Rlt' trigger that content in both file types. Returns a
        reverse-sorted list; each concrete format appears at most once.
        """
        # (resulting format, shorthands that select it) — replaces four
        # copy-pasted if-blocks with one rule table
        rules = (
            (OutputFomatOption.Rqz_ort,
             (OutputFomatOption.ort, OutputFomatOption.Rqz_ort, OutputFomatOption.Rqz)),
            (OutputFomatOption.Rlt_ort,
             (OutputFomatOption.ort, OutputFomatOption.Rlt_ort, OutputFomatOption.Rlt)),
            (OutputFomatOption.Rqz_orb,
             (OutputFomatOption.orb, OutputFomatOption.Rqz_orb, OutputFomatOption.Rqz)),
            (OutputFomatOption.Rlt_orb,
             (OutputFomatOption.orb, OutputFomatOption.Rlt_orb, OutputFomatOption.Rlt)),
        )
        # StrEnum members compare equal to their string values, so this also
        # works when outputFormat still holds raw strings from the CLI
        format_list = [target for target, aliases in rules
                       if any(alias in outputFormat for alias in aliases)]
        return sorted(format_list, reverse=True)

    def __post_init__(self):
        # normalize whatever the user passed (strings / shorthands) into the
        # concrete list of formats to write
        self.outputFormats = self._output_format_list(self.outputFormats)
|
||||||
|
|
||||||
|
|
||||||
|
# ===================================
|
||||||
|
|
||||||
|
@dataclass
class ReflectivityConfig:
    """Aggregated configuration for one reflectivity reduction run."""

    reader: ReaderConfig
    experiment: ExperimentConfig
    reduction: ReflectivityReductionConfig
    output: ReflectivityOutputConfig

    # When set, call_string() returns this value verbatim instead of
    # rebuilding the argument list (e.g. to reproduce the exact command line
    # the user typed). Deliberately NOT annotated: an annotation would turn
    # it into a dataclass field and change the generated __init__.
    _call_string_overwrite = None

    def call_string(self):
        """Reconstruct the equivalent `eos` command line for this configuration.

        Returns `_call_string_overwrite` when it is set; otherwise collects
        the call parameters of all sub-configurations, sorts them for a
        deterministic order and joins them into one command string.
        """
        # honor the override that was previously only sketched in
        # commented-out code but never applied
        if self._call_string_overwrite:
            return self._call_string_overwrite

        base = 'eos'

        call_parameters = self.reader.get_call_parameters()
        call_parameters += self.output.get_call_parameters()
        call_parameters += self.reduction.get_call_parameters()
        call_parameters += self.experiment.get_call_parameters()

        # tuples sort on their first element, giving a stable argument order
        call_parameters.sort()

        cpout = f'{base} ' + ' '.join(cp[1] for cp in call_parameters)

        logging.debug(f'Argument list build in EOSConfig.call_string: {cpout}')
        return cpout
|
||||||
|
|
||||||
|
class E2HPlotSelection(StrEnum):
    """Selects which histogram(s) events2histogram plots or writes."""
    All = 'all'
    Raw = 'raw'
    YZ = 'Iyz'
    LT = 'Ilt'
    YT = 'Iyt'
    TZ = 'Itz'
    Q = 'Iq'
    L = 'Il'
    T = 'It'
    ToF = 'tof'
|
||||||
|
|
||||||
|
|
||||||
|
class E2HPlotArguments(StrEnum):
    """How events2histogram plots are rendered / delivered."""
    Default = 'def'
    OutputFile = 'file'
    Logarithmic = 'log'
    Linear = 'lin'
|
||||||
|
|
||||||
|
@dataclass
class E2HReductionConfig(ArgParsable):
    """Command line options for the events2histogram reduction."""

    # --- input data ---
    fileIdentifier: str = field(
            default='0',
            metadata={
                'short': 'f',
                'priority': 100,
                'group': 'input data',
                'help': 'file number(s) or offset (if < 1), events2histogram only accepts one short code',
                },
            )

    # --- output / plotting ---
    show_plot: bool = field(
            default=False,
            metadata={
                'short': 'sp',
                'group': 'output',
                'help': 'show matplotlib graphs of results',
                },
            )

    plot: E2HPlotSelection = field(
            default=E2HPlotSelection.All,
            metadata={
                'short': 'p',
                'group': 'output',
                'help': 'select what to plot or write',
                },
            )

    kafka: bool = field(
            default=False,
            metadata={
                'group': 'output',
                'help': 'send result to kafka for Nicos',
                },
            )

    plotArgs: E2HPlotArguments = field(
            default=E2HPlotArguments.Default,
            metadata={
                'short': 'pa',
                'group': 'output',
                'help': 'select configuration for plot',
                },
            )

    update: bool = field(
            default=False,
            metadata={
                'short': 'u',
                'group': 'output',
                'help': 'keep running in the background and update plot when file is modified, implies --plot',
                },
            )

    fast: bool = field(
            default=False,
            metadata={
                'group': 'input data',
                'help': 'skip some reduction steps to speed up calculations',
                },
            )

    normalizationModel: bool = field(
            default=False,
            metadata={
                'short': 'nm',
                'group': 'input data',
                'help': 'use model for incoming spectrum and divergence to normalize for reflectivity',
                },
            )

    plot_colormap: PlotColormaps = field(
            default=PlotColormaps.jochen_deluxe,
            metadata={
                'short': 'pcmap',
                'group': 'output',
                'help': 'matplotlib colormap used in lambda-theta graphs when plotting',
                },
            )

    max_events: int = field(
            default=10_000_000,
            metadata={
                'group': 'input data',
                'help': 'maximum number of events read at once',
                },
            )

    # --- region of interest ---
    # NOTE(review): annotated as Tuple but the default (and presumably the
    # parsed value) is a list — confirm against ArgParsable's parsing.
    thetaRangeR: Tuple[float, float] = field(
            default_factory=lambda: [-0.75, 0.75],
            metadata={
                'short': 'T',
                'group': 'region of interest',
                'help': 'theta region of interest w.r.t. beam center',
                },
            )

    thetaFilters: List[Tuple[float, float]] = field(
            default_factory=lambda: [],
            metadata={
                'short': 'TF',
                'group': 'region of interest',
                'help': 'add one or more theta ranges that will be filtered in reduction',
                },
            )

    fontsize: float = field(
            default=8.,
            metadata={
                'short': 'pf',
                'group': 'output',
                'help': 'font size for graphs',
                },
            )
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class E2HConfig:
|
||||||
|
reader: ReaderConfig
|
||||||
|
experiment: ExperimentConfig
|
||||||
|
reduction: E2HReductionConfig
|
||||||
82
eos/path_handling.py
Normal file
82
eos/path_handling.py
Normal file
@@ -0,0 +1,82 @@
|
|||||||
|
"""
|
||||||
|
Defines how file paths are resolved from short_notation, year and number to filename.
|
||||||
|
"""
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
from typing import List
|
||||||
|
|
||||||
|
|
||||||
|
class PathResolver:
    """Resolve short file-number notation (e.g. '12,15-18:1') to raw-data file paths."""

    def __init__(self, year, rawPath):
        # year: measurement year used in the file name pattern amor<year>n<number>.hdf
        # rawPath: list of directories searched for raw files, in order of preference
        self.year = year
        self.rawPath = rawPath

    def resolve(self, short_notation):
        """Return the full path for every file number encoded in short_notation."""
        return list(map(self.get_path, self.expand_file_list(short_notation)))

    @staticmethod
    def expand_file_list(short_notation) -> List[int]:
        """Evaluate string entry for file number lists.

        Items are comma separated; each item is either a single number
        (negative values are relative indices) or an inclusive range
        'start-stop[:step]'. Returns the sorted list of numbers.
        """
        file_list: List[int] = []
        for item in short_notation.split(','):
            if '-' in item and not item.startswith('-'):
                # range item: 'start-stop' with optional ':step' suffix;
                # one code path replaces the two duplicated branches
                bounds, colon, step = item.partition(':')
                start, stop = bounds.split('-', 1)
                file_list.extend(range(int(start),
                                       int(stop)+1,
                                       int(step) if colon else 1))
            else:
                file_list.append(int(item))
        return sorted(file_list)

    def get_path(self, number):
        """Return the full path of the raw file for `number`.

        Numbers <= 0 are treated as indices relative to the most recent
        measurement (see search_latest). The configured rawPath directories
        are tried first, then the central instrument data tree.

        Raises FileNotFoundError when the file cannot be located.
        """
        if number <= 0:
            number = self.search_latest(number)
        fileName = f'amor{self.year}n{number:06d}.hdf'
        path = ''
        for rawd in self.rawPath:
            if os.path.exists(os.path.join(rawd, fileName)):
                path = rawd
                break
        if not path:
            from glob import glob
            # fall back to the instrument data tree (any proposal subfolder)
            potential_file = glob(f'/home/amor/data/{self.year}/*/{fileName}')
            if len(potential_file) > 0:
                path = os.path.dirname(potential_file[0])
            else:
                raise FileNotFoundError(f'# ERROR: the file {fileName} can not be found '
                                        f'in {self.rawPath+["/home/amor/data"]}')
        return os.path.join(path, fileName)

    def search_latest(self, number):
        """Translate a relative index (<= 0) into an absolute file number.

        0 refers to the latest measurement, -1 to the one before, etc.
        Prefers the instrument's DataNumber counter file; falls back to
        scanning the file system for existing data files.

        Raises ValueError for positive input and FileNotFoundError when no
        matching file exists.
        """
        if number > 0:
            raise ValueError('number needs to be relative index (negative)')
        if os.path.exists(f'/home/amor/data/{self.year}/DataNumber'):
            try:
                with open(f'/home/amor/data/{self.year}/DataNumber', 'r') as fh:
                    # DataNumber holds the index of the NEXT measurement
                    current_index = int(fh.readline())-1
            except Exception:
                # best effort: fall through to the file-system scan below
                logging.error('Can not access DataNumber', exc_info=True)
            else:
                return current_index+number
        # find all files from the given year, convert to number and return latest
        from glob import glob
        possible_files = []
        for rawd in self.rawPath:
            possible_files += glob(os.path.join(rawd, f'amor{self.year}n??????.hdf'))
        possible_files += glob(f'/home/amor/data/{self.year}/*/amor{self.year}n??????.hdf')
        # file name layout: amorYYYYnNNNNNN.hdf -> characters 9:15 are the number
        possible_indices = sorted({int(os.path.basename(fi)[9:15]) for fi in possible_files})
        try:
            return possible_indices[number-1]
        except IndexError:
            raise FileNotFoundError(f'# Could not find suitable file for relative index {number} '
                                    f'in {self.rawPath+["/home/amor/data"]}, '
                                    f'possible indices {possible_indices}') from None
|
||||||
|
|
||||||
832
eos/projection.py
Normal file
832
eos/projection.py
Normal file
@@ -0,0 +1,832 @@
|
|||||||
|
"""
|
||||||
|
Classes used to calculate projections/binnings from event data onto given grids.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import warnings
|
||||||
|
from abc import ABC, abstractmethod
|
||||||
|
from typing import List, Tuple, Union
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
from dataclasses import dataclass
|
||||||
|
|
||||||
|
from .event_data_types import EventDatasetProtocol
|
||||||
|
from .instrument import Detector, LZGrid
|
||||||
|
from .normalization import LZNormalisation
|
||||||
|
|
||||||
|
class ProjectionInterface(ABC):
    """Common interface for projections/binnings of event data onto a grid."""

    @abstractmethod
    def project(self, dataset: EventDatasetProtocol, monitor: float):
        """Accumulate the events of `dataset` (with monitor value `monitor`) into the projection."""
        ...

    @abstractmethod
    def clear(self):
        """Reset accumulated data so projection can start fresh."""
        ...

    @abstractmethod
    def plot(self, **kwargs):
        """Create a matplotlib graph of the projected data."""
        ...

    @abstractmethod
    def update_plot(self):
        """Refresh a previously created graph with the current data."""
        ...
|
||||||
|
|
||||||
|
@dataclass
class ProjectedReflectivity:
    """Reflectivity curve R(Q) with uncertainties, projected onto a q-grid."""

    R: np.ndarray
    dR: np.ndarray
    Q: np.ndarray
    dQ: np.ndarray

    @property
    def data(self):
        """
        Return combined data compatible with storing as columns in orso file.

        Q, R, dR, dQ
        """
        return np.column_stack([self.Q, self.R, self.dR, self.dQ])

    def data_for_time(self, time):
        """Like `data`, with a constant time column appended: Q, R, dR, dQ, t."""
        time_column = np.ones(np.shape(self.Q))*time
        return np.column_stack([self.Q, self.R, self.dR, self.dQ, time_column])

    def scale(self, factor):
        """Multiply reflectivity and its uncertainty in place by `factor`."""
        self.R *= factor
        self.dR *= factor

    def autoscale(self, range):
        """Scale the curve so the error-weighted average of R within `range` is 1.

        Returns the applied factor (1.0 when no usable points exist).
        """
        usable = (range[0] <= self.Q) & (self.Q <= range[1]) & (self.dR > 0)
        if not usable.any():
            logging.warning(' automatic scaling not possible')
            return 1.0
        r = self.R[usable]
        dr = self.dR[usable]
        # weighted least-squares factor bringing r towards 1
        factor = (r/dr).sum()/(r**2/dr).sum()
        self.scale(factor)
        logging.info(f'    scaling factor = {factor}')
        return factor

    def stitch(self, other: 'ProjectedReflectivity'):
        """Scale this curve onto `other` using overlapping, positive points.

        Returns the applied factor (1.0 when there is no usable overlap).
        """
        # find scaling factor between two reflectivities at points both are not zero
        overlap = np.logical_not(np.isnan(other.R*self.R)) & (self.R > 0) & (other.R > 0)
        r_self = self.R[overlap]
        dr_self = self.dR[overlap]
        r_other = other.R[overlap]
        dr_other = other.dR[overlap]
        if r_self.size == 0:
            logging.warning(' automatic scaling not possible')
            return 1.0
        weight = 1.0/(dr_self**2*dr_other**2)
        factor = (r_self**2*r_other**2*weight).sum()/(r_self**3*r_other*weight).sum()
        self.scale(factor)
        logging.info(f'    scaling factor = {factor}')
        return factor

    def subtract(self, R, dR):
        """Subtract another dataset sampled on the same q-points; errors add in quadrature."""
        self.R -= R
        self.dR = np.sqrt(self.dR**2+dR**2)

    def plot(self, **kwargs):
        """Plot R(Q) with error bars on a logarithmic intensity axis."""
        from matplotlib import pyplot as plt
        plt.errorbar(self.Q, self.R, yerr=self.dR, **kwargs)
        plt.yscale('log')
        plt.xlabel('Q / Å$^{-1}$')
        plt.ylabel('R')
|
||||||
|
|
||||||
|
class LZProjection(ProjectionInterface):
    """2d projection of event data onto a wavelength (lambda) vs. angle (z/theta) grid.

    Accumulates counts with `project`, is normalized against a reference
    measurement (`normalize_over_illuminated` / `normalize_no_footprint`)
    and can finally be reduced to a 1d R(q_z) curve with `project_on_qz`.
    """

    grid: LZGrid
    lamda: np.ndarray        # wavelength at each grid point
    alphaF: np.ndarray       # final (reflection) angle at each grid point
    is_normalized: bool      # True after a normalize_* call; reset by project()
    angle: float             # detector angle nu - mu of the measurement

    data: np.recarray
    _dtype = np.dtype([
        ('I', np.float64),     # accumulated raw counts
        ('mask', bool),        # True = point participates in reduction
        ('ref', np.float64),   # normalized reflectivity
        ('err', np.float64),   # error on ref
        ('res', np.float64),   # q-resolution
        ('qz', np.float64),
        ('qx', np.float64),
        ('norm', np.float64),  # normalization weight per point
        ])

    def __init__(self, tthh: float, grid: LZGrid):
        """Set up the lambda/theta coordinate arrays for detector angle `tthh` on `grid`."""
        self.grid = grid
        self.is_normalized = False
        self.angle = tthh

        alphaF_z = tthh + Detector.delta_z
        lamda_l = self.grid.lamda()
        # bin centers from bin edges
        lamda_c = (lamda_l[:-1]+lamda_l[1:])/2

        lz_shape = self.grid.lz()

        self.lamda = lz_shape*lamda_c[:, np.newaxis]
        self.alphaF = lz_shape*alphaF_z[np.newaxis, :]
        self.data = np.zeros(self.alphaF.shape, dtype=self._dtype).view(np.recarray)
        self.data.mask = True
        self.monitor = 0.

    @classmethod
    def from_dataset(cls, dataset: EventDatasetProtocol, grid: LZGrid, has_offspecular=False):
        """Build a projection matching the geometry of `dataset`, including gravity and q calculation."""
        tthh = dataset.geometry.nu - dataset.geometry.mu
        output = cls(tthh, grid)
        output.correct_gravity(dataset.geometry.detectorDistance)
        if has_offspecular:
            alphaI_lz = grid.lz()*(dataset.geometry.mu+dataset.geometry.kap+dataset.geometry.kad)
            output.calculate_q(alphaI_lz)
        else:
            output.calculate_q()
        return output

    def correct_gravity(self, detector_distance):
        """Correct the reflection angle for the wavelength-dependent gravitational drop."""
        self.alphaF += np.rad2deg( np.arctan( 3.07e-10 * detector_distance * self.lamda**2 ) )

    def calculate_q(self, alphaI=None):
        """Fill qz/qx for each grid point; specular-only when `alphaI` is None."""
        if alphaI is None:
            self.data.qz = 4.0*np.pi*np.sin(np.deg2rad(self.alphaF))/self.lamda
            self.data.qx = 0.*self.data.qz
        else:
            self.data.qz = 2.0*np.pi*(np.sin(np.deg2rad(self.alphaF))+np.sin(np.deg2rad(alphaI)))/self.lamda
            self.data.qx = 2.0*np.pi*(np.cos(np.deg2rad(self.alphaF))-np.cos(np.deg2rad(alphaI)))/self.lamda

        if self.data.qz[0,self.data.qz.shape[1]//2] < 0:
            # assuming a 'measurement from below' when center of detector at negative qz
            self.data.qz *= -1

        self.calculate_q_resolution()

    def calculate_q_resolution(self):
        """Relative q-resolution from wavelength (2.2%) and angular (0.008 deg) contributions."""
        res_lz = self.grid.lz() * 0.022**2
        res_lz = res_lz + (0.008/self.alphaF)**2
        self.data.res = self.data.qz * np.sqrt(res_lz)

    def apply_theta_filter(self, theta_range):
        """Exclude points WITHIN the given theta range from the reduction."""
        self.data.mask &= (self.alphaF<theta_range[0])|(self.alphaF>theta_range[1])

    def apply_theta_mask(self, theta_range):
        """Keep only points within the given theta range."""
        self.data.mask &= self.alphaF>=theta_range[0]
        self.data.mask &= self.alphaF<=theta_range[1]

    def apply_lamda_mask(self, lamda_range):
        """Keep only points within the given wavelength range."""
        self.data.mask &= self.lamda>=lamda_range[0]
        self.data.mask &= self.lamda<=lamda_range[1]

    def apply_norm_mask(self, norm: LZNormalisation, min_norm=-1, min_theta=-1):
        """Mask points where the normalization is NaN or too small.

        When `min_theta` > 0 additionally drop wavelength columns whose
        summed normalization is below that threshold.
        """
        self.data.mask &= np.logical_not(np.isnan(norm.norm))&(norm.norm>min_norm)
        if min_theta>0:
            thsum = np.nansum(norm.norm, axis=0)
            self.data.mask &= (thsum>min_theta)[np.newaxis, :]

    def project(self, dataset: EventDatasetProtocol, monitor: float):
        """
        Project dataset on grid and add to intensity.

        Can be called multiple times to sequentially add events.
        """
        # TODO: maybe move monitor calculation in here instead of reduction?
        e = dataset.data.events
        int_lz, *_ = np.histogram2d(e.lamda, e.detZ, bins=(self.grid.lamda(), self.grid.z()))
        self.data.I += int_lz
        self.monitor += monitor
        # in case the intensity changed one needs to normalize again
        self.is_normalized = False

    def clear(self):
        """Reset accumulated counts, mask and monitors."""
        self.data[:] = 0
        self.data.mask = True
        self.monitor = 0.
        self.norm_monitor = 1.

    @property
    def I(self):
        """Counts per monitor unit with masked-out points set to NaN."""
        # BUGFIX: `self.data.I[:]` returned a *view* of the recarray field,
        # so writing NaN below permanently corrupted the stored counts;
        # an explicit copy keeps the accumulated data intact.
        output = self.data.I.copy()
        output[np.logical_not(self.data.mask)] = np.nan
        return output / self.monitor

    def calc_error(self):
        """Error bars for the normalized intensity; +0.1 regularizes empty bins."""
        self.data.err = self.data.ref * np.sqrt( 1./(self.data.I+.1) + 1./(self.data.norm+0.1) )

    def normalize_over_illuminated(self, norm: LZNormalisation):
        """
        Normalize the dataset and take into account a difference in
        detector angle for measurement and reference.
        """
        logging.debug(f'    correcting for incident angle difference from norm {norm.angle} to data {self.angle}')
        norm_lz = norm.norm
        delta_lz = np.ones_like(norm_lz)*Detector.delta_z
        # do not perform gravity correction for footprint, would require norm detector distance that is unknown here
        fp_corr_lz = np.where(np.absolute(delta_lz+norm.angle)>5e-3,
                              (delta_lz+self.angle)/(delta_lz+norm.angle), np.nan)
        fp_corr_lz[fp_corr_lz<0] = np.nan
        self.data.mask &= np.logical_not(np.isnan(fp_corr_lz))
        self.data.norm = norm_lz*fp_corr_lz
        self.norm_monitor = norm.monitor
        ref_lz = self.data.I/np.where(self.data.norm>0, self.data.norm, np.nan)
        ref_lz *= norm.monitor/self.monitor
        ref_lz[np.logical_not(self.data.mask)] = np.nan
        self.data.ref = ref_lz
        self.calc_error()
        self.is_normalized = True

    def normalize_no_footprint(self, norm: LZNormalisation):
        """Normalize without any footprint / angle correction."""
        norm_lz = norm.norm
        ref_lz = (self.data.I/norm_lz)
        ref_lz *= norm.monitor/self.monitor
        ref_lz[np.logical_not(self.data.mask)] = np.nan
        self.data.norm = norm_lz
        self.data.ref = ref_lz
        self.calc_error()
        self.is_normalized = True

    def scale(self, factor: float):
        """Scale the normalized reflectivity in place.

        Raises ValueError when called before normalization.
        """
        if not self.is_normalized:
            raise ValueError("Dataset needs to be normalized, first")
        self.data.ref *= factor
        self.data.err *= factor
        # keep norm_monitor consistent so project_on_qz reproduces the scaling
        self.norm_monitor /= factor

    def project_on_qz(self):
        """Reduce the masked 2d data to a 1d R(q_z) curve on the grid's q binning.

        Raises ValueError when called before normalization.
        """
        if not self.is_normalized:
            raise ValueError("Dataset needs to be normalized, first")
        q_q = self.grid.q()
        weights_lzf = self.data.norm[self.data.mask]
        q_lzf = self.data.qz[self.data.mask]
        I_lzf = self.data.I[self.data.mask]
        dq_lzf = self.data.res[self.data.mask]

        # get number of grid points contributing to a bin, filter points with no contribution
        N_q = np.histogram(q_lzf, bins=q_q)[0]
        N_q = np.where(N_q > 0, N_q, np.nan)
        fltr = N_q>0

        # calculate sum of all normalization weights per bin
        W_q = np.maximum(np.histogram(q_lzf, bins=q_q, weights=weights_lzf)[0], 1e-10)
        # calculate sum of all dataset counts per bin
        I_q = np.histogram(q_lzf, bins=q_q, weights=I_lzf)[0]
        # normalize dataset by normalization counts and scale by monitor
        R_q = np.where(fltr, I_q*self.norm_monitor/self.monitor / W_q, np.nan)
        # error as square-root of counts and sqrt from normalization (dR/R = sqrt( (dI/I)² + (dW/W)²)
        dR_q = np.where(fltr, R_q*(np.sqrt(1./(I_q+0.1)+ 1./(W_q+0.1))), np.nan)
        # q-resolution is the weighted sum of individual points q-resolutions
        dq_q = np.histogram(q_lzf, bins=q_q, weights=weights_lzf * dq_lzf )[0]
        dq_q = np.where(fltr, dq_q/W_q, np.nan)
        return ProjectedReflectivity(R_q, dR_q, (q_q[1:]+q_q[:-1])/2., dq_q)

    def plot(self, **kwargs):
        """Color map of intensity (or reflectivity once normalized) over lambda/theta."""
        from matplotlib import pyplot as plt
        from matplotlib.colors import LogNorm

        if 'colorbar' in kwargs:
            cmap=True
            del(kwargs['colorbar'])
        else:
            cmap=False

        if self.is_normalized:
            I = self.data.ref
        else:
            I = self.data.I

        if not 'norm' in kwargs:
            vmin = I[(I>0)].min()
            vmax = np.nanmax(I)
            kwargs['norm'] = LogNorm(vmin, vmax, clip=True)

        # suppress warning for wrongly sorted y-axis pixels (blades overlap)
        with warnings.catch_warnings(action='ignore', category=UserWarning):
            self._graph = plt.pcolormesh(self.lamda, self.alphaF, I, **kwargs)
        if cmap:
            if self.is_normalized:
                plt.colorbar(label='R')
            else:
                plt.colorbar(label='I / cpm')
        plt.xlabel('$\\lambda$ / $\\AA$')
        plt.ylabel('$\\Theta$ / °')
        plt.xlim(self.lamda[0,0], self.lamda[-1,0])
        af = self.alphaF[self.data.mask]
        plt.ylim(af.min(), af.max())
        plt.title('Wavelength vs. Reflection Angle')

        self._graph_axis = plt.gca()
        plt.connect('button_press_event', self.draw_qline)

    def update_plot(self):
        """
        Inline update of previous plot by just updating the data.
        """
        from matplotlib.colors import LogNorm
        if self.is_normalized:
            I = self.data.ref
        else:
            I = self.data.I

        if isinstance(self._graph.norm, LogNorm):
            vmin = I[(I>0)].min()*0.5
        else:
            vmin = 0
        vmax = np.nanmax(I)
        self._graph.set_array(I)
        self._graph.norm.vmin = vmin
        self._graph.norm.vmax = vmax

        if self.is_normalized:
            self._graph.set_array(self.data.ref)
        else:
            self._graph.set_array(self.data.I)

    def draw_qline(self, event):
        """Mouse handler: left click draws a constant-q guide line, right click clears annotations."""
        if event.inaxes is not self._graph_axis:
            return
        from matplotlib import pyplot as plt
        tbm = self._graph_axis.figure.canvas.manager.toolbar.mode
        if event.button is plt.MouseButton.LEFT and tbm=='':
            slope = event.ydata/event.xdata
            xmax = 12.5
            self._graph_axis.plot([0, xmax], [0, slope*xmax], '-', color='grey')
            self._graph_axis.text(event.xdata, event.ydata, f'q={np.deg2rad(slope)*4.*np.pi:.3f}', backgroundcolor='white')
            plt.draw()
        if event.button is plt.MouseButton.RIGHT and tbm=='':
            for art in list(self._graph_axis.lines)+list(self._graph_axis.texts):
                art.remove()
            plt.draw()
|
||||||
|
|
||||||
|
# plot kwargs that only make sense for 2d color-map plots; 1d plot methods
# strip these before forwarding kwargs to matplotlib
ONLY_MAP = ['colorbar', 'cmap', 'norm']
|
||||||
|
|
||||||
|
class ReflectivityProjector(ProjectionInterface):
    """Projects events all the way to a 1d R(Q) curve via an LZProjection."""

    lzprojection: LZProjection          # underlying 2d lambda/theta projection
    data: ProjectedReflectivity         # result of the latest project() call
    # TODO: maybe implement direct 1d projection in here

    def __init__(self, lzprojection, norm):
        # norm: LZNormalisation reference used for every (re-)normalization
        self.lzprojection = lzprojection
        self.norm = norm

    def project(self, dataset: EventDatasetProtocol, monitor: float):
        """Accumulate events, re-normalize and refresh the 1d curve in self.data."""
        self.lzprojection.project(dataset, monitor)
        self.lzprojection.normalize_over_illuminated(self.norm)
        self.data = self.lzprojection.project_on_qz()

    def clear(self):
        """Reset the underlying 2d projection (self.data keeps the last result)."""
        self.lzprojection.clear()

    def plot(self, **kwargs):
        """Plot the current R(Q) curve with error bars on a log scale."""
        from matplotlib import pyplot as plt
        # drop kwargs that only apply to 2d map plots
        for key in ONLY_MAP:
            if key in kwargs: del(kwargs[key])

        self._graph = plt.errorbar(self.data.Q, self.data.R, xerr=self.data.dQ, yerr=self.data.dR, **kwargs)
        self._graph_axis = plt.gca()
        plt.title('Reflectivity (might be improperly normalized)')
        plt.yscale('log')
        plt.xlabel('Q / $\\AA^{-1}$')
        plt.ylabel('R')

    def update_plot(self):
        """Refresh the existing errorbar plot in place with the current self.data."""
        # ErrorbarContainer unpacks into (line, caplines, barlinecols)
        ln, _, (barsx, barsy) = self._graph

        yerr_top = self.data.R+self.data.dR
        yerr_bot = self.data.R-self.data.dR
        xerr_top = self.data.Q+self.data.dQ
        xerr_bot = self.data.Q-self.data.dQ

        # rebuild the error-bar line segments for the new values
        new_segments_x = [np.array([[xt, y], [xb, y]]) for xt, xb, y in zip(xerr_top, xerr_bot, self.data.R)]
        new_segments_y = [np.array([[x, yt], [x, yb]]) for x, yt, yb in zip(self.data.Q, yerr_top, yerr_bot)]
        barsx.set_segments(new_segments_x)
        barsy.set_segments(new_segments_y)

        ln.set_ydata(self.data.R)
|
||||||
|
|
||||||
|
|
||||||
|
class YZProjection(ProjectionInterface):
    """2d histogram of events over detector pixel coordinates Y (horizontal) vs Z (vertical)."""

    y: np.ndarray   # bin edges for horizontal pixel index
    z: np.ndarray   # bin edges for vertical pixel index

    data: np.recarray
    _dtype = np.dtype([
        ('cts', np.float64),   # accumulated raw counts
        ('I', np.float64),     # counts per monitor unit
        ('err', np.float64),   # statistical error on I
        ])

    def __init__(self):
        # half-integer edges so each integer pixel index falls into its own bin
        self.z = np.arange(Detector.nBlades*Detector.nWires+1)-0.5
        self.y = np.arange(Detector.nStripes+1)-0.5
        self.data = np.zeros((self.y.shape[0]-1, self.z.shape[0]-1), dtype=self._dtype).view(np.recarray)
        self.monitor = 0.

    def project(self, dataset: EventDatasetProtocol, monitor: float):
        """Accumulate events into the Y/Z histogram and refresh I and err."""
        # pixelID is 1-based; look up geometric pixel coordinates
        detYi, detZi, detX, delta = Detector.pixelLookUp[dataset.data.events.pixelID-1].T

        cts , *_ = np.histogram2d(detYi, detZi, bins=(self.y, self.z))
        self.data.cts += cts
        self.monitor += monitor

        self.data.I = self.data.cts / self.monitor
        self.data.err = np.sqrt(self.data.cts) / self.monitor

    def clear(self):
        """Reset counts and monitor."""
        self.data[:] = 0
        self.monitor = 0.

    def plot(self, **kwargs):
        """Color map of intensity over Y/Z pixels; pass colorbar=True for a colorbar."""
        from matplotlib import pyplot as plt
        from matplotlib.colors import LogNorm

        if 'colorbar' in kwargs:
            cmap=True
            del(kwargs['colorbar'])
        else:
            cmap=False

        vmax = self.data.I.max()

        if not 'norm' in kwargs:
            # half the smallest positive value keeps all data visible on the log scale
            vmin = self.data.I[(self.data.I>0)].min()*0.5
            kwargs['norm'] = LogNorm(vmin, vmax)

        self._graph = plt.pcolormesh(self.y, self.z, self.data.I.T, **kwargs)
        if cmap:
            plt.colorbar(label='I / cpm')

        plt.xlabel('Y')
        plt.ylabel('Z')
        plt.xlim(self.y[0], self.y[-1])
        # inverted z axis: pixel 0 is drawn at the top
        plt.ylim(self.z[-1], self.z[0])
        plt.title('Horizontal Pixel vs. Vertical Pixel')

        self._graph_axis = plt.gca()
        plt.connect('button_press_event', self.draw_yzcross)

    def update_plot(self):
        """
        Inline update of previous plot by just updating the data.
        """
        from matplotlib.colors import LogNorm
        if isinstance(self._graph.norm, LogNorm):
            vmin = self.data.I[(self.data.I>0)].min()*0.5
        else:
            vmin = 0
        vmax = self.data.I.max()
        self._graph.set_array(self.data.I.T)
        self._graph.norm.vmin = vmin
        self._graph.norm.vmax = vmax

    def draw_yzcross(self, event):
        """Mouse handler: left click marks a pixel with cross hairs, right click clears annotations."""
        if event.inaxes is not self._graph_axis:
            return
        from matplotlib import pyplot as plt
        # only react when no toolbar tool (zoom/pan) is active
        tbm = self._graph_axis.figure.canvas.manager.toolbar.mode
        if event.button is plt.MouseButton.LEFT and tbm=='':
            self._graph_axis.plot([event.xdata, event.xdata], [self.z[0], self.z[-1]], '-', color='grey')
            self._graph_axis.plot([self.y[0], self.y[-1]], [event.ydata, event.ydata], '-', color='grey')
            self._graph_axis.text(event.xdata, event.ydata, f'({event.xdata:.1f}, {event.ydata:.1f})', backgroundcolor='white')
            plt.draw()
        if event.button is plt.MouseButton.RIGHT and tbm=='':
            for art in list(self._graph_axis.lines)+list(self._graph_axis.texts):
                art.remove()
            plt.draw()
|
||||||
|
|
||||||
|
class YTProjection(YZProjection):
    """Like YZProjection, but the vertical axis is the reflection angle theta instead of pixel index."""

    theta: np.ndarray   # bin edges in degrees, one per z pixel

    def __init__(self, tthh: float):
        # tthh: detector angle; per-pixel angular offsets come from Detector.delta_z
        dd = Detector.delta_z[1]-Detector.delta_z[0]
        # extend by one edge and shift by half a pixel to get bin EDGES from pixel centers
        delta = np.hstack([Detector.delta_z, Detector.delta_z[-1]+dd])-dd/2.
        self.theta = tthh + delta
        super().__init__()

    def plot(self, **kwargs):
        """Color map of intensity over Y pixel vs theta; pass colorbar=True for a colorbar."""
        from matplotlib import pyplot as plt
        from matplotlib.colors import LogNorm

        if 'colorbar' in kwargs:
            cmap=True
            del(kwargs['colorbar'])
        else:
            cmap=False

        if not 'norm' in kwargs:
            kwargs['norm'] = LogNorm()

        self._graph = plt.pcolormesh(self.y, self.theta, self.data.I.T, **kwargs)
        if cmap:
            plt.colorbar(label='I / cpm')

        plt.xlabel('Y')
        plt.ylabel('Theta / °')
        plt.xlim(self.y[0], self.y[-1])
        # inverted axis: first pixel is drawn at the top
        plt.ylim(self.theta[-1], self.theta[0])
        plt.title('Horizontal Pixel vs. Angle')

        self._graph_axis = plt.gca()
        plt.connect('button_press_event', self.draw_tzcross)

    def draw_tzcross(self, event):
        """Mouse handler: left click marks a point with cross hairs, right click clears annotations."""
        if event.inaxes is not self._graph_axis:
            return
        from matplotlib import pyplot as plt
        # only react when no toolbar tool (zoom/pan) is active
        tbm = self._graph_axis.figure.canvas.manager.toolbar.mode
        if event.button is plt.MouseButton.LEFT and tbm=='':
            self._graph_axis.plot([event.xdata, event.xdata], [self.theta[0], self.theta[-1]], '-', color='grey')
            self._graph_axis.plot([self.y[0], self.y[-1]], [event.ydata, event.ydata], '-', color='grey')
            self._graph_axis.text(event.xdata, event.ydata, f'({event.xdata:.1f}, {event.ydata:.1f})', backgroundcolor='white')
            plt.draw()
        if event.button is plt.MouseButton.RIGHT and tbm=='':
            for art in list(self._graph_axis.lines)+list(self._graph_axis.texts):
                art.remove()
            plt.draw()
|
||||||
|
|
||||||
|
|
||||||
|
class TofZProjection(ProjectionInterface):
|
||||||
|
tof: np.ndarray
|
||||||
|
z: np.ndarray
|
||||||
|
|
||||||
|
data: np.recarray
|
||||||
|
_dtype = np.dtype([
|
||||||
|
('cts', np.float64),
|
||||||
|
('I', np.float64),
|
||||||
|
('err', np.float64),
|
||||||
|
])
|
||||||
|
|
||||||
|
def __init__(self, tau, foldback=False, combine=1):
|
||||||
|
self.z = np.arange(Detector.nBlades*Detector.nWires+1)-0.5
|
||||||
|
if foldback:
|
||||||
|
self.tof = np.arange(0, tau, 0.0005*combine)
|
||||||
|
else:
|
||||||
|
self.tof = np.arange(0, 2*tau, 0.0005*combine)
|
||||||
|
self.data = np.zeros((self.tof.shape[0]-1, self.z.shape[0]-1), dtype=self._dtype).view(np.recarray)
|
||||||
|
self.monitor = 0.
|
||||||
|
|
||||||
|
def project(self, dataset: EventDatasetProtocol, monitor: float):
|
||||||
|
detYi, detZi, detX, delta = Detector.pixelLookUp[dataset.data.events.pixelID-1].T
|
||||||
|
|
||||||
|
cts , *_ = np.histogram2d(dataset.data.events.tof, detZi, bins=(self.tof, self.z))
|
||||||
|
self.data.cts += cts
|
||||||
|
self.monitor += monitor
|
||||||
|
|
||||||
|
self.data.I = self.data.cts / self.monitor
|
||||||
|
self.data.err = np.sqrt(self.data.cts) / self.monitor
|
||||||
|
|
||||||
|
def clear(self):
|
||||||
|
self.data[:] = 0
|
||||||
|
self.monitor = 0.
|
||||||
|
|
||||||
|
def plot(self, **kwargs):
|
||||||
|
from matplotlib import pyplot as plt
|
||||||
|
from matplotlib.colors import LogNorm
|
||||||
|
|
||||||
|
if 'colorbar' in kwargs:
|
||||||
|
cmap=True
|
||||||
|
del(kwargs['colorbar'])
|
||||||
|
else:
|
||||||
|
cmap=False
|
||||||
|
|
||||||
|
if not 'norm' in kwargs:
|
||||||
|
kwargs['norm'] = LogNorm()
|
||||||
|
|
||||||
|
self._graph = plt.pcolormesh(self.tof*1e3, self.z, self.data.I.T, **kwargs)
|
||||||
|
if cmap:
|
||||||
|
plt.colorbar(label='I / cpm')
|
||||||
|
|
||||||
|
plt.xlabel('Time of Flight / ms')
|
||||||
|
plt.ylabel('Z')
|
||||||
|
plt.xlim(self.tof[0]*1e3, self.tof[-1]*1e3)
|
||||||
|
plt.ylim(self.z[-1], self.z[0])
|
||||||
|
plt.title('Time of Flight vs. Vertical Pixel')
|
||||||
|
|
||||||
|
self._graph_axis = plt.gca()
|
||||||
|
plt.connect('button_press_event', self.draw_tzcross)
|
||||||
|
|
||||||
|
def update_plot(self):
|
||||||
|
"""
|
||||||
|
Inline update of previous plot by just updating the data.
|
||||||
|
"""
|
||||||
|
from matplotlib.colors import LogNorm
|
||||||
|
if isinstance(self._graph.norm, LogNorm):
|
||||||
|
vmin = self.data.I[(self.data.I>0)].min()*0.5
|
||||||
|
else:
|
||||||
|
vmin = 0
|
||||||
|
vmax = self.data.I.max()
|
||||||
|
self._graph.set_array(self.data.I.T)
|
||||||
|
self._graph.norm.vmin = vmin
|
||||||
|
self._graph.norm.vmax = vmax
|
||||||
|
|
||||||
|
def draw_tzcross(self, event):
|
||||||
|
if event.inaxes is not self._graph_axis:
|
||||||
|
return
|
||||||
|
from matplotlib import pyplot as plt
|
||||||
|
tbm = self._graph_axis.figure.canvas.manager.toolbar.mode
|
||||||
|
if event.button is plt.MouseButton.LEFT and tbm=='':
|
||||||
|
self._graph_axis.plot([event.xdata, event.xdata], [self.z[0], self.z[-1]], '-', color='grey')
|
||||||
|
self._graph_axis.plot([self.tof[0]*1e3, self.tof[-1]*1e3], [event.ydata, event.ydata], '-', color='grey')
|
||||||
|
self._graph_axis.text(event.xdata, event.ydata, f'({event.xdata:.2f}, {event.ydata:.1f})', backgroundcolor='white')
|
||||||
|
plt.draw()
|
||||||
|
if event.button is plt.MouseButton.RIGHT and tbm=='':
|
||||||
|
for art in list(self._graph_axis.lines)+list(self._graph_axis.texts):
|
||||||
|
art.remove()
|
||||||
|
plt.draw()
|
||||||
|
|
||||||
|
class TofProjection(ProjectionInterface):
|
||||||
|
tof: np.ndarray
|
||||||
|
|
||||||
|
data: np.recarray
|
||||||
|
_dtype = np.dtype([
|
||||||
|
('cts', np.float64),
|
||||||
|
('I', np.float64),
|
||||||
|
('err', np.float64),
|
||||||
|
])
|
||||||
|
|
||||||
|
def __init__(self, tau, foldback=False):
|
||||||
|
if foldback:
|
||||||
|
self.tof = np.arange(0, tau, 0.0005)
|
||||||
|
else:
|
||||||
|
self.tof = np.arange(0, 2*tau, 0.0005)
|
||||||
|
self.data = np.zeros(self.tof.shape[0]-1, dtype=self._dtype).view(np.recarray)
|
||||||
|
self.monitor = 0.
|
||||||
|
|
||||||
|
def project(self, dataset: EventDatasetProtocol, monitor: float):
|
||||||
|
cts , *_ = np.histogram(dataset.data.events.tof, bins=self.tof)
|
||||||
|
self.data.cts += cts
|
||||||
|
self.monitor += monitor
|
||||||
|
|
||||||
|
self.data.I = self.data.cts / self.monitor
|
||||||
|
self.data.err = np.sqrt(self.data.cts) / self.monitor
|
||||||
|
|
||||||
|
def clear(self):
|
||||||
|
self.data[:] = 0
|
||||||
|
self.monitor = 0.
|
||||||
|
|
||||||
|
def plot(self, **kwargs):
|
||||||
|
from matplotlib import pyplot as plt
|
||||||
|
for key in ONLY_MAP:
|
||||||
|
if key in kwargs: del(kwargs[key])
|
||||||
|
|
||||||
|
|
||||||
|
self._graph = plt.plot(self.tof[:-1]*1e3, self.data.I, **kwargs)
|
||||||
|
|
||||||
|
plt.xlabel('Time of Flight / ms')
|
||||||
|
plt.ylabel('I / cpm')
|
||||||
|
plt.xlim(self.tof[0]*1e3, self.tof[-1]*1e3)
|
||||||
|
plt.title('Time of Flight')
|
||||||
|
|
||||||
|
def update_plot(self):
|
||||||
|
"""
|
||||||
|
Inline update of previous plot by just updating the data.
|
||||||
|
"""
|
||||||
|
self._graph[0].set_ydata(self.data.I.T)
|
||||||
|
|
||||||
|
class LProjection(ProjectionInterface):
|
||||||
|
lamda: np.ndarray
|
||||||
|
|
||||||
|
data: np.recarray
|
||||||
|
_dtype = np.dtype([
|
||||||
|
('cts', np.float64),
|
||||||
|
('I', np.float64),
|
||||||
|
('err', np.float64),
|
||||||
|
])
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
self.lamda = np.linspace(3.0, 12.0, 91)
|
||||||
|
self.data = np.zeros(self.lamda.shape[0]-1, dtype=self._dtype).view(np.recarray)
|
||||||
|
self.monitor = 0.
|
||||||
|
|
||||||
|
def project(self, dataset: EventDatasetProtocol, monitor: float):
|
||||||
|
cts , *_ = np.histogram(dataset.data.events.lamda, bins=self.lamda)
|
||||||
|
self.data.cts += cts
|
||||||
|
self.monitor += monitor
|
||||||
|
|
||||||
|
self.data.I = self.data.cts / self.monitor
|
||||||
|
self.data.err = np.sqrt(self.data.cts) / self.monitor
|
||||||
|
|
||||||
|
def clear(self):
|
||||||
|
self.data[:] = 0
|
||||||
|
self.monitor = 0.
|
||||||
|
|
||||||
|
def plot(self, **kwargs):
|
||||||
|
from matplotlib import pyplot as plt
|
||||||
|
for key in ONLY_MAP:
|
||||||
|
if key in kwargs: del(kwargs[key])
|
||||||
|
|
||||||
|
|
||||||
|
self._graph = plt.plot(self.lamda[:-1], self.data.I, **kwargs)
|
||||||
|
|
||||||
|
plt.xlabel('Wavelength / Angstrom')
|
||||||
|
plt.ylabel('I / cpm')
|
||||||
|
plt.xlim(self.lamda[0], self.lamda[-1])
|
||||||
|
plt.title('Wavelength')
|
||||||
|
|
||||||
|
def update_plot(self):
|
||||||
|
"""
|
||||||
|
Inline update of previous plot by just updating the data.
|
||||||
|
"""
|
||||||
|
self._graph[0].set_ydata(self.data.I.T)
|
||||||
|
|
||||||
|
class TProjection(ProjectionInterface):
|
||||||
|
theta: np.ndarray
|
||||||
|
z: np.ndarray
|
||||||
|
|
||||||
|
data: np.recarray
|
||||||
|
_dtype = np.dtype([
|
||||||
|
('cts', np.float64),
|
||||||
|
('I', np.float64),
|
||||||
|
('err', np.float64),
|
||||||
|
])
|
||||||
|
|
||||||
|
def __init__(self, tthh):
|
||||||
|
self.z = np.arange(Detector.nBlades*Detector.nWires+1)-0.5
|
||||||
|
dd = Detector.delta_z[1]-Detector.delta_z[0]
|
||||||
|
delta = np.hstack([Detector.delta_z, Detector.delta_z[-1]+dd])-dd/2.
|
||||||
|
self.theta = tthh+delta
|
||||||
|
self.data = np.zeros(self.theta.shape[0]-1, dtype=self._dtype).view(np.recarray)
|
||||||
|
self.monitor = 0.
|
||||||
|
|
||||||
|
def project(self, dataset: EventDatasetProtocol, monitor: float):
|
||||||
|
detYi, detZi, detX, delta = Detector.pixelLookUp[dataset.data.events.pixelID-1].T
|
||||||
|
|
||||||
|
cts , *_ = np.histogram(detZi, bins=self.z)
|
||||||
|
self.data.cts += cts
|
||||||
|
self.monitor += monitor
|
||||||
|
|
||||||
|
self.data.I = self.data.cts / self.monitor
|
||||||
|
self.data.err = np.sqrt(self.data.cts) / self.monitor
|
||||||
|
|
||||||
|
def clear(self):
|
||||||
|
self.data[:] = 0
|
||||||
|
self.monitor = 0.
|
||||||
|
|
||||||
|
def plot(self, **kwargs):
|
||||||
|
from matplotlib import pyplot as plt
|
||||||
|
for key in ONLY_MAP:
|
||||||
|
if key in kwargs: del(kwargs[key])
|
||||||
|
|
||||||
|
|
||||||
|
self._graph = plt.plot(self.theta[:-1], self.data.I, **kwargs)
|
||||||
|
|
||||||
|
plt.xlabel('Reflection Angle / °')
|
||||||
|
plt.ylabel('I / cpm')
|
||||||
|
plt.xlim(self.theta[-1], self.theta[0])
|
||||||
|
plt.title('Theta')
|
||||||
|
|
||||||
|
def update_plot(self):
|
||||||
|
"""
|
||||||
|
Inline update of previous plot by just updating the data.
|
||||||
|
"""
|
||||||
|
self._graph[0].set_ydata(self.data.I.T)
|
||||||
|
|
||||||
|
|
||||||
|
class CombinedProjection(ProjectionInterface):
|
||||||
|
"""
|
||||||
|
Allows to put multiple projections together to conveniently generate combined graphs.
|
||||||
|
"""
|
||||||
|
projections: List[ProjectionInterface]
|
||||||
|
projection_placements: List[Union[Tuple[int, int], Tuple[int, int, int, int]]]
|
||||||
|
grid_size: Tuple[int, int]
|
||||||
|
|
||||||
|
|
||||||
|
def __init__(self, grid_rows, grid_cols, projections, projection_placements):
|
||||||
|
self.projections = projections
|
||||||
|
self.projection_placements = projection_placements
|
||||||
|
self.grid_size = grid_rows, grid_cols
|
||||||
|
|
||||||
|
def project(self, dataset: EventDatasetProtocol, monitor: float):
|
||||||
|
for pi in self.projections:
|
||||||
|
pi.project(dataset, monitor)
|
||||||
|
|
||||||
|
def clear(self):
|
||||||
|
for pi in self.projections:
|
||||||
|
pi.clear()
|
||||||
|
|
||||||
|
def plot(self, **kwargs):
|
||||||
|
from matplotlib import pyplot as plt
|
||||||
|
fig = plt.gcf()
|
||||||
|
axs = fig.add_gridspec(self.grid_size[0], self.grid_size[1])
|
||||||
|
# axs = fig.add_gridspec(self.grid_size[0]+1, self.grid_size[1],
|
||||||
|
# height_ratios=[1.0 for i in range(self.grid_size[0])]+[0.2])
|
||||||
|
self._axes = []
|
||||||
|
for pi, placement in zip(self.projections, self.projection_placements):
|
||||||
|
if len(placement) == 2:
|
||||||
|
ax = fig.add_subplot(axs[placement[0], placement[1]])
|
||||||
|
else:
|
||||||
|
ax = fig.add_subplot(axs[placement[0]:placement[1], placement[2]:placement[3]])
|
||||||
|
pi.plot(**dict(kwargs))
|
||||||
|
# Create the RangeSlider
|
||||||
|
# from matplotlib.widgets import RangeSlider
|
||||||
|
# slider_ax = fig.add_subplot(axs[self.grid_size[0], :])
|
||||||
|
# self._slider = RangeSlider(slider_ax, "Plot Range", 0., 1., valinit=(0., 1.))
|
||||||
|
# self._slider.on_changed(self.update_range)
|
||||||
|
|
||||||
|
def update_plot(self):
|
||||||
|
for pi in self.projections:
|
||||||
|
pi.update_plot()
|
||||||
|
|
||||||
|
# def update_range(self, event):
|
||||||
|
# ...
|
||||||
318
eos/reduction_e2h.py
Normal file
318
eos/reduction_e2h.py
Normal file
@@ -0,0 +1,318 @@
|
|||||||
|
"""
|
||||||
|
Events 2 histogram, quick reduction of single file to display during experiment.
|
||||||
|
Can be used as a live preview with automatic update when files are modified.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import numpy as np
|
||||||
|
import matplotlib.pyplot as plt
|
||||||
|
from matplotlib.colors import ListedColormap
|
||||||
|
|
||||||
|
from orsopy import fileio
|
||||||
|
from datetime import datetime
|
||||||
|
|
||||||
|
from .file_reader import AmorEventData, AmorHeader
|
||||||
|
from .header import Header
|
||||||
|
from .instrument import LZGrid
|
||||||
|
from .normalization import LZNormalisation
|
||||||
|
from .options import E2HConfig, E2HPlotArguments, IncidentAngle, MonitorType, E2HPlotSelection
|
||||||
|
from . import event_handling as eh
|
||||||
|
from .path_handling import PathResolver
|
||||||
|
from .projection import CombinedProjection, LProjection, LZProjection, ProjectionInterface, ReflectivityProjector, \
|
||||||
|
TofProjection, TofZProjection, TProjection, YTProjection, YZProjection
|
||||||
|
|
||||||
|
NEEDS_LAMDA = (E2HPlotSelection.All, E2HPlotSelection.LT, E2HPlotSelection.Q, E2HPlotSelection.L)
|
||||||
|
|
||||||
|
class E2HReduction:
|
||||||
|
config: E2HConfig
|
||||||
|
header: Header
|
||||||
|
event_actions: eh.EventDataAction
|
||||||
|
|
||||||
|
_last_mtime = 0.
|
||||||
|
projection: ProjectionInterface
|
||||||
|
|
||||||
|
def __init__(self, config: E2HConfig):
|
||||||
|
self.config = config
|
||||||
|
|
||||||
|
self.header = Header()
|
||||||
|
|
||||||
|
self.fig = plt.figure()
|
||||||
|
self.register_colormap()
|
||||||
|
self.prepare_actions()
|
||||||
|
|
||||||
|
def prepare_actions(self):
|
||||||
|
"""
|
||||||
|
Does not do any actual reduction.
|
||||||
|
"""
|
||||||
|
self.path_resolver = PathResolver(self.config.reader.year, self.config.reader.rawPath)
|
||||||
|
self.file_list = self.path_resolver.resolve(self.config.reduction.fileIdentifier)
|
||||||
|
self.file_index = 0
|
||||||
|
self.plot_kwds = {}
|
||||||
|
plt.rcParams.update({'font.size': self.config.reduction.fontsize})
|
||||||
|
|
||||||
|
self.overwrite = eh.ApplyParameterOverwrites(self.config.experiment) # some actions use instrument parameters, change before that
|
||||||
|
if self.config.reduction.update:
|
||||||
|
# live update implies plotting
|
||||||
|
self.config.reduction.show_plot = True
|
||||||
|
|
||||||
|
if self.config.reduction.plot==E2HPlotSelection.Raw:
|
||||||
|
# Raw implies fast caculations
|
||||||
|
self.config.reduction.fast = True
|
||||||
|
if not self.config.experiment.is_default('lambdaRange'):
|
||||||
|
# filtering wavelength requires frame analysis
|
||||||
|
self.config.reduction.fast = False
|
||||||
|
|
||||||
|
if not self.config.reduction.fast or self.config.reduction.plot in NEEDS_LAMDA:
|
||||||
|
from . import event_analysis as ea
|
||||||
|
|
||||||
|
# Actions on datasets not used for normalization
|
||||||
|
self.event_actions = eh.ApplyPhaseOffset(self.config.experiment.chopperPhaseOffset)
|
||||||
|
if not self.config.reduction.fast:
|
||||||
|
self.event_actions |= self.overwrite
|
||||||
|
self.event_actions |= eh.CorrectChopperPhase()
|
||||||
|
self.event_actions |= ea.ExtractWalltime()
|
||||||
|
else:
|
||||||
|
logging.info(' Fast reduction always uses time normalization')
|
||||||
|
self.config.experiment.monitorType = MonitorType.time
|
||||||
|
self.event_actions |= eh.AssociatePulseWithMonitor(self.config.experiment.monitorType)
|
||||||
|
if self.config.experiment.monitorType in [MonitorType.proton_charge, MonitorType.debug]:
|
||||||
|
# the filtering only makes sense if using actual monitor data, not time
|
||||||
|
self.event_actions |= eh.FilterMonitorThreshold(self.config.experiment.lowCurrentThreshold)
|
||||||
|
if not self.config.reduction.fast:
|
||||||
|
self.event_actions |= eh.FilterStrangeTimes()
|
||||||
|
if self.config.reduction.plot in [E2HPlotSelection.YT, E2HPlotSelection.YZ]:
|
||||||
|
# perform time fold-back and apply yRange filter if not fast mode
|
||||||
|
self.event_actions |= ea.MergeFrames()
|
||||||
|
self.event_actions |= ea.AnalyzePixelIDs(self.config.experiment.yRange)
|
||||||
|
if self.config.reduction.plot==E2HPlotSelection.YT:
|
||||||
|
# perform corrections for tof if not fast mode
|
||||||
|
self.event_actions |= eh.TofTimeCorrection(self.config.experiment.incidentAngle==IncidentAngle.alphaF)
|
||||||
|
# select needed actions in depenence of plots
|
||||||
|
if self.config.reduction.plot in NEEDS_LAMDA or not self.config.experiment.is_default('lambdaRange'):
|
||||||
|
self.event_actions |= ea.MergeFrames(lamdaCut=self.config.experiment.lambdaRange[0])
|
||||||
|
self.event_actions |= ea.AnalyzePixelIDs(self.config.experiment.yRange)
|
||||||
|
self.event_actions |= eh.TofTimeCorrection(self.config.experiment.incidentAngle==IncidentAngle.alphaF)
|
||||||
|
self.event_actions |= ea.CalculateWavelength(self.config.experiment.lambdaRange)
|
||||||
|
self.event_actions |= eh.ApplyMask()
|
||||||
|
|
||||||
|
# plot dependant options
|
||||||
|
if self.config.reduction.plot in [E2HPlotSelection.All, E2HPlotSelection.LT, E2HPlotSelection.Q]:
|
||||||
|
self.grid = LZGrid(0.05, [0.0, 0.25], lambda_overwrite=self.config.experiment.lambdaRange)
|
||||||
|
self.grid.dldl = 0.01
|
||||||
|
|
||||||
|
if self.config.reduction.plot in [E2HPlotSelection.All, E2HPlotSelection.Raw,
|
||||||
|
E2HPlotSelection.LT, E2HPlotSelection.YT,
|
||||||
|
E2HPlotSelection.YZ, E2HPlotSelection.TZ]:
|
||||||
|
self.plot_kwds['colorbar'] = True
|
||||||
|
self.plot_kwds['cmap'] = str(self.config.reduction.plot_colormap)
|
||||||
|
if self.config.reduction.plotArgs==E2HPlotArguments.Linear:
|
||||||
|
self.plot_kwds['norm'] = None
|
||||||
|
|
||||||
|
def reduce(self):
|
||||||
|
if self.config.reduction.plot in [E2HPlotSelection.All, E2HPlotSelection.LT, E2HPlotSelection.Q]:
|
||||||
|
if self.config.reduction.normalizationModel:
|
||||||
|
self.norm = LZNormalisation.model(self.grid)
|
||||||
|
else:
|
||||||
|
self.norm = LZNormalisation.unity(self.grid)
|
||||||
|
|
||||||
|
self.prepare_graphs()
|
||||||
|
|
||||||
|
while self.file_index < len(self.file_list):
|
||||||
|
self.read_data()
|
||||||
|
self.add_data()
|
||||||
|
|
||||||
|
if self.config.reduction.plotArgs==E2HPlotArguments.OutputFile:
|
||||||
|
self.create_file_output()
|
||||||
|
if self.config.reduction.plotArgs!=E2HPlotArguments.OutputFile or self.config.reduction.show_plot:
|
||||||
|
self.create_graph()
|
||||||
|
|
||||||
|
if self.config.reduction.plotArgs==E2HPlotArguments.Default and not self.config.reduction.update:
|
||||||
|
# safe to image file if not auto-updating graph
|
||||||
|
plt.savefig(f'e2h_{self.config.reduction.plot}.png', dpi=300)
|
||||||
|
if self.config.reduction.kafka:
|
||||||
|
from .kafka_serializer import ESSSerializer
|
||||||
|
self.serializer = ESSSerializer()
|
||||||
|
self.fig.canvas.mpl_connect('close_event', self.serializer.end_command_thread)
|
||||||
|
self.serializer.start_command_thread()
|
||||||
|
self.serializer.send(self.projection)
|
||||||
|
if self.config.reduction.update:
|
||||||
|
self.timer = self.fig.canvas.new_timer(1000)
|
||||||
|
self.timer.add_callback(self.update)
|
||||||
|
self.timer.start()
|
||||||
|
if self.config.reduction.show_plot:
|
||||||
|
plt.show()
|
||||||
|
|
||||||
|
|
||||||
|
def register_colormap(self):
|
||||||
|
cmap = plt.colormaps['turbo'](np.arange(256))
|
||||||
|
cmap[:1, :] = np.array([256/256, 255/256, 236/256, 1])
|
||||||
|
cmap = ListedColormap(cmap, name='jochen_deluxe', N=cmap.shape[0])
|
||||||
|
#cmap.set_bad((1.,1.,0.9))
|
||||||
|
plt.colormaps.register(cmap)
|
||||||
|
|
||||||
|
def prepare_graphs(self):
|
||||||
|
last_file_header = AmorHeader(self.file_list[-1])
|
||||||
|
self.overwrite.perform_action(last_file_header)
|
||||||
|
tthh = last_file_header.geometry.nu - last_file_header.geometry.mu
|
||||||
|
|
||||||
|
if not self.config.reduction.is_default('thetaRangeR'):
|
||||||
|
# adjust range based on detector center
|
||||||
|
thetaRange = [ti+tthh for ti in self.config.reduction.thetaRangeR]
|
||||||
|
else:
|
||||||
|
thetaRange = [tthh - last_file_header.geometry.div/2, tthh + last_file_header.geometry.div/2]
|
||||||
|
|
||||||
|
if self.config.reduction.plot==E2HPlotSelection.LT:
|
||||||
|
self.projection = LZProjection(tthh, self.grid)
|
||||||
|
if not self.config.reduction.fast:
|
||||||
|
self.projection.correct_gravity(last_file_header.geometry.detectorDistance)
|
||||||
|
self.projection.apply_lamda_mask(self.config.experiment.lambdaRange)
|
||||||
|
self.projection.apply_theta_mask(thetaRange)
|
||||||
|
for thi in self.config.reduction.thetaFilters:
|
||||||
|
self.projection.apply_theta_filter((thi[0]+tthh, thi[1]+tthh))
|
||||||
|
self.projection.apply_norm_mask(self.norm)
|
||||||
|
|
||||||
|
if self.config.reduction.plot==E2HPlotSelection.Q:
|
||||||
|
plz = LZProjection(tthh, self.grid)
|
||||||
|
if not self.config.reduction.fast:
|
||||||
|
plz.correct_gravity(last_file_header.geometry.detectorDistance)
|
||||||
|
plz.calculate_q()
|
||||||
|
plz.apply_lamda_mask(self.config.experiment.lambdaRange)
|
||||||
|
plz.apply_theta_mask(thetaRange)
|
||||||
|
for thi in self.config.reduction.thetaFilters:
|
||||||
|
self.projection.apply_theta_filter((thi[0]+tthh, thi[1]+tthh))
|
||||||
|
plz.apply_norm_mask(self.norm)
|
||||||
|
self.projection = ReflectivityProjector(plz, self.norm)
|
||||||
|
|
||||||
|
if self.config.reduction.plot==E2HPlotSelection.YZ:
|
||||||
|
self.projection = YZProjection()
|
||||||
|
|
||||||
|
if self.config.reduction.plot==E2HPlotSelection.YT:
|
||||||
|
self.projection = YTProjection(tthh)
|
||||||
|
|
||||||
|
if self.config.reduction.plot==E2HPlotSelection.T:
|
||||||
|
self.projection = TProjection(tthh)
|
||||||
|
|
||||||
|
if self.config.reduction.plot==E2HPlotSelection.L:
|
||||||
|
self.projection = LProjection()
|
||||||
|
|
||||||
|
if self.config.reduction.plot==E2HPlotSelection.TZ:
|
||||||
|
self.projection = TofZProjection(last_file_header.timing.tau, foldback=not self.config.reduction.fast)
|
||||||
|
|
||||||
|
if self.config.reduction.plot==E2HPlotSelection.ToF:
|
||||||
|
self.projection = TofProjection(last_file_header.timing.tau, foldback=not self.config.reduction.fast)
|
||||||
|
|
||||||
|
if self.config.reduction.plot==E2HPlotSelection.All:
|
||||||
|
plz = LZProjection(tthh, self.grid)
|
||||||
|
if not self.config.reduction.fast:
|
||||||
|
plz.correct_gravity(last_file_header.geometry.detectorDistance)
|
||||||
|
plz.calculate_q()
|
||||||
|
plz.apply_lamda_mask(self.config.experiment.lambdaRange)
|
||||||
|
plz.apply_theta_mask(thetaRange)
|
||||||
|
for thi in self.config.reduction.thetaFilters:
|
||||||
|
plz.apply_theta_filter((thi[0]+tthh, thi[1]+tthh))
|
||||||
|
plz.apply_norm_mask(self.norm)
|
||||||
|
pr = ReflectivityProjector(plz, self.norm)
|
||||||
|
pyz = YZProjection()
|
||||||
|
self.projection = CombinedProjection(3, 2, [plz, pyz, pr],
|
||||||
|
[(0, 2, 0, 1), (0, 2, 1, 2), (2, 3, 0, 2)])
|
||||||
|
|
||||||
|
if self.config.reduction.plot==E2HPlotSelection.Raw:
|
||||||
|
del(self.plot_kwds['colorbar'])
|
||||||
|
# A combined graph that does not require longer calculations
|
||||||
|
plyt = YTProjection(tthh)
|
||||||
|
pltofz = TofZProjection(last_file_header.timing.tau, foldback=not self.config.reduction.fast)
|
||||||
|
pltof = TofProjection(last_file_header.timing.tau, foldback=not self.config.reduction.fast)
|
||||||
|
plt = TProjection(tthh)
|
||||||
|
|
||||||
|
self.projection = CombinedProjection(3, 3, [plyt, pltofz, plt, pltof],
|
||||||
|
[(0,2, 0, 1), (0, 2, 1, 3), (2,3, 0,1),(2,3,1,3)])
|
||||||
|
|
||||||
|
def read_data(self):
|
||||||
|
fileName = self.file_list[self.file_index]
|
||||||
|
self.dataset = AmorEventData(fileName, max_events=self.config.reduction.max_events)
|
||||||
|
if self.dataset.EOF or fileName==self.file_list[-1]:
|
||||||
|
self.file_index += 1
|
||||||
|
self.event_actions(self.dataset)
|
||||||
|
self.dataset.update_header(self.header)
|
||||||
|
|
||||||
|
self.header.measurement_data_files.append(fileio.File(file=os.path.basename(fileName),
|
||||||
|
timestamp=self.dataset.fileDate))
|
||||||
|
|
||||||
|
def add_data(self):
|
||||||
|
self.monitor = self.dataset.data.pulses.monitor.sum()
|
||||||
|
self.projection.project(self.dataset, monitor=self.monitor)
|
||||||
|
if self.config.reduction.plot==E2HPlotSelection.LT:
|
||||||
|
self.projection.normalize_over_illuminated(self.norm)
|
||||||
|
|
||||||
|
def create_file_output(self):
|
||||||
|
raise NotImplementedError("Export to text output not yet implemented")
|
||||||
|
|
||||||
|
def create_title(self):
|
||||||
|
output = "Events to Histogram - "
|
||||||
|
output += ",".join(["#"+os.path.basename(fi)[9:15].lstrip('0') for fi in self.file_list])
|
||||||
|
output += f" ($\\mu$={self.dataset.geometry.mu:.2f} ;"
|
||||||
|
output += f" $\\nu$={self.dataset.geometry.nu:.2f})"
|
||||||
|
if self.config.reduction.update:
|
||||||
|
output += f"\n at "+datetime.now().strftime("%m/%d/%Y %H:%M:%S")
|
||||||
|
return output
|
||||||
|
|
||||||
|
def create_graph(self):
|
||||||
|
plt.suptitle(self.create_title())
|
||||||
|
self.projection.plot(**self.plot_kwds)
|
||||||
|
plt.tight_layout(pad=0.5)
|
||||||
|
|
||||||
|
def replace_dataset(self, latest):
|
||||||
|
new_files = self.path_resolver.resolve(f'{latest}')
|
||||||
|
if not os.path.exists(new_files[-1]):
|
||||||
|
return
|
||||||
|
try:
|
||||||
|
# check that events exist in the new file
|
||||||
|
AmorEventData(new_files[-1], 0, max_events=1_000)
|
||||||
|
except Exception:
|
||||||
|
logging.debug("Problem when trying to load new dataset", exc_info=True)
|
||||||
|
return
|
||||||
|
|
||||||
|
logging.warning(f"Preceding to next file {latest}")
|
||||||
|
self.file_list = new_files
|
||||||
|
self.file_index = 0
|
||||||
|
self.prepare_actions()
|
||||||
|
self.prepare_graphs()
|
||||||
|
self.read_data()
|
||||||
|
self.add_data()
|
||||||
|
self.fig.clear()
|
||||||
|
self.create_graph()
|
||||||
|
plt.draw()
|
||||||
|
|
||||||
|
def update(self):
|
||||||
|
logging.debug(" check for update")
|
||||||
|
if self.config.reduction.fileIdentifier=='0':
|
||||||
|
# if latest file was choosen, check if new one available and switch to it
|
||||||
|
current = int(os.path.basename(self.file_list[-1])[9:15])
|
||||||
|
latest = self.path_resolver.search_latest(0)
|
||||||
|
if latest>current:
|
||||||
|
self.replace_dataset(latest)
|
||||||
|
return
|
||||||
|
# if all events were read last time, only load more if file was modified
|
||||||
|
if self.dataset.EOF and os.path.getmtime(self.file_list[-1])<=self._last_mtime:
|
||||||
|
return
|
||||||
|
|
||||||
|
self._last_mtime = os.path.getmtime(self.file_list[-1])
|
||||||
|
try:
|
||||||
|
update_data = AmorEventData(self.file_list[-1], self.dataset.last_index+1,
|
||||||
|
max_events=self.config.reduction.max_events)
|
||||||
|
except EOFError:
|
||||||
|
return
|
||||||
|
logging.info(" updating with new data")
|
||||||
|
|
||||||
|
self.event_actions(update_data)
|
||||||
|
self.dataset=update_data
|
||||||
|
self.monitor = self.dataset.data.pulses.monitor.sum()
|
||||||
|
self.projection.project(update_data, self.monitor)
|
||||||
|
|
||||||
|
self.projection.update_plot()
|
||||||
|
plt.suptitle(self.create_title())
|
||||||
|
plt.draw()
|
||||||
|
|
||||||
|
if self.config.reduction.kafka:
|
||||||
|
self.serializer.send(self.projection)
|
||||||
128
eos/reduction_kafka.py
Normal file
128
eos/reduction_kafka.py
Normal file
@@ -0,0 +1,128 @@
|
|||||||
|
"""
|
||||||
|
Events 2 histogram, quick reduction of single file to display during experiment.
|
||||||
|
Can be used as a live preview with automatic update when files are modified.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
|
||||||
|
from time import sleep
|
||||||
|
from .kafka_events import KafkaEventData
|
||||||
|
from .header import Header
|
||||||
|
from .options import E2HConfig
|
||||||
|
from . import event_handling as eh, event_analysis as ea
|
||||||
|
from .projection import TofZProjection, YZProjection
|
||||||
|
from .kafka_serializer import ESSSerializer
|
||||||
|
|
||||||
|
|
||||||
|
class KafkaReduction:
|
||||||
|
config: E2HConfig
|
||||||
|
header: Header
|
||||||
|
event_actions: eh.EventDataAction
|
||||||
|
|
||||||
|
_last_mtime = 0.
|
||||||
|
proj_yz: YZProjection
|
||||||
|
proj_tofz = TofZProjection
|
||||||
|
|
||||||
|
def __init__(self, config: E2HConfig):
|
||||||
|
self.config = config
|
||||||
|
|
||||||
|
self.header = Header()
|
||||||
|
self.event_data = KafkaEventData()
|
||||||
|
self.event_data.start()
|
||||||
|
|
||||||
|
self.prepare_actions()
|
||||||
|
|
||||||
|
def prepare_actions(self):
|
||||||
|
"""
|
||||||
|
Does not do any actual reduction.
|
||||||
|
"""
|
||||||
|
# Actions on datasets not used for normalization
|
||||||
|
self.event_actions = eh.ApplyPhaseOffset(self.config.experiment.chopperPhaseOffset)
|
||||||
|
self.event_actions |= eh.CorrectChopperPhase()
|
||||||
|
self.event_actions |= ea.MergeFrames()
|
||||||
|
self.event_actions |= eh.ApplyMask()
|
||||||
|
|
||||||
|
def reduce(self):
|
||||||
|
self.create_projections()
|
||||||
|
self.read_data()
|
||||||
|
self.add_data()
|
||||||
|
|
||||||
|
self.serializer = ESSSerializer()
|
||||||
|
self.serializer.start_command_thread()
|
||||||
|
|
||||||
|
self.loop()
|
||||||
|
|
||||||
|
def create_projections(self):
|
||||||
|
self.proj_yz = YZProjection()
|
||||||
|
self.proj_tofz = TofZProjection(self.event_data.timing.tau, foldback=True, combine=2)
|
||||||
|
|
||||||
|
def read_data(self):
|
||||||
|
# make sure the first events have arrived before starting analysis
|
||||||
|
self.event_data.new_events.wait()
|
||||||
|
self.dataset = self.event_data.get_events()
|
||||||
|
self.event_actions(self.dataset)
|
||||||
|
|
||||||
|
|
||||||
|
def add_data(self):
|
||||||
|
self.monitor = self.dataset.monitor
|
||||||
|
self.proj_yz.project(self.dataset, monitor=self.monitor)
|
||||||
|
self.proj_tofz.project(self.dataset, monitor=self.monitor)
|
||||||
|
|
||||||
|
def loop(self):
|
||||||
|
self.wait_for = self.serializer.new_count_started
|
||||||
|
while True:
|
||||||
|
try:
|
||||||
|
self.update()
|
||||||
|
self.wait_for.wait(1.0)
|
||||||
|
except KeyboardInterrupt:
|
||||||
|
self.event_data.stop_event.set()
|
||||||
|
self.event_data.join()
|
||||||
|
self.serializer.end_command_thread()
|
||||||
|
return
|
||||||
|
|
||||||
|
def update(self):
|
||||||
|
if self.serializer.new_count_started.is_set():
|
||||||
|
logging.warning('Start new count, clearing event data')
|
||||||
|
self.wait_for = self.serializer.count_stopped
|
||||||
|
self.event_data.restart()
|
||||||
|
self.serializer.new_count_started.clear()
|
||||||
|
self.create_projections()
|
||||||
|
return
|
||||||
|
elif self.serializer.count_stopped.is_set() and not self.event_data.stop_counting.is_set():
|
||||||
|
return self.finish_count()
|
||||||
|
try:
|
||||||
|
update_data = self.event_data.get_events()
|
||||||
|
except EOFError:
|
||||||
|
return
|
||||||
|
logging.info(" updating with new data")
|
||||||
|
|
||||||
|
self.event_actions(update_data)
|
||||||
|
self.dataset=update_data
|
||||||
|
self.monitor = self.dataset.monitor
|
||||||
|
self.proj_yz.project(update_data, self.monitor)
|
||||||
|
self.proj_tofz.project(update_data, self.monitor)
|
||||||
|
|
||||||
|
self.serializer.send(self.proj_yz)
|
||||||
|
self.serializer.send(self.proj_tofz)
|
||||||
|
|
||||||
|
def finish_count(self):
    """Stop event collection, fold in any remaining events and send the
    final projections to the serializer (marked ``final=True``)."""
    logging.debug("    stop event set, hold event collection and send final results")
    # the next state transition to wait for is the start of a new count
    self.wait_for = self.serializer.new_count_started
    self.event_data.stop_counting.set()

    try:
        update_data = self.event_data.get_events()
    except EOFError:
        # no residual events to fold in
        pass
    else:
        self.event_actions(update_data)
        self.dataset = update_data
        self.monitor = self.dataset.monitor
        self.proj_yz.project(update_data, self.monitor)
        self.proj_tofz.project(update_data, self.monitor)

    logging.warning(f'    stop counting, total events {int(self.proj_tofz.data.cts.sum())}')

    # final=True tells the receiver this is the last update of this count
    self.serializer.send(self.proj_yz, final=True)
    self.serializer.send(self.proj_tofz, final=True)
471
eos/reduction_reflectivity.py
Normal file
471
eos/reduction_reflectivity.py
Normal file
@@ -0,0 +1,471 @@
|
|||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
from orsopy import fileio
|
||||||
|
|
||||||
|
from .event_analysis import FilterByLog
|
||||||
|
from .event_handling import ApplyMask
|
||||||
|
from .file_reader import AmorEventData
|
||||||
|
from .header import Header
|
||||||
|
from .path_handling import PathResolver
|
||||||
|
from .options import ReflectivityConfig, IncidentAngle, MonitorType, NormalisationMethod, MONITOR_UNITS
|
||||||
|
from .instrument import LZGrid
|
||||||
|
from .normalization import LZNormalisation
|
||||||
|
from . import event_handling as eh, event_analysis as ea
|
||||||
|
from .projection import LZProjection
|
||||||
|
|
||||||
|
|
||||||
|
class ReflectivityReduction:
    """Reduce raw event data to reflectivity results (R(q_z), R(lambda, theta))."""
    # reduction configuration (parsed command line / defaults)
    config: ReflectivityConfig
    # output file header, filled in during reduction
    header: Header
    # action chain applied to normalisation event datasets
    normevent_actions: eh.EventDataAction
    # action chain applied to measurement event datasets
    dataevent_actions: eh.EventDataAction
def __init__(self, config: ReflectivityConfig):
    """Store the configuration, initialise the output header and build
    the event-action chains."""
    self.config = config

    header = Header()
    # record how the reduction was invoked in the output header
    header.reduction.call = config.call_string()
    self.header = header

    self.prepare_actions()
def prepare_actions(self):
    """
    Prepare the actions applied to each event dataset, does not do any actual reduction.
    """
    self.path_resolver = PathResolver(self.config.reader.year, self.config.reader.rawPath)

    # setup all actions performed on event datasets before projection on the grid
    # The order of these corrections matter as some rely on parameters modified before
    if self.config.reduction.normalisationFileIdentifier:
        # explicit steps performed on AmorEventDataset for normalization files
        self.normevent_actions = eh.ApplyPhaseOffset(self.config.experiment.chopperPhaseOffset)
        self.normevent_actions |= eh.CorrectChopperPhase()
        self.normevent_actions |= eh.AssociatePulseWithMonitor(self.config.experiment.monitorType)
        if self.config.experiment.monitorType in [MonitorType.proton_charge, MonitorType.debug]:
            self.normevent_actions |= ea.ExtractWalltime()
            self.normevent_actions |= eh.FilterMonitorThreshold(self.config.experiment.lowCurrentThreshold)
        self.normevent_actions |= eh.FilterStrangeTimes()
        self.normevent_actions |= ea.MergeFrames()
        self.normevent_actions |= ea.AnalyzePixelIDs(self.config.experiment.yRange)
        self.normevent_actions |= eh.TofTimeCorrection(self.config.experiment.incidentAngle==IncidentAngle.alphaF)
        self.normevent_actions |= ea.CalculateWavelength(self.config.experiment.lambdaRange)
        self.normevent_actions |= eh.ApplyMask()
    # Actions on datasets not used for normalization
    self.dataevent_actions = eh.ApplyPhaseOffset(self.config.experiment.chopperPhaseOffset)
    self.dataevent_actions |= eh.ApplyParameterOverwrites(self.config.experiment)  # some actions use instrument parameters, change before that
    self.dataevent_actions |= eh.CorrectChopperPhase()
    self.dataevent_actions |= ea.ExtractWalltime()
    self.dataevent_time_correction = eh.CorrectSeriesTime(0)  # will be set from first dataset
    self.dataevent_actions |= self.dataevent_time_correction
    self.dataevent_actions |= eh.AssociatePulseWithMonitor(self.config.experiment.monitorType)
    # BUG FIX: was `in [MonitorType.proton_charge or MonitorType.debug]` — the `or`
    # of two truthy enum members yields only the first, so the membership test
    # silently excluded MonitorType.debug.  Use the two-element list, matching
    # the normalisation branch above.
    if self.config.experiment.monitorType in [MonitorType.proton_charge, MonitorType.debug]:
        # the filtering only makes sense if using actual monitor data, not time
        self.dataevent_actions |= eh.FilterMonitorThreshold(self.config.experiment.lowCurrentThreshold)
    self.dataevent_actions |= eh.FilterStrangeTimes()
    self.dataevent_actions |= ea.MergeFrames()
    self.dataevent_actions |= ea.AnalyzePixelIDs(self.config.experiment.yRange)
    self.dataevent_actions |= eh.TofTimeCorrection(self.config.experiment.incidentAngle==IncidentAngle.alphaF)
    self.dataevent_actions |= ea.CalculateWavelength(self.config.experiment.lambdaRange)
    self.dataevent_actions |= ea.CalculateQ(self.config.experiment.incidentAngle)
    if not self.config.reduction.is_default('qzRange'):
        self.dataevent_actions |= ea.FilterQzRange(self.config.reduction.qzRange)
    for lf in self.config.reduction.logfilter:
        self.dataevent_actions |= ea.FilterByLog(lf)
    self.dataevent_actions |= eh.ApplyMask()

    self.grid = LZGrid(self.config.reduction.qResolution, self.config.reduction.qzRange)
def reduce(self):
    """Run the full reduction: prepare normalisation, reduce every file
    block and write the requested output formats."""
    if not os.path.exists(f'{self.config.output.outputPath}'):
        logging.debug(f'Creating destination path {self.config.output.outputPath}')
        # use os.makedirs instead of os.system('mkdir ...'): portable, creates
        # intermediate directories and does not pass the path through a shell
        os.makedirs(self.config.output.outputPath, exist_ok=True)

    # load or create normalisation matrix
    if self.config.reduction.normalisationFileIdentifier:
        # TODO: change option definition to single normalization short_code
        self.create_normalisation_map(self.config.reduction.normalisationFileIdentifier[0])
    else:
        self.norm = LZNormalisation.unity(self.grid)

    if self.config.reduction.normalizationSmoothing:
        self.norm.smooth(self.config.reduction.normalizationSmoothing)

    # load R(q_z) curve to be subtracted:
    if self.config.reduction.subtract:
        self.sq_q, self.sR_q, self.sdR_q, self.sFileName = self.loadRqz(self.config.reduction.subtract)
        logging.warning(f'loaded background file: {self.sFileName}')
        self.header.reduction.corrections.append(f'background from \'{self.sFileName}\' subtracted')
        self.subtract = True
    else:
        self.subtract = False

    # load measurement data and do the reduction
    self.datasetsRqz = []
    self.datasetsRlt = []
    for i, short_notation in enumerate(self.config.reduction.fileIdentifier):
        self.read_file_block(i, short_notation)

    # output
    if self.config.output.is_default('outputName'):
        # derive a default name from sample name, angle and timestamp
        import datetime
        _date = datetime.datetime.now().replace(microsecond=0).isoformat().replace(':', '-')
        if self.header.sample.name:
            _sampleName = self.header.sample.name.replace(' ', '_')
        else:
            _sampleName = 'unknown'
        _mu = int(self.dataset.geometry.mu * 3)
        self.config.output.outputName = f'{_sampleName}_{_mu:03}_{_date}'

    logging.warning('output:')

    if 'Rqz.ort' in self.config.output.outputFormats:
        self.save_Rqz()

    if 'Rlt.ort' in self.config.output.outputFormats:
        self.save_Rtl()

    if self.config.output.plot:
        import matplotlib.pyplot as plt
        if 'Rqz.ort' in self.config.output.outputFormats:
            plt.figure(num=99)
            plt.legend()
        plt.show()
def read_file_block(self, i, short_notation):
    """Load one block of data files, apply the event actions and dispatch
    to sliced / unsliced analysis (per polarization if needed).

    :param i: index of the block within ``config.reduction.fileIdentifier``
    :param short_notation: short file-list notation resolved to file names
    """
    logging.warning('input:')
    file_list = self.path_resolver.resolve(short_notation)

    self.header.measurement_data_files = []

    self.dataset = AmorEventData(file_list[0])
    if self.config.experiment.monitorType==MonitorType.auto:
        # choose the monitor type from the first dataset's proton current
        if self.dataset.data.proton_current.current.sum()>1:
            self.config.experiment.monitorType = MonitorType.proton_charge
            logging.debug(' monitor type set to "proton current"')
        else:
            self.config.experiment.monitorType = MonitorType.time
            logging.debug(' monitor type set to "time"')
        # update actions to use selected monitor
        self.prepare_actions()
        # reload normalization to make sure the monitor matches
        if self.config.reduction.normalisationFileIdentifier:
            self.create_normalisation_map(self.config.reduction.normalisationFileIdentifier[0])

    # time correction is relative to the first dataset's start time
    self.dataevent_time_correction.seriesStartTime = self.dataset.eventStartTime
    self.dataevent_actions(self.dataset)
    self.dataset.update_header(self.header)
    self.dataevent_actions.update_header(self.header)
    for fi in file_list[1:]:
        di = AmorEventData(fi)
        self.dataevent_actions(di)
        self.dataset.append(di)

    for fileName in file_list:
        self.header.measurement_data_files.append(fileio.File( file=os.path.basename(fileName),
                                                               timestamp=self.dataset.fileDate))

    if 'polarization_config_label' in self.dataset.data.device_logs:
        pols = np.unique(self.dataset.data.device_logs['polarization_config_label'].value)
        pols = pols[pols>0]
        if len(pols)>1:
            # several polarization states in one dataset: analyze each separately
            logging.warning(f' found {len(pols)} polarization configurations, splitting dataset accordingly')
            from copy import deepcopy
            from . import const
            full_ds = deepcopy(self.dataset)
            for pi in pols:
                plabel = const.polarizationLabels[pi]
                pol_filter = FilterByLog(f'polarization_config_label=={pi}',
                                         remove_switchpulse=True) | ApplyMask()
                logging.info(f' filter {plabel} using polarization_config_label=={pi}')
                pol_filter(self.dataset)
                self.dataset.update_header(self.header)
                pol_filter.update_header(self.header)
                if self.config.reduction.timeSlize:
                    if i>0:
                        logging.warning(
                            " time slizing should only be used for one set of datafiles, check parameters")
                    self.analyze_timeslices(i, polstr=f' : polarization = {plabel}')
                else:
                    self.analyze_unsliced(i, polstr=f' : polarization = {plabel}')
                # restore the unfiltered dataset for the next polarization state
                self.dataset = deepcopy(full_ds)
            return
    if self.config.reduction.timeSlize:
        if i>0:
            logging.warning(" time slizing should only be used for one set of datafiles, check parameters")
        self.analyze_timeslices(i)
    else:
        self.analyze_unsliced(i)
def analyze_unsliced(self, i, polstr=''):
    """Reduce the current dataset as one block and append the results to
    ``datasetsRqz`` / ``datasetsRlt``.

    :param i: index of the file block (used for scaling and data_set naming)
    :param polstr: suffix appended to the data_set name for polarized data
    """
    self.monitor = self.dataset.data.pulses.monitor.sum()
    logging.info(f' monitor = {self.monitor:8.2f} {MONITOR_UNITS[self.config.experiment.monitorType]}')

    proj:LZProjection = self.project_on_lz()
    # per-block scale factor; fall back to the last given value
    try:
        scale = self.config.reduction.scale[i]
    except IndexError:
        scale = self.config.reduction.scale[-1]
    proj.scale(scale)

    if 'Rqz.ort' in self.config.output.outputFormats:
        headerRqz = self.header.orso_header()
        headerRqz.data_set = f'Nr {i} : mu = {self.dataset.geometry.mu:6.3f} deg{polstr}'

        # projection on qz-grid
        result = proj.project_on_qz()

        if self.config.reduction.autoscale:
            # first block defines the absolute scale, later blocks are stitched
            if i==0:
                result.autoscale(self.config.reduction.autoscale)
            else:
                result.stitch(self.last_result)

        if self.subtract:
            # background subtraction requires identical q_z binning
            if len(result.Q)==len(self.sq_q):
                result.subtract(self.sR_q, self.sdR_q)
            else:
                logging.warning(
                    f'backgroung file {self.sFileName} not compatible with q_z scale ({len(self.sq_q)} vs. {len(result.Q)})')

        orso_data = fileio.OrsoDataset(headerRqz, result.data)
        self.last_result = result
        self.datasetsRqz.append(orso_data)

        if self.config.output.plot:
            import matplotlib.pyplot as plt
            # plot all reflectivity results in same graph
            plt.figure(num=99)
            result.plot(label=f'{self.config.reduction.fileIdentifier[i]}')
    if 'Rlt.ort' in self.config.output.outputFormats:
        columns = [
            fileio.Column('Qz', '1/angstrom', 'normal momentum transfer'),
            fileio.Column('R', '', 'specular reflectivity'),
            fileio.ErrorColumn(error_of='R', error_type='uncertainty', value_is='sigma'),
            fileio.ErrorColumn(error_of='Qz', error_type='resolution', value_is='sigma'),
            fileio.Column('lambda', 'angstrom', 'wavelength'),
            fileio.Column('alpha_f', 'deg', 'final angle'),
            fileio.Column('l', '', 'index of lambda-bin'),
            fileio.Column('t', '', 'index of theta bin'),
            fileio.Column('intensity', '', 'filtered neutron events per pixel'),
            fileio.Column('norm', '', 'normalisation matrix'),
            fileio.Column('mask', '', 'pixels used for calculating R(q_z)'),
            fileio.Column('Qx', '1/angstrom', 'parallel momentum transfer'),
        ]

        # 1-based bin index matrices matching the projection shape
        ts, zs = proj.data.shape
        lindex_lz = np.tile(np.arange(1, ts+1), (zs, 1)).T
        tindex_lz = np.tile(np.arange(1, zs+1), (ts, 1))

        # one output dataset per theta bin (columns of the lz arrays)
        j = 0
        for item in zip(
            proj.data.qz.T,
            proj.data.ref.T,
            proj.data.err.T,
            proj.data.res.T,
            proj.lamda.T,
            proj.alphaF.T,
            lindex_lz.T,
            tindex_lz.T,
            proj.data.I.T,
            proj.data.norm.T,
            proj.data.mask.T,
            proj.data.qx.T,
        ):
            data = np.array(list(item)).T
            headerRlt = self.header.orso_header(columns=columns)
            headerRlt.data_set = f'dataset_{i}_{j+1} : alpha_f = {proj.alphaF[0, j]:6.3f} deg'
            orso_data = fileio.OrsoDataset(headerRlt, data)
            self.datasetsRlt.append(orso_data)
            j += 1

    if self.config.output.plot:
        import matplotlib.pyplot as plt
        plt.figure()
        proj.plot(colorbar=True, cmap=str(self.config.output.plot_colormap))
        plt.title(f'{self.config.reduction.fileIdentifier[i]}')
def analyze_timeslices(self, i, polstr=''):
    """Reduce the current dataset in consecutive time slices.

    The slicing interval, optional start and optional stop (seconds,
    relative to the series start) come from ``config.reduction.timeSlize``;
    each slice is projected and appended to ``datasetsRqz`` with an extra
    ``time`` column described in the header.
    """
    wallTime_e = np.float64(self.dataset.data.events.wallTime)/1e9
    pulseTimeS = np.float64(self.dataset.data.pulses.time)/1e9
    interval = self.config.reduction.timeSlize[0]
    try:
        start = self.config.reduction.timeSlize[1]
    except IndexError:
        start = 0
    try:
        stop = self.config.reduction.timeSlize[2]
    except IndexError:
        stop = wallTime_e[-1]
    # make overwriting log lines possible by removing newline at the end
    #logging.StreamHandler.terminator = "\r"
    logging.warning(' time slizing')
    logging.info(' slize time monitor')
    for ti, time in enumerate(np.arange(start, stop, interval)):
        # renamed from `slice` to avoid shadowing the builtin
        time_slice = self.dataset.get_timeslice(time, time+interval)
        self.monitor = np.sum(time_slice.data.pulses.monitor)
        logging.info(f' {ti:<4d} {time:6.0f} {self.monitor:7.2f} {MONITOR_UNITS[self.config.experiment.monitorType]}')

        proj: LZProjection = self.project_on_lz(time_slice)
        # per-block scale factor; fall back to the last given value
        try:
            scale = self.config.reduction.scale[i]
        except IndexError:
            scale = self.config.reduction.scale[-1]
        proj.scale(scale)

        # projection on qz-grid
        result = proj.project_on_qz()

        if self.config.reduction.autoscale:
            # scale every slice the same
            if ti==0:
                if i==0:
                    atscale = result.autoscale(self.config.reduction.autoscale)
                else:
                    atscale = result.stitch(self.last_result)
            else:
                result.scale(atscale)

        if self.subtract:
            # background subtraction requires identical q_z binning
            if len(result.Q)==len(self.sq_q):
                result.subtract(self.sR_q, self.sdR_q)
            else:
                # fixed typo in the log message ('backgroung')
                logging.warning(
                    f'background file {self.sFileName} not compatible with q_z scale ({len(self.sq_q)} vs. {len(result.Q)})')

        headerRqz = self.header.orso_header(
            extra_columns=[fileio.Column('time', 's', 'time relative to start of measurement series')])
        headerRqz.data_set = f'{i}_{ti}: time = {time:8.1f} s to {time+interval:8.1f} s{polstr}'
        orso_data = fileio.OrsoDataset(headerRqz, result.data_for_time(time))
        self.datasetsRqz.append(orso_data)

        if self.config.output.plot:
            import matplotlib.pyplot as plt
            # plot all reflectivity results in same graph
            plt.figure(num=99)
            result.plot(label=f'{self.config.reduction.fileIdentifier[i]} @ {time:.1f}s')

        self.last_result = result
    # reset normal logging behavior
    #logging.StreamHandler.terminator = "\n"
    logging.info(f' done {min(time+interval, pulseTimeS[-1]):5.0f}')
def save_Rqz(self):
    """Write the accumulated R(q_z) datasets to ``<outputName>.Rqz.ort``,
    appending to an existing file (with unique data_set names) when the
    ``append`` option is set."""
    fname = os.path.join(self.config.output.outputPath, f'{self.config.output.outputName}.Rqz.ort')
    logging.warning(f' {fname}')
    if os.path.exists(fname) and self.config.output.append:
        logging.info(' file already exists, append as new dataset')
        # keep the existing file's second (comment) line, stripping the '# ' prefix
        with open(fname, 'r') as f:
            f.readline()
            theSecondLine = f.readline()[3:]
        prev_data = fileio.load_orso(fname)
        prev_names = [di.info.data_set for di in prev_data]
        # make sure the new data_set names do not collide with existing ones
        for i, di in enumerate(self.datasetsRqz):
            while di.info.data_set in prev_names:
                if di.info.data_set.startswith('Nr '):
                    # renumber 'Nr <n> : ...'-style names past the existing datasets
                    di.info.data_set = f'Nr {i+len(prev_data)} :'+di.info.data_set.split(':', 1)[1]
                    break
                # otherwise disambiguate by appending underscores
                di.info.data_set = di.info.data_set+'_'
        fileio.save_orso(prev_data+self.datasetsRqz, fname, data_separator='\n', comment=theSecondLine)
    else:
        theSecondLine = f' {self.header.experiment.title} | {self.header.experiment.start_date} | sample {self.header.sample.name} | R(q_z)'
        fileio.save_orso(self.datasetsRqz, fname, data_separator='\n', comment=theSecondLine)
def save_Rtl(self):
    """Write all accumulated R(lambda, theta) datasets to one .Rlt.ort file."""
    out_cfg = self.config.output
    fname = os.path.join(out_cfg.outputPath, f'{out_cfg.outputName}.Rlt.ort')
    logging.warning(f' {fname}')
    # second header line: experiment summary for quick identification
    theSecondLine = (
        f' {self.header.experiment.title} | {self.header.experiment.start_date}'
        f' | sample {self.header.sample.name} | R(lambda, theta)'
    )
    fileio.save_orso(self.datasetsRlt, fname, data_separator='\n', comment=theSecondLine)
def loadRqz(self, name):
    """Load a stored R(q_z) curve from the output directory.

    Accepts either the exact file name or the bare output name (the
    '.Rqz.ort' suffix is appended automatically).  Terminates the program
    if neither file exists.

    Returns (q, R, dR, fileName) with columns read from the file.
    """
    fname = os.path.join(self.config.output.outputPath, name)
    for candidate in (fname, f'{fname}.Rqz.ort'):
        if os.path.exists(candidate):
            fileName = candidate
            break
    else:
        sys.exit(f'### the background file \'{fname}\' does not exist! => stopping')

    q_q, Sq_q, dS_q = np.loadtxt(fileName, usecols=(0, 1, 2), comments='#', unpack=True)

    return q_q, Sq_q, dS_q, fileName
def create_normalisation_map(self, short_notation):
    """Load a cached normalisation matrix for the given file list or
    build it from the raw normalisation files (and cache it when large).

    :param short_notation: short file-list notation for the normalisation files
    """
    outputPath = self.config.output.outputPath
    normalisation_list = self.path_resolver.expand_file_list(short_notation)
    # cache file name derived from the contributing file numbers
    name = '_'.join(map(str, normalisation_list))
    n_path = os.path.join(outputPath, f'{name}.norm')

    self.norm = None
    if os.path.exists(n_path):
        logging.debug(f'trying to load matrix from file {n_path}')
        try:
            # the action hash invalidates the cache when the action chain changed
            self.norm = LZNormalisation.from_file(n_path, self.normevent_actions.action_hash())
        except (ValueError, EOFError):
            self.norm =None
        else:
            logging.warning(f'normalisation matrix: found and using {n_path}')
    if self.norm is None:
        # in case file does not exist or the action hash doesn't match, create new normalization
        logging.warning(f'normalisation matrix: using the files {normalisation_list}')
        normalization_files = list(map(self.path_resolver.get_path, normalisation_list))
        reference = AmorEventData(normalization_files[0])
        self.normevent_actions(reference)
        for nfi in normalization_files[1:]:
            toadd = AmorEventData(nfi)
            self.normevent_actions(toadd)
            reference.append(toadd)
        self.norm = LZNormalisation(reference, self.config.reduction.normalisationMethod, self.grid)
        # only cache statistics-rich maps; small ones are cheap to rebuild
        if reference.data.events.shape[0] > 1e6:
            self.norm.safe(n_path, self.normevent_actions.action_hash())

    self.norm.update_header(self.header)
    self.header.reduction.corrections.append('normalisation with \'additional files\'')
def project_on_lz(self, dataset=None):
    """Project *dataset* (default: ``self.dataset``) onto the lambda-theta
    grid, apply the configured masks and normalise.

    :returns: the populated LZProjection
    """
    if dataset is None:
        dataset=self.dataset
    proj = LZProjection.from_dataset(dataset, self.grid,
                                     has_offspecular=(self.config.experiment.incidentAngle!=IncidentAngle.alphaF))

    # angle of the detector center relative to the sample orientation
    t0 = dataset.geometry.nu-dataset.geometry.mu
    if not self.config.reduction.is_default('thetaRangeR'):
        # adjust range based on detector center
        thetaRange = [ti+t0 for ti in self.config.reduction.thetaRangeR]
        proj.apply_theta_mask(thetaRange)
    elif not self.config.reduction.is_default('thetaRange'):
        proj.apply_theta_mask(self.config.reduction.thetaRange)
    else:
        # default: full divergence window around the detector center
        thetaRange = [dataset.geometry.nu - dataset.geometry.mu - dataset.geometry.div/2,
                      dataset.geometry.nu - dataset.geometry.mu + dataset.geometry.div/2]
        proj.apply_theta_mask(thetaRange)
    for thi in self.config.reduction.thetaFilters:
        # apply theta filters relative to angle on detector (issues with parts of the incoming divergence)
        proj.apply_theta_filter((thi[0]+t0, thi[1]+t0))

    proj.apply_lamda_mask(self.config.experiment.lambdaRange)

    # exclude pixels with insufficient normalisation statistics
    proj.apply_norm_mask(self.norm, min_norm=self.config.reduction.normalizationFilter,
                         min_theta=self.config.reduction.normAngleFilter)

    proj.project(dataset, self.monitor)

    if self.config.reduction.normalisationMethod == NormalisationMethod.over_illuminated:
        logging.debug(' assuming an overilluminated sample and correcting for the angle of incidence')
        proj.normalize_over_illuminated(self.norm)
    elif self.config.reduction.normalisationMethod==NormalisationMethod.under_illuminated:
        logging.debug(' assuming an underilluminated sample and ignoring the angle of incidence')
        proj.normalize_no_footprint(self.norm)
    elif self.config.reduction.normalisationMethod==NormalisationMethod.direct_beam:
        logging.debug(' assuming direct beam for normalisation and ignoring the angle of incidence')
        proj.normalize_no_footprint(self.norm)
    else:
        # unknown method: fall back to footprint-free normalisation
        logging.error('unknown normalisation method! Use [u]nder, [o]ver or [d]irect illumination')
        proj.normalize_no_footprint(self.norm)
    if self.monitor<=1e-6:
        # (near) zero monitor would blow up the normalised values: mark output invalid
        logging.info(' low monitor -> nan output')
        proj.data.ref *= np.nan

    return proj
1068
events2histogram.py
1068
events2histogram.py
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -1,220 +0,0 @@
|
|||||||
import argparse
|
|
||||||
|
|
||||||
from .logconfig import update_loglevel
|
|
||||||
from .options import ReaderConfig, EOSConfig, ExperimentConfig, OutputConfig, ReductionConfig, Defaults
|
|
||||||
|
|
||||||
|
|
||||||
def commandLineArgs():
    """
    Process command line arguments.

    The type of the default values is used for conversion and validation.
    Returns the parsed argparse namespace.
    """
    # typo fixed in the description: 'conversations' -> 'conversions'
    msg = ("eos reads data from (one or several) raw file(s) of the .hdf format, "
           "performs various corrections, conversions and projections and exports "
           "the resulting reflectivity in an orso-compatible format.")
    clas = argparse.ArgumentParser(description=msg)

    input_data = clas.add_argument_group('input data')
    input_data.add_argument("-f", "--fileIdentifier",
                            required=True,
                            nargs='+',
                            help="file number(s) or offset (if < 1)")
    input_data.add_argument("-n", "--normalisationFileIdentifier",
                            default=Defaults.normalisationFileIdentifier,
                            nargs='+',
                            help="file number(s) of normalisation measurement")
    input_data.add_argument("-rp", "--rawPath",
                            type=str,
                            default=Defaults.rawPath,
                            # typo fixed: was "ath to directory"
                            help="path to directory with .hdf files")
    input_data.add_argument("-Y", "--year",
                            default=Defaults.year,
                            type=int,
                            help="year the measurement was performed")
    input_data.add_argument("-sub", "--subtract",
                            help="R(q_z) curve to be subtracted (in .Rqz.ort format)")
    input_data.add_argument("-nm", "--normalisationMethod",
                            default=Defaults.normalisationMethod,
                            help="normalisation method: [o]verillumination, [u]nderillumination, [d]irect_beam")
    input_data.add_argument("-mt", "--monitorType",
                            type=str,
                            default=Defaults.monitorType,
                            help="one of [p]rotonCurrent, [t]ime or [n]eutronMonitor")

    output = clas.add_argument_group('output')
    output.add_argument("-o", "--outputName",
                        default=Defaults.outputName,
                        # typo fixed: was "withot suffix"
                        help="output file name (without suffix)")
    output.add_argument("-op", "--outputPath",
                        type=str,
                        default=Defaults.outputPath,
                        help="path for output")
    output.add_argument("-of", "--outputFormat",
                        nargs='+',
                        default=Defaults.outputFormat,
                        help="one of [Rqz.ort, Rlt.ort]")
    output.add_argument("-ai", "--incidentAngle",
                        type=str,
                        default=Defaults.incidentAngle,
                        # typo fixed: was "calulate"
                        help="calculate alpha_i from [alphaF, mu, nu]",
                        )
    output.add_argument("-r", "--qResolution",
                        default=Defaults.qResolution,
                        type=float,
                        help="q_z resolution")
    output.add_argument("-ts", "--timeSlize",
                        nargs='+',
                        type=float,
                        help="time slizing <interval> ,[<start> [,stop]]")
    output.add_argument("-s", "--scale",
                        nargs='+',
                        default=Defaults.scale,
                        type=float,
                        help="scaling factor for R(q_z)")
    output.add_argument("-S", "--autoscale",
                        nargs=2,
                        type=float,
                        help="scale to 1 in the given q_z range")

    masks = clas.add_argument_group('masks')
    masks.add_argument("-l", "--lambdaRange",
                       default=Defaults.lambdaRange,
                       nargs=2,
                       type=float,
                       help="wavelength range")
    masks.add_argument("-t", "--thetaRange",
                       default=Defaults.thetaRange,
                       nargs=2,
                       type=float,
                       help="absolute theta range")
    masks.add_argument("-T", "--thetaRangeR",
                       default=Defaults.thetaRangeR,
                       nargs=2,
                       type=float,
                       help="relative theta range")
    masks.add_argument("-y", "--yRange",
                       default=Defaults.yRange,
                       nargs=2,
                       type=int,
                       help="detector y range")
    masks.add_argument("-q", "--qzRange",
                       default=Defaults.qzRange,
                       nargs=2,
                       type=float,
                       help="q_z range")
    masks.add_argument("-ct", "--lowCurrentThreshold",
                       default=Defaults.lowCurrentThreshold,
                       type=float,
                       help="proton current threshold for discarding neutron pulses")

    overwrite = clas.add_argument_group('overwrite')
    overwrite.add_argument("-cs", "--chopperSpeed",
                           default=Defaults.chopperSpeed,
                           type=float,
                           help="chopper speed in rpm")
    overwrite.add_argument("-cp", "--chopperPhase",
                           default=Defaults.chopperPhase,
                           type=float,
                           help="chopper phase")
    overwrite.add_argument("-co", "--chopperPhaseOffset",
                           default=Defaults.chopperPhaseOffset,
                           type=float,
                           help="phase offset between chopper opening and trigger pulse")
    overwrite.add_argument("-m", "--muOffset",
                           default=Defaults.muOffset,
                           type=float,
                           help="mu offset")
    overwrite.add_argument("-mu", "--mu",
                           default=Defaults.mu,
                           type=float,
                           help="value of mu")
    overwrite.add_argument("-nu", "--nu",
                           default=Defaults.nu,
                           type=float,
                           help="value of nu")
    overwrite.add_argument("-sm", "--sampleModel",
                           default=Defaults.sampleModel,
                           type=str,
                           help="1-line orso sample model description")

    misc = clas.add_argument_group('misc')
    misc.add_argument('-v', '--verbose', action='store_true')
    misc.add_argument('-vv', '--debug', action='store_true')

    return clas.parse_args()
def expand_file_list(short_notation):
    """Evaluate string entry for file number lists.

    Accepts comma-separated items; each item is a single number, a range
    "a-b" (inclusive) or a stepped range "a-b:step".  Returns the sorted
    list of file numbers.
    """
    numbers = []
    for token in short_notation.split(','):
        if '-' in token:
            start_str, _, end_part = token.partition('-')
            # the end part may carry an optional ':step' suffix
            end_str, _, step_str = end_part.partition(':')
            step = int(step_str) if step_str else 1
            numbers.extend(range(int(start_str), int(end_str)+1, step))
        else:
            numbers.append(int(token))

    return sorted(numbers)
def output_format_list(outputFormat):
    """Expand the requested output formats into the canonical file suffixes.

    A bare extension ('ort'/'orb') selects both curves for that container;
    a bare curve name ('Rqz'/'Rlt') selects both containers for that curve.
    The result is sorted in reverse lexicographic order.
    """
    selected = []
    for full_name in ('Rqz.ort', 'Rlt.ort', 'Rqz.orb', 'Rlt.orb'):
        curve, container = full_name.split('.')
        if container in outputFormat or full_name in outputFormat or curve in outputFormat:
            selected.append(full_name)
    return sorted(selected, reverse=True)
def command_line_options():
    """Parse the command line and bundle the values into an EOSConfig.

    Returns an EOSConfig built from four sub-configurations (reader,
    experiment, reduction, output), each filled from the argparse namespace.
    """
    clas = commandLineArgs()
    update_loglevel(clas.verbose, clas.debug)

    def pick(*names):
        # collect the listed argparse attributes as keyword arguments
        return {name: getattr(clas, name) for name in names}

    reader_config = ReaderConfig(**pick('year', 'rawPath'))
    experiment_config = ExperimentConfig(**pick(
        'sampleModel', 'chopperSpeed', 'chopperPhase', 'chopperPhaseOffset',
        'yRange', 'lambdaRange', 'qzRange', 'lowCurrentThreshold',
        'incidentAngle', 'mu', 'nu', 'muOffset', 'monitorType',
    ))
    reduction_config = ReductionConfig(**pick(
        'qResolution', 'qzRange', 'autoscale', 'thetaRange', 'thetaRangeR',
        'fileIdentifier', 'scale', 'subtract', 'normalisationFileIdentifier',
        'normalisationMethod', 'timeSlize',
    ))
    output_config = OutputConfig(
        outputFormats=output_format_list(clas.outputFormat),
        outputName=clas.outputName,
        outputPath=clas.outputPath,
    )

    return EOSConfig(reader_config, experiment_config, reduction_config, output_config)
|
|
||||||
@@ -1,6 +0,0 @@
|
|||||||
"""
|
|
||||||
Constants used in data reduction.
|
|
||||||
"""
|
|
||||||
|
|
||||||
hdm = 6.626176e-34/1.674928e-27 # h / m
|
|
||||||
lamdaCut = 2.5 # Aa
|
|
||||||
@@ -1,503 +0,0 @@
|
|||||||
import logging
|
|
||||||
import os
|
|
||||||
import subprocess
|
|
||||||
import sys
|
|
||||||
from datetime import datetime, timezone
|
|
||||||
try:
|
|
||||||
import zoneinfo
|
|
||||||
except ImportError:
|
|
||||||
# for python versions < 3.9 try to use the backports version
|
|
||||||
from backports import zoneinfo
|
|
||||||
from typing import List
|
|
||||||
|
|
||||||
import h5py
|
|
||||||
import numpy as np
|
|
||||||
from orsopy import fileio
|
|
||||||
from orsopy.fileio.model_language import SampleModel
|
|
||||||
|
|
||||||
from . import const
|
|
||||||
from .header import Header
|
|
||||||
from .instrument import Detector
|
|
||||||
from .options import ExperimentConfig, ReaderConfig
|
|
||||||
|
|
||||||
try:
|
|
||||||
from . import nb_helpers
|
|
||||||
except Exception:
|
|
||||||
nb_helpers = None
|
|
||||||
|
|
||||||
# Time zone used to interpret time strings
|
|
||||||
AMOR_LOCAL_TIMEZONE = zoneinfo.ZoneInfo(key='Europe/Zurich')
|
|
||||||
|
|
||||||
class AmorData:
|
|
||||||
"""read meta-data and event streams from .hdf file(s), apply filters and conversions"""
|
|
||||||
chopperDetectorDistance: float
|
|
||||||
chopperDistance: float
|
|
||||||
chopperPhase: float
|
|
||||||
chopperSpeed: float
|
|
||||||
chopper1TriggerPhase: float
|
|
||||||
chopper2TriggerPhase: float
|
|
||||||
div: float
|
|
||||||
data_file_numbers: List[int]
|
|
||||||
delta_z: np.ndarray
|
|
||||||
detZ_e: np.ndarray
|
|
||||||
lamda_e: np.ndarray
|
|
||||||
wallTime_e: np.ndarray
|
|
||||||
kad: float
|
|
||||||
kap: float
|
|
||||||
lambdaMax: float
|
|
||||||
lambda_e: np.ndarray
|
|
||||||
#monitor: float
|
|
||||||
mu: float
|
|
||||||
nu: float
|
|
||||||
tau: float
|
|
||||||
tofCut: float
|
|
||||||
start_date: str
|
|
||||||
monitorType: str
|
|
||||||
|
|
||||||
seriesStartTime = None
|
|
||||||
|
|
||||||
#-------------------------------------------------------------------------------------------------
|
|
||||||
def __init__(self, header: Header, reader_config: ReaderConfig, config: ExperimentConfig,
             short_notation:str, norm=False):
    """Resolve the file list from `short_notation` and read all event data.

    header: orso header object that gets filled with meta data
    reader_config: which year's raw files to read and from where
    config: experiment parameters (angles, ranges, chopper and monitor settings)
    short_notation: file-number shorthand, e.g. '123,130-135:2'
    norm: True when reading a normalisation (reference) measurement
    """
    #self.startTime = reader_config.startTime
    self.header = header
    self.config = config
    self.reader_config = reader_config
    self.expand_file_list(short_notation)
    self.read_data(norm=norm)
|
|
||||||
|
|
||||||
#-------------------------------------------------------------------------------------------------
|
|
||||||
def read_data(self, norm=False):
    """Read every file of the series and concatenate the per-file event streams."""
    self.file_list = []
    for number in self.data_file_numbers:
        self.file_list.append(self.path_generator(number))
    ## read specific meta data and measurement from first file
    # header meta data is only harvested for measurement runs, not for normalisation
    if norm:
        self.readHeaderInfo = False
    else:
        self.readHeaderInfo = True

    # accumulators for the concatenated event streams over all files
    _detZ_e = []
    _lamda_e = []
    _wallTime_e = []
    #_monitor = 0
    _monitorPerPulse = []
    _pulseTimeS = []
    for file in self.file_list:
        # read_individual_data leaves the filtered per-file arrays on self
        self.read_individual_data(file, norm)
        _detZ_e = np.append(_detZ_e, self.detZ_e)
        _lamda_e = np.append(_lamda_e, self.lamda_e)
        _wallTime_e = np.append(_wallTime_e, self.wallTime_e)
        _monitorPerPulse = np.append(_monitorPerPulse, self.monitorPerPulse)
        _pulseTimeS = np.append(_pulseTimeS, self.pulseTimeS)
        #_monitor += self.monitor
    # replace the last file's arrays with the concatenation over all files
    self.detZ_e = _detZ_e
    self.lamda_e = _lamda_e
    self.wallTime_e = _wallTime_e
    #self.monitor = _monitor
    self.monitorPerPulse = _monitorPerPulse
    self.pulseTimeS = _pulseTimeS
|
|
||||||
|
|
||||||
#-------------------------------------------------------------------------------------------------
|
|
||||||
def path_generator(self, number):
|
|
||||||
fileName = f'amor{self.reader_config.year}n{number:06d}.hdf'
|
|
||||||
path = ''
|
|
||||||
for rawd in self.reader_config.rawPath:
|
|
||||||
if os.path.exists(os.path.join(rawd,fileName)):
|
|
||||||
path = rawd
|
|
||||||
break
|
|
||||||
if not path:
|
|
||||||
if os.path.exists(f'/afs/psi.ch/project/sinqdata/{self.reader_config.year}/amor/{int(number/1000)}/{fileName}'):
|
|
||||||
path = f'/afs/psi.ch/project/sinqdata/{self.reader_config.year}/amor/{int(number/1000)}'
|
|
||||||
else:
|
|
||||||
sys.exit(f'# ERROR: the file {fileName} can not be found in {self.reader_config.rawPath}')
|
|
||||||
return os.path.join(path, fileName)
|
|
||||||
#-------------------------------------------------------------------------------------------------
|
|
||||||
def expand_file_list(self, short_notation):
|
|
||||||
"""Evaluate string entry for file number lists"""
|
|
||||||
#log().debug('Executing get_flist')
|
|
||||||
file_list=[]
|
|
||||||
for i in short_notation.split(','):
|
|
||||||
if '-' in i:
|
|
||||||
if ':' in i:
|
|
||||||
step = i.split(':', 1)[1]
|
|
||||||
file_list += range(int(i.split('-', 1)[0]), int((i.rsplit('-', 1)[1]).split(':', 1)[0])+1, int(step))
|
|
||||||
else:
|
|
||||||
step = 1
|
|
||||||
file_list += range(int(i.split('-', 1)[0]), int(i.split('-', 1)[1])+1, int(step))
|
|
||||||
else:
|
|
||||||
file_list += [int(i)]
|
|
||||||
self.data_file_numbers=sorted(file_list)
|
|
||||||
#-------------------------------------------------------------------------------------------------
|
|
||||||
def resolve_pixels(self):
    """Determine spatial coordinates and angles from pixel number.

    Returns an (nPixel, 4) lookup table with columns
    (y index, z index, x depth position, angle delta) indexed by pixelID-1.
    Also stores `delta_z`, the delta values of the y==1 column.
    """
    nPixel = Detector.nWires * Detector.nStripes * Detector.nBlades
    pixelID = np.arange(nPixel)
    # split the flat pixel ID into blade number and position on the blade
    (bladeNr, bPixel) = np.divmod(pixelID, Detector.nWires * Detector.nStripes)
    (bZi, detYi) = np.divmod(bPixel, Detector.nStripes) # z index on blade, y index on detector
    detZi = bladeNr * Detector.nWires + bZi # z index on detector
    detX = bZi * Detector.dX # x position in detector
    # detZ = Detector.zero - bladeNr * Detector.bladeZ - bZi * Detector.dZ # z position on detector
    # angular pitch between neighbouring blades as seen from the focal point
    bladeAngle = np.rad2deg( 2. * np.arcsin(0.5*Detector.bladeZ / Detector.distance) )
    # angle of each pixel relative to the detector centre
    delta = (Detector.nBlades/2. - bladeNr) * bladeAngle \
            - np.rad2deg( np.arctan(bZi*Detector.dZ / ( Detector.distance + bZi * Detector.dX) ) )
    self.delta_z = delta[detYi==1]
    return np.vstack((detYi.T, detZi.T, detX.T, delta.T)).T
|
|
||||||
#-------------------------------------------------------------------------------------------------
|
|
||||||
def read_individual_data(self, fileName, norm=False):
    """Read one .hdf file: meta data, event streams, then run the filter pipeline.

    fileName: full path of the raw file
    norm: True for normalisation runs (file is recorded as 'additional', and
          the qz filter is skipped downstream)
    """
    self.hdf = h5py.File(fileName, 'r', swmr=True)

    # general (series-wide) meta data is read only once, from the first file
    if self.readHeaderInfo:
        self.read_header_info()

    logging.warning(f'  from file: {fileName}')
    self.read_individual_header()

    # add header content
    if self.readHeaderInfo:
        self.readHeaderInfo = False
        self.header.measurement_instrument_settings = fileio.InstrumentSettings(
            incident_angle = fileio.ValueRange(round(self.mu+self.kap+self.kad-0.5*self.div, 3),
                                               round(self.mu+self.kap+self.kad+0.5*self.div, 3),
                                               'deg'),
            wavelength = fileio.ValueRange(const.lamdaCut, self.config.lambdaRange[1], 'angstrom'),
            #polarization = fileio.Polarization.unpolarized,
            polarization = self.polarizationConfig
            )
        self.header.measurement_instrument_settings.mu = fileio.Value(round(self.mu, 3), 'deg', comment='sample angle to horizon')
        self.header.measurement_instrument_settings.nu = fileio.Value(round(self.nu, 3), 'deg', comment='detector angle to horizon')
        self.header.measurement_instrument_settings.div = fileio.Value(round(self.div, 3), 'deg', comment='incoming beam divergence')
        self.header.measurement_instrument_settings.kap = fileio.Value(round(self.kap, 3), 'deg', comment='incoming beam inclination')
        # the angular offset is only reported when significantly non-zero
        if abs(self.kad)>0.02:
            self.header.measurement_instrument_settings.kad = fileio.Value(round(self.kad, 3), 'deg', comment='incoming beam angular offset')
    if norm:
        self.header.measurement_additional_files.append(fileio.File(file=fileName.split('/')[-1], timestamp=self.fileDate))
    else:
        self.header.measurement_data_files.append(fileio.File(file=fileName.split('/')[-1], timestamp=self.fileDate))
    logging.info(f'    mu = {self.mu:6.3f}, nu = {self.nu:6.3f}, kap = {self.kap:6.3f}, kad = {self.kad:6.3f}')

    # event pipeline: load, time-correct, associate with pulses/monitor
    self.read_event_stream()

    self.correct_for_chopper_phases()

    self.read_chopper_trigger_stream()

    self.extract_walltime(norm)

    self.read_proton_current_stream()

    self.associate_pulse_with_monitor()

    # following lines: debugging output to trace the time-offset of proton current and neutron pulses
    if self.config.monitorType == 'x':
        cpp, t_bins = np.histogram(self.wallTime_e, self.pulseTimeS)
        np.savetxt('tme.hst', np.vstack((self.pulseTimeS[:-1], cpp, self.monitorPerPulse[:-1])).T)

    #self.average_events_per_pulse() # for debugging only. VERY time consuming!!!

    # filter pipeline: reject bad pulses/events, fold frames, derive lambda/qz
    self.monitor_threshold()

    self.filter_strange_times()

    self.merge_time_frames()

    self.filter_project_x()

    self.correct_for_chopper_opening()

    self.calculate_derived_properties()

    self.filter_qz_range(norm)

    logging.info(f'    number of events: total = {self.totalNumber:7d}, filtered = {np.shape(self.lamda_e)[0]:7d}')
|
|
||||||
|
|
||||||
def read_event_stream(self):
    """Load the raw detector event stream from the open hdf file."""
    # time of flight within the pulse, converted from ns to s
    self.tof_e = np.array(self.hdf['/entry1/Amor/detector/data/event_time_offset'][:])/1.e9
    self.pixelID_e = np.array(self.hdf['/entry1/Amor/detector/data/event_id'][:], dtype=np.int64)
    # packet start indices and their epoch timestamps (used to assign wall times)
    self.dataPacket_p = np.array(self.hdf['/entry1/Amor/detector/data/event_index'][:], dtype=np.uint64)
    self.dataPacketTime_p = np.array(self.hdf['/entry1/Amor/detector/data/event_time_zero'][:], dtype=np.int64)
|
|
||||||
|
|
||||||
def correct_for_chopper_phases(self):
|
|
||||||
#print(f'chopperTriggerPhase: {self.ch1TriggerPhase}')
|
|
||||||
self.tof_e += self.tau * (self.ch1TriggerPhase + self.chopperPhase/2)/180
|
|
||||||
|
|
||||||
def extract_walltime(self, norm):
    """Assign each event the epoch time of its data packet, relative to series start.

    norm: unused here; kept for interface symmetry with the other pipeline steps.
    """
    if nb_helpers:
        # fast compiled path
        self.wallTime_e = nb_helpers.extract_walltime(self.tof_e, self.dataPacket_p, self.dataPacketTime_p)
    else:
        # numpy fallback: every event inherits the timestamp of its packet
        self.wallTime_e = np.empty(np.shape(self.tof_e)[0], dtype=np.int64)
        for i in range(len(self.dataPacket_p)-1):
            self.wallTime_e[self.dataPacket_p[i]:self.dataPacket_p[i+1]] = self.dataPacketTime_p[i]
        self.wallTime_e[self.dataPacket_p[-1]:] = self.dataPacketTime_p[-1]
    # make wall times relative to the start of the measurement series
    self.wallTime_e -= np.int64(self.seriesStartTime)
    logging.debug(f'    wall time from {self.wallTime_e[0]/1e9:6.1f} s to {self.wallTime_e[-1]/1e9:6.1f} s')
|
|
||||||
|
|
||||||
def read_chopper_trigger_stream(self):
    """Derive pulse timestamps and start/stop times from the chopper trigger stream.

    Falls back to the detector event stream when no trigger data are present.
    The first start time of the series is remembered in `seriesStartTime` and
    subtracted so that all pulse times are relative to the series start.
    """
    # the last two entries may be incomplete at the end of a file and are dropped
    self.chopper1TriggerTime = np.array(self.hdf['entry1/Amor/chopper/ch2_trigger/event_time_zero'][:-2], dtype=np.int64)
    #self.chopper2TriggerTime = self.chopper1TriggerTime + np.array(self.hdf['entry1/Amor/chopper/ch2_trigger/event_time'][:-2], dtype=np.int64)
    #                           + np.array(self.hdf['entry1/Amor/chopper/ch2_trigger/event_time_offset'][:], dtype=np.int64)
    if np.shape(self.chopper1TriggerTime)[0] > 2:
        self.startTime = self.chopper1TriggerTime[0]
        self.stopTime = self.chopper1TriggerTime[-1]
        self.pulseTimeS = self.chopper1TriggerTime
    else:
        # fix: logging.warn is deprecated since Python 3.3 -> logging.warning
        # (also fixed the 'steram' typo in the message)
        logging.warning(' no chopper trigger data available, using event stream instead')
        self.startTime = np.array(self.hdf['/entry1/Amor/detector/data/event_time_zero'][0], dtype=np.int64)
        self.stopTime = np.array(self.hdf['/entry1/Amor/detector/data/event_time_zero'][-2], dtype=np.int64)
        # synthesize pulse times on the tau grid (tau is in s, times in ns)
        self.pulseTimeS = np.arange(self.startTime, self.stopTime, self.tau*1e9)
    if self.seriesStartTime is None:
        self.seriesStartTime = self.startTime
        logging.debug(f'    series start time (epoch): {self.seriesStartTime/1e9:13.2f} s')
    self.pulseTimeS -= self.seriesStartTime
    logging.debug(f'    epoch time from {self.startTime/1e9:13.2f} s to {self.stopTime/1e9:13.2f} s')
    logging.debug(f'    => counting time {self.stopTime/1e9-self.startTime/1e9:8.2f} s')
|
|
||||||
|
|
||||||
def read_proton_current_stream(self):
    """Load the logged proton-current time series from the open hdf file."""
    self.currentTime = np.array(self.hdf['entry1/Amor/detector/proton_current/time'][:], dtype=np.int64)
    # only the first value column holds the current reading
    self.current = np.array(self.hdf['entry1/Amor/detector/proton_current/value'][:,0], dtype=float)
|
|
||||||
|
|
||||||
def get_current_per_pulse(self, pulseTimeS, currentTimeS, currents):
|
|
||||||
# add currents for early pulses and current time value after last pulse (j+1)
|
|
||||||
currentTimeS = np.hstack([[0], currentTimeS, [pulseTimeS[-1]+1]])
|
|
||||||
currents = np.hstack([[0], currents])
|
|
||||||
pulseCurrentS = np.zeros(pulseTimeS.shape[0], dtype=float)
|
|
||||||
j = 0
|
|
||||||
for i, ti in enumerate(pulseTimeS):
|
|
||||||
while ti >= currentTimeS[j+1]:
|
|
||||||
j += 1
|
|
||||||
pulseCurrentS[i] = currents[j]
|
|
||||||
#print(f' {i} {pulseTimeS[i]} {pulseCurrentS[i]}')
|
|
||||||
return pulseCurrentS
|
|
||||||
|
|
||||||
def associate_pulse_with_monitor(self):
|
|
||||||
if self.config.monitorType == 'p': # protonCharge
|
|
||||||
self.currentTime -= np.int64(self.seriesStartTime)
|
|
||||||
self.monitorPerPulse = self.get_current_per_pulse(self.pulseTimeS, self.currentTime, self.current) * 2*self.tau * 1e-3
|
|
||||||
# filter low-current pulses
|
|
||||||
self.monitorPerPulse = np.where(self.monitorPerPulse > 2*self.tau * self.config.lowCurrentThreshold * 1e-3, self.monitorPerPulse, 0)
|
|
||||||
elif self.config.monitorType == 't': # countingTime
|
|
||||||
self.monitorPerPulse = np.ones(np.shape(self.pulseTimeS)[0])*2*self.tau
|
|
||||||
else: # pulses
|
|
||||||
self.monitorPerPulse = np.ones(np.shape(self.pulseTimeS)[0])
|
|
||||||
|
|
||||||
def average_events_per_pulse(self):
    """Debugging helper: log event count and monitor value per pulse.

    Only active for the proton-charge monitor; very slow, not used in
    the regular pipeline.
    """
    if self.config.monitorType == 'p':
        for i, time in enumerate(self.pulseTimeS):
            # events whose wall time matches this pulse time exactly
            events = np.shape(self.wallTime_e[self.wallTime_e == time])[0]
            logging.info(f'pulse: {i:6.0f}, events: {events:6.0f}, monitor: {self.monitorPerPulse[i]:6.2f}')
|
|
||||||
|
|
||||||
def monitor_threshold(self):
|
|
||||||
#if self.config.monitorType == 'p': # fix to check for file compatibility
|
|
||||||
self.totalNumber = np.shape(self.tof_e[self.tof_e<=self.stopTime])[0]
|
|
||||||
if True:
|
|
||||||
goodTimeS = self.pulseTimeS[self.monitorPerPulse!=0]
|
|
||||||
filter_e = np.where(np.isin(self.wallTime_e, goodTimeS), True, False)
|
|
||||||
self.tof_e = self.tof_e[filter_e]
|
|
||||||
self.pixelID_e = self.pixelID_e[filter_e]
|
|
||||||
self.wallTime_e = self.wallTime_e[filter_e]
|
|
||||||
logging.info(f' low-beam rejected pulses: {np.shape(self.monitorPerPulse)[0]-1-np.shape(goodTimeS)[0]} out of {np.shape(self.monitorPerPulse)[0]-1}')
|
|
||||||
logging.info(f' with {np.shape(filter_e)[0]-np.shape(self.tof_e)[0]} events')
|
|
||||||
logging.info(f' average counts per pulse = {np.shape(self.tof_e)[0] / np.shape(goodTimeS[goodTimeS!=0])[0]:7.1f}')
|
|
||||||
|
|
||||||
def filter_qz_range(self, norm):
|
|
||||||
if self.config.qzRange[1]<0.5 and not norm:
|
|
||||||
self.mask_e = np.logical_and(self.mask_e,
|
|
||||||
(self.config.qzRange[0]<=self.qz_e) & (self.qz_e<=self.config.qzRange[1]))
|
|
||||||
self.detZ_e = self.detZ_e[self.mask_e]
|
|
||||||
self.lamda_e = self.lamda_e[self.mask_e]
|
|
||||||
self.wallTime_e = self.wallTime_e[self.mask_e]
|
|
||||||
|
|
||||||
def calculate_derived_properties(self):
    """Convert tof and pixel geometry into wavelength (lamda_e) and q (qz_e, qx_e).

    Also narrows mask_e to the configured wavelength range and sets the
    orso measurement scheme according to the incident-angle convention.
    """
    # longest wavelength still inside one frame; the 124. appears to be an
    # empirical length correction in mm — TODO confirm its origin
    self.lamdaMax = const.lamdaCut+1.e13*self.tau*const.hdm/(self.chopperDetectorDistance+124.)
    #if nb_helpers:
    if False:
        # NOTE(review): deliberately disabled compiled path, kept for reference;
        # 'if False' makes this whole branch dead code
        self.lamda_e, self.qz_e, self.mask_e = nb_helpers.calculate_derived_properties_focussing(
            self.tof_e, self.detXdist_e, self.delta_e, self.mask_e,
            self.config.lambdaRange[0], self.config.lambdaRange[1], self.nu, self.mu,
            self.chopperDetectorDistance, const.hdm
            )
        return
    # lambda: de Broglie relation, per-event flight path in mm, tof in s
    self.lamda_e = (1.e13*const.hdm)*self.tof_e/(self.chopperDetectorDistance+self.detXdist_e)
    self.mask_e = np.logical_and(self.mask_e, (self.config.lambdaRange[0]<=self.lamda_e) & (
            self.lamda_e<=self.config.lambdaRange[1]))
    # alpha_f
    # q_z
    if self.config.incidentAngle == 'alphaF':
        # specular convention: exit angle from detector pixel angle
        alphaF_e = self.nu - self.mu + self.delta_e
        self.qz_e = 4*np.pi*(np.sin(np.deg2rad(alphaF_e))/self.lamda_e)
        # qx_e = 0.
        self.header.measurement_scheme = 'angle- and energy-dispersive'
    elif self.config.incidentAngle == 'nu':
        # sample-angle-free convention: use half the total scattering angle
        alphaF_e = (self.nu + self.delta_e + self.kap + self.kad) / 2.
        self.qz_e = 4*np.pi*(np.sin(np.deg2rad(alphaF_e))/self.lamda_e)
        # qx_e = 0.
        self.header.measurement_scheme = 'energy-dispersive'
    else:
        # general (off-specular) case: separate incident and exit angles
        alphaF_e = self.nu - self.mu + self.delta_e
        alphaI = self.kap + self.kad + self.mu
        self.qz_e = 2*np.pi * ((np.sin(np.deg2rad(alphaF_e)) + np.sin(np.deg2rad(alphaI)))/self.lamda_e)
        self.qx_e = 2*np.pi * ((np.cos(np.deg2rad(alphaF_e)) - np.cos(np.deg2rad(alphaI)))/self.lamda_e)
        self.header.measurement_scheme = 'energy-dispersive'
|
|
||||||
|
|
||||||
def correct_for_chopper_opening(self):
|
|
||||||
# correct tof for beam size effect at chopper: t_cor = (delta / 180 deg) * tau
|
|
||||||
if self.config.incidentAngle == 'alphaF':
|
|
||||||
self.tof_e -= ( self.delta_e / 180. ) * self.tau
|
|
||||||
else:
|
|
||||||
# TODO: check sign of correction
|
|
||||||
self.tof_e -= ( self.kad / 180. ) * self.tau
|
|
||||||
|
|
||||||
def filter_project_x(self):
    """Resolve pixel IDs into detector coordinates and mask events outside the y range.

    Sets detZ_e (z index), detXdist_e (depth position), delta_e (angle) and
    the initial event mask mask_e.
    """
    pixelLookUp = self.resolve_pixels()
    if nb_helpers:
        # fast compiled path
        (self.detZ_e, self.detXdist_e, self.delta_e, self.mask_e) = nb_helpers.filter_project_x(
            pixelLookUp, self.pixelID_e.astype(np.int64), self.config.yRange[0], self.config.yRange[1]
            )
    else:
        # resolve pixel ID into y and z indicees, x position and angle
        # (pixel IDs are 1-based, hence the -1)
        (detY_e, self.detZ_e, self.detXdist_e, self.delta_e) = pixelLookUp[np.int_(self.pixelID_e)-1, :].T
        # define mask and filter y range
        self.mask_e = (self.config.yRange[0]<=detY_e) & (detY_e<=self.config.yRange[1])
|
|
||||||
|
|
||||||
# TODO: - handle each neutron pulse individually, - associate with correct monitor also for slow neutrons
def merge_time_frames(self):
    """Fold all tof values into one chopper period (frame) starting at tofCut."""
    # total_offset re-applies the chopper phase shift from correct_for_chopper_phases
    # on top of the wavelength cut-off time
    total_offset = self.tofCut + self.tau * (self.ch1TriggerPhase + self.chopperPhase/2)/180
    if nb_helpers:
        # fast compiled path
        self.tof_e = nb_helpers.merge_frames(self.tof_e, self.tofCut, self.tau, total_offset)
    else:
        self.tof_e = np.remainder(self.tof_e-(self.tofCut-self.tau), self.tau)+total_offset # tof shifted to 1 frame
|
|
||||||
|
|
||||||
def filter_strange_times(self):
|
|
||||||
# 'strange' tof times are those with t > 2 tau (originating from the efu)
|
|
||||||
filter_e = (self.tof_e<=2*self.tau)
|
|
||||||
self.tof_e = self.tof_e[filter_e]
|
|
||||||
self.pixelID_e = self.pixelID_e[filter_e]
|
|
||||||
self.wallTime_e = self.wallTime_e[filter_e]
|
|
||||||
if np.shape(filter_e)[0]-np.shape(self.tof_e)[0]>0.5:
|
|
||||||
logging.warning(f' strange times: {np.shape(filter_e)[0]-np.shape(self.tof_e)[0]}')
|
|
||||||
|
|
||||||
def read_individual_header(self):
    """Read per-file instrument settings (angles, chopper, polarization).

    Values are taken from the hdf file; when the instrument-control group is
    missing, they are recovered from the nicos cache files on disk. Finally,
    command-line overrides (muOffset, mu, nu, chopperPhaseOffset) are applied
    and the file date is extracted.
    """
    self.chopperDistance = float(np.take(self.hdf['entry1/Amor/chopper/pair_separation'], 0))
    self.detectorDistance = float(np.take(self.hdf['entry1/Amor/detector/transformation/distance'], 0))
    self.chopperDetectorDistance = self.detectorDistance-float(np.take(self.hdf['entry1/Amor/chopper/distance'], 0))
    # shortest tof of interest, set by the wavelength cut-off
    self.tofCut = const.lamdaCut*self.chopperDetectorDistance/const.hdm*1.e-13

    polarizationConfigs = ['undefined', 'unpolarized', 'po', 'mo', 'op', 'pp', 'mp', 'om', 'pm', 'mm']
    try:
        self.mu = float(np.take(self.hdf['/entry1/Amor/instrument_control_parameters/mu'], 0))
        self.nu = float(np.take(self.hdf['/entry1/Amor/instrument_control_parameters/nu'], 0))
        self.kap = float(np.take(self.hdf['/entry1/Amor/instrument_control_parameters/kap'], 0))
        self.kad = float(np.take(self.hdf['/entry1/Amor/instrument_control_parameters/kad'], 0))
        self.div = float(np.take(self.hdf['/entry1/Amor/instrument_control_parameters/div'], 0))
        self.ch1TriggerPhase = float(np.take(self.hdf['/entry1/Amor/chopper/ch1_trigger_phase'], 0))
        self.ch2TriggerPhase = float(np.take(self.hdf['/entry1/Amor/chopper/ch2_trigger_phase'], 0))
        try:
            # derive chopper speed and phase from the trigger stream itself
            chopperTriggerTime = float(self.hdf['entry1/Amor/chopper/ch2_trigger/event_time_zero'][2])\
                                 - float(self.hdf['entry1/Amor/chopper/ch2_trigger/event_time_zero'][1])
            # round the period (ns) to full ms; tau is half the trigger period in s
            self.tau = int(1e-6*chopperTriggerTime/2+0.5)*(1e-3)
            self.chopperSpeed = 30/self.tau
            chopperTriggerTimeDiff = float(self.hdf['entry1/Amor/chopper/ch2_trigger/event_time_offset'][2])
            chopperTriggerPhase = 180e-9*chopperTriggerTimeDiff/self.tau
            #TODO: check the next line
            self.chopperPhase = chopperTriggerPhase + self.ch1TriggerPhase - self.ch2TriggerPhase
        except(KeyError, IndexError):
            logging.debug(' chopper speed and phase taken from .hdf file')
            self.chopperSpeed = float(np.take(self.hdf['/entry1/Amor/chopper/rotation_speed'], 0))
            self.chopperPhase = float(np.take(self.hdf['/entry1/Amor/chopper/phase'], 0))
            self.tau = 30/self.chopperSpeed
        try:
            polarizationConfigLabel = int(self.hdf['/entry1/Amor/polarization/configuration/value'][0])
        except(KeyError, IndexError):
            polarizationConfigLabel = 0
        self.polarizationConfig = polarizationConfigs[polarizationConfigLabel]
        # fix: the message previously repeated '(index ...)' twice
        logging.debug(f' polarization configuration: {self.polarizationConfig} (index {polarizationConfigLabel})')
    except(KeyError, IndexError):
        logging.warning(" using parameters from nicos cache")
        year_date = str(self.start_date).replace('-', '/', 1)
        # TODO: check new cache pathes
        cachePath = '/home/amor/cache/'
        self.mu = self._nicos_cache_value(cachePath, 'mu', year_date)
        self.nu = self._nicos_cache_value(cachePath, 'nu', year_date)
        self.kap = self._nicos_cache_value(cachePath, 'kap', year_date)
        self.kad = self._nicos_cache_value(cachePath, 'kad', year_date)
        self.div = self._nicos_cache_value(cachePath, 'div', year_date)
        self.chopperSpeed = self._nicos_cache_value(cachePath, 'ch1_speed', year_date)
        self.chopperPhase = self._nicos_cache_value(cachePath, 'chopper_phase', year_date)
        self.ch1TriggerPhase = self._nicos_cache_value(cachePath, 'ch1_trigger_phase', year_date)
        self.ch2TriggerPhase = self._nicos_cache_value(cachePath, 'ch2_trigger_phase', year_date)

        self.tau = 30. / self.chopperSpeed

    logging.debug(f'    tau = {self.tau} s')
    # command-line overrides
    if self.config.muOffset:
        logging.debug(f'    set muOffset = {self.config.muOffset}')
        self.mu += self.config.muOffset
    if self.config.mu:
        logging.debug(f'    replaced mu = {self.mu} with {self.config.mu}')
        self.mu = self.config.mu
    if self.config.nu:
        logging.debug(f'    replaced nu = {self.nu} with {self.config.nu}')
        self.nu = self.config.nu
    if self.config.chopperPhaseOffset:
        logging.debug(f'    replaced ch1TriggerPhase = {self.ch1TriggerPhase} with {self.config.chopperPhaseOffset}')
        self.ch1TriggerPhase = self.config.chopperPhaseOffset

    # extract start time as unix time, adding UTC offset of 1h to time string
    dz = datetime.fromisoformat(self.hdf['/entry1/start_time'][0].decode('utf-8'))
    self.fileDate=dz.replace(tzinfo=AMOR_LOCAL_TIMEZONE)
    #self.startTime = np.int64( (self.fileDate.timestamp() ) * 1e9 )
    #if self.seriesStartTime is None:
    #    self.seriesStartTime = self.startTime

def _nicos_cache_value(self, cachePath, device, year_date):
    """Return the last 'value' entry for `device` from the nicos cache file as float."""
    line = str(subprocess.getoutput(f'/usr/bin/grep "value" {cachePath}nicos-{device}/{year_date}'))
    # cache lines are tab separated; the value is the last field
    return float(line.split('\t')[-1])
|
|
||||||
|
|
||||||
def read_header_info(self):
    """Read series-wide meta data from the first file and fill the orso header."""
    # read general information and first data set
    logging.info(f'    meta data from: {self.file_list[0]}')
    self.hdf = h5py.File(self.file_list[0], 'r', swmr=True)
    title = self.hdf['entry1/title'][0].decode('utf-8')
    proposal_id = self.hdf['entry1/proposal_id'][0].decode('utf-8')
    user_name = self.hdf['entry1/user/name'][0].decode('utf-8')
    user_affiliation = 'unknown'  # not available in the data file
    user_email = self.hdf['entry1/user/email'][0].decode('utf-8')
    user_orcid = None  # not available in the data file
    sampleName = self.hdf['entry1/sample/name'][0].decode('utf-8')
    model = self.hdf['entry1/sample/model'][0].decode('utf-8')
    instrumentName = 'Amor'
    source = self.hdf['entry1/Amor/source/name'][0].decode('utf-8')
    sourceProbe = 'neutron'
    start_time = self.hdf['entry1/start_time'][0].decode('utf-8')
    self.start_date = start_time.split(' ')[0]
    # a sample model given on the command line overrides the one from the file
    if self.config.sampleModel:
        model = self.config.sampleModel
    # assembling orso header information
    self.header.owner = fileio.Person(
        name=user_name,
        affiliation=user_affiliation,
        contact=user_email,
        )
    if user_orcid:
        self.header.owner.orcid = user_orcid
    self.header.experiment = fileio.Experiment(
        title=title,
        instrument=instrumentName,
        start_date=self.start_date,
        probe=sourceProbe,
        facility=source,
        proposalID=proposal_id
        )
    self.header.sample = fileio.Sample(
        name=sampleName,
        model=SampleModel(stack=model),
        sample_parameters=None,
        )
    # default scheme; may be overwritten later in calculate_derived_properties
    self.header.measurement_scheme = 'angle- and energy-dispersive'
|
|
||||||
|
|
||||||
@@ -1,70 +0,0 @@
|
|||||||
"""
|
|
||||||
Classes describing the AMOR instrument configuration used during reduction.
|
|
||||||
"""
|
|
||||||
|
|
||||||
import logging
|
|
||||||
import numpy as np
|
|
||||||
|
|
||||||
from . import const
|
|
||||||
|
|
||||||
class Detector:
    """Geometry constants of the Amor multi-blade detector (lengths in mm)."""
    nBlades = 14 # number of active blades in the detector
    nWires = 32 # number of wires per blade
    nStripes = 64 # number of stripes per blade
    angle = np.deg2rad(5.1) # deg angle of incidence of the beam on the blades (def: 5.1)
    dZ = 4.0*np.sin(angle) # mm height-distance of neighboring pixels on one blade
    dX = 4.0*np.cos(angle) # mm depth-distance of neighboring pixels on one blade
    bladeZ = 10.455 # mm distance between detector blades
    zero = 0.5*nBlades*bladeZ # mm vertical center of the detector
    distance = 4000. # mm distance from focal point to leading blade edge
|
|
||||||
|
|
||||||
class Grid:
    """Provides the binning grids (q_z, lambda, z, delta) used for histogramming."""

    def __init__(self, qResolution, qzRange):
        """qResolution: requested relative dq/q; qzRange: [qzMin, qzMax] in 1/Aa."""
        self.lamdaCut = const.lamdaCut
        self.dldl = 0.005 # Delta lambda / lambda
        self.qResolution = qResolution
        self.qzRange = qzRange

    def q(self):
        """Return q_z bin edges: linear below q=0.01, geometric above."""
        # snap the requested resolution onto the supported grid via a histogram trick
        resolutions = [0.005, 0.01, 0.02, 0.025, 0.04, 0.05, 0.1, 1]
        a, b = np.histogram([self.qResolution], bins = resolutions)
        dqdq = np.matmul(b[:-1],a)
        if dqdq != self.qResolution:
            logging.info(f'# changed resolution to {dqdq}')
        qq = 0.01
        # linear up to qq
        q_grid = np.arange(0, qq, qq*dqdq)
        # exponential from qq on
        q_grid = np.append(q_grid, qq*(1.+dqdq)**np.arange(int(np.log(self.qzRange[1]/qq)/np.log(1+dqdq))))
        q_grid = q_grid[q_grid>=self.qzRange[0]]
        return q_grid

    def lamda(self):
        """Return geometric wavelength bin edges from lamdaCut up to 16 Aa."""
        lamdaMax = 16
        lamdaMin = self.lamdaCut
        lamda_grid = lamdaMin*(1+self.dldl)**np.arange(int(np.log(lamdaMax/lamdaMin)/np.log(1+self.dldl)+1))
        return lamda_grid

    def z(self):
        """Return one bin edge per detector z channel, plus the closing upper edge."""
        return np.arange(Detector.nBlades*Detector.nWires+1)

    def lz(self):
        """Return a (lambda, z) array of ones matching the lamda() x z() bin centres."""
        return np.ones(( np.shape(self.lamda()[:-1])[0], np.shape(self.z()[:-1])[0] ))

    def delta(self, detectorDistance):
        """Return bin edges in the angle delta covering all blades.

        Currently unused.
        """
        # unused for now
        # angular pitch between neighbouring blades as seen from the focal point
        bladeAngle = np.rad2deg( 2. * np.arcsin(0.5*Detector.bladeZ / detectorDistance) )
        blade_grid = np.arctan( np.arange(33) * Detector.dZ / ( detectorDistance + np.arange(33) * Detector.dX) )
        blade_grid = np.rad2deg(blade_grid)
        stepWidth = blade_grid[1] - blade_grid[0]
        blade_grid = blade_grid - 0.2 * stepWidth

        # concatenate the per-blade grids, dropping overlapping edges
        delta_grid = []
        for b in np.arange(Detector.nBlades-1):
            delta_grid = np.concatenate((delta_grid, blade_grid), axis=None)
            blade_grid = blade_grid + bladeAngle
            delta_grid = delta_grid[delta_grid<blade_grid[0]-0.5*stepWidth]
        delta_grid = np.concatenate((delta_grid, blade_grid), axis=None)

        return -np.flip(delta_grid) + 0.5*Detector.nBlades * bladeAngle
|
|
||||||
@@ -1,193 +0,0 @@
|
|||||||
"""
|
|
||||||
Classes for storing various configurations needed for reduction.
|
|
||||||
"""
|
|
||||||
from dataclasses import dataclass, field
|
|
||||||
from typing import Optional, Tuple
|
|
||||||
from datetime import datetime
|
|
||||||
from os import path
|
|
||||||
import numpy as np
|
|
||||||
|
|
||||||
import logging
|
|
||||||
|
|
||||||
class Defaults:
    """Fallback values for all command-line / configuration options.

    These are used to fill configurations when an option is absent and by
    EOSConfig.call_string() to decide which options deviate from their
    defaults and therefore must be echoed on the reconstructed command line.
    Options listed only as comments have no default value.
    """

    # --- input ---------------------------------------------------------
    # fileIdentifier
    outputPath = '.'
    rawPath = ['.', path.join('.', 'raw'), path.join('..', 'raw'), path.join('..', '..', 'raw')]
    year = datetime.now().year
    normalisationFileIdentifier = []
    normalisationMethod = 'o'
    monitorType = 'auto'
    # subtract

    # --- output --------------------------------------------------------
    outputName = "fromEOS"
    outputFormat = ['Rqz.ort']
    incidentAngle = 'alphaF'
    qResolution = 0.01
    # timeSlize
    scale = [1]
    # autoscale

    # --- masking / filtering -------------------------------------------
    lambdaRange = [2., 15.]
    thetaRange = [-12., 12.]
    thetaRangeR = [-0.75, 0.75]
    yRange = [11, 41]
    qzRange = [0.005, 0.51]

    # --- instrument parameters ------------------------------------------
    chopperSpeed = 500
    chopperPhase = 0.0
    chopperPhaseOffset = -9.1
    muOffset = 0
    mu = 0
    nu = 0
    sampleModel = None
    lowCurrentThreshold = 50
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
@dataclass
class ReaderConfig:
    """Settings controlling where raw data files are located."""
    year: int                       # year used when resolving raw-file paths
    rawPath: Tuple[str]             # candidate directories searched for raw files
    startTime: Optional[float] = 0  # reference start time; semantics defined by the file reader (0 = default)
|
|
||||||
|
|
||||||
@dataclass
class ExperimentConfig:
    """Instrument and measurement settings for one reduction run."""
    incidentAngle: str                   # method used to obtain alpha_i (e.g. 'alphaF')
    chopperPhase: float                  # chopper phase
    chopperSpeed: float                  # chopper speed
    yRange: Tuple[float, float]          # accepted detector y range
    lambdaRange: Tuple[float, float]     # accepted wavelength range
    qzRange: Tuple[float, float]         # accepted q_z range
    monitorType: str                     # one of 'n', 'p', 't', 'auto' (see AmorReduction.monitorUnit)
    lowCurrentThreshold: float           # threshold for filtering low-current pulses -- TODO confirm units

    sampleModel: Optional[str] = None    # optional sample description string
    chopperPhaseOffset: float = 0        # offset added to the chopper phase
    mu: Optional[float] = None           # sample angle override
    nu: Optional[float] = None           # detector angle override
    muOffset: Optional[float] = None     # offset added to mu
|
|
||||||
|
|
||||||
@dataclass
class ReductionConfig:
    """Settings steering the reduction algorithm itself."""
    normalisationMethod: str            # '[u]nder, [o]ver or [d]irect illumination'
    qResolution: float                  # requested Delta q / q
    qzRange: Tuple[float, float]        # q_z range of the output grid
    thetaRange: Tuple[float, float]     # absolute final-angle filter
    thetaRangeR: Tuple[float, float]    # final-angle filter relative to nu - mu

    fileIdentifier: list = field(default_factory=lambda: ["0"])
    scale: list = field(default_factory=lambda: [1]) #per file scaling; if less elements than files use the last one

    # NOTE(review): annotated Tuple[bool, bool] but consumed as a numeric
    # (q_min, q_max) interval in AmorReduction.autoscale -- confirm intent
    autoscale: Optional[Tuple[bool, bool]] = None
    subtract: Optional[str] = None                    # name of an R(q_z) file to subtract
    normalisationFileIdentifier: Optional[list] = None
    timeSlize: Optional[list] = None                  # [interval, start, stop] in seconds (see read_timeslices)
|
|
||||||
|
|
||||||
@dataclass
class OutputConfig:
    """Destination and format of the reduced data."""
    outputFormats: list   # e.g. ['Rqz.ort', 'Rlt.ort']
    outputName: str       # stem of the output file name(s)
    outputPath: str       # destination directory
|
|
||||||
|
|
||||||
@dataclass
class EOSConfig:
    """Bundle of all sub-configurations for one EOS reduction run."""
    reader: ReaderConfig
    experiment: ExperimentConfig
    reduction: ReductionConfig
    output: OutputConfig

    # optional externally supplied call string; intentionally left without a
    # type annotation so @dataclass does not treat it as an init field
    _call_string_overwrite=None

    def call_string(self):
        """Reconstruct an ``eos.py`` command line equivalent to this config.

        Only options deviating from `Defaults` are emitted, so the string is
        a compact, reproducible record of the reduction (it is stored in the
        output file header). Returns the assembled command line as a string.
        """
        base = 'python eos.py'

        # input related options
        inpt = ''
        if self.reader.year:
            inpt += f' -Y {self.reader.year}'
        else:
            inpt += f' -Y {datetime.now().year}'
        if np.shape(self.reader.rawPath)[0] == 1:
            # NOTE(review): this embeds the repr of the one-element sequence;
            # confirm whether `self.reader.rawPath[0]` was intended
            inpt += f' --rawPath {self.reader.rawPath}'
        if self.reduction.subtract:
            inpt += f' -subtract {self.reduction.subtract}'
        if self.reduction.normalisationFileIdentifier:
            inpt += f' -n {" ".join(self.reduction.normalisationFileIdentifier)}'
        if self.reduction.fileIdentifier:
            inpt += f' -f {" ".join(self.reduction.fileIdentifier)}'

        # output related options
        otpt = ''
        if self.reduction.qResolution:
            otpt += f' -r {self.reduction.qResolution}'
        if self.output.outputPath != '.':
            # bug fix: the option was misspelled '--outputdPath' and was
            # appended to the input group; it is an output option
            otpt += f' --outputPath {self.output.outputPath}'
        if self.output.outputName:
            otpt += f' -o {self.output.outputName}'
        if self.output.outputFormats != ['Rqz.ort']:
            otpt += f' -of {" ".join(self.output.outputFormats)}'

        # masking / filtering options, only when deviating from the defaults
        mask = ''
        if self.experiment.yRange != Defaults.yRange:
            mask += f' -y {" ".join(str(ii) for ii in self.experiment.yRange)}'
        if self.experiment.lambdaRange != Defaults.lambdaRange:
            mask += f' -l {" ".join(str(ff) for ff in self.experiment.lambdaRange)}'
        if self.reduction.thetaRange != Defaults.thetaRange:
            mask += f' -t {" ".join(str(ff) for ff in self.reduction.thetaRange)}'
        elif self.reduction.thetaRangeR != Defaults.thetaRangeR:
            mask += f' -T {" ".join(str(ff) for ff in self.reduction.thetaRangeR)}'
        if self.experiment.qzRange != Defaults.qzRange:
            mask += f' -q {" ".join(str(ff) for ff in self.experiment.qzRange)}'

        # instrument parameter overrides
        para = ''
        if self.experiment.chopperPhase != Defaults.chopperPhase:
            para += f' --chopperPhase {self.experiment.chopperPhase}'
        if self.experiment.chopperPhaseOffset != Defaults.chopperPhaseOffset:
            para += f' --chopperPhaseOffset {self.experiment.chopperPhaseOffset}'
        if self.experiment.mu:
            para += f' --mu {self.experiment.mu}'
        elif self.experiment.muOffset:
            para += f' --muOffset {self.experiment.muOffset}'
        if self.experiment.nu:
            para += f' --nu {self.experiment.nu}'

        # sample model
        modl = ''
        if self.experiment.sampleModel:
            modl += f" --sampleModel '{self.experiment.sampleModel}'"

        # actions: scaling / time slicing
        acts = ''
        if self.reduction.autoscale:
            acts += f' --autoscale {" ".join(str(ff) for ff in self.reduction.autoscale)}'
        if self.reduction.scale != Defaults.scale:
            acts += f' --scale {self.reduction.scale}'
        if self.reduction.timeSlize:
            acts += f' --timeSlize {" ".join(str(ff) for ff in self.reduction.timeSlize)}'

        mlst = base + inpt + otpt
        if mask:
            mlst += mask
        if para:
            mlst += para
        if acts:
            mlst += acts
        if modl:
            mlst += modl

        # long command lines get extra separating blanks between the groups
        if len(mlst) > 70:
            mlst = base + ' ' + inpt + ' ' + otpt
            if mask:
                mlst += ' ' + mask
            if para:
                mlst += ' ' + para
            if acts:
                mlst += ' ' + acts
            if modl:
                mlst += ' ' + modl

        logging.debug(f'Argument list build in EOSConfig.call_string: {mlst}')
        return mlst
|
|
||||||
|
|
||||||
|
|
||||||
@@ -1,505 +0,0 @@
|
|||||||
import logging
|
|
||||||
import os
|
|
||||||
import sys
|
|
||||||
|
|
||||||
import numpy as np
|
|
||||||
from orsopy import fileio
|
|
||||||
|
|
||||||
from .command_line import expand_file_list
|
|
||||||
from .file_reader import AmorData
|
|
||||||
from .header import Header
|
|
||||||
from .options import EOSConfig
|
|
||||||
from .instrument import Grid
|
|
||||||
|
|
||||||
class AmorReduction:
|
|
||||||
    def __init__(self, config: EOSConfig):
        """Split the combined configuration into its parts and prepare the
        binning grid and the output header."""
        self.experiment_config = config.experiment
        self.reader_config = config.reader
        self.reduction_config = config.reduction
        self.output_config = config.output
        # binning grids depend on the requested q resolution and q_z range
        self.grid = Grid(config.reduction.qResolution, config.experiment.qzRange)

        self.header = Header()
        # record the (reconstructed) command line in the output header
        self.header.reduction.call = config.call_string()

        # unit string shown in log messages, per monitor type
        self.monitorUnit = {'n': 'cnts', 'p': 'mC', 't': 's', 'auto': 'pulses'}
|
|
||||||
|
|
||||||
def reduce(self):
|
|
||||||
if not os.path.exists(f'{self.output_config.outputPath}'):
|
|
||||||
logging.debug(f'Creating destination path {self.output_config.outputPath}')
|
|
||||||
os.system(f'mkdir {self.output_config.outputPath}')
|
|
||||||
|
|
||||||
# load or create normalisation matrix
|
|
||||||
if self.reduction_config.normalisationFileIdentifier:
|
|
||||||
self.create_normalisation_map(self.reduction_config.normalisationFileIdentifier[0])
|
|
||||||
else:
|
|
||||||
self.norm_lz = self.grid.lz()
|
|
||||||
self.normAngle = 1.
|
|
||||||
self.normMonitor = 1.
|
|
||||||
|
|
||||||
logging.warning('normalisation matrix: none requested')
|
|
||||||
|
|
||||||
# load R(q_z) curve to be subtracted:
|
|
||||||
if self.reduction_config.subtract:
|
|
||||||
self.sq_q, self.sR_q, self.sdR_q, self.sFileName = self.loadRqz(self.reduction_config.subtract)
|
|
||||||
logging.warning(f'loaded background file: {self.sFileName}')
|
|
||||||
self.header.reduction.corrections.append(f'background from \'{self.sFileName}\' subtracted')
|
|
||||||
self.subtract = True
|
|
||||||
else:
|
|
||||||
self.subtract = False
|
|
||||||
|
|
||||||
# load measurement data and do the reduction
|
|
||||||
self.datasetsRqz = []
|
|
||||||
self.datasetsRlt = []
|
|
||||||
for i, short_notation in enumerate(self.reduction_config.fileIdentifier):
|
|
||||||
self.read_file_block(i, short_notation)
|
|
||||||
|
|
||||||
# output
|
|
||||||
logging.warning('output:')
|
|
||||||
|
|
||||||
if 'Rqz.ort' in self.output_config.outputFormats:
|
|
||||||
self.save_Rqz()
|
|
||||||
|
|
||||||
if 'Rlt.ort' in self.output_config.outputFormats:
|
|
||||||
self.save_Rtl()
|
|
||||||
|
|
||||||
    def read_file_block(self, i, short_notation):
        """Read one file group (block index `i`) and reduce it, either
        time-sliced or as a whole, depending on the configuration."""
        logging.warning('reading input:')
        # each file block gets its own list of data files in the header
        self.header.measurement_data_files = []
        self.file_reader = AmorData(header=self.header,
                                    reader_config=self.reader_config,
                                    config=self.experiment_config,
                                    short_notation=short_notation)
        if self.reduction_config.timeSlize:
            self.read_timeslices(i)
        else:
            self.read_unsliced(i)
|
|
||||||
|
|
||||||
def read_unsliced(self, i):
|
|
||||||
lamda_e = self.file_reader.lamda_e
|
|
||||||
detZ_e = self.file_reader.detZ_e
|
|
||||||
self.monitor = np.sum(self.file_reader.monitorPerPulse)
|
|
||||||
logging.warning(f' monitor = {self.monitor:8.2f} {self.monitorUnit[self.experiment_config.monitorType]}')
|
|
||||||
qz_lz, qx_lz, ref_lz, err_lz, res_lz, lamda_lz, theta_lz, int_lz, self.mask_lz = self.project_on_lz(
|
|
||||||
self.file_reader, self.norm_lz, self.normAngle, lamda_e, detZ_e)
|
|
||||||
#if self.monitor>1 :
|
|
||||||
# ref_lz /= self.monitor
|
|
||||||
# err_lz /= self.monitor
|
|
||||||
try:
|
|
||||||
ref_lz *= self.reduction_config.scale[i]
|
|
||||||
err_lz *= self.reduction_config.scale[i]
|
|
||||||
except IndexError:
|
|
||||||
ref_lz *= self.reduction_config.scale[-1]
|
|
||||||
err_lz *= self.reduction_config.scale[-1]
|
|
||||||
if 'Rqz.ort' in self.output_config.outputFormats:
|
|
||||||
headerRqz = self.header.orso_header()
|
|
||||||
headerRqz.data_set = f'Nr {i} : mu = {self.file_reader.mu:6.3f} deg'
|
|
||||||
|
|
||||||
if qz_lz[0,int(np.shape(qz_lz)[1]/2)] < 0:
|
|
||||||
# assuming a 'measurement from below' when center of detector at negative qz
|
|
||||||
qz_lz *= -1
|
|
||||||
|
|
||||||
# projection on qz-grid
|
|
||||||
q_q, R_q, dR_q, dq_q = self.project_on_qz(qz_lz, ref_lz, err_lz, res_lz, self.norm_lz, self.mask_lz)
|
|
||||||
|
|
||||||
# The filtering is now done by restricting the qz-grid
|
|
||||||
#filter_q = np.where((self.experiment_config.qzRange[0]>q_q) & (q_q>self.experiment_config.qzRange[1]),
|
|
||||||
# False, True)
|
|
||||||
#q_q = q_q[filter_q]
|
|
||||||
#R_q = R_q[filter_q]
|
|
||||||
#dR_q = dR_q[filter_q]
|
|
||||||
#dq_q = dq_q[filter_q]
|
|
||||||
|
|
||||||
if self.reduction_config.autoscale:
|
|
||||||
if i==0:
|
|
||||||
R_q, dR_q = self.autoscale(q_q, R_q, dR_q)
|
|
||||||
else:
|
|
||||||
pRq_z = self.datasetsRqz[i-1].data[:, 1]
|
|
||||||
pdRq_z = self.datasetsRqz[i-1].data[:, 2]
|
|
||||||
R_q, dR_q = self.autoscale(q_q, R_q, dR_q, pRq_z, pdRq_z)
|
|
||||||
|
|
||||||
if self.subtract:
|
|
||||||
if len(q_q)==len(self.sq_q):
|
|
||||||
R_q -= self.sR_q
|
|
||||||
dR_q = np.sqrt(dR_q**2+self.sdR_q**2)
|
|
||||||
else:
|
|
||||||
logging.warning(
|
|
||||||
f'backgroung file {self.sFileName} not compatible with q_z scale ({len(self.sq_q)} vs. {len(q_q)})')
|
|
||||||
|
|
||||||
data = np.array([q_q, R_q, dR_q, dq_q]).T
|
|
||||||
orso_data = fileio.OrsoDataset(headerRqz, data)
|
|
||||||
self.datasetsRqz.append(orso_data)
|
|
||||||
if 'Rlt.ort' in self.output_config.outputFormats:
|
|
||||||
columns = [
|
|
||||||
fileio.Column('Qz', '1/angstrom', 'normal momentum transfer'),
|
|
||||||
fileio.Column('R', '', 'specular reflectivity'),
|
|
||||||
fileio.ErrorColumn(error_of='R', error_type='uncertainty', value_is='sigma'),
|
|
||||||
fileio.ErrorColumn(error_of='Qz', error_type='resolution', value_is='sigma'),
|
|
||||||
fileio.Column('lambda', 'angstrom', 'wavelength'),
|
|
||||||
fileio.Column('alpha_f', 'deg', 'final angle'),
|
|
||||||
fileio.Column('l', '', 'index of lambda-bin'),
|
|
||||||
fileio.Column('t', '', 'index of theta bin'),
|
|
||||||
fileio.Column('intensity', '', 'filtered neutron events per pixel'),
|
|
||||||
fileio.Column('norm', '', 'normalisation matrix'),
|
|
||||||
fileio.Column('mask', '', 'pixels used for calculating R(q_z)'),
|
|
||||||
fileio.Column('Qx', '1/angstrom', 'parallel momentum transfer'),
|
|
||||||
]
|
|
||||||
# data_source = file_reader.data_source
|
|
||||||
|
|
||||||
ts, zs = ref_lz.shape
|
|
||||||
lindex_lz = np.tile(np.arange(1, ts+1), (zs, 1)).T
|
|
||||||
tindex_lz = np.tile(np.arange(1, zs+1), (ts, 1))
|
|
||||||
|
|
||||||
j = 0
|
|
||||||
for item in zip(
|
|
||||||
qz_lz.T,
|
|
||||||
ref_lz.T,
|
|
||||||
err_lz.T,
|
|
||||||
res_lz.T,
|
|
||||||
lamda_lz.T,
|
|
||||||
theta_lz.T,
|
|
||||||
lindex_lz.T,
|
|
||||||
tindex_lz.T,
|
|
||||||
int_lz.T,
|
|
||||||
self.norm_lz.T,
|
|
||||||
np.where(self.mask_lz, 1, 0).T,
|
|
||||||
qx_lz.T,
|
|
||||||
):
|
|
||||||
data = np.array(list(item)).T
|
|
||||||
headerRlt = self.header.orso_header(columns=columns)
|
|
||||||
headerRlt.data_set = f'dataset_{i}_{j+1} : alpha_f = {theta_lz[0, j]:6.3f} deg'
|
|
||||||
orso_data = fileio.OrsoDataset(headerRlt, data)
|
|
||||||
self.datasetsRlt.append(orso_data)
|
|
||||||
j += 1
|
|
||||||
|
|
||||||
    def read_timeslices(self, i):
        """Reduce the events of file block `i` in consecutive time slices.

        `reduction_config.timeSlize` is read as [interval, start, stop] in
        seconds; missing entries default to start=0 and stop=end of data.
        One R(q_z) dataset per slice (with an extra time column) is appended
        to `self.datasetsRqz`.
        """
        # event wall times and pulse times, converted from ns to s
        wallTime_e = np.float64(self.file_reader.wallTime_e)/1e9
        pulseTimeS = np.float64(self.file_reader.pulseTimeS)/1e9
        interval = self.reduction_config.timeSlize[0]
        try:
            start = self.reduction_config.timeSlize[1]
        except IndexError:
            start = 0
        try:
            stop = self.reduction_config.timeSlize[2]
        except IndexError:
            stop = wallTime_e[-1]
        # make overwriting log lines possible by removing newline at the end
        #logging.StreamHandler.terminator = "\r"
        logging.warning(f'    time slizing')
        logging.info('      slize  time  monitor')
        for ti, time in enumerate(np.arange(start, stop, interval)):

            # select events and monitor pulses belonging to this slice
            filter_e = np.where((time<wallTime_e) & (wallTime_e<time+interval), True, False)
            lamda_e = self.file_reader.lamda_e[filter_e]
            detZ_e = self.file_reader.detZ_e[filter_e]
            filter_m = np.where((time<pulseTimeS) & (pulseTimeS<time+interval), True, False)
            self.monitor = np.sum(self.file_reader.monitorPerPulse[filter_m])
            logging.info(f'      {ti:<4d} {time:6.0f} {self.monitor:7.2f} {self.monitorUnit[self.experiment_config.monitorType]}')

            qz_lz, qx_lz, ref_lz, err_lz, res_lz, lamda_lz, theta_lz, int_lz, mask_lz = self.project_on_lz(
                self.file_reader, self.norm_lz, self.normAngle, lamda_e, detZ_e)
            # per-slice scaling; if fewer scale factors than files, reuse the last
            try:
                ref_lz *= self.reduction_config.scale[i]
                err_lz *= self.reduction_config.scale[i]
            except IndexError:
                ref_lz *= self.reduction_config.scale[-1]
                err_lz *= self.reduction_config.scale[-1]
            q_q, R_q, dR_q, dq_q = self.project_on_qz(qz_lz, ref_lz, err_lz, res_lz, self.norm_lz, mask_lz)

            # restrict the result to the requested q_z range
            filter_q = np.where((self.experiment_config.qzRange[0]<q_q) & (q_q<self.experiment_config.qzRange[1]),
                                True, False)
            q_q = q_q[filter_q]
            R_q = R_q[filter_q]
            dR_q = dR_q[filter_q]
            dq_q = dq_q[filter_q]

            if self.reduction_config.autoscale:
                R_q, dR_q = self.autoscale(q_q, R_q, dR_q)

            if self.subtract:
                if len(q_q)==len(self.sq_q):
                    R_q -= self.sR_q
                    dR_q = np.sqrt(dR_q**2+self.sdR_q**2)
                else:
                    # incompatible grid: give up on subtraction for all further slices
                    self.subtract = False
                    logging.warning(
                        f'background file {self.sFileName} not compatible with q_z scale ({len(self.sq_q)} vs. {len(q_q)})')

            # extra column: slice start time relative to the measurement start
            tme_q = np.ones(np.shape(q_q))*time
            data = np.array([q_q, R_q, dR_q, dq_q, tme_q]).T
            headerRqz = self.header.orso_header(
                extra_columns=[fileio.Column('time', 's', 'time relative to start of measurement series')])
            headerRqz.data_set = f'{i}_{ti}: time = {time:8.1f} s to {time+interval:8.1f} s'
            orso_data = fileio.OrsoDataset(headerRqz, data)
            self.datasetsRqz.append(orso_data)
        # reset normal logging behavior
        #logging.StreamHandler.terminator = "\n"
        # NOTE(review): reports the end time of the last processed slice
        logging.info(f'     done {time+interval:5.0f}')
|
|
||||||
|
|
||||||
    def save_Rqz(self):
        """Write all collected R(q_z) datasets to one `<outputName>.Rqz.ort`
        file in the output directory."""
        fname = os.path.join(self.output_config.outputPath, f'{self.output_config.outputName}.Rqz.ort')
        logging.warning(f'    {fname}')
        # human-readable summary placed on the second line of the .ort file
        theSecondLine = f' {self.header.experiment.title} | {self.header.experiment.start_date} | sample {self.header.sample.name} | R(q_z)'
        fileio.save_orso(self.datasetsRqz, fname, data_separator='\n', comment=theSecondLine)
|
|
||||||
|
|
||||||
    def save_Rtl(self):
        """Write all collected R(lambda, theta) datasets to one
        `<outputName>.Rlt.ort` file in the output directory.

        NOTE(review): the method name says 'Rtl' while the file suffix and
        dataset list say 'Rlt'; kept as-is because callers use this name.
        """
        fname = os.path.join(self.output_config.outputPath, f'{self.output_config.outputName}.Rlt.ort')
        logging.warning(f'    {fname}')
        # human-readable summary placed on the second line of the .ort file
        theSecondLine = f' {self.header.experiment.title} | {self.header.experiment.start_date} | sample {self.header.sample.name} | R(lambda, theta)'
        fileio.save_orso(self.datasetsRlt, fname, data_separator='\n', comment=theSecondLine)
|
|
||||||
|
|
||||||
def autoscale(self, q_q, R_q, dR_q, pR_q=[], pdR_q=[]):
|
|
||||||
autoscale = self.reduction_config.autoscale
|
|
||||||
if len(pR_q) == 0:
|
|
||||||
filter_q = np.where((autoscale[0]<=q_q)&(q_q<=autoscale[1]), True, False)
|
|
||||||
filter_q = np.where(dR_q>0, filter_q, False)
|
|
||||||
if len(filter_q[filter_q]) > 0:
|
|
||||||
scale = np.sum(R_q[filter_q]**2/dR_q[filter_q]) / np.sum(R_q[filter_q]/dR_q[filter_q])
|
|
||||||
else:
|
|
||||||
logging.warning(' automatic scaling not possible')
|
|
||||||
scale = 1.
|
|
||||||
else:
|
|
||||||
filter_q = np.where(np.isnan(pR_q*R_q), False, True)
|
|
||||||
filter_q = np.where(R_q>0, filter_q, False)
|
|
||||||
filter_q = np.where(pR_q>0, filter_q, False)
|
|
||||||
if len(filter_q[filter_q]) > 0:
|
|
||||||
scale = np.sum(R_q[filter_q]**3 * pR_q[filter_q] / (dR_q[filter_q]**2 * pdR_q[filter_q]**2)) \
|
|
||||||
/ np.sum(R_q[filter_q]**2 * pR_q[filter_q]**2 / (dR_q[filter_q]**2 * pdR_q[filter_q]**2))
|
|
||||||
else:
|
|
||||||
logging.warning(' automatic scaling not possible')
|
|
||||||
scale = 1.
|
|
||||||
R_q /= scale
|
|
||||||
dR_q /= scale
|
|
||||||
logging.info(f' scaling factor = {1/scale}')
|
|
||||||
|
|
||||||
return R_q, dR_q
|
|
||||||
|
|
||||||
    def project_on_qz(self, q_lz, R_lz, dR_lz, dq_lz, norm_lz, mask_lz):
        """Project the masked (lambda, z) maps onto the q_z grid.

        Performs weighted histogramming with the normalisation matrix as
        weight; empty q bins become NaN. Returns bin centers `q_q` and the
        averaged `R_q`, `dR_q`, `dq_q`.
        """
        q_q = self.grid.q()
        # flatten the maps and keep only pixels marked valid in the mask
        mask_lzf = mask_lz.flatten()
        q_lzf = q_lz.flatten()[mask_lzf]
        R_lzf = R_lz.flatten()[mask_lzf]
        dR_lzf = dR_lz.flatten()[mask_lzf]
        dq_lzf = dq_lz.flatten()[mask_lzf]
        norm_lzf = norm_lz.flatten()[mask_lzf]

        weights_lzf = norm_lzf
        #weights_lzf = np.sqrt(norm_lzf)
        #weights_lzf = 1 / dR_lzf

        # total weight per q bin; NaN marks empty bins so they propagate
        N_q = np.histogram(q_lzf, bins = q_q, weights = weights_lzf )[0]
        N_q = np.where(N_q > 0, N_q, np.nan)

        # weighted mean of R per q bin
        R_q = np.histogram(q_lzf, bins = q_q, weights = weights_lzf * R_lzf )[0]
        R_q = R_q / N_q

        # error propagation of the weighted mean
        dR_q = np.histogram(q_lzf, bins = q_q, weights = (weights_lzf * dR_lzf)**2 )[0]
        dR_q = np.sqrt( dR_q ) / N_q

        # TODO: different error propagations for dR and dq!
        # this is what should work:
        #dq_q = np.histogram(q_lzf, bins = q_q, weights = (weights_lzf * dq_lzf)**2 )[0]
        #dq_q = np.sqrt( dq_q ) / N_q
        # and this actually works:
        N_q = np.histogram(q_lzf, bins = q_q, weights = weights_lzf**2 )[0]
        N_q = np.where(N_q > 0, N_q, np.nan)
        dq_q = np.histogram(q_lzf, bins = q_q, weights = (weights_lzf * dq_lzf)**2 )[0]
        dq_q = np.sqrt( dq_q / N_q )

        # convert bin edges to bin centers
        q_q = 0.5 * (q_q + np.roll(q_q, 1))

        return q_q[1:], R_q, dR_q, dq_q
|
|
||||||
|
|
||||||
    def loadRqz(self, name):
        """Load an R(q_z) curve (first three columns) from `name` or
        `name.Rqz.ort` in the output directory.

        Returns (q, R, dR, resolved file name); terminates the program if
        neither file exists.
        """
        fname = os.path.join(self.output_config.outputPath, name)
        if os.path.exists(fname):
            fileName = fname
        elif os.path.exists(f'{fname}.Rqz.ort'):
            fileName = f'{fname}.Rqz.ort'
        else:
            sys.exit(f'### the background file \'{fname}\' does not exist! => stopping')

        q_q, Sq_q, dS_q = np.loadtxt(fileName, usecols=(0, 1, 2), comments='#', unpack=True)

        return q_q, Sq_q, dS_q, fileName
|
|
||||||
|
|
||||||
    def create_normalisation_map(self, short_notation):
        """Load a cached normalisation matrix or build one from the reference
        measurement files given by `short_notation`.

        Sets `self.norm_lz`, `self.normAngle`, `self.normMonitor` and
        `self.normFileList`, and records the correction in the header.
        """
        outputPath = self.output_config.outputPath
        normalisation_list = expand_file_list(short_notation)
        # cache file name: concatenation of all contributing file numbers
        name = str(normalisation_list[0])
        for i in range(1, len(normalisation_list), 1):
            name = f'{name}_{normalisation_list[i]}'
        n_path = os.path.join(outputPath, f'{name}.norm')
        if os.path.exists(n_path):
            # cached matrix found: load it instead of re-reading raw files
            logging.warning(f'normalisation matrix: found and using {n_path}')
            with open(n_path, 'rb') as fh:
                self.normFileList = np.load(fh, allow_pickle=True)
                self.normAngle = np.load(fh, allow_pickle=True)
                self.norm_lz = np.load(fh, allow_pickle=True)
                self.normMonitor = np.load(fh, allow_pickle=True)
            # strip directories, keep bare file names for the header
            for i, entry in enumerate(self.normFileList):
                self.normFileList[i] = entry.split('/')[-1]
            self.header.measurement_additional_files = self.normFileList
        else:
            logging.warning(f'normalisation matrix: using the files {normalisation_list}')
            fromHDF = AmorData(header=self.header,
                               reader_config=self.reader_config,
                               config=self.experiment_config,
                               short_notation=short_notation, norm=True)
            self.normAngle = fromHDF.nu - fromHDF.mu
            lamda_e = fromHDF.lamda_e
            detZ_e = fromHDF.detZ_e
            self.normMonitor = np.sum(fromHDF.monitorPerPulse)
            norm_lz, bins_l, bins_z = np.histogram2d(lamda_e, detZ_e, bins = (self.grid.lamda(), self.grid.z()))
            # require a minimum of counts per pixel; NaN marks invalid pixels
            norm_lz = np.where(norm_lz>2, norm_lz, np.nan)
            if self.reduction_config.normalisationMethod == 'd':
                # direct reference => invert map vertically
                self.norm_lz = np.flip(norm_lz, 1)
            else:
                # correct for reference sm reflectivity
                lamda_l = self.grid.lamda()
                theta_z = self.normAngle + fromHDF.delta_z
                lamda_lz = (self.grid.lz().T*lamda_l[:-1]).T
                theta_lz = self.grid.lz()*theta_z
                qz_lz = 4.0*np.pi * np.sin(np.deg2rad(theta_lz)) / lamda_lz
                # TODO: introduce variable for `m` and probably for the slope
                # model: R = 1 below q_c = 0.0217, linear decay up to m = 5
                Rsm_lz = np.ones(np.shape(qz_lz))
                Rsm_lz = np.where(qz_lz>0.0217, 1-(qz_lz-0.0217)*(0.0625/0.0217), Rsm_lz)
                Rsm_lz = np.where(qz_lz>0.0217*5, np.nan, Rsm_lz)
                self.norm_lz = norm_lz / Rsm_lz

            # cache the matrix, but only when statistics make it worth reusing
            if len(lamda_e) > 1e6:
                with open(n_path, 'wb') as fh:
                    np.save(fh, np.array(fromHDF.file_list), allow_pickle=False)
                    np.save(fh, np.array(self.normAngle), allow_pickle=False)
                    np.save(fh, self.norm_lz, allow_pickle=False)
                    np.save(fh, self.normMonitor, allow_pickle=False)
            self.normFileList = fromHDF.file_list
        self.header.reduction.corrections.append('normalisation with \'additional files\'')
|
|
||||||
|
|
||||||
    def project_on_lz(self, fromHDF, norm_lz, normAngle, lamda_e, detZ_e):
        """Project the events onto the (lambda, z) grid and normalise.

        Returns (qz, qx, R, dR, resolution, lambda, alpha_f, counts, mask),
        all as maps on the (lambda-bin, z-bin) grid.
        """
        # projection on lambda-z-grid
        lamda_l = self.grid.lamda()
        # final angle per detector row
        alphaF_z = fromHDF.nu - fromHDF.mu + fromHDF.delta_z
        # TODO: implement various methods to obtain alpha_i.
        #if self.experiment_config.incidentAngle == 'alphaF':
        #    # for specular reflectometry with a highly divergent beam
        #    alphaF_z = fromHDF.nu - fromHDF.mu + fromHDF.delta_z
        #elif self.experiment_config.incidentAngle == 'nu':
        #    # for specular reflectometry, using kappa and nu but ignoring mu
        #    alphaF_z = (fromHDF.nu + fromHDF.delta_z + fromHDF.kap + fromHDF.kad) / 2.
        #else:
        #    # using kappa, for a collimated incoming beam
        #    pass
        lamda_lz = (self.grid.lz().T*lamda_l[:-1]).T
        alphaF_lz = self.grid.lz()*alphaF_z

        # valid pixels: normalisation defined and away from the horizon
        mask_lz = np.where(np.isnan(norm_lz), False, True)
        mask_lz = np.logical_and(mask_lz, np.where(np.absolute(alphaF_lz)>5e-3, True, False))
        if self.reduction_config.thetaRangeR[1]<12:
            # theta filter relative to the specular angle nu - mu
            t0 = fromHDF.nu - fromHDF.mu
            mask_lz = np.logical_and(mask_lz, np.where(alphaF_lz-t0 >= self.reduction_config.thetaRangeR[0], True, False))
            mask_lz = np.logical_and(mask_lz, np.where(alphaF_lz-t0 <= self.reduction_config.thetaRangeR[1], True, False))
        elif self.reduction_config.thetaRange[1]<12:
            # absolute theta filter
            mask_lz = np.logical_and(mask_lz, np.where(alphaF_lz >= self.reduction_config.thetaRange[0], True, False))
            mask_lz = np.logical_and(mask_lz, np.where(alphaF_lz <= self.reduction_config.thetaRange[1], True, False))
        else:
            # no filter requested: restrict to the beam divergence
            # NOTE(review): this mutates reduction_config as a side effect
            self.reduction_config.thetaRange = [fromHDF.nu - fromHDF.mu - fromHDF.div/2,
                                                fromHDF.nu - fromHDF.mu + fromHDF.div/2]
            mask_lz = np.logical_and(mask_lz, np.where(alphaF_lz >= self.reduction_config.thetaRange[0], True, False))
            mask_lz = np.logical_and(mask_lz, np.where(alphaF_lz <= self.reduction_config.thetaRange[1], True, False))
        if self.experiment_config.lambdaRange[1]<15:
            # wavelength filter
            mask_lz = np.logical_and(mask_lz, np.where(lamda_lz >= self.experiment_config.lambdaRange[0], True, False))
            mask_lz = np.logical_and(mask_lz, np.where(lamda_lz <= self.experiment_config.lambdaRange[1], True, False))

        # gravity correction
        #alphaF_lz += np.rad2deg( np.arctan( 3.07e-10 * (fromHDF.detectorDistance + detXdist_e) * lamda_lz**2 ) )
        alphaF_lz += np.rad2deg( np.arctan( 3.07e-10 * fromHDF.detectorDistance * lamda_lz**2 ) )

        if self.experiment_config.incidentAngle == 'alphaF':
            # alpha_i == alpha_f: qx is identically zero
            #alphaI_lz = alphaF_lz
            qz_lz = 4.0*np.pi * np.sin(np.deg2rad(alphaF_lz)) / lamda_lz
            qx_lz = self.grid.lz() * 0.
        else:
            # separate incident angle from mu + kappa
            alphaI_lz = self.grid.lz()*(fromHDF.mu + fromHDF.kap + fromHDF.kad)
            qz_lz = 2.0*np.pi * (np.sin(np.deg2rad(alphaF_lz)) + np.sin(np.deg2rad(alphaI_lz))) / lamda_lz
            qx_lz = 2.0*np.pi * (np.cos(np.deg2rad(alphaF_lz)) - np.cos(np.deg2rad(alphaI_lz))) / lamda_lz

        # event counts per (lambda, z) bin
        int_lz, bins_l, bins_z = np.histogram2d(lamda_e, detZ_e, bins = (lamda_l, self.grid.z()))
        # cut normalisation sample horizon
        int_lz = np.where(mask_lz, int_lz, np.nan)
        thetaF_lz = np.where(mask_lz, alphaF_lz, np.nan)

        if self.reduction_config.normalisationMethod == 'o':
            logging.debug('    assuming an overilluminated sample and correcting for the angle of incidence')
            thetaN_z = fromHDF.delta_z + normAngle
            thetaN_lz = np.ones(np.shape(norm_lz))*thetaN_z
            thetaN_lz = np.where(np.absolute(thetaN_lz)>5e-3, thetaN_lz, np.nan)
            mask_lz = np.logical_and(mask_lz, np.where(np.absolute(thetaN_lz)>5e-3, True, False))
            # footprint correction: scale with ratio of the two angles
            ref_lz = (int_lz * np.absolute(thetaN_lz)) / (norm_lz * np.absolute(thetaF_lz))
        elif self.reduction_config.normalisationMethod == 'u':
            logging.debug('    assuming an underilluminated sample and ignoring the angle of incidence')
            ref_lz = (int_lz / norm_lz)
        elif self.reduction_config.normalisationMethod == 'd':
            logging.debug('    assuming direct beam for normalisation and ignoring the angle of incidence')
            ref_lz = (int_lz / norm_lz)
        else:
            logging.error('unknown normalisation method! Use [u]nder, [o]ver or [d]irect illumination')
            ref_lz = (int_lz / norm_lz)
        if self.monitor > 1e-6 :
            ref_lz *= self.normMonitor / self.monitor
        else:
            logging.info('    too small monitor value for normalisation -> ignoring monitors')
        # counting statistics of measurement and normalisation combined
        err_lz = ref_lz * np.sqrt( 1/(int_lz+.1) + 1/norm_lz )

        # TODO: allow for non-ideal Delta lambda / lambda (rather than 2.2%)
        res_lz = np.ones((np.shape(lamda_l[:-1])[0], np.shape(alphaF_z)[0])) * 0.022**2
        res_lz = res_lz + (0.008/alphaF_lz)**2
        res_lz = qz_lz * np.sqrt(res_lz)

        return qz_lz, qx_lz, ref_lz, err_lz, res_lz, lamda_lz, alphaF_lz, int_lz, mask_lz
|
|
||||||
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def histogram2d_lz(lamda_e, detZ_e, bins):
|
|
||||||
"""
|
|
||||||
Perform binning operation equivalent to numpy bin2d for the sepcial case
|
|
||||||
of the second dimension using integer positions (pre-defined pixels).
|
|
||||||
Based on the devide_bin algorithm below.
|
|
||||||
"""
|
|
||||||
dimension = bins[1].shape[0]-1
|
|
||||||
if not (np.array(bins[1])==np.arange(dimension+1)).all():
|
|
||||||
raise ValueError("histogram2d_lz requires second bin dimension to be contigous integer range")
|
|
||||||
binning = AmorReduction.devide_bin(lamda_e, detZ_e.astype(np.int64), bins[0], dimension)
|
|
||||||
return np.array(binning), bins[0], bins[1]
|
|
||||||
|
|
||||||
    @staticmethod
    def devide_bin(lambda_e, position_e, lamda_edges, dimension):
        '''
        Use a divide and conquer strategy to bin the data. For the actual
        binning the numpy bincount function is used, as it is much faster
        than histogram for counting of integer values.

        :param lambda_e: Array of wavelength for each event
        :param position_e: Array of integer positional indices for each event
        :param lamda_edges: The edges of bins to be used for the histogram
        :param dimension: number of position buckets in the output array

        :return: 2D list of dimensions (lambda, position) of counts
        '''
        if len(lambda_e)==0:
            # no more events in range, return empty bins
            # NOTE(review): the replication aliases one list object per row;
            # callers convert the result to an ndarray, so values are safe
            return [np.zeros(dimension, dtype=np.int64).tolist()]*(len(lamda_edges)-1)
        if len(lamda_edges)==2:
            # deepest recursion reached, all items should be within the two ToF edges
            return [np.bincount(position_e, minlength=dimension).tolist()]
        # split all events into two time of flight regions
        split_idx = len(lamda_edges)//2
        left_region = lambda_e<lamda_edges[split_idx]
        left_list = AmorReduction.devide_bin(lambda_e[left_region], position_e[left_region],
                                             lamda_edges[:split_idx+1], dimension)
        right_region = np.logical_not(left_region)
        right_list = AmorReduction.devide_bin(lambda_e[right_region], position_e[right_region],
                                              lamda_edges[split_idx:], dimension)
        return left_list+right_list
|
|
||||||
43
nicos_config.md
Normal file
43
nicos_config.md
Normal file
@@ -0,0 +1,43 @@
|
|||||||
|
EOS-Service
|
||||||
|
===========
|
||||||
|
|
||||||
|
EOS can be used as a histogram service to send images to the Nicos instrument control software.
|
||||||
|
For that you need to run it on the amor instrument computer:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
amor-nicos {-vv}
|
||||||
|
```
|
||||||
|
|
||||||
|
The instrument config in Nicos needs to configure a Kafka JustBinItImage instance
|
||||||
|
for each histogram that should be used:
|
||||||
|
|
||||||
|
```python
|
||||||
|
hist_yz = device('nicos_sinq.devices.just_bin_it.JustBinItImage',
|
||||||
|
description = 'Detector pixel histogram over all times',
|
||||||
|
hist_topic = 'AMOR_histograms_YZ',
|
||||||
|
data_topic = 'AMOR_detector',
|
||||||
|
command_topic = 'AMOR_histCommands',
|
||||||
|
brokers = ['linkafka01.psi.ch:9092'],
|
||||||
|
unit = 'evts',
|
||||||
|
hist_type = '2-D SANSLLB',
|
||||||
|
det_width = 446,
|
||||||
|
det_height = 64,
|
||||||
|
),
|
||||||
|
hist_tofz = device('nicos_sinq.devices.just_bin_it.JustBinItImage',
|
||||||
|
description = 'Detector time of flight vs. z-pixel histogram over all y-values',
|
||||||
|
hist_topic = 'AMOR_histograms_TofZ',
|
||||||
|
data_topic = 'AMOR_detector',
|
||||||
|
command_topic = 'AMOR_histCommands',
|
||||||
|
brokers = ['linkafka01.psi.ch:9092'],
|
||||||
|
unit = 'evts',
|
||||||
|
hist_type = '2-D SANSLLB',
|
||||||
|
det_width = 118,
|
||||||
|
det_height = 446,
|
||||||
|
),
|
||||||
|
```
|
||||||
|
|
||||||
|
These images have then to be set in the detector configuration as _images_ items:
|
||||||
|
|
||||||
|
```
|
||||||
|
images=['hist_yz', 'hist_tofz'],
|
||||||
|
```
|
||||||
@@ -2,3 +2,7 @@ numpy
|
|||||||
h5py
|
h5py
|
||||||
orsopy
|
orsopy
|
||||||
numba
|
numba
|
||||||
|
matplotlib
|
||||||
|
tabulate
|
||||||
|
backports.strenum; python_version<"3.11"
|
||||||
|
backports.zoneinfo; python_version<"3.9"
|
||||||
|
|||||||
15
setup.cfg
15
setup.cfg
@@ -3,7 +3,7 @@ universal = 1
|
|||||||
|
|
||||||
[metadata]
|
[metadata]
|
||||||
name = amor_eos
|
name = amor_eos
|
||||||
version = attr: libeos.__version__
|
version = attr: eos.__version__
|
||||||
author = Jochen Stahn - Paul Scherrer Institut
|
author = Jochen Stahn - Paul Scherrer Institut
|
||||||
author_email = jochen.stahn@psi.ch
|
author_email = jochen.stahn@psi.ch
|
||||||
description = EOS reflectometry reduction for AMOR instrument
|
description = EOS reflectometry reduction for AMOR instrument
|
||||||
@@ -19,14 +19,21 @@ classifiers =
|
|||||||
[options]
|
[options]
|
||||||
python_requires = >=3.8
|
python_requires = >=3.8
|
||||||
packages =
|
packages =
|
||||||
libeos
|
eos
|
||||||
scripts =
|
|
||||||
eos.py
|
|
||||||
install_requires =
|
install_requires =
|
||||||
numpy
|
numpy
|
||||||
h5py
|
h5py
|
||||||
orsopy
|
orsopy
|
||||||
numba
|
numba
|
||||||
|
backports.strenum; python_version<"3.11"
|
||||||
|
backports.zoneinfo; python_version<"3.9"
|
||||||
|
|
||||||
[project.urls]
|
[project.urls]
|
||||||
Homepage = "https://github.com/jochenstahn/amor"
|
Homepage = "https://github.com/jochenstahn/amor"
|
||||||
|
|
||||||
|
[options.entry_points]
|
||||||
|
console_scripts =
|
||||||
|
eos = eos.__main__:main
|
||||||
|
eosls = eos.ls:main
|
||||||
|
events2histogram = eos.e2h:main
|
||||||
|
amor-nicos = eos.nicos:main
|
||||||
|
|||||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
BIN
test_data/amor2025n005952.hdf
LFS
Normal file
BIN
test_data/amor2025n005952.hdf
LFS
Normal file
Binary file not shown.
BIN
test_data/amor2025n006003.hdf
LFS
Normal file
BIN
test_data/amor2025n006003.hdf
LFS
Normal file
Binary file not shown.
BIN
test_data/amor2025n006004.hdf
LFS
Normal file
BIN
test_data/amor2025n006004.hdf
LFS
Normal file
Binary file not shown.
BIN
test_data/amor2025n006005.hdf
LFS
Normal file
BIN
test_data/amor2025n006005.hdf
LFS
Normal file
Binary file not shown.
BIN
test_data/amor2026n000826.hdf
LFS
Normal file
BIN
test_data/amor2026n000826.hdf
LFS
Normal file
Binary file not shown.
31
tests/analyze_hdf.py
Normal file
31
tests/analyze_hdf.py
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
"""
|
||||||
|
Small helper to find information about hdf datafiles for debugging
|
||||||
|
"""
|
||||||
|
|
||||||
|
import h5py
|
||||||
|
|
||||||
|
def rec_tree(group, min_size=128):
    """
    Recursively walk an h5py group-like tree and report the sizes of its entries.

    :param group: node to inspect; anything with a ``keys`` attribute is
                  treated as a group, anything with ``size`` as a leaf dataset
    :param min_size: only entries whose accumulated size exceeds this value
                     appear in the returned tree
    :return: tuple (tree, size) where tree is a nested dict of entries larger
             than min_size (None for leaves) and size is the total element
             count below this node
    """
    if hasattr(group, 'keys'):
        output = {}
        total_size = 0
        for key in group.keys():
            subkeys, size = rec_tree(group[key], min_size)
            total_size += size
            if size>min_size:
                if subkeys:
                    output[key] = subkeys
                else:
                    output[key] = size
        # bug fix: return the accumulated total, not the last child's size
        return output, total_size
    elif hasattr(group, 'size'):
        # leaf dataset: report its element count
        return None, group.size
    else:
        # unknown node type contributes nothing
        return None, 0
|
||||||
|
|
||||||
|
if __name__ == "__main__":
    import sys
    for fi in sys.argv[1:]:
        print(fi)
        # bug fix: previously rec_tree received the raw filename string of
        # argv[1] (for every iteration), which it cannot inspect; open each
        # file and walk its group tree instead
        with h5py.File(fi, 'r') as hdf:
            print(rec_tree(hdf))
|
||||||
|
|
||||||
|
|
||||||
64
tests/mock_data.py
Normal file
64
tests/mock_data.py
Normal file
@@ -0,0 +1,64 @@
|
|||||||
|
"""
|
||||||
|
Generate a mock dataset in memory for running unit tests.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import h5py
|
||||||
|
import numpy as np
|
||||||
|
|
||||||
|
# String metadata written into the mock file: HDF path (relative to /entry1)
# mapped to the text stored as a one-element utf-8 byte array (see mock_data).
MOCK_METADATA = {
    'title': 'Testdata',
    'proposal_id': 'none',
    'user/name': 'test user',
    'user/email': 'test@user.de',
    'sample/name': 'test sample',
    'sample/model': 'air | Fe 12 | Si',
    'Amor/source/name': 'SINQ',
    'start_time': '2025-01-01 00:00:01',
    }
# Typed metadata written into the mock file: HDF path mapped to
# (value, numpy dtype). List values are stored as 1D arrays, scalars are
# wrapped into one-element arrays (see mock_data).
MOCK_META_TYPED = {
    'Amor/chopper/pair_separation': (1000.0, np.float32),
    'Amor/detector/transformation/distance': (4000.0, np.float64),
    'Amor/instrument_control_parameters/kappa': (1000.0, np.float64),
    'Amor/instrument_control_parameters/kappa_offset': (1000.0, np.float64),
    'Amor/instrument_control_parameters/div': (1.6, np.float64),
    'Amor/chopper/ch1_trigger_phase': (-9.1, np.float64),
    'Amor/chopper/ch2_trigger_phase': (6.75, np.float64),
    'Amor/chopper/ch2_trigger/event_time_zero': ([0.0]*10, np.uint64),
    'Amor/chopper/ch2_trigger/event_time_offset': ([0.0]*10, np.uint32),
    'Amor/chopper/rotation_speed': (500.0, np.float64),
    'Amor/chopper/phase': (0.0, np.float64),
    'Amor/polarization/configuration/value': (0.0, np.float64),
    }
|
||||||
|
|
||||||
|
def mock_data(mu=1.0, nu=2.0):
    """
    Build an in-memory HDF5 file mimicking an AMOR raw datafile.

    :param mu: sample angle written to Amor/instrument_control_parameters/mu
    :param nu: detector angle written to Amor/instrument_control_parameters/nu
    :return: open in-memory h5py.File with an 'entry1' group populated from
             MOCK_METADATA and MOCK_META_TYPED
    """
    hdf = h5py.File.in_memory() # requires h5py >=3.13
    ds = hdf.create_group('entry1')
    for key, value in MOCK_METADATA.items():
        ds.create_dataset(key, data=np.array([value.encode('utf-8')]))
    for key, (value, dtype) in MOCK_META_TYPED.items():
        if type(value) is list:
            ds.create_dataset(key, data=np.array(value), dtype=dtype)
        else:
            # scalars are stored as one-element arrays like in the real files
            ds.create_dataset(key, data=np.array([value]), dtype=dtype)

    # bug fix: the arrays must be passed via the data= keyword; passed
    # positionally they would be interpreted as the dataset *shape*
    # (h5py signature is create_dataset(name, shape=None, dtype=None, data=None))
    ds.create_dataset('Amor/instrument_control_parameters/mu', data=np.array([mu]), dtype=np.float64)
    ds.create_dataset('Amor/instrument_control_parameters/nu', data=np.array([nu]), dtype=np.float64)

    return hdf
||||||
|
|
||||||
|
def compare_with_real_data(fname):
    """
    Check that a real AMOR datafile contains every entry expected by the mock
    data, printing each path that is missing or has an unexpected dtype.

    :param fname: path of the HDF5 file to inspect
    """
    # use a context manager so the file handle is always released
    with h5py.File(fname, 'r') as hdf:
        ds = hdf['entry1']
        for key in MOCK_METADATA:
            try:
                # probe that the entry exists and holds utf-8 text;
                # the decoded value itself is intentionally discarded
                ds[key][0].decode('utf-8')
            except KeyError:
                print(f'/entry1/{key} does not exist in file')
        for key, (value, dtype) in MOCK_META_TYPED.items():
            try:
                item = ds[key]
            except KeyError:
                print(f'/entry1/{key} does not exist in file')
            else:
                if item.dtype != dtype:
                    print(f'/entry1/{key} does not match {dtype}, dataset is {item.dtype}')
562
tests/test_event_handling.py
Normal file
562
tests/test_event_handling.py
Normal file
@@ -0,0 +1,562 @@
|
|||||||
|
import os
|
||||||
|
import numpy as np
|
||||||
|
import logging
|
||||||
|
|
||||||
|
from unittest import TestCase
|
||||||
|
from datetime import datetime
|
||||||
|
from copy import deepcopy
|
||||||
|
|
||||||
|
from orsopy.fileio import Person, Experiment, Sample, InstrumentSettings, Value, ValueRange, Polarization
|
||||||
|
|
||||||
|
from eos import const
|
||||||
|
from eos.header import Header
|
||||||
|
from eos.event_data_types import EVENT_BITMASKS, AmorGeometry, AmorTiming, AmorEventStream, \
|
||||||
|
EventDataAction, EventDatasetProtocol, PACKET_TYPE, PC_TYPE, PULSE_TYPE, EVENT_TYPE, append_fields
|
||||||
|
from eos.event_handling import ApplyPhaseOffset, ApplyParameterOverwrites, CorrectChopperPhase, CorrectSeriesTime, \
|
||||||
|
AssociatePulseWithMonitor, FilterMonitorThreshold, FilterStrangeTimes, TofTimeCorrection, ApplyMask
|
||||||
|
from eos.event_analysis import ExtractWalltime, MergeFrames, AnalyzePixelIDs, CalculateWavelength, CalculateQ, \
|
||||||
|
FilterQzRange, FilterByLog
|
||||||
|
from eos.options import MonitorType, IncidentAngle, ExperimentConfig
|
||||||
|
|
||||||
|
|
||||||
|
class MockEventData:
    """
    Simulated dataset to be used with event handling unit tests.

    Provides the geometry/timing/data attribute triple that the event
    handling actions operate on, filled with randomly generated events.
    A deep copy of the freshly generated data is kept in ``orig_data`` so
    tests can compare against the untouched state.
    """
    geometry: AmorGeometry
    timing: AmorTiming
    data: AmorEventStream

    def __init__(self):
        # fixed, arbitrary instrument geometry and chopper timing for tests
        self.geometry = AmorGeometry(mu=2.0, nu=1.0, kap=0.5, kad=0.0, div=1.5,
                                     chopperSeparation=1000.0, detectorDistance=4000., chopperDetectorDistance=18842.)
        self.timing = AmorTiming(
                ch1TriggerPhase=-9.1, ch2TriggerPhase=6.75,
                chopperPhase=0.17, chopperSpeed=500., tau=0.06
                )
        self.create_data()

    def create_data(self):
        """Generate a fresh random event stream and keep an untouched copy."""
        # list of events, here with random time of flight and pixel location
        events = np.recarray((10000, ), dtype=EVENT_TYPE)
        events.tof = np.random.uniform(low=0., high=0.12, size=events.shape)
        events.pixelID = np.random.randint(0, 28671, size=events.shape)
        events.mask = 0

        # list of data packets containing previous events
        packets = np.recarray((1000,), dtype=PACKET_TYPE)
        packets.start_index = np.linspace(0, events.shape[0]-1, packets.shape[0], dtype=np.uint32)
        # packet times span one hour (nanosecond epoch timestamps)
        packets.time = np.linspace(1700000000000000000, 1700000000000000000+3_600_000_000,
                                   packets.shape[0], dtype=np.int64)

        # chopper pulses within the measurement time, one per packet
        pulses = np.recarray((packets.shape[0],), dtype=PULSE_TYPE)
        pulses.monitor = 1.0
        pulses.time = packets.time

        # proton current information with independent timing
        proton_current = np.recarray((50,), dtype=PC_TYPE)
        proton_current.current = 1500.0
        # random entries with no current, so threshold filters have hits
        proton_current[np.random.randint(0, proton_current.shape[0]-1, 10)] = 0. # random time with no current
        proton_current.time = np.linspace(1700000000000000300, 1700000000000000000+3_600_000_000,
                                          proton_current.shape[0], dtype=np.int64)

        self.data = AmorEventStream(events, packets, pulses, proton_current)
        # untouched reference copy for before/after comparisons in tests
        self.orig_data = deepcopy(self.data)


    def append(self, other):
        # the mock never merges datasets; fail loudly if a test tries
        raise NotImplementedError("Just for testing, no append")

    def update_header(self, header:Header):
        # update a header with the information read from file
        header.owner = Person(name="test user", affiliation='PSI')
        header.experiment = Experiment(title='test experiment', instrument='amor',
                                       start_date=datetime.now(), probe="neutron")
        header.sample = Sample(name='test sample')
        header.measurement_instrument_settings = InstrumentSettings(incident_angle=Value(1.5, 'deg'),
                                                                    wavelength = ValueRange(3.0, 12.5, 'angstrom'),
                                                                    polarization = Polarization.unpolarized)

    def update_info_from_logs(self):
        # NOTE(review): self.instrument_settings and self.sample are never set
        # on this mock class, so the branches below would raise AttributeError
        # if exercised — confirm whether this method is actually used by tests.
        RELEVANT_ITEMS = ['sample_temperature', 'sample_magnetic_field', 'polarization_config_label']
        for key, log in self.data.device_logs.items():
            if key not in RELEVANT_ITEMS:
                continue
            if log.value.dtype in [np.int8, np.int16, np.int32, np.int64]:
                # for integer items (flags) report the most common one
                value = np.bincount(log.value).argmax()
                if logging.getLogger().getEffectiveLevel() <= logging.DEBUG \
                        and np.unique(log.value).shape[0]>1:
                    logging.debug(f' filtered values for {key} not unique, '
                                  f'has {np.unique(log.value).shape[0]} values')
            else:
                # floating point logs are reduced to their mean value
                value = log.value.mean()
            if key == 'polarization_config_label':
                self.instrument_settings.polarization = Polarization(const.polarizationConfigs[value])
            elif key == 'sample_temperature':
                # NOTE(review): 'magnitue' looks like a typo for 'magnitude' —
                # confirm against the orsopy sample_parameters API
                self.sample.sample_parameters['temperature'].magnitue = value
            elif key == 'sample_magnetic_field':
                self.sample.sample_parameters['magnetic_field'].magnitue = value
||||||
|
|
||||||
|
class TestActionClass(TestCase):
    """Tests for the EventDataAction base: composition via `|`, header updates, hashing."""

    @classmethod
    def setUpClass(cls):
        """
        Create test classes to be used.

        T1/T2/T4 add distinct powers of two to the event mask so that any
        combination of applied actions is identifiable from the mask sum.
        H1/H2/HN only modify the header (sample name), to test header chaining.
        """
        class T1(EventDataAction):
            def perform_action(self, event: EventDatasetProtocol):
                event.data.events.mask += 1
        class T2(EventDataAction):
            def perform_action(self, event: EventDatasetProtocol):
                event.data.events.mask += 2
        class T4(EventDataAction):
            def perform_action(self, event: EventDatasetProtocol):
                event.data.events.mask += 4
        cls.T1=T1; cls.T2=T2; cls.T4=T4

        class H1(EventDataAction):
            def perform_action(self, event: EventDatasetProtocol):
                ...
            def update_header(self, header:Header) ->None:
                header.sample.name = 'h1'
        class H2(EventDataAction):
            def perform_action(self, event: EventDatasetProtocol):
                ...
            def update_header(self, header: Header) -> None:
                header.sample.name = 'h2'
        class HN(EventDataAction):
            # parameterized header action: writes the name given at construction
            def __init__(self, n):
                self._n = n
            def perform_action(self, event: EventDatasetProtocol):
                ...
            def update_header(self, header: Header) -> None:
                header.sample.name = self._n
        cls.H1=H1; cls.H2=H2; cls.HN = HN

    def setUp(self):
        # fresh mock dataset and header for every test
        self.d = MockEventData()
        self.header = Header()
        self.d.update_header(self.header)

    def test_individual(self):
        # each action applied alone adds its own bit value to the mask
        t1 = self.T1()
        t2 = self.T2()
        t4 = self.T4()

        np.testing.assert_array_equal(self.d.data.events.mask, 0)
        t1.perform_action(self.d)
        np.testing.assert_array_equal(self.d.data.events.mask, 1)
        t2.perform_action(self.d)
        np.testing.assert_array_equal(self.d.data.events.mask, 3)
        t4.perform_action(self.d)
        np.testing.assert_array_equal(self.d.data.events.mask, 7)

    def test_header(self):
        # the last update_header applied wins
        h1 = self.H1()
        h2 = self.H2()
        h3 = self.HN('h3')
        h4 = self.HN('h4')

        h1.update_header(self.header)
        self.assertEqual(self.header.sample.name, 'h1')
        h2.update_header(self.header)
        self.assertEqual(self.header.sample.name, 'h2')
        h3.update_header(self.header)
        self.assertEqual(self.header.sample.name, 'h3')
        h4.update_header(self.header)
        self.assertEqual(self.header.sample.name, 'h4')

    def test_combination(self):
        # actions combined with `|` apply left to right; mask sums accumulate
        t1 = self.T1()
        t2 = self.T2()
        t4 = self.T4()
        t12 = t1 | t2
        t24 = t2 | t4
        t1224 = t1 | t2 | t2 | t4
        t1224b = t12 | t24

        np.testing.assert_array_equal(self.d.data.events.mask, 0)
        t12.perform_action(self.d)
        np.testing.assert_array_equal(self.d.data.events.mask, 3)       # +1+2
        t24.perform_action(self.d)
        np.testing.assert_array_equal(self.d.data.events.mask, 9)       # +2+4

        t1224.perform_action(self.d)
        np.testing.assert_array_equal(self.d.data.events.mask, 18)      # +1+2+2+4
        t1224b.perform_action(self.d)
        np.testing.assert_array_equal(self.d.data.events.mask, 27)      # +1+2+2+4


    def test_combine_header(self):
        # combined header updates also apply left to right: last one wins
        h1 = self.H1()
        h2 = self.H2()
        h3 = self.HN('h3')
        h4 = self.HN('h4')

        (h1|h2).update_header(self.header)
        self.assertEqual(self.header.sample.name, 'h2')
        (h2|h1).update_header(self.header)
        self.assertEqual(self.header.sample.name, 'h1')
        (h3|h4).update_header(self.header)
        self.assertEqual(self.header.sample.name, 'h4')
        (h4|h3).update_header(self.header)
        self.assertEqual(self.header.sample.name, 'h3')

    def test_abstract_misssing(self):
        # subclass without the abstract perform_action cannot be instantiated
        with self.assertRaises(TypeError):
            class E(EventDataAction):
                ...
            _ = E()

    def test_hash(self):
        """
        Check that hashes of different actions are different but
        instances of same action have same hash
        """
        t1 = self.T1()
        t1b = self.T1()
        t2 = self.T2()
        t4 = self.T4()
        h3 = self.HN('h3')
        h3b = self.HN('h3')
        h4 = self.HN('h4')

        self.assertNotEqual(t1.action_hash(), t2.action_hash())
        self.assertNotEqual(t2.action_hash(), t4.action_hash())
        self.assertNotEqual(t1.action_hash(), t4.action_hash())
        self.assertNotEqual(h3.action_hash(), h4.action_hash())
        self.assertEqual(t1.action_hash(), t1b.action_hash())
        self.assertEqual(h3.action_hash(), h3b.action_hash())
|
||||||
|
|
||||||
|
class TestSimpleActions(TestCase):
|
||||||
|
def setUp(self):
|
||||||
|
self.d = MockEventData()
|
||||||
|
self.header = Header()
|
||||||
|
self.d.update_header(self.header)
|
||||||
|
|
||||||
|
def test_chopper_phase(self):
|
||||||
|
cp = CorrectChopperPhase()
|
||||||
|
cp.perform_action(self.d)
|
||||||
|
np.testing.assert_array_equal(
|
||||||
|
self.d.data.events.tof,
|
||||||
|
self.d.orig_data.events.tof+
|
||||||
|
self.d.timing.tau*(self.d.timing.ch1TriggerPhase-self.d.timing.chopperPhase/2)/180
|
||||||
|
)
|
||||||
|
|
||||||
|
def _extract_walltime(self):
|
||||||
|
# Extract wall time for events and orig copy
|
||||||
|
wt = ExtractWalltime()
|
||||||
|
d = self.d.data
|
||||||
|
self.d.data = self.d.orig_data
|
||||||
|
wt.perform_action(self.d)
|
||||||
|
self.d.data = d
|
||||||
|
wt.perform_action(self.d)
|
||||||
|
|
||||||
|
def test_extract_walltime(self):
|
||||||
|
self._extract_walltime()
|
||||||
|
# wallTime should be always a time present in the packet times
|
||||||
|
np.testing.assert_array_equal(np.isin(self.d.data.events.wallTime, self.d.data.packets.time), True)
|
||||||
|
# make sure extraction works on both original and copy
|
||||||
|
np.testing.assert_array_equal(self.d.data.events.wallTime, self.d.orig_data.events.wallTime)
|
||||||
|
|
||||||
|
def test_series_time(self):
|
||||||
|
corr = 100
|
||||||
|
ct = CorrectSeriesTime(corr)
|
||||||
|
|
||||||
|
with self.assertRaises(ValueError):
|
||||||
|
ct.perform_action(self.d)
|
||||||
|
|
||||||
|
self._extract_walltime()
|
||||||
|
|
||||||
|
|
||||||
|
ct.perform_action(self.d)
|
||||||
|
np.testing.assert_array_equal(
|
||||||
|
self.d.data.pulses.time,
|
||||||
|
self.d.orig_data.pulses.time-corr
|
||||||
|
)
|
||||||
|
np.testing.assert_array_equal(
|
||||||
|
self.d.data.events.wallTime,
|
||||||
|
self.d.orig_data.events.wallTime-corr
|
||||||
|
)
|
||||||
|
np.testing.assert_array_equal(
|
||||||
|
self.d.data.proton_current.time,
|
||||||
|
self.d.orig_data.proton_current.time-corr
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_associate_monitor(self):
|
||||||
|
amPC = AssociatePulseWithMonitor(MonitorType.proton_charge)
|
||||||
|
amT = AssociatePulseWithMonitor(MonitorType.time)
|
||||||
|
amN = AssociatePulseWithMonitor(MonitorType.neutron_monitor)
|
||||||
|
|
||||||
|
self.d.data.pulses.monitor = 13
|
||||||
|
amN.perform_action(self.d)
|
||||||
|
np.testing.assert_array_equal(self.d.data.pulses.monitor, 1)
|
||||||
|
|
||||||
|
self.d.data.pulses.monitor = 13
|
||||||
|
amT.perform_action(self.d)
|
||||||
|
np.testing.assert_array_equal(self.d.data.pulses.monitor, np.float32(2*self.d.timing.tau))
|
||||||
|
|
||||||
|
self.d.data.pulses.monitor = 13
|
||||||
|
amPC.perform_action(self.d)
|
||||||
|
pcm = self.d.data.proton_current.current *2*self.d.timing.tau*1e-3
|
||||||
|
np.testing.assert_array_equal(np.isin(self.d.data.pulses.monitor, pcm), True)
|
||||||
|
|
||||||
|
def test_filter_monitor_threashold(self):
|
||||||
|
amPC = AssociatePulseWithMonitor(MonitorType.proton_charge)
|
||||||
|
fmt = amPC | FilterMonitorThreshold(1000.)
|
||||||
|
fma = amPC | FilterMonitorThreshold(2000.)
|
||||||
|
fm0 = amPC | FilterMonitorThreshold(-1.0)
|
||||||
|
|
||||||
|
with self.assertRaises(ValueError):
|
||||||
|
fmt.perform_action(self.d)
|
||||||
|
|
||||||
|
self._extract_walltime()
|
||||||
|
fm0.perform_action(self.d)
|
||||||
|
self.assertEqual(self.d.data.events.mask.sum(), 0)
|
||||||
|
fmt.perform_action(self.d)
|
||||||
|
# calculate, which events should have 0 monitor
|
||||||
|
zero_times = self.d.data.pulses.time[self.d.data.pulses.monitor==0]
|
||||||
|
zero_sum = np.isin(self.d.data.events.wallTime, zero_times).sum()
|
||||||
|
self.assertEqual(self.d.data.events.mask.sum(), zero_sum*EVENT_BITMASKS['MonitorThreshold'])
|
||||||
|
# filter all events
|
||||||
|
self.d.data.events.mask = 0
|
||||||
|
fma.perform_action(self.d)
|
||||||
|
self.assertEqual(self.d.data.events.mask.sum(), self.d.data.events.shape[0]*EVENT_BITMASKS['MonitorThreshold'])
|
||||||
|
|
||||||
|
def test_filter_strage_times(self):
|
||||||
|
st = FilterStrangeTimes()
|
||||||
|
|
||||||
|
st.perform_action(self.d)
|
||||||
|
self.assertEqual(self.d.data.events.mask.sum(), 0)
|
||||||
|
|
||||||
|
# half events should be strange times (outside of ToF frame)
|
||||||
|
self.d.data.events.tof += self.d.timing.tau
|
||||||
|
st.perform_action(self.d)
|
||||||
|
self.assertEqual(self.d.data.events.mask.sum(),
|
||||||
|
(self.d.data.events.tof>2*self.d.timing.tau).sum()*EVENT_BITMASKS['StrangeTimes'])
|
||||||
|
|
||||||
|
def test_apply_phase_offset(self):
|
||||||
|
action = ApplyPhaseOffset(12.5)
|
||||||
|
action.perform_action(self.d)
|
||||||
|
self.assertEqual(self.d.timing.ch1TriggerPhase, 12.5)
|
||||||
|
|
||||||
|
def test_apply_parameter_overwrites(self):
|
||||||
|
action = ApplyParameterOverwrites(ExperimentConfig(muOffset=0.25, mu=3.5, nu=4.5))
|
||||||
|
action.perform_action(self.d)
|
||||||
|
self.assertEqual(self.d.geometry.mu, 3.5)
|
||||||
|
self.assertEqual(self.d.geometry.nu, 4.5)
|
||||||
|
|
||||||
|
action = ApplyParameterOverwrites(ExperimentConfig(muOffset=0.25))
|
||||||
|
action.perform_action(self.d)
|
||||||
|
self.assertEqual(self.d.geometry.mu, 3.75)
|
||||||
|
|
||||||
|
action = ApplyParameterOverwrites(ExperimentConfig(sampleModel='air | Si | Fe'))
|
||||||
|
action.update_header(self.header)
|
||||||
|
self.assertIsNotNone(self.header.sample.model)
|
||||||
|
|
||||||
|
def test_apply_sample_model_file(self):
|
||||||
|
if os.path.isfile('test.yaml'):
|
||||||
|
os.remove('test.yaml')
|
||||||
|
action = ApplyParameterOverwrites(ExperimentConfig(sampleModel='test.yaml'))
|
||||||
|
action.update_header(self.header)
|
||||||
|
self.assertIsNone(self.header.sample.model)
|
||||||
|
|
||||||
|
with open('test.yaml', 'w') as fh:
|
||||||
|
fh.write("""stack: air | Si | Fe""")
|
||||||
|
|
||||||
|
try:
|
||||||
|
action = ApplyParameterOverwrites(ExperimentConfig(sampleModel='test.yaml'))
|
||||||
|
action.update_header(self.header)
|
||||||
|
self.assertEqual(self.header.sample.model.stack, 'air | Si | Fe')
|
||||||
|
finally:
|
||||||
|
os.remove('test.yaml')
|
||||||
|
|
||||||
|
def test_tof_time_correction(self):
|
||||||
|
action = TofTimeCorrection()
|
||||||
|
with self.assertRaises(ValueError):
|
||||||
|
action.perform_action(self.d)
|
||||||
|
|
||||||
|
new_events = append_fields(self.d.data.events, [('delta', np.float64)])
|
||||||
|
new_events.delta = 10.0
|
||||||
|
self.d.data.events = new_events
|
||||||
|
tof_before = self.d.data.events.tof.copy()
|
||||||
|
action.perform_action(self.d)
|
||||||
|
np.testing.assert_allclose(
|
||||||
|
self.d.data.events.tof,
|
||||||
|
tof_before - (10.0 / 180.0) * self.d.timing.tau
|
||||||
|
)
|
||||||
|
|
||||||
|
self.d.create_data()
|
||||||
|
new_events = append_fields(self.d.data.events, [('delta', np.float64)])
|
||||||
|
new_events.delta = 10.0
|
||||||
|
self.d.data.events = new_events
|
||||||
|
tof_before = self.d.data.events.tof.copy()
|
||||||
|
action = TofTimeCorrection(correct_chopper_opening=False)
|
||||||
|
action.perform_action(self.d)
|
||||||
|
np.testing.assert_allclose(
|
||||||
|
self.d.data.events.tof,
|
||||||
|
tof_before - (self.d.geometry.kad / 180.0) * self.d.timing.tau
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_apply_mask(self):
|
||||||
|
self.d.data.events = self.d.data.events[:6].copy()
|
||||||
|
self.d.data.events.mask[:] = [0, 1, 2, 3, 4, 5]
|
||||||
|
|
||||||
|
action = ApplyMask()
|
||||||
|
action.perform_action(self.d)
|
||||||
|
self.assertEqual(self.d.data.events.shape[0], 1)
|
||||||
|
self.assertEqual(self.d.data.events.mask[0], 0)
|
||||||
|
|
||||||
|
self.d.create_data()
|
||||||
|
self.d.data.events = self.d.data.events[:6].copy()
|
||||||
|
self.d.data.events.mask[:] = [0, 1, 2, 3, 4, 5]
|
||||||
|
action = ApplyMask(bitmask_filter=EVENT_BITMASKS['MonitorThreshold'])
|
||||||
|
action.perform_action(self.d)
|
||||||
|
np.testing.assert_array_equal(self.d.data.events.mask, np.array([0, EVENT_BITMASKS['MonitorThreshold']],
|
||||||
|
dtype=np.int32))
|
||||||
|
|
||||||
|
def test_merge_frames(self):
|
||||||
|
action = MergeFrames(lamdaCut=0.0)
|
||||||
|
action.perform_action(self.d)
|
||||||
|
self.assertEqual(self.d.data.events.tof.shape, self.d.orig_data.events.tof.shape)
|
||||||
|
np.testing.assert_array_compare(lambda x,y: x<=y, self.d.data.events.tof, self.d.orig_data.events.tof)
|
||||||
|
self.assertTrue((-self.d.timing.tau<=self.d.data.events.tof).all())
|
||||||
|
np.testing.assert_array_less(self.d.data.events.tof, self.d.timing.tau)
|
||||||
|
|
||||||
|
action = MergeFrames(lamdaCut=2.0)
|
||||||
|
self.d.data.events.tof = self.d.orig_data.events.tof[:]
|
||||||
|
action.perform_action(self.d)
|
||||||
|
tofCut = 2.0*self.d.geometry.chopperDetectorDistance/const.hdm*1e-13
|
||||||
|
self.assertTrue((tofCut-self.d.timing.tau<=self.d.data.events.tof).all())
|
||||||
|
self.assertTrue((self.d.data.events.tof<=tofCut+self.d.timing.tau).all())
|
||||||
|
|
||||||
|
def test_analyze_pixel_ids(self):
|
||||||
|
action = AnalyzePixelIDs((1000, 1001))
|
||||||
|
action.perform_action(self.d)
|
||||||
|
self.assertIn('detZ', self.d.data.events.dtype.names)
|
||||||
|
self.assertIn('detXdist', self.d.data.events.dtype.names)
|
||||||
|
self.assertIn('delta', self.d.data.events.dtype.names)
|
||||||
|
self.assertEqual(
|
||||||
|
np.bitwise_and(self.d.data.events.mask, EVENT_BITMASKS['yRange']).astype(bool).sum(),
|
||||||
|
self.d.data.events.shape[0]
|
||||||
|
)
|
||||||
|
# TODO: maybe add a test actually checking correct detector-id resolution
|
||||||
|
|
||||||
|
def test_calculate_wavelength(self):
|
||||||
|
action = CalculateWavelength((3.0, 5.0))
|
||||||
|
with self.assertRaises(ValueError):
|
||||||
|
action.perform_action(self.d)
|
||||||
|
|
||||||
|
new_events = append_fields(self.d.data.events, [('detXdist', np.float64)])
|
||||||
|
new_events.detXdist = 0.0
|
||||||
|
self.d.data.events = new_events
|
||||||
|
action.perform_action(self.d)
|
||||||
|
self.assertIn('lamda', self.d.data.events.dtype.names)
|
||||||
|
flt = self.d.data.events.mask!=EVENT_BITMASKS['LamdaRange']
|
||||||
|
# check all wavelength in range not filtered
|
||||||
|
np.testing.assert_array_less(self.d.data.events.lamda[flt], 5.0)
|
||||||
|
np.testing.assert_array_less(3.0, self.d.data.events.lamda[flt])
|
||||||
|
# check all wavelength out of range filtered
|
||||||
|
flt = self.d.data.events.mask==EVENT_BITMASKS['LamdaRange']
|
||||||
|
self.assertTrue(((self.d.data.events.lamda[flt]<3.0)|(self.d.data.events.lamda[flt]>5.0)).all())
|
||||||
|
|
||||||
|
def test_calculate_q(self):
|
||||||
|
action = CalculateQ(IncidentAngle.alphaF)
|
||||||
|
with self.assertRaises(ValueError):
|
||||||
|
action.perform_action(self.d)
|
||||||
|
|
||||||
|
# TODO: add checks for actual resulting values
|
||||||
|
|
||||||
|
new_events = append_fields(self.d.data.events, [('lamda', np.float64), ('delta', np.float64)])
|
||||||
|
new_events.lamda = 5.0
|
||||||
|
new_events.delta = 0.0
|
||||||
|
self.d.data.events = new_events
|
||||||
|
action.perform_action(self.d)
|
||||||
|
self.assertIn('qz', self.d.data.events.dtype.names)
|
||||||
|
self.assertNotIn('qx', self.d.data.events.dtype.names)
|
||||||
|
action.update_header(self.header)
|
||||||
|
self.assertEqual(self.header.measurement_scheme, 'angle- and energy-dispersive')
|
||||||
|
|
||||||
|
self.d.create_data()
|
||||||
|
new_events = append_fields(self.d.data.events, [('lamda', np.float64), ('delta', np.float64)])
|
||||||
|
new_events.lamda = 5.0
|
||||||
|
new_events.delta = 0.0
|
||||||
|
self.d.data.events = new_events
|
||||||
|
action = CalculateQ(IncidentAngle.mu)
|
||||||
|
action.perform_action(self.d)
|
||||||
|
self.assertIn('qz', self.d.data.events.dtype.names)
|
||||||
|
self.assertIn('qx', self.d.data.events.dtype.names)
|
||||||
|
action.update_header(self.header)
|
||||||
|
self.assertEqual(self.header.measurement_scheme, 'energy-dispersive')
|
||||||
|
|
||||||
|
self.d.create_data()
|
||||||
|
new_events = append_fields(self.d.data.events, [('lamda', np.float64), ('delta', np.float64)])
|
||||||
|
new_events.lamda = 5.0
|
||||||
|
new_events.delta = 0.0
|
||||||
|
self.d.data.events = new_events
|
||||||
|
action = CalculateQ(IncidentAngle.nu)
|
||||||
|
action.perform_action(self.d)
|
||||||
|
self.assertIn('qz', self.d.data.events.dtype.names)
|
||||||
|
self.assertNotIn('qx', self.d.data.events.dtype.names)
|
||||||
|
action.update_header(self.header)
|
||||||
|
self.assertEqual(self.header.measurement_scheme, 'energy-dispersive')
|
||||||
|
|
||||||
|
def test_filter_qz_range(self):
|
||||||
|
action = FilterQzRange((0.1, 0.2))
|
||||||
|
with self.assertRaises(ValueError):
|
||||||
|
action.perform_action(self.d)
|
||||||
|
|
||||||
|
self.d.data.events = self.d.data.events[:5].copy()
|
||||||
|
new_events = append_fields(self.d.data.events, [('qz', np.float64)])
|
||||||
|
new_events.qz = np.array([0.05, 0.1, 0.15, 0.2, 0.25])
|
||||||
|
self.d.data.events = new_events
|
||||||
|
action.perform_action(self.d)
|
||||||
|
np.testing.assert_array_equal(
|
||||||
|
self.d.data.events.mask,
|
||||||
|
np.array([1, 0, 0, 0, 1], dtype=np.int32) * EVENT_BITMASKS['qRange']
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_filter_by_log(self):
|
||||||
|
action = FilterByLog("test_log==0") | ApplyMask()
|
||||||
|
class LogWarnError(Exception):
|
||||||
|
...
|
||||||
|
def warn_raise(*args, **kwargs):
|
||||||
|
raise LogWarnError()
|
||||||
|
_orig_warn = logging.warning
|
||||||
|
try:
|
||||||
|
logging.warning = warn_raise
|
||||||
|
with self.assertRaises(LogWarnError):
|
||||||
|
action.perform_action(self.d)
|
||||||
|
finally:
|
||||||
|
logging.warning = _orig_warn
|
||||||
|
|
||||||
|
self._extract_walltime()
|
||||||
|
|
||||||
|
test_log = np.recarray(shape=(2,), dtype=np.dtype([('value', np.int32),
|
||||||
|
('time', np.int64)]))
|
||||||
|
test_log.time = [-5, self.d.data.pulses.time[100]+123]
|
||||||
|
test_log.value = [0, 1]
|
||||||
|
self.d.data.device_logs['test_log'] = test_log
|
||||||
|
action.perform_action(self.d)
|
||||||
|
self.assertEqual(self.d.data.pulses.shape[0], 101)
|
||||||
|
|
||||||
|
def test_filter_by_log_switchpulse(self):
|
||||||
|
action = FilterByLog("!test_log==0") | ApplyMask()
|
||||||
|
self._extract_walltime()
|
||||||
|
|
||||||
|
test_log = np.recarray(shape=(2,), dtype=np.dtype([('value', np.int32),
|
||||||
|
('time', np.int64)]))
|
||||||
|
test_log.time = [-5, self.d.data.pulses.time[100]+123]
|
||||||
|
test_log.value = [0, 1]
|
||||||
|
self.d.data.device_logs['test_log'] = test_log
|
||||||
|
self.d.data.device_logs['check_log'] = test_log.copy()
|
||||||
|
action.perform_action(self.d)
|
||||||
|
self.assertEqual(self.d.data.pulses.shape[0], 100)
|
||||||
|
np.testing.assert_array_equal(
|
||||||
|
self.d.data.device_logs['test_log'],
|
||||||
|
self.d.data.device_logs['check_log'],
|
||||||
|
)
|
||||||
@@ -1,16 +1,29 @@
|
|||||||
import os
|
import os
|
||||||
import cProfile
|
import cProfile
|
||||||
|
import numpy as np
|
||||||
from unittest import TestCase
|
from unittest import TestCase
|
||||||
from libeos import options, reduction, logconfig
|
from dataclasses import fields, MISSING
|
||||||
|
from eos import options, reduction_reflectivity, logconfig
|
||||||
|
from orsopy import fileio
|
||||||
|
|
||||||
logconfig.setup_logging()
|
logconfig.setup_logging()
|
||||||
logconfig.update_loglevel(True, False)
|
logconfig.update_loglevel(1)
|
||||||
|
|
||||||
# TODO: add test for new features like proton charge normalization
|
# TODO: add unit tests for individual parts of reduction
|
||||||
|
|
||||||
class FullAmorTest(TestCase):
|
class FullAmorTest(TestCase):
|
||||||
@classmethod
|
@classmethod
|
||||||
def setUpClass(cls):
|
def setUpClass(cls):
|
||||||
|
# generate map for option defaults
|
||||||
|
cls._field_defaults = {}
|
||||||
|
for opt in [options.ExperimentConfig, options.ReflectivityReductionConfig, options.ReflectivityOutputConfig]:
|
||||||
|
defaults = {}
|
||||||
|
for field in fields(opt):
|
||||||
|
if field.default not in [None, MISSING]:
|
||||||
|
defaults[field.name] = field.default
|
||||||
|
elif field.default_factory not in [None, MISSING]:
|
||||||
|
defaults[field.name] = field.default_factory()
|
||||||
|
cls._field_defaults[opt.__name__] = defaults
|
||||||
cls.pr = cProfile.Profile()
|
cls.pr = cProfile.Profile()
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
@@ -20,92 +33,132 @@ class FullAmorTest(TestCase):
|
|||||||
def setUp(self):
|
def setUp(self):
|
||||||
self.pr.enable()
|
self.pr.enable()
|
||||||
self.reader_config = options.ReaderConfig(
|
self.reader_config = options.ReaderConfig(
|
||||||
year=2023,
|
year=2025,
|
||||||
rawPath=(os.path.join('..', "test_data"),),
|
rawPath=["test_data"],
|
||||||
)
|
)
|
||||||
|
|
||||||
def tearDown(self):
|
def tearDown(self):
|
||||||
self.pr.disable()
|
self.pr.disable()
|
||||||
for fi in ['test.Rqz.ort', '614.norm']:
|
for fi in ['test_results/test.Rqz.ort', 'test_results/5952.norm']:
|
||||||
try:
|
try:
|
||||||
os.unlink(os.path.join(self.reader_config.rawPath[0], fi))
|
os.unlink(fi)
|
||||||
except FileNotFoundError:
|
except FileNotFoundError:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
def test_time_slicing(self):
|
def test_time_slicing(self):
|
||||||
experiment_config = options.ExperimentConfig(
|
experiment_config = options.ExperimentConfig(
|
||||||
chopperPhase=-13.5,
|
chopperPhase=-13.5,
|
||||||
chopperPhaseOffset=-5,
|
chopperPhaseOffset=-5,
|
||||||
monitorType=options.Defaults.monitorType,
|
yRange=(18, 48),
|
||||||
lowCurrentThreshold=options.Defaults.lowCurrentThreshold,
|
lambdaRange=(3., 11.5),
|
||||||
yRange=(11., 41.),
|
|
||||||
lambdaRange=(2., 15.),
|
|
||||||
qzRange=(0.005, 0.30),
|
|
||||||
incidentAngle=options.Defaults.incidentAngle,
|
|
||||||
mu=0,
|
mu=0,
|
||||||
nu=0,
|
nu=0,
|
||||||
muOffset=0.0,
|
muOffset=0.0,
|
||||||
sampleModel='air | 10 H2O | D2O'
|
sampleModel='air | 10 H2O | D2O'
|
||||||
)
|
)
|
||||||
reduction_config = options.ReductionConfig(
|
reduction_config = options.ReflectivityReductionConfig(
|
||||||
normalisationMethod=options.Defaults.normalisationMethod,
|
|
||||||
qResolution=0.01,
|
qResolution=0.01,
|
||||||
qzRange=options.Defaults.qzRange,
|
qzRange=(0.01, 0.15),
|
||||||
thetaRange=(-12., 12.),
|
thetaRange=(-0.75, 0.75),
|
||||||
thetaRangeR=(-12., 12.),
|
fileIdentifier=["6003-6005"],
|
||||||
fileIdentifier=["610"],
|
|
||||||
scale=[1],
|
scale=[1],
|
||||||
normalisationFileIdentifier=[],
|
normalisationFileIdentifier=[],
|
||||||
timeSlize=[300.0]
|
timeSlize=[300.0]
|
||||||
)
|
)
|
||||||
output_config = options.OutputConfig(
|
output_config = options.ReflectivityOutputConfig(
|
||||||
outputFormats=["Rqz.ort"],
|
outputFormats=[options.OutputFomatOption.Rqz_ort],
|
||||||
outputName='test',
|
outputName='test',
|
||||||
outputPath=os.path.join('..', 'test_results'),
|
outputPath='test_results',
|
||||||
)
|
)
|
||||||
config=options.EOSConfig(self.reader_config, experiment_config, reduction_config, output_config)
|
config=options.ReflectivityConfig(self.reader_config, experiment_config, reduction_config, output_config)
|
||||||
# run three times to get similar timing to noslicing runs
|
# run three times to get similar timing to noslicing runs
|
||||||
reducer = reduction.AmorReduction(config)
|
reducer = reduction_reflectivity.ReflectivityReduction(config)
|
||||||
reducer.reduce()
|
reducer.reduce()
|
||||||
reducer = reduction.AmorReduction(config)
|
reducer = reduction_reflectivity.ReflectivityReduction(config)
|
||||||
reducer.reduce()
|
reducer.reduce()
|
||||||
reducer = reduction.AmorReduction(config)
|
reducer = reduction_reflectivity.ReflectivityReduction(config)
|
||||||
reducer.reduce()
|
reducer.reduce()
|
||||||
|
|
||||||
def test_noslicing(self):
|
def test_noslicing(self):
|
||||||
experiment_config = options.ExperimentConfig(
|
experiment_config = options.ExperimentConfig(
|
||||||
chopperPhase=-13.5,
|
chopperPhase=-13.5,
|
||||||
chopperPhaseOffset=-5,
|
chopperPhaseOffset=-5,
|
||||||
monitorType=options.Defaults.monitorType,
|
yRange=(18, 48),
|
||||||
lowCurrentThreshold=options.Defaults.lowCurrentThreshold,
|
lambdaRange=(3., 11.5),
|
||||||
yRange=(11., 41.),
|
|
||||||
lambdaRange=(2., 15.),
|
|
||||||
qzRange=(0.005, 0.30),
|
|
||||||
incidentAngle=options.Defaults.incidentAngle,
|
|
||||||
mu=0,
|
mu=0,
|
||||||
nu=0,
|
nu=0,
|
||||||
muOffset=0.0
|
muOffset=0.0,
|
||||||
)
|
)
|
||||||
reduction_config = options.ReductionConfig(
|
reduction_config = options.ReflectivityReductionConfig(
|
||||||
qResolution=0.01,
|
qResolution=0.01,
|
||||||
qzRange=options.Defaults.qzRange,
|
thetaRange=(-0.75, 0.75),
|
||||||
normalisationMethod=options.Defaults.normalisationMethod,
|
fileIdentifier=["6003", "6004", "6005"],
|
||||||
thetaRange=(-12., 12.),
|
|
||||||
thetaRangeR=(-12., 12.),
|
|
||||||
fileIdentifier=["610", "611", "608,612-613", "609"],
|
|
||||||
scale=[1],
|
scale=[1],
|
||||||
normalisationFileIdentifier=["608"],
|
normalisationFileIdentifier=["5952"],
|
||||||
autoscale=(True, True)
|
autoscale=(0.0, 0.05),
|
||||||
)
|
)
|
||||||
output_config = options.OutputConfig(
|
output_config = options.ReflectivityOutputConfig(
|
||||||
outputFormats=["Rqz.ort"],
|
outputFormats=[options.OutputFomatOption.Rqz_ort],
|
||||||
outputName='test',
|
outputName='test',
|
||||||
outputPath=os.path.join('..', 'test_results'),
|
outputPath='test_results',
|
||||||
)
|
)
|
||||||
config=options.EOSConfig(self.reader_config, experiment_config, reduction_config, output_config)
|
config=options.ReflectivityConfig(self.reader_config, experiment_config, reduction_config, output_config)
|
||||||
reducer = reduction.AmorReduction(config)
|
reducer = reduction_reflectivity.ReflectivityReduction(config)
|
||||||
reducer.reduce()
|
reducer.reduce()
|
||||||
# run second time to reuse norm file
|
# run second time to reuse norm file
|
||||||
reducer = reduction.AmorReduction(config)
|
reducer = reduction_reflectivity.ReflectivityReduction(config)
|
||||||
reducer.reduce()
|
reducer.reduce()
|
||||||
|
|
||||||
|
def test_eventfilter(self):
|
||||||
|
self.reader_config.year = 2026
|
||||||
|
experiment_config = options.ExperimentConfig()
|
||||||
|
reduction_config = options.ReflectivityReductionConfig(fileIdentifier=["826"],
|
||||||
|
logfilter=['polarization_config_label==2'])
|
||||||
|
output_config = options.ReflectivityOutputConfig(
|
||||||
|
outputFormats=[options.OutputFomatOption.Rqz_ort],
|
||||||
|
outputName='test',
|
||||||
|
outputPath='test_results',
|
||||||
|
)
|
||||||
|
config=options.ReflectivityConfig(self.reader_config, experiment_config, reduction_config, output_config)
|
||||||
|
reducer = reduction_reflectivity.ReflectivityReduction(config)
|
||||||
|
reducer.reduce()
|
||||||
|
espin_up = reducer.dataset.data.events.shape[0]
|
||||||
|
|
||||||
|
reduction_config.logfilter = ['polarization_config_label==3']
|
||||||
|
output_config.append = True
|
||||||
|
reducer = reduction_reflectivity.ReflectivityReduction(config)
|
||||||
|
reducer.reduce()
|
||||||
|
espin_down = reducer.dataset.data.events.shape[0]
|
||||||
|
# measurement should have about 2x as many counts in spin_down
|
||||||
|
self.assertAlmostEqual(espin_down/espin_up, 2., 2)
|
||||||
|
|
||||||
|
# perform the same filter but remove pulses during which the switch occured
|
||||||
|
reduction_config.logfilter = ['!polarization_config_label==3']
|
||||||
|
output_config.append = True
|
||||||
|
reducer = reduction_reflectivity.ReflectivityReduction(config)
|
||||||
|
reducer.reduce()
|
||||||
|
espin_down2 = reducer.dataset.data.events.shape[0]
|
||||||
|
# measurement should have about 2x as many counts in spin_down
|
||||||
|
self.assertLess(espin_down2, espin_down)
|
||||||
|
|
||||||
|
def test_polsplitting(self):
|
||||||
|
self.reader_config.year = 2026
|
||||||
|
experiment_config = options.ExperimentConfig()
|
||||||
|
reduction_config = options.ReflectivityReductionConfig(fileIdentifier=["826"])
|
||||||
|
output_config = options.ReflectivityOutputConfig(
|
||||||
|
outputFormats=[options.OutputFomatOption.Rqz_ort],
|
||||||
|
outputName='test',
|
||||||
|
outputPath='test_results',
|
||||||
|
)
|
||||||
|
config=options.ReflectivityConfig(self.reader_config, experiment_config, reduction_config, output_config)
|
||||||
|
reducer = reduction_reflectivity.ReflectivityReduction(config)
|
||||||
|
reducer.reduce()
|
||||||
|
|
||||||
|
results = fileio.load_orso(os.path.join(output_config.outputPath, output_config.outputName+'.Rqz.ort'))
|
||||||
|
self.assertEqual(len(results), 2)
|
||||||
|
self.assertEqual(results[0].info.data_source.measurement.instrument_settings.polarization, 'po')
|
||||||
|
self.assertEqual(results[1].info.data_source.measurement.instrument_settings.polarization, 'mo')
|
||||||
|
espin_up = np.nansum(results[0].data[:,1])
|
||||||
|
espin_down = np.nansum(results[1].data[:,1])
|
||||||
|
# the total intensity should be around equal as events are doubled and monitor counts are doubled
|
||||||
|
self.assertAlmostEqual(espin_down/espin_up, 1., 2)
|
||||||
|
|||||||
16
update.md
Normal file
16
update.md
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
Make new release
|
||||||
|
================
|
||||||
|
|
||||||
|
- Update revision in `eos/__init__.py`
|
||||||
|
- Commit changes `git commit -a -m "your message here"`
|
||||||
|
- Tag version `git tag v3.x.y`
|
||||||
|
- Push changes `git push` and `git push --tags`
|
||||||
|
- This should trigger the **Release** action on GitHub that builds a new version and uploads it to PyPI.
|
||||||
|
|
||||||
|
|
||||||
|
Update on AMOR
|
||||||
|
==============
|
||||||
|
|
||||||
|
- Login via SSH using the **amor** user.
|
||||||
|
- Activate eos virtual environment `source /home/software/virtualenv/eosenv/bin/activate`
|
||||||
|
- Update eos packge `pip install --upgrade amor-eos`
|
||||||
@@ -1,8 +1,15 @@
|
|||||||
# -*- mode: python ; coding: utf-8 -*-
|
# -*- mode: python ; coding: utf-8 -*-
|
||||||
|
from PyInstaller.utils.hooks import collect_all
|
||||||
|
|
||||||
|
datas = []
|
||||||
|
binaries = []
|
||||||
|
hiddenimports = []
|
||||||
|
tmp_ret = collect_all('tzdata')
|
||||||
|
datas += tmp_ret[0]; binaries += tmp_ret[1]; hiddenimports += tmp_ret[2]
|
||||||
|
|
||||||
|
|
||||||
a = Analysis(
|
a = Analysis(
|
||||||
['eos.py'],
|
['eos/__main__.py'],
|
||||||
pathex=[],
|
pathex=[],
|
||||||
binaries=[],
|
binaries=[],
|
||||||
datas=[],
|
datas=[],
|
||||||
@@ -19,16 +26,13 @@ pyz = PYZ(a.pure)
|
|||||||
exe = EXE(
|
exe = EXE(
|
||||||
pyz,
|
pyz,
|
||||||
a.scripts,
|
a.scripts,
|
||||||
a.binaries,
|
|
||||||
a.datas,
|
|
||||||
[],
|
[],
|
||||||
|
exclude_binaries=True,
|
||||||
name='eos',
|
name='eos',
|
||||||
debug=False,
|
debug=False,
|
||||||
bootloader_ignore_signals=False,
|
bootloader_ignore_signals=False,
|
||||||
strip=False,
|
strip=False,
|
||||||
upx=True,
|
upx=True,
|
||||||
upx_exclude=[],
|
|
||||||
runtime_tmpdir=None,
|
|
||||||
console=True,
|
console=True,
|
||||||
disable_windowed_traceback=False,
|
disable_windowed_traceback=False,
|
||||||
argv_emulation=False,
|
argv_emulation=False,
|
||||||
@@ -36,3 +40,12 @@ exe = EXE(
|
|||||||
codesign_identity=None,
|
codesign_identity=None,
|
||||||
entitlements_file=None,
|
entitlements_file=None,
|
||||||
)
|
)
|
||||||
|
coll = COLLECT(
|
||||||
|
exe,
|
||||||
|
a.binaries,
|
||||||
|
a.datas,
|
||||||
|
strip=False,
|
||||||
|
upx=True,
|
||||||
|
upx_exclude=[],
|
||||||
|
name='eos',
|
||||||
|
)
|
||||||
|
|||||||
@@ -1,51 +0,0 @@
|
|||||||
# -*- mode: python ; coding: utf-8 -*-
|
|
||||||
from PyInstaller.utils.hooks import collect_all
|
|
||||||
|
|
||||||
datas = []
|
|
||||||
binaries = []
|
|
||||||
hiddenimports = []
|
|
||||||
tmp_ret = collect_all('tzdata')
|
|
||||||
datas += tmp_ret[0]; binaries += tmp_ret[1]; hiddenimports += tmp_ret[2]
|
|
||||||
|
|
||||||
|
|
||||||
a = Analysis(
|
|
||||||
['eos.py'],
|
|
||||||
pathex=[],
|
|
||||||
binaries=[],
|
|
||||||
datas=[],
|
|
||||||
hiddenimports=[],
|
|
||||||
hookspath=[],
|
|
||||||
hooksconfig={},
|
|
||||||
runtime_hooks=[],
|
|
||||||
excludes=[],
|
|
||||||
noarchive=False,
|
|
||||||
optimize=1,
|
|
||||||
)
|
|
||||||
pyz = PYZ(a.pure)
|
|
||||||
|
|
||||||
exe = EXE(
|
|
||||||
pyz,
|
|
||||||
a.scripts,
|
|
||||||
[],
|
|
||||||
exclude_binaries=True,
|
|
||||||
name='eos',
|
|
||||||
debug=False,
|
|
||||||
bootloader_ignore_signals=False,
|
|
||||||
strip=False,
|
|
||||||
upx=True,
|
|
||||||
console=True,
|
|
||||||
disable_windowed_traceback=False,
|
|
||||||
argv_emulation=False,
|
|
||||||
target_arch=None,
|
|
||||||
codesign_identity=None,
|
|
||||||
entitlements_file=None,
|
|
||||||
)
|
|
||||||
coll = COLLECT(
|
|
||||||
exe,
|
|
||||||
a.binaries,
|
|
||||||
a.datas,
|
|
||||||
strip=False,
|
|
||||||
upx=True,
|
|
||||||
upx_exclude=[],
|
|
||||||
name='eos',
|
|
||||||
)
|
|
||||||
Reference in New Issue
Block a user