68 Commits

Author SHA1 Message Date
155a167e26 update version date
Some checks failed
Release / build-ubuntu-latest (push) Failing after 2s
Release / build-windows (push) Has been cancelled
Release / release (push) Has been cancelled
2025-01-30 13:38:45 +01:00
fcc1fd7d84 delete old manual and life_histogram, update events2histogram 2025-01-30 13:37:27 +01:00
ee34cc96d4 direct beam reference fixed 2025-01-29 13:54:56 +01:00
Jochen Stahn
64edd8e8d7 Merge pull request #6 from jochenstahn/faster_pulsetimes
Faster pulsetimes
2025-01-29 08:21:14 +01:00
96681c8f81 fix case where there is no pulse missing 2025-01-28 16:17:48 +01:00
87a246196f apply scaling factor when time slicing 2025-01-28 16:02:54 +01:00
3607de9864 replace pulseTimeS generation in for loop by filling of pulse gaps 2025-01-28 16:00:52 +01:00
23e310ba6a removed fix coded proton time offset 2024-12-16 13:50:46 +01:00
9f2dfbcbe1 corrected output about automatic scaling 2024-12-16 08:15:11 +01:00
25f8708b4c re-bump version, make release dependent on windows build 2024-12-13 15:57:12 +01:00
2e565f55be Include timezone data in windows distribution 2024-12-13 15:36:22 +01:00
b48bea7726 increment version 2024-12-13 14:46:23 +01:00
55e9407b67 update revision info
Some checks failed
Release / build-ubuntu-latest (push) Failing after 2s
Release / release (push) Has been skipped
Release / build-windows (push) Has been cancelled
2024-12-13 14:37:50 +01:00
5fc512632f Merge pull request #5 from jochenstahn/time
Handle timezone correctly
2024-12-13 14:35:16 +01:00
55a7b7270c minor cleanup 2024-12-13 14:33:04 +01:00
f81582554e remove unnecessary requirement for pytz and insert timezone into datetime instead of recreation 2024-12-13 14:17:51 +01:00
3e22a94ff6 install zoneinfo backports for python 3.8 2024-12-13 14:07:12 +01:00
Jochen Stahn
c1dc194548 different method to add timezone 2024-12-10 14:39:59 +01:00
Jochen Stahn
c5f5602a89 added pytz 2024-12-09 10:19:22 +01:00
cf9c0018a5 tried to handle summer time correctly.... 2024-12-09 10:03:56 +01:00
03ac04ab05 removed control output 2024-12-06 14:59:18 +01:00
b2df980a21 fix of handling of pulses for more than one file 2024-12-06 10:36:25 +01:00
eb54e52c66 changed format for a logging output about low monitor signal 2024-12-04 10:51:05 +01:00
9b4c384425 release to pypi after storing archive to artifact 2024-12-04 08:48:35 +01:00
ce44e04014 add windows build to release action
Some checks failed
Release / build-ubuntu-latest (push) Failing after 2s
Release / release (push) Has been skipped
Release / build-windows (push) Has been cancelled
2024-12-03 16:11:04 +01:00
ce90fc005a remove upterm from tests 2024-12-03 15:17:12 +01:00
29edbce5bc change iso time offset to include minutes for compatibility with older python 2024-12-03 15:02:25 +01:00
a0b0ff436c replace calculated offset by Iso string 2024-12-03 14:51:09 +01:00
2cae45659c correct for local timezone issues 2024-12-03 14:12:37 +01:00
9693951233 explicitly define utc as timezone for file 2024-12-03 13:52:29 +01:00
e2970a8a5f add upterm action to workflow for debugging 2024-12-03 13:13:28 +01:00
7dcede44b6 removed obsolete control output 2024-11-28 09:03:57 +01:00
f6f853e6e4 commented VERY time consuming control output: events per pulse 2024-11-27 08:29:33 +01:00
ad8245a9a0 relative theta range over absolute over default 2024-11-26 15:27:07 +01:00
33b8829a09 check for empty files introduced 2024-11-22 11:24:17 +01:00
f645082c20 Merge branch 'main' of https://github.com/jochenstahn/amor 2024-11-08 09:01:37 +01:00
1bd1100035 changes in the "pulse generation" - not yet finished 2024-11-08 08:51:56 +01:00
Artur Glavic
b94b6714a3 Update setup.cfg 2024-11-05 13:15:20 +01:00
d4462f450e change to rst README 2024-10-30 15:36:43 +01:00
80d1014750 update readme with installation instructions 2024-10-30 15:05:33 +01:00
a3eb6a173e bump version 2024-10-30 14:32:04 +01:00
bada209c1e minor fix of release workflow and add github release
Some checks failed
Release / build-ubuntu-latest (push) Failing after 2s
Release / release (push) Has been skipped
2024-10-30 14:31:06 +01:00
d6f699e10c minor fix of release workflow 2024-10-30 14:28:34 +01:00
8fa11fa05a add configuration for pypi release 2024-10-30 14:07:48 +01:00
c7b7f38a6d make sure the right pytest is used 2024-10-30 13:34:45 +01:00
5d50e92521 try to use older Ubuntu for failing tests, could not reproduce locally 2024-10-30 13:26:03 +01:00
aa57d75751 don't fail action when just one python version fails 2024-10-30 13:08:50 +01:00
409e62b63f set branch to run tests to main 2024-10-30 13:02:34 +01:00
1c459ae5d3 install correct requirements file in workflow 2024-10-30 13:01:56 +01:00
12d0370807 add test data and pytest action to repository 2024-10-30 12:59:00 +01:00
b1e7b68a21 update tests to new configuration parameters 2024-10-30 12:32:24 +01:00
4134e106ff improved algorythm for pulse generation 2024-10-25 15:35:47 +02:00
5cce1bedc9 fixed a bug in the time-monitor calculation and added units 2024-10-24 08:54:43 +02:00
52bd731a3c set startTime to hdf entry start_time 2024-10-23 15:32:46 +02:00
fbb44d435b changed keys for monitorType 2024-10-22 17:46:31 +02:00
182cc5ab3d added TODO for the resolution calculation 2024-10-22 17:05:09 +02:00
9f2bc034e7 debugging output for neutron pulse times and proton current 2024-10-22 15:47:25 +02:00
7f2d5cf765 added monitorType 2024-10-22 09:43:44 +02:00
772d921f03 Merge branch 'main' of https://github.com/jochenstahn/amor 2024-10-10 14:09:51 +02:00
f5ce5e9d73 format of info output for proton current adapted 2024-10-10 08:06:07 +02:00
7fc64a58f0 sorted outputPath to output 2024-10-05 13:57:39 +02:00
b848d66290 changed path variable names + new option lowCurrentThreshold 2024-10-04 15:10:11 +02:00
1f394b0e86 replaced scipy interpolation by dedicated routine (by Artur) 2024-09-27 16:27:50 +02:00
69620d0f16 removed doupble normalisation.... 2024-09-26 11:04:51 +02:00
0e727ce591 fixed monitor for normalisation measurement 2024-09-26 10:24:37 +02:00
f2ebe5ce0c new method for time / protonCharge normalisation - compatible with old data files 2024-09-26 10:09:15 +02:00
acc508e9ea timestamps are now all of type np.int64 with unit ns 2024-09-25 16:35:31 +02:00
e6578d5bf2 fix merge error missing method calls 2024-09-25 14:06:55 +02:00
26 changed files with 572 additions and 2181 deletions

97
.github/workflows/release.yml vendored Normal file
View File

@@ -0,0 +1,97 @@
name: Release
# Controls when the action will run.
on:
# Triggers the workflow on push or pull request events but only for the master branch
push:
tags:
- "*"
# Allows you to run this workflow manually from the Actions tab
workflow_dispatch:
inputs:
build-items:
description: 'Items to be built'
required: true
default: 'all'
type: choice
options:
- all
- windows
- linux
- mac
jobs:
build-ubuntu-latest:
runs-on: ubuntu-latest
if: ${{ (github.event_name != 'workflow_dispatch') || (contains(fromJson('["all", "linux"]'), github.event.inputs.build-items)) }}
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: '3.11'
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install build
pip install -r requirements.txt
- name: Build PyPI package
run: |
python3 -m build
- name: Archive distribution
uses: actions/upload-artifact@v4
with:
name: linux-dist
path: |
dist/*.tar.gz
- name: Upload to PyPI
if: github.event_name != 'workflow_dispatch'
uses: pypa/gh-action-pypi-publish@release/v1
with:
user: __token__
password: ${{ secrets.PYPI_TOKEN }}
skip-existing: true
build-windows:
runs-on: windows-latest
if: ${{ (github.event_name != 'workflow_dispatch') || (contains(fromJson('["all", "windows"]'), github.event.inputs.build-items)) }}
steps:
- uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: 3.12
- name: Install dependencies
run: |
C:\Miniconda\condabin\conda.bat env update --file conda_windows.yml --name base
C:\Miniconda\condabin\conda.bat init powershell
- name: Build with pyinstaller
run: |
pyinstaller windows_folder.spec
cd dist\eos
Compress-Archive -Path .\* -Destination ..\..\eos.zip
- name: Archive distribution
uses: actions/upload-artifact@v4
with:
name: windows-dist
path: |
eos.zip
release:
if: github.event_name != 'workflow_dispatch'
runs-on: ubuntu-latest
needs: [build-ubuntu-latest, build-windows]
steps:
- uses: actions/checkout@v4
- uses: actions/download-artifact@v4
with:
name: linux-dist
- uses: actions/download-artifact@v4
with:
name: windows-dist
- uses: ncipollo/release-action@v1
with:
artifacts: "amor*.tar.gz,*.zip"
token: ${{ secrets.GITHUB_TOKEN }}
allowUpdates: true

42
.github/workflows/unit_tests.yml vendored Normal file
View File

@@ -0,0 +1,42 @@
name: Unit Testing
on:
push:
branches:
- main
pull_request:
# Allows you to run this workflow manually from the Actions tab
workflow_dispatch:
jobs:
build:
runs-on: ubuntu-22.04
strategy:
matrix:
python-version: ['3.8', '3.9', '3.10', '3.11', '3.12']
fail-fast: false
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install pytest
pip install -r requirements.txt
- name: Backport to 3.8
if: matrix.python-version == '3.8'
run: |
pip install backports.zoneinfo
- name: Test with pytest
run: |
cd tests
python -m pytest --pyargs .

2
.gitignore vendored
View File

@@ -5,6 +5,6 @@
__pycache__
raw
.idea
test_data
test_results
build
dist

View File

@@ -1,12 +0,0 @@
Software repository for the neutron reflectometer Amor at the Paul Scherrer Institut, Switzerland
reduction of the raw files (.hdf) to reflectivity files in one of the representations of the **ORSO reflectivity file format**:
> `eos.py`
> `neos.py` : version for playing and testing
visualisation of the content of a raw file (.hdf):
> `events2histogram.py`
#TODO: real readme for final system needed.

27
README.rst Normal file
View File

@@ -0,0 +1,27 @@
EOS - The AMOR focusing reflectometry data reduction software
-------------------------------------------------------------
.. image:: https://img.shields.io/pypi/v/amor-eos.svg
:target: https://pypi.python.org/pypi/amor-eos/
Software repository for the neutron reflectometer Amor at the Paul Scherrer Institut, Switzerland
Reduction of the raw files (.hdf) to reflectivity files in one of the representations of the **ORSO reflectivity file format**:
eos.py --help
visualisation of the content of a raw file (.hdf):
events2histogram.py
:TODO: real readme for final system needed.
Installation
------------
Create a virtual python environment (>3.8) and install the PyPI package:
pip install amor-eos
On Windows you can also use the binary eos.exe that you find in the
[GitHub Releases](https://github.com/jochenstahn/amor/releases/latest) section

File diff suppressed because it is too large Load Diff

44
conda_windows.yml Normal file
View File

@@ -0,0 +1,44 @@
name: eos_build
channels:
- defaults
dependencies:
- altgraph=0.17.3=py312haa95532_0
- blas=1.0=mkl
- bzip2=1.0.8=h2bbff1b_6
- ca-certificates=2024.11.26=haa95532_0
- expat=2.6.3=h5da7b33_0
- h5py=3.12.1=py312h3b2c811_0
- hdf5=1.12.1=h51c971a_3
- icc_rt=2022.1.0=h6049295_2
- intel-openmp=2023.1.0=h59b6b97_46320
- libffi=3.4.4=hd77b12b_1
- llvmlite=0.43.0=py312hf2fb9eb_0
- mkl=2023.1.0=h6b88ed4_46358
- mkl-service=2.4.0=py312h2bbff1b_1
- mkl_fft=1.3.11=py312h827c3e9_0
- mkl_random=1.2.8=py312h0158946_0
- numba=0.60.0=py312h0158946_0
- numpy=1.26.4=py312hfd52020_0
- numpy-base=1.26.4=py312h4dde369_0
- openssl=3.0.15=h827c3e9_0
- packaging=24.1=py312haa95532_0
- pefile=2023.2.7=py312haa95532_0
- pip=24.2=py312haa95532_0
- pyinstaller=6.9.0=py312h0416ee5_0
- pyinstaller-hooks-contrib=2024.7=py312haa95532_0
- python=3.12.7=h14ffc60_0
- pywin32-ctypes=0.2.2=py312haa95532_0
- setuptools=75.1.0=py312haa95532_0
- sqlite=3.45.3=h2bbff1b_0
- tbb=2021.8.0=h59b6b97_0
- tk=8.6.14=h0416ee5_0
- tzdata=2024b=h04d1e81_0
- vc=14.40=h2eaa2aa_1
- vs2015_runtime=14.40.33807=h98bb1dd_1
- wheel=0.44.0=py312haa95532_0
- xz=5.4.6=h8cc25b3_1
- zlib=1.2.13=h8cc25b3_1
- pip:
- orsopy==1.2.1
- pyyaml==6.0.2
- tzdata

View File

@@ -860,7 +860,7 @@ def process(dataPath, ident, clas):
try: lamdaMax
except NameError: lamdaMax = lamdaMin + tau * hdm/chopperDetectorDistance * 1e13
tofOffset = tau * chopperPhase / 180. # mismatch of chopper pulse and time-zero
tofOffset = -tau * chopperPhase / 180. # mismatch of chopper pulse and time-zero
tofCut = lamdaCut * chopperDetectorDistance / hdm * 1.e-13 # tof of frame start
tof_e = np.array(ev['/entry1/Amor/detector/data/event_time_offset'][:], dtype=np.uint64)/1.e9 + tofOffset # tof
@@ -960,7 +960,7 @@ def commandLineArgs():
type=float,
help ="value of nu")
clas.add_argument("-P", "--chopperPhase",
default=7.5,
default=-7.5,
type=float,
help ="chopper phase offset")
clas.add_argument("-p", "--plot",

View File

@@ -2,5 +2,6 @@
Package to handle data reduction at AMOR instrument to be used by eos.py script.
"""
__version__ = '2.1.0'
__date__ = '2024-08-25'
__version__ = '2.1.4'
__date__ = '2025-01-30'

View File

@@ -23,28 +23,32 @@ def commandLineArgs():
default = Defaults.normalisationFileIdentifier,
nargs = '+',
help = "file number(s) of normalisation measurement")
input_data.add_argument("-nm", "--normalisationMethod",
default = Defaults.normalisationMethod,
help = "normalisation method: [o]verillumination, [u]nderillumination, [d]irect_beam")
input_data.add_argument("--raw",
input_data.add_argument("-rp", "--rawPath",
type = str,
default = Defaults.raw,
help = "relative path to directory with .hdf files")
input_data.add_argument("-d", "--dataPath",
type = str,
default = Defaults.dataPath,
help = "relative path for output")
default = Defaults.rawPath,
help = "path to directory with .hdf files")
input_data.add_argument("-Y", "--year",
default = Defaults.year,
type = int,
help = "year the measurement was performed")
input_data.add_argument("-sub", "--subtract",
help = "R(q_z) curve to be subtracted (in .Rqz.ort format)")
input_data.add_argument("-nm", "--normalisationMethod",
default = Defaults.normalisationMethod,
help = "normalisation method: [o]verillumination, [u]nderillumination, [d]irect_beam")
input_data.add_argument("-mt", "--monitorType",
type = str,
default = Defaults.monitorType,
help = "one of [p]rotonCurrent, [t]ime or [n]eutronMonitor")
output = clas.add_argument_group('output')
output.add_argument("-o", "--outputName",
default = Defaults.outputName,
help = "output file name (without suffix)")
output.add_argument("-op", "--outputPath",
type = str,
default = Defaults.outputPath,
help = "path for output")
output.add_argument("-of", "--outputFormat",
nargs = '+',
default = Defaults.outputFormat,
@@ -98,6 +102,11 @@ def commandLineArgs():
nargs = 2,
type = float,
help = "q_z range")
masks.add_argument("-ct", "--lowCurrentThreshold",
default = Defaults.lowCurrentThreshold,
type = float,
help = "proton current threshold for discarding neutron pulses")
overwrite = clas.add_argument_group('overwrite')
overwrite.add_argument("-cs", "--chopperSpeed",
@@ -172,8 +181,7 @@ def command_line_options():
reader_config = ReaderConfig(
year = clas.year,
raw = clas.raw,
dataPath = clas.dataPath
rawPath = clas.rawPath,
)
experiment_config = ExperimentConfig(
sampleModel = clas.sampleModel,
@@ -182,10 +190,12 @@ def command_line_options():
yRange = clas.yRange,
lambdaRange = clas.lambdaRange,
qzRange = clas.qzRange,
lowCurrentThreshold = clas.lowCurrentThreshold,
incidentAngle = clas.incidentAngle,
mu = clas.mu,
nu = clas.nu,
muOffset = clas.muOffset
muOffset = clas.muOffset,
monitorType = clas.monitorType,
)
reduction_config = ReductionConfig(
qResolution = clas.qResolution,
@@ -202,7 +212,8 @@ def command_line_options():
)
output_config = OutputConfig(
outputFormats = output_format_list(clas.outputFormat),
outputName = clas.outputName
outputName = clas.outputName,
outputPath = clas.outputPath,
)
return EOSConfig(reader_config, experiment_config, reduction_config, output_config)

View File

@@ -2,12 +2,16 @@ import logging
import os
import subprocess
import sys
from datetime import datetime
from datetime import datetime, timezone
try:
import zoneinfo
except ImportError:
# for python versions < 3.9 try to use the backports version
from backports import zoneinfo
from typing import List
import h5py
import numpy as np
import scipy as sp
from orsopy import fileio
from orsopy.fileio.model_language import SampleModel
@@ -21,6 +25,8 @@ try:
except Exception:
nb_helpers = None
# Time zone used to interpret time strings
AMOR_LOCAL_TIMEZONE = zoneinfo.ZoneInfo(key='Europe/Zurich')
class AmorData:
"""read meta-data and event streams from .hdf file(s), apply filters and conversions"""
@@ -38,19 +44,20 @@ class AmorData:
kap: float
lambdaMax: float
lambda_e: np.ndarray
monitor: float
#monitor: float
mu: float
nu: float
tau: float
tofCut: float
start_date: str
monitorType: str
seriesStartTime = None
#-------------------------------------------------------------------------------------------------
def __init__(self, header: Header, reader_config: ReaderConfig, config: ExperimentConfig,
short_notation:str, norm=False):
self.startTime = reader_config.startTime
#self.startTime = reader_config.startTime
self.header = header
self.config = config
self.reader_config = reader_config
@@ -68,22 +75,26 @@ class AmorData:
else:
self.readHeaderInfo = True
_detZ_e = []
_lamda_e = []
_wallTime_e = []
_monitor = 0
#_current = []
_detZ_e = []
_lamda_e = []
_wallTime_e = []
#_monitor = 0
_monitorPerPulse = []
_pulseTimeS = []
for file in self.file_list:
self.read_individual_data(file, norm)
_detZ_e = np.append(_detZ_e, self.detZ_e)
_lamda_e = np.append(_lamda_e, self.lamda_e)
_wallTime_e = np.append(_wallTime_e, self.wallTime_e)
_monitor += self.monitor
self.detZ_e = _detZ_e
self.lamda_e = _lamda_e
self.wallTime_e = _wallTime_e
self.monitor = _monitor
logging.warning(f' {self.monitorType} monitor = {self.monitor:9.3f}')
_detZ_e = np.append(_detZ_e, self.detZ_e)
_lamda_e = np.append(_lamda_e, self.lamda_e)
_wallTime_e = np.append(_wallTime_e, self.wallTime_e)
_monitorPerPulse = np.append(_monitorPerPulse, self.monitorPerPulse)
_pulseTimeS = np.append(_pulseTimeS, self.pulseTimeS)
#_monitor += self.monitor
self.detZ_e = _detZ_e
self.lamda_e = _lamda_e
self.wallTime_e = _wallTime_e
#self.monitor = _monitor
self.monitorPerPulse = _monitorPerPulse
self.pulseTimeS = _pulseTimeS
#-------------------------------------------------------------------------------------------------
#def path_generator(self, number):
@@ -105,7 +116,7 @@ class AmorData:
def path_generator(self, number):
fileName = f'amor{self.reader_config.year}n{number:06d}.hdf'
path = ''
for rawd in self.reader_config.raw:
for rawd in self.reader_config.rawPath:
if os.path.exists(os.path.join(rawd,fileName)):
path = rawd
break
@@ -113,7 +124,7 @@ class AmorData:
if os.path.exists(f'/afs/psi.ch/project/sinqdata/{self.reader_config.year}/amor/{int(number/1000)}/{fileName}'):
path = f'/afs/psi.ch/project/sinqdata/{self.reader_config.year}/amor/{int(number/1000)}'
else:
sys.exit(f'# ERROR: the file {fileName} can not be found in {self.reader_config.raw}!')
sys.exit(f'# ERROR: the file {fileName} can not be found in {self.reader_config.rawPath}')
return os.path.join(path, fileName)
#-------------------------------------------------------------------------------------------------
def expand_file_list(self, short_notation):
@@ -153,7 +164,7 @@ class AmorData:
if self.readHeaderInfo:
self.read_header_info()
logging.warning(f' data from file: {fileName}')
logging.warning(f' from file: {fileName}')
self.read_individual_header()
# add header content
@@ -180,15 +191,26 @@ class AmorData:
self.read_event_stream()
totalNumber = np.shape(self.tof_e)[0]
# check for empty event stream
if totalNumber == 0:
logging.error('empty event stream: can not determine end time')
sys.exit()
self.sort_pulses()
self.define_monitor()
# sort the events into the related pulses
self.associate_pulse_with_monitor()
self.extract_walltime(norm)
# following lines: debugging output to trace the time-offset of proton current and neutron pulses
if self.config.monitorType == 'x':
cpp, t_bins = np.histogram(self.wallTime_e, self.pulseTimeS)
np.savetxt('tme.hst', np.vstack((self.pulseTimeS[:-1], cpp, self.monitorPerPulse[:-1])).T)
#self.average_events_per_pulse() # for debugging only. VERY time consuming!!!
self.monitor_threshold()
self.filter_strange_times()
self.merge_frames()
@@ -204,51 +226,72 @@ class AmorData:
logging.info(f' number of events: total = {totalNumber:7d}, filtered = {np.shape(self.lamda_e)[0]:7d}')
def sort_pulses(self):
chopperPeriod = int(2*self.tau*1e9)
chopperPeriod = np.int64(2*self.tau*1e9)
pulseTime = np.sort(self.dataPacketTime_p)
pulseTime = pulseTime[np.abs(pulseTime[:]-np.roll(pulseTime, 1)[:])>5]
if self.seriesStartTime is None:
self.seriesStartTime = float(pulseTime[0])
pulseTime -= self.seriesStartTime
self.stopTime = float(pulseTime[-1])
pulseTime -= np.int64(self.seriesStartTime)
self.stopTime = pulseTime[-1]
pulseTime = pulseTime[pulseTime>=0]
# fill in missing pulse times
# TODO: check for real end time
self.pulseTimeS = np.array([pulseTime[0]])
for tt in pulseTime[1:]:
nxt = self.pulseTimeS[-1] + chopperPeriod
while abs(tt - nxt) > self.tau*1e9:
self.pulseTimeS = np.append(self.pulseTimeS, nxt)
nxt += chopperPeriod
self.pulseTimeS = np.append(self.pulseTimeS, tt)
# remove 'partially filled' pulses
self.pulseTimeS = self.pulseTimeS[1:-1]
try:
# further files
# TODO: use the first pulse of the respective measurement
#nextPulseTime = startTime % np.int64(self.tau*2e9)
#nextPulseTime = self.pulseTimeS[-1] + chopperPeriod
nextPulseTime = pulseTime[0]
except AttributeError:
# first file
nextPulseTime = pulseTime[0] % np.int64(self.tau*2e9)
def associate_pulse_with_current(self):
if self.monitorType == 'protonCharge':
lowCurrentThreshold = 0.05 # mA
self.currentTime -= self.seriesStartTime
currentInterpolator = sp.interpolate.interp1d(self.currentTime, self.current, kind='previous', bounds_error=False, fill_value=0)
self.charge = np.array(currentInterpolator(self.pulseTimeS) * 2*self.tau *1e-3, dtype=float)
# calculate where time difference between pulses exceeds its time by more than 1/2
# this yields the number of missing pulses
pulseLengths = pulseTime[1:]-pulseTime[:-1]
pulseExtra = (pulseLengths-np.int64(self.tau*1e9))//np.int64(self.tau*2e9)
gap_indices = np.where(pulseExtra>0)[0]
if len(gap_indices)==0:
# no missing pulses, just use given array
self.pulseTimeS = np.array(pulseTime, dtype=np.int64)
return
self.pulseTimeS = np.array(pulseTime[:gap_indices[0]+1], dtype=np.int64)
last_index = gap_indices[0]
for gapi in gap_indices[1:]:
# insert missing pulses into each gap
gap_pulses = pulseTime[last_index]+np.arange(1, pulseExtra[last_index]+1)*chopperPeriod
self.pulseTimeS = np.append(self.pulseTimeS, gap_pulses)
self.pulseTimeS = np.append(self.pulseTimeS, pulseTime[last_index+1:gapi+1])
last_index = gapi
if last_index<len(pulseTime):
self.pulseTimeS = np.append(self.pulseTimeS, pulseTime[last_index:-1])
def get_current_per_pulse(self, pulseTimeS, currentTimeS, currents):
# add currents for early pulses and current time value after last pulse (j+1)
currentTimeS = np.hstack([[0], currentTimeS, [pulseTimeS[-1]+1]])
currents = np.hstack([[0], currents])
pulseCurrentS = np.zeros(pulseTimeS.shape[0], dtype=float)
j = 0
for i, ti in enumerate(pulseTimeS):
if ti >= currentTimeS[j+1]:
j += 1
pulseCurrentS[i] = currents[j]
#print(f' {i} {pulseTimeS[i]} {pulseCurrentS[i]}')
return pulseCurrentS
def associate_pulse_with_monitor(self):
if self.config.monitorType == 'p': # protonCharge
self.currentTime -= np.int64(self.seriesStartTime)
self.monitorPerPulse = self.get_current_per_pulse(self.pulseTimeS, self.currentTime, self.current) * 2*self.tau * 1e-3
# filter low-current pulses
self.charge = np.where(self.charge > 2*self.tau *lowCurrentThreshold, self.charge, 0)
# remove 'partially filled' pulses
self.charge[0] = 0
self.charge[-1] = 0
def define_monitor(self):
if self.monitorType == 'protonCharge':
chargeSum = np.sum(self.charge)
logging.warning(f' proton charge = {chargeSum:9.3f} mC')
self.monitor = chargeSum
elif self.monitorType == 'countingTime':
self.monitor = self.stopTime - self.seriesStartTime
else:
self.monitor = 1.
self.monitorPerPulse = np.where(self.monitorPerPulse > 2*self.tau * self.config.lowCurrentThreshold * 1e-3, self.monitorPerPulse, 0)
elif self.config.monitorType == 't': # countingTime
self.monitorPerPulse = np.ones(np.shape(self.pulseTimeS)[0])*self.tau
else:
self.monitorPerPulse = 1./np.shape(pulseTimeS)[1]
def extract_walltime(self, norm):
#self.dataPacketTime_p = np.array(self.dataPacketTime_p, dtype=float) / 1e9
if nb_helpers:
self.wallTime_e = nb_helpers.extract_walltime(self.tof_e, self.dataPacket_p, self.dataPacketTime_p)
else:
@@ -256,19 +299,25 @@ class AmorData:
for i in range(len(self.dataPacket_p)-1):
self.wallTime_e[self.dataPacket_p[i]:self.dataPacket_p[i+1]] = self.dataPacketTime_p[i]
self.wallTime_e[self.dataPacket_p[-1]:] = self.dataPacketTime_p[-1]
#if not self.startTime and not norm:
# self.startTime = self.wallTime_e[0]
self.wallTime_e -= np.int64(self.seriesStartTime)
logging.debug(f' wall time from {self.wallTime_e[0]/1e9} to {self.wallTime_e[-1]/1e9}')
logging.debug(f' wall time from {self.wallTime_e[0]/1e9:6.1f} s to {self.wallTime_e[-1]/1e9:6.1f} s')
def average_events_per_pulse(self):
if self.config.monitorType == 'p':
for i, time in enumerate(self.pulseTimeS):
events = np.shape(self.wallTime_e[self.wallTime_e == time])[0]
logging.info(f'pulse: {i:6.0f}, events: {events:6.0f}, monitor: {self.monitorPerPulse[i]:6.2f}')
def monitor_threshold(self):
goodTimeS = self.pulseTimeS[self.charge!=0]
filter_e = np.where(np.isin(self.wallTime_e, goodTimeS), True, False)
self.tof_e = self.tof_e[filter_e]
self.pixelID_e = self.pixelID_e[filter_e]
self.wallTime_e = self.wallTime_e[filter_e]
logging.warning(f' rejected {np.shape(self.charge)[0]-np.shape(goodTimeS)[0]} pulses due to low beam current')
logging.warning(f' rejected {np.shape(filter_e)[0]-np.shape(self.tof_e)[0]} events due to low beam current')
if self.config.monitorType == 'p': # fix to check for file compatibility
goodTimeS = self.pulseTimeS[self.monitorPerPulse!=0]
filter_e = np.where(np.isin(self.wallTime_e, goodTimeS), True, False)
self.tof_e = self.tof_e[filter_e]
self.pixelID_e = self.pixelID_e[filter_e]
self.wallTime_e = self.wallTime_e[filter_e]
logging.info(f' rejected {np.shape(self.monitorPerPulse)[0]-np.shape(goodTimeS)[0]} out of {np.shape(self.monitorPerPulse)[0]} pulses')
logging.info(f' with {np.shape(filter_e)[0]-np.shape(self.tof_e)[0]} events due to low beam current')
logging.info(f' average counts per pulse = {np.shape(self.tof_e)[0] / np.shape(goodTimeS[goodTimeS!=0])[0]:7.1f}')
def filter_qz_range(self, norm):
if self.config.qzRange[1]<0.3 and not norm:
@@ -344,23 +393,26 @@ class AmorData:
self.pixelID_e = self.pixelID_e[filter_e]
self.wallTime_e = self.wallTime_e[filter_e]
if np.shape(filter_e)[0]-np.shape(self.tof_e)[0]>0.5:
logging.warning(f'# strange times: {np.shape(filter_e)[0]-np.shape(self.tof_e)[0]}')
logging.warning(f' strange times: {np.shape(filter_e)[0]-np.shape(self.tof_e)[0]}')
def read_event_stream(self):
self.tof_e = np.array(self.hdf['/entry1/Amor/detector/data/event_time_offset'][:])/1.e9
self.pixelID_e = np.array(self.hdf['/entry1/Amor/detector/data/event_id'][:], dtype=np.int64)
self.dataPacket_p = np.array(self.hdf['/entry1/Amor/detector/data/event_index'][:], dtype=np.uint64)
#self.dataPacketTime_p = np.array(self.hdf['/entry1/Amor/detector/data/event_time_zero'][:], dtype=np.uint64)/1e9
self.dataPacketTime_p = np.array(self.hdf['/entry1/Amor/detector/data/event_time_zero'][:], dtype=float)
try:
self.currentTime = np.array(self.hdf['entry1/Amor/detector/proton_current/time'][:], dtype=np.int64)
self.current = np.array(self.hdf['entry1/Amor/detector/proton_current/value'][:,0], dtype=float)
if len(self.current)>0:
self.monitorType = 'protonCharge'
else:
self.monitorType = 'countingTime'
except(KeyError, IndexError):
self.monitorType = 'countingTime'
self.dataPacketTime_p = np.array(self.hdf['/entry1/Amor/detector/data/event_time_zero'][:], dtype=np.int64)
if self.config.monitorType in ['auto', 'p']:
try:
self.currentTime = np.array(self.hdf['entry1/Amor/detector/proton_current/time'][:], dtype=np.int64)
self.current = np.array(self.hdf['entry1/Amor/detector/proton_current/value'][:,0], dtype=float)
if len(self.current)>4:
self.config.monitorType = 'p'
else:
self.config.monitorType = 't'
except(KeyError, IndexError):
self.config.monitorType = 't'
else:
self.config.monitorType = 't'
#TODO: protonMonitor
def read_individual_header(self):
self.chopperDistance = float(np.take(self.hdf['entry1/Amor/chopper/pair_separation'], 0))
@@ -404,7 +456,12 @@ class AmorData:
if self.config.nu:
self.nu = self.config.nu
self.fileDate = datetime.fromisoformat( self.hdf['/entry1/start_time'][0].decode('utf-8') )
# extract start time as unix time, adding UTC offset of 1h to time string
dz = datetime.fromisoformat(self.hdf['/entry1/start_time'][0].decode('utf-8'))
self.fileDate=dz.replace(tzinfo=AMOR_LOCAL_TIMEZONE)
self.startTime = np.int64( (self.fileDate.timestamp() ) * 1e9 )
if self.seriesStartTime is None:
self.seriesStartTime = self.startTime
def read_header_info(self):
# read general information and first data set

View File

@@ -11,12 +11,11 @@ def merge_frames(tof_e, tofCut, tau, total_offset):
tof_e_out[ti] = ((tof_e[ti]-dt)%tau)+total_offset # tof shifted to 1 frame
return tof_e_out
@nb.jit(nb.float64[:](nb.float64[:], nb.uint64[:], nb.float64[:]),
@nb.jit(nb.float64[:](nb.float64[:], nb.uint64[:], nb.int64[:]),
nopython=True, parallel=True, cache=True)
def extract_walltime(tof_e, dataPacket_p, dataPacketTime_p):
# assigning every event the wall time of the event packet (absolute time of pulse ?start?)
totalNumber = np.shape(tof_e)[0]
#wallTime_e = np.empty(totalNumber, dtype=np.float64)
wallTime_e = np.empty(totalNumber, dtype=np.int64)
for i in nb.prange(len(dataPacket_p)-1):
for j in range(dataPacket_p[i], dataPacket_p[i+1]):

View File

@@ -5,16 +5,18 @@ from dataclasses import dataclass, field
from typing import Optional, Tuple
from datetime import datetime
from os import path
import numpy as np
import logging
class Defaults:
# fileIdentifier
dataPath = '.'
raw = ['.', path.join('.','raw'), path.join('..','raw'), path.join('..','..','raw')]
outputPath = '.'
rawPath = ['.', path.join('.','raw'), path.join('..','raw'), path.join('..','..','raw')]
year = datetime.now().year
normalisationFileIdentifier = []
normalisationMethod = 'o'
monitorType = 'auto'
# subtract
outputName = "fromEOS"
outputFormat = ['Rqz.ort']
@@ -35,6 +37,7 @@ class Defaults:
mu = 0
nu = 0
sampleModel = None
lowCurrentThreshold = 50
#
@@ -42,8 +45,7 @@ class Defaults:
@dataclass
class ReaderConfig:
year: int
dataPath: str
raw: Tuple[str]
rawPath: Tuple[str]
startTime: Optional[float] = 0
@dataclass
@@ -53,6 +55,8 @@ class ExperimentConfig:
yRange: Tuple[float, float]
lambdaRange: Tuple[float, float]
qzRange: Tuple[float, float]
monitorType: str
lowCurrentThreshold: float
sampleModel: Optional[str] = None
chopperPhaseOffset: float = 0
@@ -80,6 +84,7 @@ class ReductionConfig:
class OutputConfig:
outputFormats: list
outputName: str
outputPath: str
@dataclass
class EOSConfig:
@@ -105,10 +110,8 @@ class EOSConfig:
inpt += f' -Y {self.reader.year}'
else:
inpt += f' -Y {datetime.now().year}'
if self.reader.dataPath != '.':
inpt += f' --dataPath {self.reader.dataPath}'
#if self.reader.raw != '.':
# inpt = f' --rawd {self.reader.raw}'
if np.shape(self.reader.rawPath)[0] == 1:
inpt += f' --rawPath {self.reader.rawPath}'
if self.reduction.subtract:
inpt += f' -subtract {self.reduction.subtract}'
if self.reduction.normalisationFileIdentifier:
@@ -119,6 +122,8 @@ class EOSConfig:
otpt = ''
if self.reduction.qResolution:
otpt += f' -r {self.reduction.qResolution}'
if self.output.outputPath != '.':
inpt += f' --outputdPath {self.output.outputPath}'
if self.output.outputName:
otpt += f' -o {self.output.outputName}'
if self.output.outputFormats != ['Rqz.ort']:
@@ -130,9 +135,9 @@ class EOSConfig:
if self.experiment.lambdaRange!= Defaults.lambdaRange:
mask += f' -l {" ".join(str(ff) for ff in self.experiment.lambdaRange)}'
if self.reduction.thetaRange != Defaults.thetaRange:
mask += f' -T {" ".join(str(ff) for ff in self.reduction.thetaRange)}'
mask += f' -t {" ".join(str(ff) for ff in self.reduction.thetaRange)}'
elif self.reduction.thetaRangeR != Defaults.thetaRangeR:
mask += f' -t {" ".join(str(ff) for ff in self.reduction.thetaRangeR)}'
mask += f' -T {" ".join(str(ff) for ff in self.reduction.thetaRangeR)}'
if self.experiment.qzRange!= Defaults.qzRange:
mask += f' -q {" ".join(str(ff) for ff in self.experiment.qzRange)}'

View File

@@ -22,10 +22,12 @@ class AmorReduction:
self.header = Header()
self.header.reduction.call = config.call_string()
self.monitorUnit = {'n': 'cnts', 'p': 'mC', 't': 's'}
def reduce(self):
if not os.path.exists(f'{self.reader_config.dataPath}'):
logging.debug(f'Creating destination path {self.reader_config.dataPath}')
os.system(f'mkdir {self.reader_config.dataPath}')
if not os.path.exists(f'{self.output_config.outputPath}'):
logging.debug(f'Creating destination path {self.output_config.outputPath}')
os.system(f'mkdir {self.output_config.outputPath}')
# load or create normalisation matrix
if self.reduction_config.normalisationFileIdentifier:
@@ -76,12 +78,13 @@ class AmorReduction:
def read_unsliced(self, i):
lamda_e = self.file_reader.lamda_e
detZ_e = self.file_reader.detZ_e
self.monitor = np.sum(self.file_reader.monitorPerPulse)
logging.warning(f' monitor = {self.monitor:8.2f} {self.monitorUnit[self.experiment_config.monitorType]}')
qz_lz, qx_lz, ref_lz, err_lz, res_lz, lamda_lz, theta_lz, int_lz, self.mask_lz = self.project_on_lz(
self.file_reader, self.norm_lz, self.normAngle, lamda_e, detZ_e)
monitor = self.file_reader.monitor
if monitor>1 :
ref_lz /= monitor
err_lz /= monitor
#if self.monitor>1 :
# ref_lz /= self.monitor
# err_lz /= self.monitor
try:
ref_lz *= self.reduction_config.scale[i]
err_lz *= self.reduction_config.scale[i]
@@ -170,27 +173,38 @@ class AmorReduction:
j += 1
def read_timeslices(self, i):
wallTime_e = self.file_reader.wallTime_e
wallTime_e = np.float64(self.file_reader.wallTime_e)/1e9
pulseTimeS = np.float64(self.file_reader.pulseTimeS)/1e9
interval = self.reduction_config.timeSlize[0]
try:
start = self.reduction_config.timeSlize[1]
except:
except IndexError:
start = 0
try:
stop = self.reduction_config.timeSlize[2]
except:
except IndexError:
stop = wallTime_e[-1]
# make overwriting log lines possible by removing newline at the end
logging.StreamHandler.terminator = "\r"
#logging.StreamHandler.terminator = "\r"
logging.warning(f' time slizing')
logging.info(' slize time monitor')
for ti, time in enumerate(np.arange(start, stop, interval)):
logging.warning(f' time slize {ti:4d}')
filter_e = np.where((time<wallTime_e) & (wallTime_e<time+interval), True, False)
lamda_e = self.file_reader.lamda_e[filter_e]
detZ_e = self.file_reader.detZ_e[filter_e]
filter_m = np.where((time<pulseTimeS) & (pulseTimeS<time+interval), True, False)
self.monitor = np.sum(self.file_reader.monitorPerPulse[filter_m])
logging.info(f' {ti:<4d} {time:6.0f} {self.monitor:7.2f} {self.monitorUnit[self.experiment_config.monitorType]}')
qz_lz, qx_lz, ref_lz, err_lz, res_lz, lamda_lz, theta_lz, int_lz, mask_lz = self.project_on_lz(
self.file_reader, self.norm_lz, self.normAngle, lamda_e, detZ_e)
try:
ref_lz *= self.reduction_config.scale[i]
err_lz *= self.reduction_config.scale[i]
except IndexError:
ref_lz *= self.reduction_config.scale[-1]
err_lz *= self.reduction_config.scale[-1]
q_q, R_q, dR_q, dq_q = self.project_on_qz(qz_lz, ref_lz, err_lz, res_lz, self.norm_lz, mask_lz)
filter_q = np.where((self.experiment_config.qzRange[0]<q_q) & (q_q<self.experiment_config.qzRange[1]),
@@ -220,17 +234,17 @@ class AmorReduction:
orso_data = fileio.OrsoDataset(headerRqz, data)
self.datasetsRqz.append(orso_data)
# reset normal logging behavior
logging.StreamHandler.terminator = "\n"
logging.warning(f' time slizing, done')
#logging.StreamHandler.terminator = "\n"
logging.info(f' done {time+interval:5.0f}')
def save_Rqz(self):
fname = os.path.join(self.reader_config.dataPath, f'{self.output_config.outputName}.Rqz.ort')
fname = os.path.join(self.output_config.outputPath, f'{self.output_config.outputName}.Rqz.ort')
logging.warning(f' {fname}')
theSecondLine = f' {self.header.experiment.title} | {self.header.experiment.start_date} | sample {self.header.sample.name} | R(q_z)'
fileio.save_orso(self.datasetsRqz, fname, data_separator='\n', comment=theSecondLine)
def save_Rtl(self):
fname = os.path.join(self.reader_config.dataPath, f'{self.output_config.outputName}.Rlt.ort')
fname = os.path.join(self.output_config.outputPath, f'{self.output_config.outputName}.Rlt.ort')
logging.warning(f' {fname}')
theSecondLine = f' {self.header.experiment.title} | {self.header.experiment.start_date} | sample {self.header.sample.name} | R(lambda, theta)'
fileio.save_orso(self.datasetsRlt, fname, data_separator='\n', comment=theSecondLine)
@@ -257,7 +271,7 @@ class AmorReduction:
scale = 1.
R_q /= scale
dR_q /= scale
logging.debug(f' scaling factor = {scale}')
logging.info(f' scaling factor = {1/scale}')
return R_q, dR_q
@@ -298,7 +312,7 @@ class AmorReduction:
return q_q[1:], R_q, dR_q, dq_q
def loadRqz(self, name):
fname = os.path.join(self.reader_config.dataPath, name)
fname = os.path.join(self.output_config.outputPath, name)
if os.path.exists(fname):
fileName = fname
elif os.path.exists(f'{fname}.Rqz.ort'):
@@ -311,12 +325,12 @@ class AmorReduction:
return q_q, Sq_q, dS_q, fileName
def create_normalisation_map(self, short_notation):
dataPath = self.reader_config.dataPath
outputPath = self.output_config.outputPath
normalisation_list = expand_file_list(short_notation)
name = str(normalisation_list[0])
for i in range(1, len(normalisation_list), 1):
name = f'{name}_{normalisation_list[i]}'
n_path = os.path.join(dataPath, f'{name}.norm')
n_path = os.path.join(outputPath, f'{name}.norm')
if os.path.exists(n_path):
logging.warning(f'normalisation matrix: found and using {n_path}')
with open(n_path, 'rb') as fh:
@@ -336,20 +350,24 @@ class AmorReduction:
self.normAngle = fromHDF.nu - fromHDF.mu
lamda_e = fromHDF.lamda_e
detZ_e = fromHDF.detZ_e
self.normMonitor = fromHDF.monitor
self.norm_lz, bins_l, bins_z = np.histogram2d(lamda_e, detZ_e, bins = (self.grid.lamda(), self.grid.z()))
self.norm_lz = np.where(self.norm_lz>2, self.norm_lz, np.nan)
# correct for the SM reflectivity
lamda_l = self.grid.lamda()
theta_z = self.normAngle + fromHDF.delta_z
lamda_lz = (self.grid.lz().T*lamda_l[:-1]).T
theta_lz = self.grid.lz()*theta_z
qz_lz = 4.0*np.pi * np.sin(np.deg2rad(theta_lz)) / lamda_lz
Rsm_lz = np.ones(np.shape(qz_lz))
Rsm_lz = np.where(qz_lz>0.0217, 1-(qz_lz-0.0217)*(0.0625/0.0217), Rsm_lz)
# TODO: introduce variable for `m` and propably for the decay
Rsm_lz = np.where(qz_lz>0.0217*5, np.nan, Rsm_lz)
self.norm_lz = self.norm_lz / Rsm_lz
self.normMonitor = np.sum(fromHDF.monitorPerPulse)
norm_lz, bins_l, bins_z = np.histogram2d(lamda_e, detZ_e, bins = (self.grid.lamda(), self.grid.z()))
norm_lz = np.where(norm_lz>2, norm_lz, np.nan)
if self.reduction_config.normalisationMethod == 'd':
# direct reference => invert map vertically
self.norm_lz = np.flip(norm_lz, 1)
else:
# correct for reference sm reflectivity
lamda_l = self.grid.lamda()
theta_z = self.normAngle + fromHDF.delta_z
lamda_lz = (self.grid.lz().T*lamda_l[:-1]).T
theta_lz = self.grid.lz()*theta_z
qz_lz = 4.0*np.pi * np.sin(np.deg2rad(theta_lz)) / lamda_lz
# TODO: introduce variable for `m` and propably for the slope
Rsm_lz = np.ones(np.shape(qz_lz))
Rsm_lz = np.where(qz_lz>0.0217, 1-(qz_lz-0.0217)*(0.0625/0.0217), Rsm_lz)
Rsm_lz = np.where(qz_lz>0.0217*5, np.nan, Rsm_lz)
self.norm_lz = norm_lz / Rsm_lz
if len(lamda_e) > 1e6:
with open(n_path, 'wb') as fh:
@@ -364,23 +382,26 @@ class AmorReduction:
# projection on lambda-z-grid
lamda_l = self.grid.lamda()
alphaF_z = fromHDF.nu - fromHDF.mu + fromHDF.delta_z
# TODO: implement various methods to obtain alpha_i.
#if self.experiment_config.incidentAngle == 'alphaF':
# # for specular reflectometry with a highly divergent beam
# alphaF_z = fromHDF.nu - fromHDF.mu + fromHDF.delta_z
#elif self.experiment_config.incidentAngle == 'nu':
# # for specular reflectometry, using kappa nad nu but ignoring mu
# alphaF_z = (fromHDF.nu + fromHDF.delta_z + fromHDF.kap + fromHDF.kad) / 2.
#else:
# # using kappa, for a collimated incoming beam
# pass
lamda_lz = (self.grid.lz().T*lamda_l[:-1]).T
alphaF_lz = self.grid.lz()*alphaF_z
thetaN_z = fromHDF.delta_z + normAngle
thetaN_lz = np.ones(np.shape(norm_lz))*thetaN_z
thetaN_lz = np.where(np.absolute(thetaN_lz)>5e-3, thetaN_lz, np.nan)
mask_lz = np.where(np.isnan(norm_lz), False, True)
mask_lz = np.logical_and(mask_lz, np.where(np.absolute(thetaN_lz)>5e-3, True, False))
mask_lz = np.logical_and(mask_lz, np.where(np.absolute(alphaF_lz)>5e-3, True, False))
if self.reduction_config.thetaRange[1]<12:
if self.reduction_config.thetaRangeR[1]<12:
t0 = fromHDF.nu - fromHDF.mu
mask_lz = np.logical_and(mask_lz, np.where(alphaF_lz-t0 >= self.reduction_config.thetaRangeR[0], True, False))
mask_lz = np.logical_and(mask_lz, np.where(alphaF_lz-t0 <= self.reduction_config.thetaRangeR[1], True, False))
elif self.reduction_config.thetaRange[1]<12:
mask_lz = np.logical_and(mask_lz, np.where(alphaF_lz >= self.reduction_config.thetaRange[0], True, False))
mask_lz = np.logical_and(mask_lz, np.where(alphaF_lz <= self.reduction_config.thetaRange[1], True, False))
else:
@@ -388,10 +409,6 @@ class AmorReduction:
fromHDF.nu - fromHDF.mu + fromHDF.div/2]
mask_lz = np.logical_and(mask_lz, np.where(alphaF_lz >= self.reduction_config.thetaRange[0], True, False))
mask_lz = np.logical_and(mask_lz, np.where(alphaF_lz <= self.reduction_config.thetaRange[1], True, False))
if self.reduction_config.thetaRangeR[1]<12:
t0 = fromHDF.nu - fromHDF.mu
mask_lz = np.logical_and(mask_lz, np.where(alphaF_lz-t0 >= self.reduction_config.thetaRangeR[0], True, False))
mask_lz = np.logical_and(mask_lz, np.where(alphaF_lz-t0 <= self.reduction_config.thetaRangeR[1], True, False))
if self.experiment_config.lambdaRange[1]<15:
mask_lz = np.logical_and(mask_lz, np.where(lamda_lz >= self.experiment_config.lambdaRange[0], True, False))
mask_lz = np.logical_and(mask_lz, np.where(lamda_lz <= self.experiment_config.lambdaRange[1], True, False))
@@ -416,19 +433,27 @@ class AmorReduction:
if self.reduction_config.normalisationMethod == 'o':
logging.debug(' assuming an overilluminated sample and correcting for the angle of incidence')
ref_lz = (int_lz * np.absolute(thetaN_lz)) / (norm_lz * np.absolute(thetaF_lz)) * self.normMonitor
thetaN_z = fromHDF.delta_z + normAngle
thetaN_lz = np.ones(np.shape(norm_lz))*thetaN_z
thetaN_lz = np.where(np.absolute(thetaN_lz)>5e-3, thetaN_lz, np.nan)
mask_lz = np.logical_and(mask_lz, np.where(np.absolute(thetaN_lz)>5e-3, True, False))
ref_lz = (int_lz * np.absolute(thetaN_lz)) / (norm_lz * np.absolute(thetaF_lz))
elif self.reduction_config.normalisationMethod == 'u':
logging.debug(' assuming an underilluminated sample and ignoring the angle of incidence')
ref_lz = (int_lz / norm_lz) * self.normMonitor
ref_lz = (int_lz / norm_lz)
elif self.reduction_config.normalisationMethod == 'd':
logging.debug(' assuming direct beam for normalisation and ignoring the angle of incidence')
norm_lz = np.flip(norm_lz,1)
ref_lz = (int_lz / norm_lz) * self.normMonitor
ref_lz = (int_lz / norm_lz)
else:
logging.error('unknown normalisation method! Use [u], [o] or [d]')
ref_lz = (int_lz * np.absolute(thetaN_lz)) / (norm_lz * np.absolute(thetaF_lz)) * self.normMonitor
logging.error('unknown normalisation method! Use [u]nder, [o]ver or [d]irect illumination')
ref_lz = (int_lz / norm_lz)
if self.monitor > 1e-6 :
ref_lz *= self.normMonitor / self.monitor
else:
logging.info(' too small monitor value for normalisation -> ignoring monitors')
err_lz = ref_lz * np.sqrt( 1/(int_lz+.1) + 1/norm_lz )
# TODO: allow for non-ideal Delta lambda / lambda (rather than 2.2%)
res_lz = np.ones((np.shape(lamda_l[:-1])[0], np.shape(alphaF_z)[0])) * 0.022**2
res_lz = res_lz + (0.008/alphaF_lz)**2
res_lz = qz_lz * np.sqrt(res_lz)

View File

@@ -1,716 +0,0 @@
__version__ = '2024-03-30'
import os
import sys
import subprocess
import h5py
import glob
import numpy as np
import argparse
import matplotlib.pyplot as plt
import matplotlib as mpl
import time
import logging
from datetime import datetime
#==============================================================================
#==============================================================================
class Detector:
    """Geometry constants of the Amor multi-blade detector.

    All lengths are in mm; the pixel pitches follow from the 4 mm pixel
    size projected along the 5.1 deg blade inclination.
    """
    def __init__(self):
        self.nBlades = 14                                 # number of active blades in the detector
        inclination  = np.deg2rad(5.1)                    # rad, incidence of the beam on the blades (def: 5.1 deg)
        self.dZ      = 4.0 * np.sin(inclination)          # mm, height-distance of neighboring pixels on one blade
        self.dX      = 4.0 * np.cos(inclination)          # mm, depth-distance of neighboring pixels on one blade
        self.bladeZ  = 10.7                               # mm, distance between detector blades (consistent with nu!)
        self.zero    = 0.5 * self.nBlades * self.bladeZ   # mm, vertical center of the detector
#==============================================================================
def pixel2quantity(distance=None):
    """Build a lookup table mapping detector pixel ID to derived quantities.

    Returns an (nPixel, 4) array with columns
      0: dZ     vertical pixel row index over all blades
      1: bY     horizontal pixel index on the blade
      2: delta  angular position on the detector / deg
      3: x      depth within the detector / mm

    Parameters
    ----------
    distance : float, optional
        sample-to-detector distance / mm. Defaults to the module-level
        ``detectorDistance`` (backward compatible with the old call).
    """
    if distance is None:
        # fall back to the module-level global set in process()
        distance = detectorDistance
    det = Detector()
    nPixel = 64 * 32 * det.nBlades
    pixelID = np.arange(nPixel)
    (bladeNr, bPixel) = np.divmod(pixelID, 64*32)
    (bZ, bY) = np.divmod(bPixel, 64)
    # NOTE: the unused local `z` of the original implementation was removed
    x = (31 - bZ) * det.dX
    bladeAngle = np.rad2deg( 2. * np.arcsin(0.5*det.bladeZ / distance) )
    delta = (det.nBlades/2. - bladeNr) * bladeAngle - np.rad2deg( np.arctan(bZ*det.dZ / ( distance + bZ * det.dX) ) )
    dZ = bladeNr * 32 + bZ
    quantity = np.vstack((dZ.T, bY.T, delta.T, x.T)).T
    return quantity
#==============================================================================
def analyse_ev(event_e, tof_e, yMin, yMax, thetaMin, thetaMax):
    """Turn raw events (pixel ID + tof) into per-event physical quantities.

    Returns an (N, 9) array with columns
      0: wall time / s          1: pixelID
      2: z on detector          3: y on detector
      4: delta / deg            5: path within detector / mm
      6: lambda / angstrom      7: theta / deg
      8: q_z / angstrom^-1
    Events outside the tof, y and theta windows are dropped.
    Relies on the module globals tau, nu, mu, detectorDistance,
    chopperDetectorDistance and verbous.
    """
    n_events = len(event_e)
    data_e = np.zeros((n_events, 9), dtype=float)
    data_e[:, 0] = tof_e
    data_e[:, 1] = event_e
    # drop events with implausible tof (> 2 chopper periods)
    plausible = (data_e[:, 0] <= 2*tau)
    data_e = data_e[plausible, :]
    if np.shape(plausible)[0]-np.shape(data_e)[0] > 0.5 and verbous:
        logging.warning(f'## strange times: {np.shape(plausible)[0]-np.shape(data_e)[0]}')
    # map (1-based) pixel IDs to z, y, delta and in-detector path
    lookup = pixel2quantity()
    data_e[:, 2:6] = lookup[np.int_(data_e[:, 1]) - 1, :]
    # keep only the requested y range on the detector
    in_y = (yMin <= data_e[:, 3]) & (data_e[:, 3] <= yMax)
    data_e = data_e[in_y, :]
    # correct tof for beam size effect at chopper
    data_e[:, 0] -= (data_e[:, 4] / 180.) * tau
    # wavelength from tof over the effective flight path
    hdm = 6.626176e-34/1.674928e-27  # h / m
    data_e[:, 6] = 1.e13 * data_e[:, 0] * hdm / (chopperDetectorDistance + data_e[:, 5])
    # scattering angle, incl. gravity compensation
    data_e[:, 7] = nu - mu + data_e[:, 4]
    data_e[:, 7] += np.rad2deg(np.arctan(3.07e-10 * (detectorDistance + data_e[:, 5]) * data_e[:, 6] * data_e[:, 6]))
    # keep only the requested theta range
    in_theta = (thetaMin <= data_e[:, 7]) & (data_e[:, 7] <= thetaMax)
    data_e = data_e[in_theta, :]
    # momentum transfer
    data_e[:, 8] = 4*np.pi * np.sin(np.deg2rad(data_e[:, 7])) / data_e[:, 6]
    return data_e
#==============================================================================
class Meta:
    # AMOR hdf dataset with associated properties from metadata.
    # Reads geometry, timing and control parameters; falls back to the
    # nicos cache files on disk when the master parameters are missing.
    def __init__(self, fileName):
        self.fileName = fileName
        fh = h5py.File(fileName, 'r', swmr=True)
        # for processing
        self.chopperDistance = float(np.take(fh['/entry1/Amor/chopper/pair_separation'], 0)) # mm
        # the following is the distance from the sample to the detector entry window, not to the center of rotation
        self.detectorDistance = float(np.take(fh['/entry1/Amor/detector/transformation/distance'], 0)) # mm
        self.chopperDetectorDistance = self.detectorDistance - float(np.take(fh['entry1/Amor/chopper/distance'], 0)) # mm
        self.lamdaCut = 2.5 # Aa
        startDate = str(fh['/entry1/start_time'][0].decode('utf-8'))
        self.startDate = datetime.strptime(startDate, '%Y-%m-%d %H:%M:%S')
        startDate = datetime.timestamp(self.startDate)
        # counting time estimated from the last event pulse time (ns -> s) minus the start timestamp
        self.countingTime = float(np.take(fh['/entry1/Amor/detector/data/event_time_zero'], -1))/1e9 - startDate
        # not exact for low rates
        ka0 = 0.245 # given inclination of the beam after the Selene guide (currently unused here)
        year_date = str(datetime.today()).split(' ')[0].replace("-", "/", 1)
        # decide from where to take the control parameters
        try:
            # preferred source: master parameters stored in the hdf file
            self.mu = float(np.take(fh['/entry1/Amor/master_parameters/mu/value'], 0))
            self.nu = float(np.take(fh['/entry1/Amor/master_parameters/nu/value'], 0))
            self.kap = float(np.take(fh['/entry1/Amor/master_parameters/kap/value'], 0))
            self.kad = float(np.take(fh['/entry1/Amor/master_parameters/kad/value'], 0))
            self.div = float(np.take(fh['/entry1/Amor/master_parameters/div/value'], 0))
            chSp = float(np.take(fh['/entry1/Amor/chopper/rotation_speed/value'], 0))
            self.chPh = float(np.take(fh['/entry1/Amor/chopper/phase/value'], 0))
        except (KeyError, IndexError):
            # fallback: grep today's values from the nicos cache files
            logging.warning(f" using parameters from nicos cache")
            #cachePath = '/home/amor/nicosdata/amor/cache/'
            cachePath = '/home/nicos/amorcache/'
            value = str(subprocess.getoutput('/usr/bin/grep "value" '+cachePath+'nicos-mu/'+year_date)).split('\t')[-1]
            self.mu = float(value)
            value = str(subprocess.getoutput('/usr/bin/grep "value" '+cachePath+'nicos-nu/'+year_date)).split('\t')[-1]
            self.nu = float(value)
            value = str(subprocess.getoutput('/usr/bin/grep "value" '+cachePath+'nicos-kap/'+year_date)).split('\t')[-1]
            self.kap = float(value)
            value = str(subprocess.getoutput('/usr/bin/grep "value" '+cachePath+'nicos-kad/'+year_date)).split('\t')[-1]
            self.kad = float(value)
            value = str(subprocess.getoutput('/usr/bin/grep "value" '+cachePath+'nicos-div/'+year_date)).split('\t')[-1]
            self.div = float(value)
            value = str(subprocess.getoutput('/usr/bin/grep "value" '+cachePath+'nicos-ch1_speed/'+year_date)).split('\t')[-1]
            chSp = float(value)
            self.chPh = np.nan
        # chopper period tau / s (0 if the chopper is not spinning)
        if chSp:
            self.tau = 30. / chSp
        else:
            self.tau = 0
        try: # not yet correctly implemented in nexus template
            # NOTE(review): comparisons against "b'p'" suggest the raw field
            # sometimes holds a stringified bytes literal even after decode();
            # confirm against the nexus template before cleaning this up.
            spin = str(fh['/entry1/polarizer/spin_flipper/spin'][0].decode('utf-8'))
            if spin == "b'p'":
                self.spin = 'p'
            elif spin == "b'm'":
                self.spin = 'm'
            elif spin == "b'up'":
                self.spin = 'p'
            elif spin == "b'down'":
                self.spin = 'm'
            elif spin == '?':
                self.spin = '?'
            else:
                self.spin = 'n'
        except (KeyError, IndexError):
            self.spin = '?'
        fh.close()
#==============================================================================
def resolveNumber(dataPath, ident):
    """Resolve a file identifier to a file number string.

    ``ident == '0'`` or a negative number selects a file relative to the
    newest ``.hdf`` file in *dataPath* (0 = newest, -1 = second newest, ...);
    any other identifier is taken to be the file number itself.

    Fixes over the original: an invalid identifier now exits with an error
    message instead of logging and then crashing with a NameError on the
    undefined ``nnr``; an empty identifier no longer raises IndexError.
    """
    # plain file number: pass it through unchanged
    if ident and ident != '0' and '-' not in ident[0]:
        return ident
    try:
        nnr = int(ident)
    except ValueError:
        logging.error("ERROR: '{}' is no valid file identifier!".format(ident))
        sys.exit(1)
    fileNames = glob.glob(dataPath+'/*.hdf')
    fileNames.sort()
    # nnr <= 0, so this indexes from the end of the sorted list
    fileName = fileNames[nnr-1]
    fileName = fileName.split('/')[-1]
    fileNumber = fileName.split('n')[1].split('.')[0].lstrip('0')
    return fileNumber
#==============================================================================
def fileNameCreator(dataPath, ident):
    """Turn a file identifier into (full file name, file number).

    A non-positive numeric identifier selects relative to the newest
    ``.hdf`` file in *dataPath*; a positive one is formatted into the
    canonical ``amor<year>n<number>.hdf`` name.

    Fixes over the original: an invalid identifier now exits with an error
    instead of crashing with a NameError on the undefined ``nnr``, and the
    glob result is sorted so that the negative-offset selection is
    deterministic (consistent with resolveNumber).
    """
    # NOTE(review): this re-parses sys.argv on every call just to get the year
    clas = commandLineArgs()
    ident = str(ident)
    try:
        nnr = int(ident)
    except ValueError:
        logging.error("ERROR: '{}' is no valid file identifier!".format(ident))
        sys.exit(1)
    if nnr <= 0:
        # sorted so that index -1 (nnr == 0) really is the newest file
        fileName = sorted(glob.glob(dataPath+'/*.hdf'))[nnr-1]
        fileName = fileName.split('/')[-1]
    else:
        fileName = f'amor{clas.year}n{ident:>06s}'
    fileName = fileName.split('.')[0]
    fileName = fileName+'.hdf'
    fileName = dataPath+fileName
    fileNumber = fileName.split('n')[-1].split('.')[0].lstrip('0')
    return fileName, fileNumber
#==============================================================================
class PlotSelection:
    """Histogram the processed event list and render the selected plot.

    Each public method histograms columns of ``data_e`` (layout as produced
    by ``analyse_ev``) and writes a PNG to the module-level ``output``.
    All methods rely on module-level state (mu, nu, tau, countingTime,
    detectorDistance, lamdaMin/Max, thetaMin/Max, qMin/Max, yMin/Max).

    Fixes over the original: ``Iyz`` referenced the undefined name ``I_yt``
    in its log branch (NameError) — it now uses ``I_yz``; the four
    duplicated colormap constructions are factored into ``_colormap``.
    """

    @staticmethod
    def _colormap():
        # gnuplot palette with the lowest bin remapped to a light background
        cmap = mpl.cm.gnuplot(np.arange(256))
        cmap[:1, :] = np.array([256/256, 255/256, 236/256, 1])
        return mpl.colors.ListedColormap(cmap, name='myColorMap', N=cmap.shape[0])

    def headline(self, fileNumber, totalCounts):
        # one-line summary used as (red) plot title
        headLine = "#{} \u03bc={:>1.2f} \u03bd={:>1.2f} {:>12,} cts {:>8.1f} s".format(fileNumber, mu+5e-3, nu+5e-3, totalCounts, countingTime)
        return headLine

    # grids
    def y_grid(self):
        y_grid = np.arange(yMin, yMax+1, 1)
        return y_grid

    def lamda_grid(self):
        # logarithmic wavelength grid with constant relative bin width
        dldl = 0.005 # Delta lambda / lambda
        lMin = max(2, lamdaMin)
        lamda_grid = lMin*(1+dldl)**np.arange(int(np.log(lamdaMax/lMin)/np.log(1+dldl)+1))
        return lamda_grid

    def theta_grid(self):
        # per-blade angular grids, concatenated and clipped at the blade overlap
        det = Detector()
        bladeAngle = np.rad2deg( 2. * np.arcsin(0.5*det.bladeZ / detectorDistance) )
        blade_grid = np.arctan( np.arange(33) * det.dZ / ( detectorDistance + np.arange(33) * det.dX) )
        blade_grid = np.rad2deg(blade_grid)
        stepWidth = blade_grid[1] - blade_grid[0]
        blade_grid = blade_grid - 0.2 * stepWidth
        delta_grid = []
        for b in np.arange(det.nBlades-1):
            delta_grid = np.concatenate((delta_grid, blade_grid), axis=None)
            blade_grid = blade_grid + bladeAngle
            delta_grid = delta_grid[delta_grid<blade_grid[0]-0.5*stepWidth]
        delta_grid = np.concatenate((delta_grid, blade_grid), axis=None)
        theta_grid = nu - mu - np.flip(delta_grid) + 0.5*det.nBlades * bladeAngle
        theta_grid = theta_grid[theta_grid>=thetaMin]
        theta_grid = theta_grid[theta_grid<=thetaMax]
        return theta_grid

    def q_grid(self):
        # logarithmic q_z grid with constant relative bin width
        dqdq = 0.010 # Delta q_z / q_z
        q_grid = qMin*(1.+dqdq)**np.arange(int(np.log(qMax/qMin)/np.log(1+dqdq)))
        return q_grid

    # create PNG with several plots
    def all(self, fileNumber, arg, data_e):
        cmap = self._colormap()
        I_yt, bins_y, bins_t = np.histogram2d(data_e[:,3], data_e[:,7], bins = (self.y_grid(), self.theta_grid()))
        I_lt, bins_l, bins_t = np.histogram2d(data_e[:,6], data_e[:,7], bins = (self.lamda_grid(), self.theta_grid()))
        I_q, bins_q = np.histogram(data_e[:,8], bins = self.q_grid())
        q_lim = 4*np.pi*np.array([ max( np.sin(self.theta_grid()[0]*np.pi/180.)/self.lamda_grid()[-1] , 1e-4 ),
                                   min( np.sin(self.theta_grid()[-1]*np.pi/180.)/self.lamda_grid()[0] , 0.03 )])
        if arg == 'lin':
            vmin = 0
            vmax = max(5, np.max(I_lt), np.max(I_yt))
        else:
            vmin = 0
            vmax = max(1, np.log(np.max(I_lt)+.1)/np.log(10)*1.05, np.log(np.max(I_yt)+.1)/np.log(10)*1.05)
        # I(y, theta)
        fig = plt.figure()
        axs = fig.add_gridspec(2,3)
        myt = fig.add_subplot(axs[0,0])
        myt.set_title('detector area')
        myt.set_xlabel('$y ~/~ \\mathrm{bins}$')
        myt.set_ylabel('$\\theta ~/~ \\mathrm{deg}$')
        if arg == 'lin':
            myt.pcolormesh(bins_y, bins_t, I_yt.T, cmap=cmap, vmin=vmin, vmax=vmax)
        else:
            myt.pcolormesh(bins_y, bins_t, (np.log(I_yt + 5.e-1) / np.log(10.)).T, cmap=cmap, vmin=vmin, vmax=vmax)
        # I(lambda, theta)
        mlt = fig.add_subplot(axs[0,1:])
        mlt.set_title('angle- and energy disperse')
        mlt.set_xlabel('$\\lambda ~/~ \\mathrm{\\AA}$')
        mlt.axes.get_yaxis().set_visible(False)
        if arg == 'lin':
            cb = mlt.pcolormesh(bins_l, bins_t, I_lt.T, cmap=cmap, vmin=vmin, vmax=vmax)
        else:
            cb = mlt.pcolormesh(bins_l, bins_t, (np.log(I_lt + 5.e-1) / np.log(10.)).T, cmap=cmap, vmin=vmin, vmax=vmax)
        # I(q_z)
        lqz = fig.add_subplot(axs[1,:])
        lqz.set_title('$I(q_z)$')
        lqz.set_ylabel('$\\log_{10}(\\mathrm{cnts})$')
        lqz.set_xlabel('$q_z~/~\\mathrm{\\AA}^{-1}$')
        lqz.set_xlim(q_lim)
        if arg == 'lin':
            plt.plot(bins_q[:-1], I_q, color='blue', linewidth=0.5)
        else:
            err_q = np.sqrt(I_q+1)
            low_q = np.where(I_q-err_q>0, I_q-err_q, 0.1)
            plt.fill_between(bins_q[:-1], np.log(low_q)/np.log(10), np.log(I_q+err_q/2)/np.log(10), color='lightgrey')
            plt.plot(bins_q[:-1], np.log(I_q+5e-1)/np.log(10), color='blue', linewidth=0.5)
            lw = I_q[ ((q_lim[0] < bins_q[:-1]) & (bins_q[:-1] < q_lim[1])) ].min()
            plt.ylim(max(-0.1, np.log(lw+.1)/np.log(10)-0.1), )
        #
        headline = self.headline(fileNumber, np.shape(data_e)[0])
        plt.title(headline, loc='left', y=2.8, c='r')
        fig.colorbar(cb, ax=mlt)
        plt.subplots_adjust(hspace=0.6, wspace=0.1)
        plt.savefig(output, format='png', dpi=150)

    # create PNG with one plot
    def Iyz(self, fileNumber, arg, data_e):
        det = Detector()
        cmap = self._colormap()
        z_grid = np.arange(det.nBlades*32)
        I_yz, bins_y, bins_z = np.histogram2d(data_e[:,3], data_e[:,2], bins = (self.y_grid(), z_grid))
        if arg == 'log':
            vmin = 0
            # BUG fix: the original referenced the undefined I_yt here
            vmax = max(1, np.log(np.max(I_yz)+.1)/np.log(10)*1.05)
            plt.pcolormesh(bins_y[:],bins_z[:],(np.log(I_yz+6e-1)/np.log(10)).T, cmap=cmap, vmin=vmin, vmax=vmax)
        else:
            plt.pcolormesh(bins_y[:],bins_z[:],I_yz.T, cmap=cmap)
        plt.xlabel('$y ~/~ \\mathrm{bins}$')
        plt.ylabel('$z ~/~ \\mathrm{bins}$')
        headline = self.headline(fileNumber, np.shape(data_e)[0])
        plt.title(headline, loc='left', y=1.0, c='r')
        plt.colorbar()
        plt.savefig(output, format='png', dpi=150)

    def Ilt(self, fileNumber, arg, data_e) :
        cmap = self._colormap()
        I_lt, bins_l, bins_t = np.histogram2d(data_e[:,6], data_e[:,7], bins = (self.lamda_grid(), self.theta_grid()))
        if arg == 'log':
            vmax = max(1, np.log(np.max(I_lt)+.1)/np.log(10)*1.05 )
            plt.pcolormesh(bins_l, bins_t, (np.log(I_lt+I_lt[I_lt>0].min()/2)/np.log(10.)).T, cmap=cmap, vmin=0, vmax=vmax)
        else :
            vmax = max(np.max(I_lt), 5)
            plt.pcolormesh(bins_l, bins_t, I_lt.T, cmap=cmap, vmin=0, vmax=vmax)
        plt.xlim(lamdaMin, lamdaMax)
        plt.ylim(thetaMin, thetaMax)
        plt.xlabel('$\\lambda ~/~ \\mathrm{\\AA}$')
        plt.ylabel('$\\theta ~/~ \\mathrm{deg}$')
        headline = self.headline(fileNumber, np.shape(data_e)[0])
        plt.title(headline, loc='left', y=1.0, c='r')
        plt.colorbar()
        plt.savefig(output, format='png', dpi=150)

    def Itz(self, fileNumber, arg, data_e):
        det = Detector()
        cmap = self._colormap()
        time_grid = np.arange(0, tau, 0.0005)
        z_grid = np.arange(det.nBlades*32+1)
        I_tz, bins_t, bins_z = np.histogram2d(data_e[:,0], data_e[:,2], bins = (time_grid, z_grid))
        if arg == 'log':
            vmax = max(2., np.log(np.max(I_tz)+.1)/np.log(10)*1.05 )
            plt.pcolormesh(bins_t, bins_z, (np.log(I_tz+5.e-1)/np.log(10.)).T, cmap=cmap, vmin=0, vmax=vmax)
        else :
            vmax = max(np.max(I_tz), 5)
            plt.pcolormesh(bins_t, bins_z, I_tz.T, cmap=cmap, vmin=0, vmax=vmax)
        plt.xlim(0,)
        plt.ylim(0,)
        plt.xlabel('$t ~/~ \\mathrm{s}$')
        plt.ylabel('$z$ pixel row')
        headline = self.headline(fileNumber, np.shape(data_e)[0])
        plt.title(headline, loc='left', y=1.0, c='r')
        plt.colorbar()
        plt.savefig(output, format='png', dpi=150)

    def Iq(self, fileNumber, arg, data_e):
        I_q, bins_q = np.histogram(data_e[:,8], bins = self.q_grid())
        err_q = np.sqrt(I_q+1)
        q_lim = 4*np.pi*np.array([ max( np.sin(self.theta_grid()[0]*np.pi/180.)/self.lamda_grid()[-1] , 1e-4 ),
                                   min( np.sin(self.theta_grid()[-1]*np.pi/180.)/self.lamda_grid()[0] , 0.03 )])
        if arg == 'log':
            low_q = np.where(I_q-err_q>0, I_q-err_q, 0.1)
            plt.fill_between(bins_q[:-1], np.log(low_q)/np.log(10), np.log(I_q+err_q/2)/np.log(10), color='lightgrey')
            plt.plot(bins_q[:-1], np.log(I_q+5e-1)/np.log(10), color='blue', linewidth=0.5)
            lw = I_q[ ((q_lim[0] < bins_q[:-1]) & (bins_q[:-1] < q_lim[1])) ].min()
            plt.ylim(max(-0.1, np.log(lw+.1)/np.log(10)-0.1), )
        else:
            plt.plot(bins_q[:-1], I_q, color='blue', linewidth=0.5)
        plt.ylabel('$\\log_{10}(\\mathrm{cnts})$')
        plt.xlabel('$q_z ~/~ \\mathrm{\\AA}^{-1}$')
        plt.xlim(q_lim)
        headline = self.headline(fileNumber, np.shape(data_e)[0])
        plt.title(headline, loc='left', y=1.0, c='r')
        plt.savefig(output, format='png', dpi=150)

    def Il(self, fileNumber, arg, data_e):
        I_l, bins_l = np.histogram(data_e[:,6], bins = self.lamda_grid())
        if arg == 'lin':
            plt.plot(bins_l[:-1], I_l)
            plt.ylabel('$I ~/~ \\mathrm{cnts}$')
        else:
            plt.plot(bins_l[:-1], np.log(I_l+5.e-1)/np.log(10.))
            plt.ylabel('$\\log_{10} I ~/~ \\mathrm{cnts}$')
        plt.xlabel('$\\lambda ~/~ \\mathrm{\\AA}$')
        headline = self.headline(fileNumber, np.shape(data_e)[0])
        plt.title(headline, loc='left', y=1.0, c='r')
        plt.savefig(output, format='png', dpi=150)

    def It(self, fileNumber, arg, data_e):
        I_t, bins_t = np.histogram(data_e[:,7], bins = self.theta_grid())
        plt.plot( I_t, bins_t[:-1])
        plt.xlabel('$\\mathrm{cnts}$')
        plt.ylabel('$\\theta ~/~ \\mathrm{deg}$')
        headline = self.headline(fileNumber, np.shape(data_e)[0])
        plt.title(headline, loc='left', y=1.0, c='r')
        plt.savefig(output, format='png', dpi=150)

    def tof(self, fileNumber, arg, data_e):
        # plot the tof spectrum three times, shifted by 0, tau and 2 tau
        time_grid = np.arange(0, 1.3*tau, 0.0005)
        I_t, bins_t = np.histogram(data_e[:,0], bins = time_grid)
        if arg == 'lin':
            plt.plot(bins_t[:-1]+tau, I_t)
            plt.plot(bins_t[:-1], I_t)
            plt.plot(bins_t[:-1]+2*tau, I_t)
        else:
            lI_t = np.log(I_t+5.e-1)/np.log(10.)
            plt.plot(bins_t[:-1]+tau, lI_t)
            plt.plot(bins_t[:-1], lI_t)
            plt.plot(bins_t[:-1]+2*tau, lI_t)
        plt.ylabel('log(counts)')
        plt.xlabel('time / s')
        headline = self.headline(fileNumber, np.shape(data_e)[0])
        plt.title(headline, loc='left', y=1.0, c='r')
        plt.savefig(output, format='png')
#==============================================================================
def process(dataPath, ident, clas):
    """Read one AMOR raw file, convert its event stream and render a plot.

    Resolves *ident* to a file in *dataPath*, waits (with retries) until the
    swmr hdf file contains events, converts them via analyse_ev() and
    dispatches to the PlotSelection method named by ``clas.plot[0]``.
    Mutates many module-level globals (mu, nu, tau, lamdaMin/Max, ...) that
    the grid and plotting code reads.
    """
    #================================
    # constants
    hdm = 6.626176e-34/1.674928e-27 # h / m
    #================================
    # instrument specific parameters
    #================================
    global lamdaMin, lamdaMax, qMin, qMax, thetaMin, thetaMax, yMin, yMax
    # defaults
    lamdaCut = 2.5 # Aa used to reshuffle tof
    # data filtering and folding
    #================================
    if clas.lambdaRange:
        lamdaMin = clas.lambdaRange[0]
        lamdaMax = clas.lambdaRange[1]
    else:
        lamdaMin = lamdaCut
    chopperPhase = clas.chopperPhase
    # NOTE(review): this value is overwritten below from tau*chopperPhase/180
    tofOffset = clas.TOFOffset
    thetaMin = clas.thetaRange[0]
    thetaMax = clas.thetaRange[1]
    yMin = clas.yRange[0]
    yMax = clas.yRange[1]
    qMin = clas.qRange[0]
    qMax = clas.qRange[1]
    #================================
    # find and open input file
    global ev
    data_eSum = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0]])
    sumTime = 0
    number = resolveNumber(dataPath, ident)
    fileName, fileNumber = fileNameCreator(dataPath, str(number))
    if verbous:
        logging.info('life_histogrammer processing file ->\033[1m {} \033[0m<-'.format(fileNumber))
    # the file may still be filling: retry up to 6 times (2 s apart) until
    # the event stream contains at least one pulse, then give up
    for i in range(6):
        ev = h5py.File(fileName, 'r', swmr=True)
        try:
            ev['/entry1/Amor/detector/data/event_time_zero'][-1]
            break
        except (KeyError, IndexError):
            ev.close()
            if i < 5:
                if verbous:
                    print("no data yet, retrying ({}) ".format(10-2*i), end='\r')
                time.sleep(2)
                continue
            else:
                if verbous:
                    print("# time-out: no longer waiting for data!\a")
                return
    # get and process data
    meta = Meta(fileName)
    global mu, nu, tau
    # command line values < 98 override the file metadata (99 = "not given")
    if clas.mu < 98.:
        mu = clas.mu
    else:
        mu = meta.mu + clas.muOffset
    if clas.nu < 98.:
        nu = clas.nu
    else:
        nu = meta.nu
    if clas.chopperSpeed:
        tau = 30./ clas.chopperSpeed
    else:
        tau = meta.tau
    # chPh persists across calls via NameError probing; first call takes it
    # from the file metadata — TODO confirm this cross-call behaviour is intended
    try:
        chPh
    except NameError:
        chPh = meta.chPh
    spin = meta.spin
    global countingTime, detectorDistance, chopperDetectorDistance
    detectorDistance = meta.detectorDistance
    chopperDetectorDistance = meta.chopperDetectorDistance
    countingTime = meta.countingTime
    if verbous:
        logging.info(" mu = {:>4.2f} deg, nu = {:>4.2f} deg".format(mu, nu))
    # NOTE(review): Meta.spin yields 'p'/'m'/'?'/'n', never 'u'/'d' — these
    # branches look unreachable; verify against the polarizer convention
    if spin == 'u':
        logging.info(' spin <+|')
    elif spin == 'd':
        logging.info(' spin <-|')
    # lamdaMax may already be set (global, from -l); otherwise derive it from tau
    try: lamdaMax
    except NameError: lamdaMax = lamdaMin + tau * hdm/chopperDetectorDistance * 1e13
    tofOffset = tau * chopperPhase / 180. # mismatch of chopper pulse and time-zero
    tofCut = lamdaCut * chopperDetectorDistance / hdm * 1.e-13 # tof of frame start
    tof_e = np.array(ev['/entry1/Amor/detector/data/event_time_offset'][:], dtype=np.uint64)/1.e9 + tofOffset # tof
    detPixelID_e = np.array(ev['/entry1/Amor/detector/data/event_id'][:], dtype=np.uint64) # pixel index
    dataPacket_p = np.array(ev['/entry1/Amor/detector/data/event_index'][:], dtype=np.uint64) # data packet index
    # fold tof into a single chopper period starting at the frame start
    tof_e = np.where(tof_e<tofCut, tof_e+2.*tau, tof_e)
    tof_e = np.where(tof_e>tau+tofCut, tof_e-tau, tof_e)
    data_e = analyse_ev(detPixelID_e, tof_e, yMin, yMax, thetaMin, thetaMax)
    ev.close()
    data_eSum = np.append(data_eSum, data_e, axis=0)
    sumTime += countingTime
    if verbous:
        logging.info(" total counts = {} in {:6.1f} s".format(np.shape(data_e)[0], sumTime))
    #================================
    # plotting data
    plotType = clas.plot[0]
    try:
        arg = clas.plot[1]
    except IndexError:
        arg = 'def'
    plott = PlotSelection()
    try:
        plotFunction = getattr(plott, plotType)
        plotFunction(fileNumber, arg, data_e)
        plt.close()
    except Exception as e:
        logging.error(f"ERROR: '{plotType}' is no known output format!")
        logging.error(f" original error: {e}")
def commandLineArgs():
    """Define and parse the command line options of events2histogram.

    Returns
    -------
    argparse.Namespace
        The parsed command line arguments with defaults applied.
    """
    msg = ("events2histogram reads the eventstream from an hdf raw file and "
           "creates various histogrammed outputs or plots.")
    clas = argparse.ArgumentParser(description=msg)
    clas.add_argument("-c", "--chopperSpeed",
                      type=float,
                      help="chopper speed in rpm")
    clas.add_argument("-d", "--dataPath",
                      help="relative path to directory with .hdf files")
    clas.add_argument("-f", "--fileIdent",
                      default='0',
                      help="file number or offset (if negative)")
    clas.add_argument("-l", "--lambdaRange",
                      nargs=2,
                      type=float,
                      help="wavelength range to be used")
    clas.add_argument("-M", "--muOffset",
                      default=0.,
                      type=float,
                      help="mu offset")
    # mu/nu default 99. acts as a sentinel: values < 98. given on the command
    # line override the value read from the file metadata (see process()).
    clas.add_argument("-m", "--mu",
                      default=99.,
                      type=float,
                      help="value of mu")
    clas.add_argument("-n", "--nu",
                      default=99.,
                      type=float,
                      help="value of nu")
    clas.add_argument("-P", "--chopperPhase",
                      default=-5.,
                      type=float,
                      help="chopper phase offset")
    clas.add_argument("-p", "--plot",
                      default=['all', 'def'],
                      nargs='+',
                      help="select what to plot or write")
    clas.add_argument("-q", "--qRange",
                      default=[0.005, 0.30],
                      nargs=2,
                      type=float,
                      help="q_z range")
    clas.add_argument("-T", "--TOFOffset",
                      default=0.0,
                      type=float,
                      help="TOF zero offset")
    clas.add_argument("-t", "--thetaRange",
                      default=[-12., 12.],
                      nargs=2,
                      type=float,
                      help="theta range to be used")
    clas.add_argument("-Y", "--year",
                      # use the datetime API rather than slicing the string
                      # representation of today's date
                      default=str(datetime.today().year),
                      help="year, the measurement was performed")
    clas.add_argument("-y", "--yRange",
                      default=[0, 63],
                      nargs=2,
                      type=int,
                      help="detector y range to be used")
    return clas.parse_args()
#==============================================================================
def get_dataPath(clas):
    """Determine the directory holding the .hdf raw files.

    The path given with -d/--dataPath takes precedence; otherwise the
    conventional locations './raw' and '../raw' are probed in turn.
    Exits the program with a message if no usable directory is found.
    """
    if clas.dataPath:
        chosen = clas.dataPath + '/'
        if not os.path.exists(chosen):
            sys.exit('# *** the directory "'+chosen+'" does not exist ***')
        return chosen
    # fall back to the standard relative locations
    for candidate in ('./raw', '../raw'):
        if os.path.exists(candidate):
            return candidate + '/'
    sys.exit('# *** please provide the path to the .hdf data files (-d <path>, default is "./raw")')
#==============================================================================
def get_directDataPath(clas):
    """Derive today's proposal data directory from the nicos cache.

    NOTE(review): relies on instrument-specific absolute paths and on
    /usr/bin/grep, so this only works on the AMOR acquisition machine —
    it cannot be exercised elsewhere.
    """
    #dataPath = clas.dataPath + '/'
    # e.g. '2025' and '2025/01-30' built from today's ISO date string
    year = str(datetime.today()).split('-')[0]
    year_date = str(datetime.today()).split(' ')[0].replace("-", "/", 1)
    # grep the proposal entry (key followed by a literal TAB) out of today's
    # nicos experiment cache file and extract the quoted proposal number.
    # NOTE(review): the `[-1]` takes the LAST CHARACTER of the grep output
    # before splitting on "'" — presumably the last *line* was intended;
    # verify on the instrument before relying on this.
    pNr = str(subprocess.getoutput('/usr/bin/grep "proposal\t" /home/amor/nicosdata/amor/cache/nicos-exp/'+year_date)[-1]).split('\'')[1]
    dataPath = '/home/amor/nicosdata/amor/data/'+year+'/'+pNr+'/'
    if not os.path.exists(dataPath):
        sys.exit('# *** the directory "'+dataPath+'" does not exist ***')
    return dataPath
#==============================================================================
def main():
    """Entry point: configure logging, parse the command line and run the
    event processing for the selected file."""
    global verbous, output, dataPath
    # module-level state consumed by process() and the plotting helpers
    logging.basicConfig(level=logging.INFO, format='# %(message)s')
    verbous = True
    output = 'life_plot.png'
    arguments = commandLineArgs()
    dataPath = get_dataPath(arguments)
    process(dataPath, arguments.fileIdent, arguments)
#==============================================================================
#==============================================================================
# run only when executed as a script, not when imported as a module
if __name__ == "__main__":
    main()

6
pyproject.toml Normal file
View File

@@ -0,0 +1,6 @@
[build-system]
requires = [
"setuptools>=42",
"wheel"
]
build-backend = "setuptools.build_meta"

32
setup.cfg Normal file
View File

@@ -0,0 +1,32 @@
[bdist_wheel]
universal = 1
[metadata]
name = amor_eos
version = attr: libeos.__version__
author = Jochen Stahn - Paul Scherrer Institut
author_email = jochen.stahn@psi.ch
description = EOS reflectometry reduction for AMOR instrument
long_description = Reduces data obtained by focusing time of flight neutron reflectivity to full reflectivity curve.
license = MIT
classifiers =
Programming Language :: Python :: 3
License :: OSI Approved :: MIT License
Operating System :: OS Independent
Topic :: Scientific/Engineering
Development Status :: 5 - Production/Stable
[options]
python_requires = >=3.8
packages =
libeos
scripts =
eos.py
install_requires =
numpy
h5py
orsopy
numba
[project.urls]
Homepage = "https://github.com/jochenstahn/amor"

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -21,15 +21,14 @@ class FullAmorTest(TestCase):
self.pr.enable()
self.reader_config = options.ReaderConfig(
year=2023,
dataPath=os.path.join('..', "test_data"),
raw=(os.path.join('..', "test_data"),)
rawPath=(os.path.join('..', "test_data"),),
)
def tearDown(self):
self.pr.disable()
for fi in ['test.Rqz.ort', '614.norm']:
try:
os.unlink(os.path.join(self.reader_config.dataPath, fi))
os.unlink(os.path.join(self.reader_config.rawPath[0], fi))
except FileNotFoundError:
pass
@@ -38,6 +37,8 @@ class FullAmorTest(TestCase):
experiment_config = options.ExperimentConfig(
chopperPhase=-13.5,
chopperPhaseOffset=-5,
monitorType=options.Defaults.monitorType,
lowCurrentThreshold=options.Defaults.lowCurrentThreshold,
yRange=(11., 41.),
lambdaRange=(2., 15.),
qzRange=(0.005, 0.30),
@@ -60,7 +61,8 @@ class FullAmorTest(TestCase):
)
output_config = options.OutputConfig(
outputFormats=["Rqz.ort"],
outputName='test'
outputName='test',
outputPath=os.path.join('..', 'test_results'),
)
config=options.EOSConfig(self.reader_config, experiment_config, reduction_config, output_config)
# run three times to get similar timing to noslicing runs
@@ -75,6 +77,8 @@ class FullAmorTest(TestCase):
experiment_config = options.ExperimentConfig(
chopperPhase=-13.5,
chopperPhaseOffset=-5,
monitorType=options.Defaults.monitorType,
lowCurrentThreshold=options.Defaults.lowCurrentThreshold,
yRange=(11., 41.),
lambdaRange=(2., 15.),
qzRange=(0.005, 0.30),
@@ -91,12 +95,13 @@ class FullAmorTest(TestCase):
thetaRangeR=(-12., 12.),
fileIdentifier=["610", "611", "608,612-613", "609"],
scale=[1],
normalisationFileIdentifier=["614"],
normalisationFileIdentifier=["608"],
autoscale=(True, True)
)
output_config = options.OutputConfig(
outputFormats=["Rqz.ort"],
outputName='test'
outputName='test',
outputPath=os.path.join('..', 'test_results'),
)
config=options.EOSConfig(self.reader_config, experiment_config, reduction_config, output_config)
reducer = reduction.AmorReduction(config)

View File

@@ -12,7 +12,7 @@ a = Analysis(
runtime_hooks=[],
excludes=[],
noarchive=False,
optimize=0,
optimize=1,
)
pyz = PYZ(a.pure)

51
windows_folder.spec Normal file
View File

@@ -0,0 +1,51 @@
# -*- mode: python ; coding: utf-8 -*-
from PyInstaller.utils.hooks import collect_all
datas = []
binaries = []
hiddenimports = []
tmp_ret = collect_all('tzdata')
datas += tmp_ret[0]; binaries += tmp_ret[1]; hiddenimports += tmp_ret[2]
a = Analysis(
['eos.py'],
pathex=[],
binaries=[],
datas=[],
hiddenimports=[],
hookspath=[],
hooksconfig={},
runtime_hooks=[],
excludes=[],
noarchive=False,
optimize=1,
)
pyz = PYZ(a.pure)
exe = EXE(
pyz,
a.scripts,
[],
exclude_binaries=True,
name='eos',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
console=True,
disable_windowed_traceback=False,
argv_emulation=False,
target_arch=None,
codesign_identity=None,
entitlements_file=None,
)
coll = COLLECT(
exe,
a.binaries,
a.datas,
strip=False,
upx=True,
upx_exclude=[],
name='eos',
)