27 Commits

Author SHA1 Message Date
bada209c1e minor fix of release workflow and add github release
Some checks failed
Release / build-ubuntu-latest (push) Failing after 2s
Release / release (push) Has been skipped
2024-10-30 14:31:06 +01:00
d6f699e10c minor fix of release workflow 2024-10-30 14:28:34 +01:00
8fa11fa05a add configuration for pypi release 2024-10-30 14:07:48 +01:00
c7b7f38a6d make sure the right pytest is used 2024-10-30 13:34:45 +01:00
5d50e92521 try to use older Ubuntu for failing tests, could not reproduce locally 2024-10-30 13:26:03 +01:00
aa57d75751 don't fail action when just one python version fails 2024-10-30 13:08:50 +01:00
409e62b63f set branch to run tests to main 2024-10-30 13:02:34 +01:00
1c459ae5d3 install correct requirements file in workflow 2024-10-30 13:01:56 +01:00
12d0370807 add test data and pytest action to repository 2024-10-30 12:59:00 +01:00
b1e7b68a21 update tests to new configuration parameters 2024-10-30 12:32:24 +01:00
4134e106ff improved algorythm for pulse generation 2024-10-25 15:35:47 +02:00
5cce1bedc9 fixed a bug in the time-monitor calculation and added units 2024-10-24 08:54:43 +02:00
52bd731a3c set startTime to hdf entry start_time 2024-10-23 15:32:46 +02:00
fbb44d435b changed keys for monitorType 2024-10-22 17:46:31 +02:00
182cc5ab3d added TODO for the resolution calculation 2024-10-22 17:05:09 +02:00
9f2bc034e7 debugging output for neutron pulse times and proton current 2024-10-22 15:47:25 +02:00
7f2d5cf765 added monitorType 2024-10-22 09:43:44 +02:00
772d921f03 Merge branch 'main' of https://github.com/jochenstahn/amor 2024-10-10 14:09:51 +02:00
f5ce5e9d73 format of info output for proton current adapted 2024-10-10 08:06:07 +02:00
7fc64a58f0 sorted outputPath to output 2024-10-05 13:57:39 +02:00
b848d66290 changed path variable names + new option lowCurrentThreshold 2024-10-04 15:10:11 +02:00
1f394b0e86 replaced scipy interpolation by dedicated routine (by Artur) 2024-09-27 16:27:50 +02:00
69620d0f16 removed doupble normalisation.... 2024-09-26 11:04:51 +02:00
0e727ce591 fixed monitor for normalisation measurement 2024-09-26 10:24:37 +02:00
f2ebe5ce0c new method for time / protonCharge normalisation - compatible with old data files 2024-09-26 10:09:15 +02:00
acc508e9ea timestamps are now all of type np.int64 with unit ns 2024-09-25 16:35:31 +02:00
e6578d5bf2 fix merge error missing method calls 2024-09-25 14:06:55 +02:00
18 changed files with 331 additions and 133 deletions

68
.github/workflows/release.yml vendored Normal file
View File

@@ -0,0 +1,68 @@
name: Release

# Controls when the action will run.
on:
  # Triggers the workflow on any pushed tag.
  push:
    tags:
      - "*"
  # Allows you to run this workflow manually from the Actions tab.
  workflow_dispatch:
    inputs:
      build-items:
        description: 'Items to be built'
        required: true
        default: 'all'
        type: choice
        options:
          - all
          - windows
          - linux
          - mac

# ncipollo/release-action creates a GitHub release, which requires
# write access to repository contents for the workflow's GITHUB_TOKEN.
permissions:
  contents: write

jobs:
  build-ubuntu-latest:
    runs-on: ubuntu-latest
    # On manual runs, build only when 'all' or 'linux' was selected;
    # tag pushes always build.
    if: ${{ (github.event_name != 'workflow_dispatch') || (contains(fromJson('["all", "linux"]'), github.event.inputs.build-items)) }}
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: '3.11'
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install build
          pip install -r requirements.txt
      - name: Build PyPI package
        run: |
          python3 -m build
      - name: Upload to PyPI
        # Manual dispatch runs are for testing: never upload them to PyPI.
        if: github.event_name != 'workflow_dispatch'
        uses: pypa/gh-action-pypi-publish@release/v1
        with:
          user: __token__
          password: ${{ secrets.PYPI_TOKEN }}
          skip-existing: true
      - name: Archive distribution
        uses: actions/upload-artifact@v4
        with:
          name: linux-dist
          path: |
            dist/*.tar.gz

  release:
    if: github.event_name != 'workflow_dispatch'
    runs-on: ubuntu-latest
    needs: [build-ubuntu-latest]
    steps:
      - uses: actions/checkout@v4
      - uses: actions/download-artifact@v4
        with:
          name: linux-dist
      - uses: ncipollo/release-action@v1
        with:
          artifacts: "amor*.tar.gz"
          token: ${{ secrets.GITHUB_TOKEN }}
          allowUpdates: true

37
.github/workflows/unit_tests.yml vendored Normal file
View File

@@ -0,0 +1,37 @@
name: Unit Testing

# Run the test suite on every push to main and on every pull request.
on:
  push:
    branches:
      - main
  pull_request:
  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:

jobs:
  build:
    runs-on: ubuntu-22.04
    strategy:
      matrix:
        # 3.10+ must be quoted, otherwise YAML parses them as floats (3.10 -> 3.1).
        python-version: [3.8, 3.9, '3.10', '3.11', '3.12']
      # Let the remaining Python versions finish even if one of them fails.
      fail-fast: false
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install pytest
          pip install -r requirements.txt
      - name: Test with pytest
        run: |
          cd tests
          python -m pytest --pyargs .

2
.gitignore vendored
View File

@@ -5,6 +5,6 @@
__pycache__
raw
.idea
test_data
test_results
build
dist

View File

@@ -2,5 +2,5 @@
Package to handle data reduction at AMOR instrument to be used by eos.py script.
"""
__version__ = '2.1.0'
__date__ = '2024-08-25'
__version__ = '2.1.1'
__date__ = '2024-10-30'

View File

@@ -23,28 +23,32 @@ def commandLineArgs():
default = Defaults.normalisationFileIdentifier,
nargs = '+',
help = "file number(s) of normalisation measurement")
input_data.add_argument("-nm", "--normalisationMethod",
default = Defaults.normalisationMethod,
help = "normalisation method: [o]verillumination, [u]nderillumination, [d]irect_beam")
input_data.add_argument("--raw",
input_data.add_argument("-rp", "--rawPath",
type = str,
default = Defaults.raw,
help = "relative path to directory with .hdf files")
input_data.add_argument("-d", "--dataPath",
type = str,
default = Defaults.dataPath,
help = "relative path for output")
default = Defaults.rawPath,
help = "path to directory with .hdf files")
input_data.add_argument("-Y", "--year",
default = Defaults.year,
type = int,
help = "year the measurement was performed")
input_data.add_argument("-sub", "--subtract",
help = "R(q_z) curve to be subtracted (in .Rqz.ort format)")
input_data.add_argument("-nm", "--normalisationMethod",
default = Defaults.normalisationMethod,
help = "normalisation method: [o]verillumination, [u]nderillumination, [d]irect_beam")
input_data.add_argument("-mt", "--monitorType",
type = str,
default = Defaults.monitorType,
help = "one of [p]rotonCurrent, [t]ime or [n]eutronMonitor")
output = clas.add_argument_group('output')
output.add_argument("-o", "--outputName",
default = Defaults.outputName,
help = "output file name (without suffix)")
output.add_argument("-op", "--outputPath",
type = str,
default = Defaults.outputPath,
help = "path for output")
output.add_argument("-of", "--outputFormat",
nargs = '+',
default = Defaults.outputFormat,
@@ -98,6 +102,11 @@ def commandLineArgs():
nargs = 2,
type = float,
help = "q_z range")
masks.add_argument("-ct", "--lowCurrentThreshold",
default = Defaults.lowCurrentThreshold,
type = float,
help = "proton current threshold for discarding neutron pulses")
overwrite = clas.add_argument_group('overwrite')
overwrite.add_argument("-cs", "--chopperSpeed",
@@ -172,8 +181,7 @@ def command_line_options():
reader_config = ReaderConfig(
year = clas.year,
raw = clas.raw,
dataPath = clas.dataPath
rawPath = clas.rawPath,
)
experiment_config = ExperimentConfig(
sampleModel = clas.sampleModel,
@@ -182,10 +190,12 @@ def command_line_options():
yRange = clas.yRange,
lambdaRange = clas.lambdaRange,
qzRange = clas.qzRange,
lowCurrentThreshold = clas.lowCurrentThreshold,
incidentAngle = clas.incidentAngle,
mu = clas.mu,
nu = clas.nu,
muOffset = clas.muOffset
muOffset = clas.muOffset,
monitorType = clas.monitorType,
)
reduction_config = ReductionConfig(
qResolution = clas.qResolution,
@@ -202,7 +212,8 @@ def command_line_options():
)
output_config = OutputConfig(
outputFormats = output_format_list(clas.outputFormat),
outputName = clas.outputName
outputName = clas.outputName,
outputPath = clas.outputPath,
)
return EOSConfig(reader_config, experiment_config, reduction_config, output_config)

View File

@@ -7,7 +7,6 @@ from typing import List
import h5py
import numpy as np
import scipy as sp
from orsopy import fileio
from orsopy.fileio.model_language import SampleModel
@@ -21,6 +20,18 @@ try:
except Exception:
nb_helpers = None
def get_current_per_pulse(pulseTimeS, currentTimeS, currents):
    """Assign to every neutron pulse the proton-current reading valid at its time.

    Performs a 'previous-value' lookup: each pulse gets the latest current
    reading taken at or before the pulse time.

    :param pulseTimeS: sorted pulse times (same time base/unit as currentTimeS)
    :param currentTimeS: sorted timestamps of the proton-current readings
    :param currents: current values belonging to currentTimeS
    :return: float array with one current value per pulse; pulses before the
             first reading get 0.
    """
    # pad: zero current before the first reading, and a sentinel time after the
    # last pulse so the lookup below can never run past the end of the array
    currentTimeS = np.hstack([[0], currentTimeS, [pulseTimeS[-1]+1]])
    currents = np.hstack([[0], currents])
    # index of the last current reading at or before each pulse time;
    # replaces the previous scalar loop whose `if` advanced at most one
    # reading per pulse and therefore returned a stale current whenever
    # several readings fell between two consecutive pulses
    idx = np.searchsorted(currentTimeS, pulseTimeS, side='right') - 1
    return np.asarray(currents[idx], dtype=float)
class AmorData:
"""read meta-data and event streams from .hdf file(s), apply filters and conversions"""
@@ -38,19 +49,20 @@ class AmorData:
kap: float
lambdaMax: float
lambda_e: np.ndarray
monitor: float
#monitor: float
mu: float
nu: float
tau: float
tofCut: float
start_date: str
monitorType: str
seriesStartTime = None
#-------------------------------------------------------------------------------------------------
def __init__(self, header: Header, reader_config: ReaderConfig, config: ExperimentConfig,
short_notation:str, norm=False):
self.startTime = reader_config.startTime
#self.startTime = reader_config.startTime
self.header = header
self.config = config
self.reader_config = reader_config
@@ -68,22 +80,26 @@ class AmorData:
else:
self.readHeaderInfo = True
_detZ_e = []
_lamda_e = []
_wallTime_e = []
_monitor = 0
#_current = []
_detZ_e = []
_lamda_e = []
_wallTime_e = []
#_monitor = 0
_monitorPerPulse = []
_pulseTimeS = []
for file in self.file_list:
self.read_individual_data(file, norm)
_detZ_e = np.append(_detZ_e, self.detZ_e)
_lamda_e = np.append(_lamda_e, self.lamda_e)
_wallTime_e = np.append(_wallTime_e, self.wallTime_e)
_monitor += self.monitor
self.detZ_e = _detZ_e
self.lamda_e = _lamda_e
self.wallTime_e = _wallTime_e
self.monitor = _monitor
logging.warning(f' {self.monitorType} monitor = {self.monitor:9.3f}')
_detZ_e = np.append(_detZ_e, self.detZ_e)
_lamda_e = np.append(_lamda_e, self.lamda_e)
_wallTime_e = np.append(_wallTime_e, self.wallTime_e)
_monitorPerPulse = np.append(_monitorPerPulse, self.monitorPerPulse)
_pulseTimeS = np.append(_pulseTimeS, self.pulseTimeS)
#_monitor += self.monitor
self.detZ_e = _detZ_e
self.lamda_e = _lamda_e
self.wallTime_e = _wallTime_e
#self.monitor = _monitor
self.monitorPerPulse = _monitorPerPulse
self.pulseTimeS = _pulseTimeS
#-------------------------------------------------------------------------------------------------
#def path_generator(self, number):
@@ -105,7 +121,7 @@ class AmorData:
def path_generator(self, number):
fileName = f'amor{self.reader_config.year}n{number:06d}.hdf'
path = ''
for rawd in self.reader_config.raw:
for rawd in self.reader_config.rawPath:
if os.path.exists(os.path.join(rawd,fileName)):
path = rawd
break
@@ -113,7 +129,7 @@ class AmorData:
if os.path.exists(f'/afs/psi.ch/project/sinqdata/{self.reader_config.year}/amor/{int(number/1000)}/{fileName}'):
path = f'/afs/psi.ch/project/sinqdata/{self.reader_config.year}/amor/{int(number/1000)}'
else:
sys.exit(f'# ERROR: the file {fileName} can not be found in {self.reader_config.raw}!')
sys.exit(f'# ERROR: the file {fileName} can not be found in {self.reader_config.rawPath}')
return os.path.join(path, fileName)
#-------------------------------------------------------------------------------------------------
def expand_file_list(self, short_notation):
@@ -153,7 +169,7 @@ class AmorData:
if self.readHeaderInfo:
self.read_header_info()
logging.warning(f' data from file: {fileName}')
logging.warning(f' from file: {fileName}')
self.read_individual_header()
# add header content
@@ -183,12 +199,19 @@ class AmorData:
self.sort_pulses()
self.define_monitor()
# sort the events into the related pulses
self.associate_pulse_with_monitor()
self.extract_walltime(norm)
# following lines: debugging output to trace the time-offset of proton current and neutron pulses
if self.config.monitorType == 'p':
cpp, t_bins = np.histogram(self.wallTime_e, self.pulseTimeS)
np.savetxt('tme.hst', np.vstack((self.pulseTimeS[:-1], cpp, self.monitorPerPulse[:-1])).T)
self.average_events_per_pulse()
self.monitor_threshold()
self.filter_strange_times()
self.merge_frames()
@@ -204,51 +227,39 @@ class AmorData:
logging.info(f' number of events: total = {totalNumber:7d}, filtered = {np.shape(self.lamda_e)[0]:7d}')
def sort_pulses(self):
chopperPeriod = int(2*self.tau*1e9)
chopperPeriod = np.int64(2*self.tau*1e9)
pulseTime = np.sort(self.dataPacketTime_p)
pulseTime = pulseTime[np.abs(pulseTime[:]-np.roll(pulseTime, 1)[:])>5]
if self.seriesStartTime is None:
self.seriesStartTime = float(pulseTime[0])
pulseTime -= self.seriesStartTime
self.stopTime = float(pulseTime[-1])
pulseTime -= np.int64(self.seriesStartTime)
self.stopTime = pulseTime[-1]
# fill in missing pulse times
# TODO: check for real end time
self.pulseTimeS = np.array([pulseTime[0]])
for tt in pulseTime[1:]:
nxt = self.pulseTimeS[-1] + chopperPeriod
while abs(tt - nxt) > self.tau*1e9:
pulseTime = pulseTime[pulseTime>=0]
firstPulse = pulseTime[0] % np.int64(self.tau*2e9)
self.pulseTimeS = np.array([], dtype=np.int64)
nxt = firstPulse
for tt in pulseTime:
while tt - nxt > self.tau*1e9:
self.pulseTimeS = np.append(self.pulseTimeS, nxt)
nxt += chopperPeriod
self.pulseTimeS = np.append(self.pulseTimeS, tt)
# remove 'partially filled' pulses
self.pulseTimeS = self.pulseTimeS[1:-1]
nxt = self.pulseTimeS[-1] + chopperPeriod
def associate_pulse_with_current(self):
if self.monitorType == 'protonCharge':
lowCurrentThreshold = 0.05 # mA
self.currentTime -= self.seriesStartTime
currentInterpolator = sp.interpolate.interp1d(self.currentTime, self.current, kind='previous', bounds_error=False, fill_value=0)
self.charge = np.array(currentInterpolator(self.pulseTimeS) * 2*self.tau *1e-3, dtype=float)
def associate_pulse_with_monitor(self):
if self.config.monitorType == 'p': # protonCharge
self.currentTime -= np.int64(self.seriesStartTime)
self.currentTime -= np.int64(16e9) # time offset of proton current signal
self.monitorPerPulse = get_current_per_pulse(self.pulseTimeS, self.currentTime, self.current) * 2*self.tau * 1e-3
# filter low-current pulses
self.charge = np.where(self.charge > 2*self.tau *lowCurrentThreshold, self.charge, 0)
# remove 'partially filled' pulses
self.charge[0] = 0
self.charge[-1] = 0
def define_monitor(self):
if self.monitorType == 'protonCharge':
chargeSum = np.sum(self.charge)
logging.warning(f' proton charge = {chargeSum:9.3f} mC')
self.monitor = chargeSum
elif self.monitorType == 'countingTime':
self.monitor = self.stopTime - self.seriesStartTime
else:
self.monitor = 1.
self.monitorPerPulse = np.where(self.monitorPerPulse > 2*self.tau * self.config.lowCurrentThreshold * 1e-3, self.monitorPerPulse, 0)
elif self.config.monitorType == 't': # countingTime
self.monitorPerPulse = np.ones(np.shape(self.pulseTimeS)[0])*self.tau
else:
self.monitorPerPulse = 1./np.shape(pulseTimeS)[1]
def extract_walltime(self, norm):
#self.dataPacketTime_p = np.array(self.dataPacketTime_p, dtype=float) / 1e9
if nb_helpers:
self.wallTime_e = nb_helpers.extract_walltime(self.tof_e, self.dataPacket_p, self.dataPacketTime_p)
else:
@@ -256,19 +267,25 @@ class AmorData:
for i in range(len(self.dataPacket_p)-1):
self.wallTime_e[self.dataPacket_p[i]:self.dataPacket_p[i+1]] = self.dataPacketTime_p[i]
self.wallTime_e[self.dataPacket_p[-1]:] = self.dataPacketTime_p[-1]
#if not self.startTime and not norm:
# self.startTime = self.wallTime_e[0]
self.wallTime_e -= np.int64(self.seriesStartTime)
logging.debug(f' wall time from {self.wallTime_e[0]/1e9} to {self.wallTime_e[-1]/1e9}')
logging.debug(f' wall time from {self.wallTime_e[0]/1e9:6.1f} s to {self.wallTime_e[-1]/1e9:6.1f} s')
def average_events_per_pulse(self):
if self.config.monitorType == 'p':
for i, time in enumerate(self.pulseTimeS):
events = np.shape(self.wallTime_e[self.wallTime_e == time])[0]
#print(f' {i:6.0f} {events:6.0f} {self.monitorPerPulse[i]:6.2f}')
def monitor_threshold(self):
goodTimeS = self.pulseTimeS[self.charge!=0]
filter_e = np.where(np.isin(self.wallTime_e, goodTimeS), True, False)
self.tof_e = self.tof_e[filter_e]
self.pixelID_e = self.pixelID_e[filter_e]
self.wallTime_e = self.wallTime_e[filter_e]
logging.warning(f' rejected {np.shape(self.charge)[0]-np.shape(goodTimeS)[0]} pulses due to low beam current')
logging.warning(f' rejected {np.shape(filter_e)[0]-np.shape(self.tof_e)[0]} events due to low beam current')
if self.config.monitorType == 'p': # fix to check for file compatibility
goodTimeS = self.pulseTimeS[self.monitorPerPulse!=0]
filter_e = np.where(np.isin(self.wallTime_e, goodTimeS), True, False)
self.tof_e = self.tof_e[filter_e]
self.pixelID_e = self.pixelID_e[filter_e]
self.wallTime_e = self.wallTime_e[filter_e]
logging.info(f' rejected {np.shape(self.monitorPerPulse)[0]-np.shape(goodTimeS)[0]} out of {np.shape(self.monitorPerPulse)[0]} pulses')
logging.info(f' with {np.shape(filter_e)[0]-np.shape(self.tof_e)[0]} events due to low beam current')
logging.info(f' average counts per pulse = {np.shape(self.tof_e)[0] / np.shape(goodTimeS[goodTimeS!=0])[0]:7.1f}')
def filter_qz_range(self, norm):
if self.config.qzRange[1]<0.3 and not norm:
@@ -350,17 +367,20 @@ class AmorData:
self.tof_e = np.array(self.hdf['/entry1/Amor/detector/data/event_time_offset'][:])/1.e9
self.pixelID_e = np.array(self.hdf['/entry1/Amor/detector/data/event_id'][:], dtype=np.int64)
self.dataPacket_p = np.array(self.hdf['/entry1/Amor/detector/data/event_index'][:], dtype=np.uint64)
#self.dataPacketTime_p = np.array(self.hdf['/entry1/Amor/detector/data/event_time_zero'][:], dtype=np.uint64)/1e9
self.dataPacketTime_p = np.array(self.hdf['/entry1/Amor/detector/data/event_time_zero'][:], dtype=float)
try:
self.currentTime = np.array(self.hdf['entry1/Amor/detector/proton_current/time'][:], dtype=np.int64)
self.current = np.array(self.hdf['entry1/Amor/detector/proton_current/value'][:,0], dtype=float)
if len(self.current)>0:
self.monitorType = 'protonCharge'
else:
self.monitorType = 'countingTime'
except(KeyError, IndexError):
self.monitorType = 'countingTime'
self.dataPacketTime_p = np.array(self.hdf['/entry1/Amor/detector/data/event_time_zero'][:], dtype=np.int64)
if self.config.monitorType in ['auto', 'p']:
try:
self.currentTime = np.array(self.hdf['entry1/Amor/detector/proton_current/time'][:], dtype=np.int64)
self.current = np.array(self.hdf['entry1/Amor/detector/proton_current/value'][:,0], dtype=float)
if len(self.current)>4:
self.config.monitorType = 'p'
else:
self.config.monitorType = 't'
except(KeyError, IndexError):
self.config.monitorType = 't'
else:
self.config.monitorType = 't'
#TODO: protonMonitor
def read_individual_header(self):
self.chopperDistance = float(np.take(self.hdf['entry1/Amor/chopper/pair_separation'], 0))
@@ -405,6 +425,9 @@ class AmorData:
self.nu = self.config.nu
self.fileDate = datetime.fromisoformat( self.hdf['/entry1/start_time'][0].decode('utf-8') )
self.startTime = np.int64( self.fileDate.timestamp() * 1e9 )
if self.seriesStartTime is None:
self.seriesStartTime = self.startTime
def read_header_info(self):
# read general information and first data set

View File

@@ -11,12 +11,11 @@ def merge_frames(tof_e, tofCut, tau, total_offset):
tof_e_out[ti] = ((tof_e[ti]-dt)%tau)+total_offset # tof shifted to 1 frame
return tof_e_out
@nb.jit(nb.float64[:](nb.float64[:], nb.uint64[:], nb.float64[:]),
@nb.jit(nb.float64[:](nb.float64[:], nb.uint64[:], nb.int64[:]),
nopython=True, parallel=True, cache=True)
def extract_walltime(tof_e, dataPacket_p, dataPacketTime_p):
# assigning every event the wall time of the event packet (absolute time of pulse ?start?)
totalNumber = np.shape(tof_e)[0]
#wallTime_e = np.empty(totalNumber, dtype=np.float64)
wallTime_e = np.empty(totalNumber, dtype=np.int64)
for i in nb.prange(len(dataPacket_p)-1):
for j in range(dataPacket_p[i], dataPacket_p[i+1]):

View File

@@ -5,16 +5,18 @@ from dataclasses import dataclass, field
from typing import Optional, Tuple
from datetime import datetime
from os import path
import numpy as np
import logging
class Defaults:
# fileIdentifier
dataPath = '.'
raw = ['.', path.join('.','raw'), path.join('..','raw'), path.join('..','..','raw')]
outputPath = '.'
rawPath = ['.', path.join('.','raw'), path.join('..','raw'), path.join('..','..','raw')]
year = datetime.now().year
normalisationFileIdentifier = []
normalisationMethod = 'o'
monitorType = 'auto'
# subtract
outputName = "fromEOS"
outputFormat = ['Rqz.ort']
@@ -35,6 +37,7 @@ class Defaults:
mu = 0
nu = 0
sampleModel = None
lowCurrentThreshold = 50
#
@@ -42,8 +45,7 @@ class Defaults:
@dataclass
class ReaderConfig:
year: int
dataPath: str
raw: Tuple[str]
rawPath: Tuple[str]
startTime: Optional[float] = 0
@dataclass
@@ -53,6 +55,8 @@ class ExperimentConfig:
yRange: Tuple[float, float]
lambdaRange: Tuple[float, float]
qzRange: Tuple[float, float]
monitorType: str
lowCurrentThreshold: float
sampleModel: Optional[str] = None
chopperPhaseOffset: float = 0
@@ -80,6 +84,7 @@ class ReductionConfig:
class OutputConfig:
outputFormats: list
outputName: str
outputPath: str
@dataclass
class EOSConfig:
@@ -105,10 +110,8 @@ class EOSConfig:
inpt += f' -Y {self.reader.year}'
else:
inpt += f' -Y {datetime.now().year}'
if self.reader.dataPath != '.':
inpt += f' --dataPath {self.reader.dataPath}'
#if self.reader.raw != '.':
# inpt = f' --rawd {self.reader.raw}'
if np.shape(self.reader.rawPath)[0] == 1:
inpt += f' --rawPath {self.reader.rawPath}'
if self.reduction.subtract:
inpt += f' -subtract {self.reduction.subtract}'
if self.reduction.normalisationFileIdentifier:
@@ -119,6 +122,8 @@ class EOSConfig:
otpt = ''
if self.reduction.qResolution:
otpt += f' -r {self.reduction.qResolution}'
if self.output.outputPath != '.':
inpt += f' --outputPath {self.output.outputPath}'
if self.output.outputName:
otpt += f' -o {self.output.outputName}'
if self.output.outputFormats != ['Rqz.ort']:
@@ -130,9 +135,9 @@ class EOSConfig:
if self.experiment.lambdaRange!= Defaults.lambdaRange:
mask += f' -l {" ".join(str(ff) for ff in self.experiment.lambdaRange)}'
if self.reduction.thetaRange != Defaults.thetaRange:
mask += f' -T {" ".join(str(ff) for ff in self.reduction.thetaRange)}'
mask += f' -t {" ".join(str(ff) for ff in self.reduction.thetaRange)}'
elif self.reduction.thetaRangeR != Defaults.thetaRangeR:
mask += f' -t {" ".join(str(ff) for ff in self.reduction.thetaRangeR)}'
mask += f' -T {" ".join(str(ff) for ff in self.reduction.thetaRangeR)}'
if self.experiment.qzRange!= Defaults.qzRange:
mask += f' -q {" ".join(str(ff) for ff in self.experiment.qzRange)}'

View File

@@ -22,10 +22,12 @@ class AmorReduction:
self.header = Header()
self.header.reduction.call = config.call_string()
self.monitorUnit = {'n': 'cnts', 'p': 'mC', 't': 's'}
def reduce(self):
if not os.path.exists(f'{self.reader_config.dataPath}'):
logging.debug(f'Creating destination path {self.reader_config.dataPath}')
os.system(f'mkdir {self.reader_config.dataPath}')
if not os.path.exists(f'{self.output_config.outputPath}'):
logging.debug(f'Creating destination path {self.output_config.outputPath}')
os.system(f'mkdir {self.output_config.outputPath}')
# load or create normalisation matrix
if self.reduction_config.normalisationFileIdentifier:
@@ -76,12 +78,13 @@ class AmorReduction:
def read_unsliced(self, i):
lamda_e = self.file_reader.lamda_e
detZ_e = self.file_reader.detZ_e
self.monitor = np.sum(self.file_reader.monitorPerPulse)
logging.warning(f' monitor = {self.monitor:8.2f} {self.monitorUnit[self.experiment_config.monitorType]}')
qz_lz, qx_lz, ref_lz, err_lz, res_lz, lamda_lz, theta_lz, int_lz, self.mask_lz = self.project_on_lz(
self.file_reader, self.norm_lz, self.normAngle, lamda_e, detZ_e)
monitor = self.file_reader.monitor
if monitor>1 :
ref_lz /= monitor
err_lz /= monitor
#if self.monitor>1 :
# ref_lz /= self.monitor
# err_lz /= self.monitor
try:
ref_lz *= self.reduction_config.scale[i]
err_lz *= self.reduction_config.scale[i]
@@ -170,7 +173,8 @@ class AmorReduction:
j += 1
def read_timeslices(self, i):
wallTime_e = self.file_reader.wallTime_e
wallTime_e = np.float64(self.file_reader.wallTime_e)/1e9
pulseTimeS = np.float64(self.file_reader.pulseTimeS)/1e9
interval = self.reduction_config.timeSlize[0]
try:
start = self.reduction_config.timeSlize[1]
@@ -181,13 +185,17 @@ class AmorReduction:
except:
stop = wallTime_e[-1]
# make overwriting log lines possible by removing newline at the end
logging.StreamHandler.terminator = "\r"
#logging.StreamHandler.terminator = "\r"
logging.warning(f' time slizing')
logging.info(' slize time monitor')
for ti, time in enumerate(np.arange(start, stop, interval)):
logging.warning(f' time slize {ti:4d}')
filter_e = np.where((time<wallTime_e) & (wallTime_e<time+interval), True, False)
lamda_e = self.file_reader.lamda_e[filter_e]
detZ_e = self.file_reader.detZ_e[filter_e]
filter_m = np.where((time<pulseTimeS) & (pulseTimeS<time+interval), True, False)
self.monitor = np.sum(self.file_reader.monitorPerPulse[filter_m])
logging.info(f' {ti:<4d} {time:5.0f} {self.monitor:7.2f} {self.monitorUnit[self.experiment_config.monitorType]}')
qz_lz, qx_lz, ref_lz, err_lz, res_lz, lamda_lz, theta_lz, int_lz, mask_lz = self.project_on_lz(
self.file_reader, self.norm_lz, self.normAngle, lamda_e, detZ_e)
@@ -220,17 +228,17 @@ class AmorReduction:
orso_data = fileio.OrsoDataset(headerRqz, data)
self.datasetsRqz.append(orso_data)
# reset normal logging behavior
logging.StreamHandler.terminator = "\n"
logging.warning(f' time slizing, done')
#logging.StreamHandler.terminator = "\n"
logging.info(f' done {time+interval:5.0f}')
def save_Rqz(self):
fname = os.path.join(self.reader_config.dataPath, f'{self.output_config.outputName}.Rqz.ort')
fname = os.path.join(self.output_config.outputPath, f'{self.output_config.outputName}.Rqz.ort')
logging.warning(f' {fname}')
theSecondLine = f' {self.header.experiment.title} | {self.header.experiment.start_date} | sample {self.header.sample.name} | R(q_z)'
fileio.save_orso(self.datasetsRqz, fname, data_separator='\n', comment=theSecondLine)
def save_Rtl(self):
fname = os.path.join(self.reader_config.dataPath, f'{self.output_config.outputName}.Rlt.ort')
fname = os.path.join(self.output_config.outputPath, f'{self.output_config.outputName}.Rlt.ort')
logging.warning(f' {fname}')
theSecondLine = f' {self.header.experiment.title} | {self.header.experiment.start_date} | sample {self.header.sample.name} | R(lambda, theta)'
fileio.save_orso(self.datasetsRlt, fname, data_separator='\n', comment=theSecondLine)
@@ -298,7 +306,7 @@ class AmorReduction:
return q_q[1:], R_q, dR_q, dq_q
def loadRqz(self, name):
fname = os.path.join(self.reader_config.dataPath, name)
fname = os.path.join(self.output_config.outputPath, name)
if os.path.exists(fname):
fileName = fname
elif os.path.exists(f'{fname}.Rqz.ort'):
@@ -311,12 +319,12 @@ class AmorReduction:
return q_q, Sq_q, dS_q, fileName
def create_normalisation_map(self, short_notation):
dataPath = self.reader_config.dataPath
outputPath = self.output_config.outputPath
normalisation_list = expand_file_list(short_notation)
name = str(normalisation_list[0])
for i in range(1, len(normalisation_list), 1):
name = f'{name}_{normalisation_list[i]}'
n_path = os.path.join(dataPath, f'{name}.norm')
n_path = os.path.join(outputPath, f'{name}.norm')
if os.path.exists(n_path):
logging.warning(f'normalisation matrix: found and using {n_path}')
with open(n_path, 'rb') as fh:
@@ -336,7 +344,7 @@ class AmorReduction:
self.normAngle = fromHDF.nu - fromHDF.mu
lamda_e = fromHDF.lamda_e
detZ_e = fromHDF.detZ_e
self.normMonitor = fromHDF.monitor
self.normMonitor = np.sum(fromHDF.monitorPerPulse)
self.norm_lz, bins_l, bins_z = np.histogram2d(lamda_e, detZ_e, bins = (self.grid.lamda(), self.grid.z()))
self.norm_lz = np.where(self.norm_lz>2, self.norm_lz, np.nan)
# correct for the SM reflectivity
@@ -416,19 +424,24 @@ class AmorReduction:
if self.reduction_config.normalisationMethod == 'o':
logging.debug(' assuming an overilluminated sample and correcting for the angle of incidence')
ref_lz = (int_lz * np.absolute(thetaN_lz)) / (norm_lz * np.absolute(thetaF_lz)) * self.normMonitor
ref_lz = (int_lz * np.absolute(thetaN_lz)) / (norm_lz * np.absolute(thetaF_lz))
elif self.reduction_config.normalisationMethod == 'u':
logging.debug(' assuming an underilluminated sample and ignoring the angle of incidence')
ref_lz = (int_lz / norm_lz) * self.normMonitor
ref_lz = (int_lz / norm_lz)
elif self.reduction_config.normalisationMethod == 'd':
logging.debug(' assuming direct beam for normalisation and ignoring the angle of incidence')
norm_lz = np.flip(norm_lz,1)
ref_lz = (int_lz / norm_lz) * self.normMonitor
ref_lz = (int_lz / norm_lz)
else:
logging.error('unknown normalisation method! Use [u], [o] or [d]')
ref_lz = (int_lz * np.absolute(thetaN_lz)) / (norm_lz * np.absolute(thetaF_lz)) * self.normMonitor
ref_lz = (int_lz * np.absolute(thetaN_lz)) / (norm_lz * np.absolute(thetaF_lz))
if self.monitor > 1e-6 :
ref_lz *= self.normMonitor / self.monitor
else:
logging.warning(' too small monitor value for normalisation -> ignoring monitors')
err_lz = ref_lz * np.sqrt( 1/(int_lz+.1) + 1/norm_lz )
# TODO: allow for non-ideal Delta lambda / lambda (rather than 2.2%)
res_lz = np.ones((np.shape(lamda_l[:-1])[0], np.shape(alphaF_z)[0])) * 0.022**2
res_lz = res_lz + (0.008/alphaF_lz)**2
res_lz = qz_lz * np.sqrt(res_lz)

6
pyproject.toml Normal file
View File

@@ -0,0 +1,6 @@
# Minimal PEP 518 build configuration; the package metadata lives in setup.cfg.
[build-system]
requires = [
    "setuptools>=42",
    "wheel"
]
build-backend = "setuptools.build_meta"

31
setup.cfg Normal file
View File

@@ -0,0 +1,31 @@
[bdist_wheel]
universal = 1

[metadata]
name = amor_eos
version = attr: libeos.__version__
author = Jochen Stahn - Paul Scherrer Institut
author_email = jochen.stahn@psi.ch
description = EOS reflectometry reduction for AMOR instrument
license = MIT
# setup.cfg has no [project.urls] table (that is pyproject.toml syntax, and
# quoted values would be taken literally here); use the setuptools
# 'url' / 'project_urls' metadata keys instead.
url = https://github.com/jochenstahn/amor
project_urls =
    Homepage = https://github.com/jochenstahn/amor
classifiers =
    Programming Language :: Python :: 3
    License :: OSI Approved :: MIT License
    Operating System :: OS Independent
    Topic :: Scientific/Engineering
    Development Status :: 5 - Production/Stable

[options]
python_requires = >=3.8
packages =
    libeos
scripts =
    eos.py
install_requires =
    numpy
    h5py
    orsopy
    numba

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -21,15 +21,14 @@ class FullAmorTest(TestCase):
self.pr.enable()
self.reader_config = options.ReaderConfig(
year=2023,
dataPath=os.path.join('..', "test_data"),
raw=(os.path.join('..', "test_data"),)
rawPath=(os.path.join('..', "test_data"),),
)
def tearDown(self):
self.pr.disable()
for fi in ['test.Rqz.ort', '614.norm']:
try:
os.unlink(os.path.join(self.reader_config.dataPath, fi))
os.unlink(os.path.join(self.reader_config.rawPath[0], fi))
except FileNotFoundError:
pass
@@ -38,6 +37,8 @@ class FullAmorTest(TestCase):
experiment_config = options.ExperimentConfig(
chopperPhase=-13.5,
chopperPhaseOffset=-5,
monitorType=options.Defaults.monitorType,
lowCurrentThreshold=options.Defaults.lowCurrentThreshold,
yRange=(11., 41.),
lambdaRange=(2., 15.),
qzRange=(0.005, 0.30),
@@ -60,7 +61,8 @@ class FullAmorTest(TestCase):
)
output_config = options.OutputConfig(
outputFormats=["Rqz.ort"],
outputName='test'
outputName='test',
outputPath=os.path.join('..', 'test_results'),
)
config=options.EOSConfig(self.reader_config, experiment_config, reduction_config, output_config)
# run three times to get similar timing to noslicing runs
@@ -75,6 +77,8 @@ class FullAmorTest(TestCase):
experiment_config = options.ExperimentConfig(
chopperPhase=-13.5,
chopperPhaseOffset=-5,
monitorType=options.Defaults.monitorType,
lowCurrentThreshold=options.Defaults.lowCurrentThreshold,
yRange=(11., 41.),
lambdaRange=(2., 15.),
qzRange=(0.005, 0.30),
@@ -91,12 +95,13 @@ class FullAmorTest(TestCase):
thetaRangeR=(-12., 12.),
fileIdentifier=["610", "611", "608,612-613", "609"],
scale=[1],
normalisationFileIdentifier=["614"],
normalisationFileIdentifier=["608"],
autoscale=(True, True)
)
output_config = options.OutputConfig(
outputFormats=["Rqz.ort"],
outputName='test'
outputName='test',
outputPath=os.path.join('..', 'test_results'),
)
config=options.EOSConfig(self.reader_config, experiment_config, reduction_config, output_config)
reducer = reduction.AmorReduction(config)