Compare commits
45 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| b80a01325b | |||
| a22732c612 | |||
| 443cb30b79 | |||
| 057742cabd | |||
| 2689a969f1 | |||
| aff4c5b603 | |||
| 887ec6fa23 | |||
| 6102ed7123 | |||
| bbac3bc943 | |||
| bacbd18854 | |||
|
|
aa7919b83d | ||
| f65651d0e5 | |||
| 1b0df0ec45 | |||
| 5f94cc7b92 | |||
| a947a70d2d | |||
| 729332f49d | |||
| 500d53a9b2 | |||
| dac76efdd1 | |||
| e78200a39d | |||
| c6bde8cd85 | |||
| 8e9c03a4fb | |||
| 155a167e26 | |||
| fcc1fd7d84 | |||
| ee34cc96d4 | |||
|
|
64edd8e8d7 | ||
| 96681c8f81 | |||
| 87a246196f | |||
| 3607de9864 | |||
| 23e310ba6a | |||
| 9f2dfbcbe1 | |||
| 25f8708b4c | |||
| 2e565f55be | |||
| b48bea7726 | |||
| 55e9407b67 | |||
| 5fc512632f | |||
| 55a7b7270c | |||
| f81582554e | |||
| 3e22a94ff6 | |||
|
|
c1dc194548 | ||
|
|
c5f5602a89 | ||
| cf9c0018a5 | |||
| 03ac04ab05 | |||
| b2df980a21 | |||
| eb54e52c66 | |||
| 9b4c384425 |
32
.github/workflows/release.yml
vendored
32
.github/workflows/release.yml
vendored
@@ -19,12 +19,14 @@ on:
|
|||||||
- all
|
- all
|
||||||
- windows
|
- windows
|
||||||
- linux
|
- linux
|
||||||
- mac
|
- all_incl_release
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
build-ubuntu-latest:
|
build-ubuntu-latest:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
if: ${{ (github.event_name != 'workflow_dispatch') || (contains(fromJson('["all", "linux"]'), github.event.inputs.build-items)) }}
|
if: ${{ (github.event_name != 'workflow_dispatch') || (contains(fromJson('["all", "linux", "all_incl_release"]'), github.event.inputs.build-items)) }}
|
||||||
|
permissions:
|
||||||
|
id-token: write
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
- uses: actions/setup-python@v5
|
- uses: actions/setup-python@v5
|
||||||
@@ -38,23 +40,23 @@ jobs:
|
|||||||
- name: Build PyPI package
|
- name: Build PyPI package
|
||||||
run: |
|
run: |
|
||||||
python3 -m build
|
python3 -m build
|
||||||
- name: Upload to PyPI
|
|
||||||
if: github.event_name != 'workflow_dispatch'
|
|
||||||
uses: pypa/gh-action-pypi-publish@release/v1
|
|
||||||
with:
|
|
||||||
user: __token__
|
|
||||||
password: ${{ secrets.PYPI_TOKEN }}
|
|
||||||
skip-existing: true
|
|
||||||
- name: Archive distribution
|
- name: Archive distribution
|
||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: linux-dist
|
name: linux-dist
|
||||||
path: |
|
path: |
|
||||||
dist/*.tar.gz
|
dist/*.tar.gz
|
||||||
|
- name: Upload to PyPI
|
||||||
|
#if: github.event_name != 'workflow_dispatch'
|
||||||
|
uses: pypa/gh-action-pypi-publish@release/v1
|
||||||
|
with:
|
||||||
|
# user: __token__
|
||||||
|
# password: ${{ secrets.PYPI_TOKEN }}
|
||||||
|
skip-existing: true
|
||||||
|
|
||||||
build-windows:
|
build-windows:
|
||||||
runs-on: windows-latest
|
runs-on: windows-latest
|
||||||
if: ${{ (github.event_name != 'workflow_dispatch') || (contains(fromJson('["all", "windows"]'), github.event.inputs.build-items)) }}
|
if: ${{ (github.event_name != 'workflow_dispatch') || (contains(fromJson('["all", "windows", "all_incl_release"]'), github.event.inputs.build-items)) }}
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
@@ -79,19 +81,25 @@ jobs:
|
|||||||
eos.zip
|
eos.zip
|
||||||
|
|
||||||
release:
|
release:
|
||||||
if: github.event_name != 'workflow_dispatch'
|
if: ${{ (github.event_name != 'workflow_dispatch') || (contains(fromJson('["all_incl_release"]'), github.event.inputs.build-items)) }}
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
needs: [build-ubuntu-latest]
|
needs: [build-ubuntu-latest, build-windows]
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
fetch-tags: true
|
||||||
- uses: actions/download-artifact@v4
|
- uses: actions/download-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: linux-dist
|
name: linux-dist
|
||||||
- uses: actions/download-artifact@v4
|
- uses: actions/download-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: windows-dist
|
name: windows-dist
|
||||||
|
- name: get latest version tag
|
||||||
|
run: echo "RELEASE_TAG=$(git describe --abbrev=0 --tags)" >> $GITHUB_ENV
|
||||||
- uses: ncipollo/release-action@v1
|
- uses: ncipollo/release-action@v1
|
||||||
with:
|
with:
|
||||||
artifacts: "amor*.tar.gz,*.zip"
|
artifacts: "amor*.tar.gz,*.zip"
|
||||||
token: ${{ secrets.GITHUB_TOKEN }}
|
token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
allowUpdates: true
|
allowUpdates: true
|
||||||
|
tag: ${{ env.RELEASE_TAG }}
|
||||||
|
|||||||
7
.github/workflows/unit_tests.yml
vendored
7
.github/workflows/unit_tests.yml
vendored
@@ -15,7 +15,7 @@ jobs:
|
|||||||
runs-on: ubuntu-22.04
|
runs-on: ubuntu-22.04
|
||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
python-version: [3.8, 3.9, '3.10', '3.11', '3.12']
|
python-version: ['3.8', '3.9', '3.10', '3.11', '3.12']
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
@@ -31,6 +31,11 @@ jobs:
|
|||||||
pip install pytest
|
pip install pytest
|
||||||
pip install -r requirements.txt
|
pip install -r requirements.txt
|
||||||
|
|
||||||
|
- name: Backport to 3.8
|
||||||
|
if: matrix.python-version == '3.8'
|
||||||
|
run: |
|
||||||
|
pip install backports.zoneinfo
|
||||||
|
|
||||||
- name: Test with pytest
|
- name: Test with pytest
|
||||||
run: |
|
run: |
|
||||||
cd tests
|
cd tests
|
||||||
|
|||||||
1283
amor_manual.md
1283
amor_manual.md
File diff suppressed because it is too large
Load Diff
@@ -41,3 +41,4 @@ dependencies:
|
|||||||
- pip:
|
- pip:
|
||||||
- orsopy==1.2.1
|
- orsopy==1.2.1
|
||||||
- pyyaml==6.0.2
|
- pyyaml==6.0.2
|
||||||
|
- tzdata
|
||||||
1082
e2h_new.py
Normal file
1082
e2h_new.py
Normal file
File diff suppressed because it is too large
Load Diff
@@ -860,7 +860,7 @@ def process(dataPath, ident, clas):
|
|||||||
try: lamdaMax
|
try: lamdaMax
|
||||||
except NameError: lamdaMax = lamdaMin + tau * hdm/chopperDetectorDistance * 1e13
|
except NameError: lamdaMax = lamdaMin + tau * hdm/chopperDetectorDistance * 1e13
|
||||||
|
|
||||||
tofOffset = tau * chopperPhase / 180. # mismatch of chopper pulse and time-zero
|
tofOffset = -tau * chopperPhase / 180. # mismatch of chopper pulse and time-zero
|
||||||
tofCut = lamdaCut * chopperDetectorDistance / hdm * 1.e-13 # tof of frame start
|
tofCut = lamdaCut * chopperDetectorDistance / hdm * 1.e-13 # tof of frame start
|
||||||
|
|
||||||
tof_e = np.array(ev['/entry1/Amor/detector/data/event_time_offset'][:], dtype=np.uint64)/1.e9 + tofOffset # tof
|
tof_e = np.array(ev['/entry1/Amor/detector/data/event_time_offset'][:], dtype=np.uint64)/1.e9 + tofOffset # tof
|
||||||
@@ -960,7 +960,7 @@ def commandLineArgs():
|
|||||||
type=float,
|
type=float,
|
||||||
help ="value of nu")
|
help ="value of nu")
|
||||||
clas.add_argument("-P", "--chopperPhase",
|
clas.add_argument("-P", "--chopperPhase",
|
||||||
default=7.5,
|
default=-7.5,
|
||||||
type=float,
|
type=float,
|
||||||
help ="chopper phase offset")
|
help ="chopper phase offset")
|
||||||
clas.add_argument("-p", "--plot",
|
clas.add_argument("-p", "--plot",
|
||||||
|
|||||||
1051
events2histogram_2025.py
Executable file
1051
events2histogram_2025.py
Executable file
File diff suppressed because it is too large
Load Diff
@@ -2,5 +2,6 @@
|
|||||||
Package to handle data redction at AMOR instrument to be used by eos.py script.
|
Package to handle data redction at AMOR instrument to be used by eos.py script.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
__version__ = '2.1.2'
|
__version__ = '2.2.0'
|
||||||
__date__ = '2024-12-03'
|
__date__ = '2025-09-16'
|
||||||
|
|
||||||
|
|||||||
@@ -185,6 +185,7 @@ def command_line_options():
|
|||||||
)
|
)
|
||||||
experiment_config = ExperimentConfig(
|
experiment_config = ExperimentConfig(
|
||||||
sampleModel = clas.sampleModel,
|
sampleModel = clas.sampleModel,
|
||||||
|
chopperSpeed = clas.chopperSpeed,
|
||||||
chopperPhase = clas.chopperPhase,
|
chopperPhase = clas.chopperPhase,
|
||||||
chopperPhaseOffset = clas.chopperPhaseOffset,
|
chopperPhaseOffset = clas.chopperPhaseOffset,
|
||||||
yRange = clas.yRange,
|
yRange = clas.yRange,
|
||||||
|
|||||||
@@ -3,6 +3,11 @@ import os
|
|||||||
import subprocess
|
import subprocess
|
||||||
import sys
|
import sys
|
||||||
from datetime import datetime, timezone
|
from datetime import datetime, timezone
|
||||||
|
try:
|
||||||
|
import zoneinfo
|
||||||
|
except ImportError:
|
||||||
|
# for python versions < 3.9 try to use the backports version
|
||||||
|
from backports import zoneinfo
|
||||||
from typing import List
|
from typing import List
|
||||||
|
|
||||||
import h5py
|
import h5py
|
||||||
@@ -20,12 +25,17 @@ try:
|
|||||||
except Exception:
|
except Exception:
|
||||||
nb_helpers = None
|
nb_helpers = None
|
||||||
|
|
||||||
|
# Time zone used to interpret time strings
|
||||||
|
AMOR_LOCAL_TIMEZONE = zoneinfo.ZoneInfo(key='Europe/Zurich')
|
||||||
|
|
||||||
class AmorData:
|
class AmorData:
|
||||||
"""read meta-data and event streams from .hdf file(s), apply filters and conversions"""
|
"""read meta-data and event streams from .hdf file(s), apply filters and conversions"""
|
||||||
chopperDetectorDistance: float
|
chopperDetectorDistance: float
|
||||||
chopperDistance: float
|
chopperDistance: float
|
||||||
chopperPhase: float
|
chopperPhase: float
|
||||||
chopperSpeed: float
|
chopperSpeed: float
|
||||||
|
chopper1TriggerPhase: float
|
||||||
|
chopper2TriggerPhase: float
|
||||||
div: float
|
div: float
|
||||||
data_file_numbers: List[int]
|
data_file_numbers: List[int]
|
||||||
delta_z: np.ndarray
|
delta_z: np.ndarray
|
||||||
@@ -88,22 +98,6 @@ class AmorData:
|
|||||||
self.monitorPerPulse = _monitorPerPulse
|
self.monitorPerPulse = _monitorPerPulse
|
||||||
self.pulseTimeS = _pulseTimeS
|
self.pulseTimeS = _pulseTimeS
|
||||||
|
|
||||||
#-------------------------------------------------------------------------------------------------
|
|
||||||
#def path_generator(self, number):
|
|
||||||
# fileName = f'amor{self.reader_config.year}n{number:06d}.hdf'
|
|
||||||
# if os.path.exists(os.path.join(self.reader_config.dataPath,fileName)):
|
|
||||||
# path = self.reader_config.dataPath
|
|
||||||
# elif os.path.exists(fileName):
|
|
||||||
# path = '.'
|
|
||||||
# elif os.path.exists(os.path.join('.','raw', fileName)):
|
|
||||||
# path = os.path.join('.','raw')
|
|
||||||
# elif os.path.exists(os.path.join('..','raw', fileName)):
|
|
||||||
# path = os.path.join('..','raw')
|
|
||||||
# elif os.path.exists(f'/afs/psi.ch/project/sinqdata/{self.reader_config.year}/amor/{int(number/1000)}/{fileName}'):
|
|
||||||
# path = f'/afs/psi.ch/project/sinqdata/{self.reader_config.year}/amor/{int(number/1000)}'
|
|
||||||
# else:
|
|
||||||
# sys.exit(f'# ERROR: the file {fileName} is nowhere to be found!')
|
|
||||||
# return os.path.join(path, fileName)
|
|
||||||
#-------------------------------------------------------------------------------------------------
|
#-------------------------------------------------------------------------------------------------
|
||||||
def path_generator(self, number):
|
def path_generator(self, number):
|
||||||
fileName = f'amor{self.reader_config.year}n{number:06d}.hdf'
|
fileName = f'amor{self.reader_config.year}n{number:06d}.hdf'
|
||||||
@@ -167,7 +161,8 @@ class AmorData:
|
|||||||
round(self.mu+self.kap+self.kad+0.5*self.div, 3),
|
round(self.mu+self.kap+self.kad+0.5*self.div, 3),
|
||||||
'deg'),
|
'deg'),
|
||||||
wavelength = fileio.ValueRange(const.lamdaCut, self.config.lambdaRange[1], 'angstrom'),
|
wavelength = fileio.ValueRange(const.lamdaCut, self.config.lambdaRange[1], 'angstrom'),
|
||||||
polarization = fileio.Polarization.unpolarized,
|
#polarization = fileio.Polarization.unpolarized,
|
||||||
|
polarization = self.polarizationConfig
|
||||||
)
|
)
|
||||||
self.header.measurement_instrument_settings.mu = fileio.Value(round(self.mu, 3), 'deg', comment='sample angle to horizon')
|
self.header.measurement_instrument_settings.mu = fileio.Value(round(self.mu, 3), 'deg', comment='sample angle to horizon')
|
||||||
self.header.measurement_instrument_settings.nu = fileio.Value(round(self.nu, 3), 'deg', comment='detector angle to horizon')
|
self.header.measurement_instrument_settings.nu = fileio.Value(round(self.nu, 3), 'deg', comment='detector angle to horizon')
|
||||||
@@ -182,18 +177,17 @@ class AmorData:
|
|||||||
logging.info(f' mu = {self.mu:6.3f}, nu = {self.nu:6.3f}, kap = {self.kap:6.3f}, kad = {self.kad:6.3f}')
|
logging.info(f' mu = {self.mu:6.3f}, nu = {self.nu:6.3f}, kap = {self.kap:6.3f}, kad = {self.kad:6.3f}')
|
||||||
|
|
||||||
self.read_event_stream()
|
self.read_event_stream()
|
||||||
totalNumber = np.shape(self.tof_e)[0]
|
|
||||||
# check for empty event stream
|
|
||||||
if totalNumber == 0:
|
|
||||||
logging.error('empty event stream: can not determine end time')
|
|
||||||
sys.exit()
|
|
||||||
|
|
||||||
self.sort_pulses()
|
self.correct_for_chopper_phases()
|
||||||
|
|
||||||
self.associate_pulse_with_monitor()
|
self.read_chopper_trigger_stream()
|
||||||
|
|
||||||
self.extract_walltime(norm)
|
self.extract_walltime(norm)
|
||||||
|
|
||||||
|
self.read_proton_current_stream()
|
||||||
|
|
||||||
|
self.associate_pulse_with_monitor()
|
||||||
|
|
||||||
# following lines: debugging output to trace the time-offset of proton current and neutron pulses
|
# following lines: debugging output to trace the time-offset of proton current and neutron pulses
|
||||||
if self.config.monitorType == 'x':
|
if self.config.monitorType == 'x':
|
||||||
cpp, t_bins = np.histogram(self.wallTime_e, self.pulseTimeS)
|
cpp, t_bins = np.histogram(self.wallTime_e, self.pulseTimeS)
|
||||||
@@ -205,7 +199,7 @@ class AmorData:
|
|||||||
|
|
||||||
self.filter_strange_times()
|
self.filter_strange_times()
|
||||||
|
|
||||||
self.merge_frames()
|
self.merge_time_frames()
|
||||||
|
|
||||||
self.filter_project_x()
|
self.filter_project_x()
|
||||||
|
|
||||||
@@ -215,57 +209,17 @@ class AmorData:
|
|||||||
|
|
||||||
self.filter_qz_range(norm)
|
self.filter_qz_range(norm)
|
||||||
|
|
||||||
logging.info(f' number of events: total = {totalNumber:7d}, filtered = {np.shape(self.lamda_e)[0]:7d}')
|
logging.info(f' number of events: total = {self.totalNumber:7d}, filtered = {np.shape(self.lamda_e)[0]:7d}')
|
||||||
|
|
||||||
def sort_pulses(self):
|
def read_event_stream(self):
|
||||||
chopperPeriod = np.int64(2*self.tau*1e9)
|
self.tof_e = np.array(self.hdf['/entry1/Amor/detector/data/event_time_offset'][:])/1.e9
|
||||||
pulseTime = np.sort(self.dataPacketTime_p)
|
self.pixelID_e = np.array(self.hdf['/entry1/Amor/detector/data/event_id'][:], dtype=np.int64)
|
||||||
pulseTime = pulseTime[np.abs(pulseTime[:]-np.roll(pulseTime, 1)[:])>5]
|
self.dataPacket_p = np.array(self.hdf['/entry1/Amor/detector/data/event_index'][:], dtype=np.uint64)
|
||||||
|
self.dataPacketTime_p = np.array(self.hdf['/entry1/Amor/detector/data/event_time_zero'][:], dtype=np.int64)
|
||||||
|
|
||||||
pulseTime -= np.int64(self.seriesStartTime)
|
def correct_for_chopper_phases(self):
|
||||||
self.stopTime = pulseTime[-1]
|
#print(f'chopperTriggerPhase: {self.ch1TriggerPhase}')
|
||||||
pulseTime = pulseTime[pulseTime>=0]
|
self.tof_e += self.tau * (self.ch1TriggerPhase + self.chopperPhase/2)/180
|
||||||
|
|
||||||
# fill in missing pulse times
|
|
||||||
# TODO: check for real end time
|
|
||||||
try:
|
|
||||||
# TODO: use the first pulse of the respective measurement
|
|
||||||
#nextPulseTime = startTime % np.int64(self.tau*2e9)
|
|
||||||
nextPulseTime = self.pulseTimeS[-1] + chopperPeriod
|
|
||||||
except AttributeError:
|
|
||||||
nextPulseTime = pulseTime[0] % np.int64(self.tau*2e9)
|
|
||||||
self.pulseTimeS = np.array([], dtype=np.int64)
|
|
||||||
for tt in pulseTime:
|
|
||||||
while tt - nextPulseTime > self.tau*1e9:
|
|
||||||
self.pulseTimeS = np.append(self.pulseTimeS, nextPulseTime)
|
|
||||||
nextPulseTime += chopperPeriod
|
|
||||||
self.pulseTimeS = np.append(self.pulseTimeS, tt)
|
|
||||||
nextPulseTime = self.pulseTimeS[-1] + chopperPeriod
|
|
||||||
|
|
||||||
def get_current_per_pulse(self, pulseTimeS, currentTimeS, currents):
|
|
||||||
# add currents for early pulses and current time value after last pulse (j+1)
|
|
||||||
currentTimeS = np.hstack([[0], currentTimeS, [pulseTimeS[-1]+1]])
|
|
||||||
currents = np.hstack([[0], currents])
|
|
||||||
pulseCurrentS = np.zeros(pulseTimeS.shape[0], dtype=float)
|
|
||||||
j = 0
|
|
||||||
for i, ti in enumerate(pulseTimeS):
|
|
||||||
if ti >= currentTimeS[j+1]:
|
|
||||||
j += 1
|
|
||||||
pulseCurrentS[i] = currents[j]
|
|
||||||
#print(f' {i} {pulseTimeS[i]} {pulseCurrentS[i]}')
|
|
||||||
return pulseCurrentS
|
|
||||||
|
|
||||||
def associate_pulse_with_monitor(self):
|
|
||||||
if self.config.monitorType == 'p': # protonCharge
|
|
||||||
self.currentTime -= np.int64(self.seriesStartTime)
|
|
||||||
self.currentTime -= np.int64(16e9) # time offset of proton current signal
|
|
||||||
self.monitorPerPulse = self.get_current_per_pulse(self.pulseTimeS, self.currentTime, self.current) * 2*self.tau * 1e-3
|
|
||||||
# filter low-current pulses
|
|
||||||
self.monitorPerPulse = np.where(self.monitorPerPulse > 2*self.tau * self.config.lowCurrentThreshold * 1e-3, self.monitorPerPulse, 0)
|
|
||||||
elif self.config.monitorType == 't': # countingTime
|
|
||||||
self.monitorPerPulse = np.ones(np.shape(self.pulseTimeS)[0])*self.tau
|
|
||||||
else:
|
|
||||||
self.monitorPerPulse = 1./np.shape(pulseTimeS)[1]
|
|
||||||
|
|
||||||
def extract_walltime(self, norm):
|
def extract_walltime(self, norm):
|
||||||
if nb_helpers:
|
if nb_helpers:
|
||||||
@@ -278,6 +232,54 @@ class AmorData:
|
|||||||
self.wallTime_e -= np.int64(self.seriesStartTime)
|
self.wallTime_e -= np.int64(self.seriesStartTime)
|
||||||
logging.debug(f' wall time from {self.wallTime_e[0]/1e9:6.1f} s to {self.wallTime_e[-1]/1e9:6.1f} s')
|
logging.debug(f' wall time from {self.wallTime_e[0]/1e9:6.1f} s to {self.wallTime_e[-1]/1e9:6.1f} s')
|
||||||
|
|
||||||
|
def read_chopper_trigger_stream(self):
|
||||||
|
self.chopper1TriggerTime = np.array(self.hdf['entry1/Amor/chopper/ch2_trigger/event_time_zero'][:-2], dtype=np.int64)
|
||||||
|
#self.chopper2TriggerTime = self.chopper1TriggerTime + np.array(self.hdf['entry1/Amor/chopper/ch2_trigger/event_time'][:-2], dtype=np.int64)
|
||||||
|
# + np.array(self.hdf['entry1/Amor/chopper/ch2_trigger/event_time_offset'][:], dtype=np.int64)
|
||||||
|
if np.shape(self.chopper1TriggerTime)[0] > 2:
|
||||||
|
self.startTime = self.chopper1TriggerTime[0]
|
||||||
|
self.stopTime = self.chopper1TriggerTime[-1]
|
||||||
|
self.pulseTimeS = self.chopper1TriggerTime
|
||||||
|
else:
|
||||||
|
logging.warn(' no chopper trigger data available, using event steram instead')
|
||||||
|
self.startTime = np.array(self.hdf['/entry1/Amor/detector/data/event_time_zero'][0], dtype=np.int64)
|
||||||
|
self.stopTime = np.array(self.hdf['/entry1/Amor/detector/data/event_time_zero'][-2], dtype=np.int64)
|
||||||
|
self.pulseTimeS = np.arange(self.startTime, self.stopTime, self.tau*1e9)
|
||||||
|
if self.seriesStartTime is None:
|
||||||
|
self.seriesStartTime = self.startTime
|
||||||
|
logging.debug(f' series start time (epoch): {self.seriesStartTime/1e9:13.2f} s')
|
||||||
|
self.pulseTimeS -= self.seriesStartTime
|
||||||
|
logging.debug(f' epoch time from {self.startTime/1e9:13.2f} s to {self.stopTime/1e9:13.2f} s')
|
||||||
|
logging.debug(f' => counting time {self.stopTime/1e9-self.startTime/1e9:8.2f} s')
|
||||||
|
|
||||||
|
def read_proton_current_stream(self):
|
||||||
|
self.currentTime = np.array(self.hdf['entry1/Amor/detector/proton_current/time'][:], dtype=np.int64)
|
||||||
|
self.current = np.array(self.hdf['entry1/Amor/detector/proton_current/value'][:,0], dtype=float)
|
||||||
|
|
||||||
|
def get_current_per_pulse(self, pulseTimeS, currentTimeS, currents):
|
||||||
|
# add currents for early pulses and current time value after last pulse (j+1)
|
||||||
|
currentTimeS = np.hstack([[0], currentTimeS, [pulseTimeS[-1]+1]])
|
||||||
|
currents = np.hstack([[0], currents])
|
||||||
|
pulseCurrentS = np.zeros(pulseTimeS.shape[0], dtype=float)
|
||||||
|
j = 0
|
||||||
|
for i, ti in enumerate(pulseTimeS):
|
||||||
|
while ti >= currentTimeS[j+1]:
|
||||||
|
j += 1
|
||||||
|
pulseCurrentS[i] = currents[j]
|
||||||
|
#print(f' {i} {pulseTimeS[i]} {pulseCurrentS[i]}')
|
||||||
|
return pulseCurrentS
|
||||||
|
|
||||||
|
def associate_pulse_with_monitor(self):
|
||||||
|
if self.config.monitorType == 'p': # protonCharge
|
||||||
|
self.currentTime -= np.int64(self.seriesStartTime)
|
||||||
|
self.monitorPerPulse = self.get_current_per_pulse(self.pulseTimeS, self.currentTime, self.current) * 2*self.tau * 1e-3
|
||||||
|
# filter low-current pulses
|
||||||
|
self.monitorPerPulse = np.where(self.monitorPerPulse > 2*self.tau * self.config.lowCurrentThreshold * 1e-3, self.monitorPerPulse, 0)
|
||||||
|
elif self.config.monitorType == 't': # countingTime
|
||||||
|
self.monitorPerPulse = np.ones(np.shape(self.pulseTimeS)[0])*2*self.tau
|
||||||
|
else: # pulses
|
||||||
|
self.monitorPerPulse = np.ones(np.shape(self.pulseTimeS)[0])
|
||||||
|
|
||||||
def average_events_per_pulse(self):
|
def average_events_per_pulse(self):
|
||||||
if self.config.monitorType == 'p':
|
if self.config.monitorType == 'p':
|
||||||
for i, time in enumerate(self.pulseTimeS):
|
for i, time in enumerate(self.pulseTimeS):
|
||||||
@@ -285,18 +287,20 @@ class AmorData:
|
|||||||
logging.info(f'pulse: {i:6.0f}, events: {events:6.0f}, monitor: {self.monitorPerPulse[i]:6.2f}')
|
logging.info(f'pulse: {i:6.0f}, events: {events:6.0f}, monitor: {self.monitorPerPulse[i]:6.2f}')
|
||||||
|
|
||||||
def monitor_threshold(self):
|
def monitor_threshold(self):
|
||||||
if self.config.monitorType == 'p': # fix to check for file compatibility
|
#if self.config.monitorType == 'p': # fix to check for file compatibility
|
||||||
|
self.totalNumber = np.shape(self.tof_e[self.tof_e<=self.stopTime])[0]
|
||||||
|
if True:
|
||||||
goodTimeS = self.pulseTimeS[self.monitorPerPulse!=0]
|
goodTimeS = self.pulseTimeS[self.monitorPerPulse!=0]
|
||||||
filter_e = np.where(np.isin(self.wallTime_e, goodTimeS), True, False)
|
filter_e = np.where(np.isin(self.wallTime_e, goodTimeS), True, False)
|
||||||
self.tof_e = self.tof_e[filter_e]
|
self.tof_e = self.tof_e[filter_e]
|
||||||
self.pixelID_e = self.pixelID_e[filter_e]
|
self.pixelID_e = self.pixelID_e[filter_e]
|
||||||
self.wallTime_e = self.wallTime_e[filter_e]
|
self.wallTime_e = self.wallTime_e[filter_e]
|
||||||
logging.info(f' rejected {np.shape(self.monitorPerPulse)[0]-np.shape(goodTimeS)[0]} out of {np.shape(self.monitorPerPulse)[0]} pulses')
|
logging.info(f' low-beam rejected pulses: {np.shape(self.monitorPerPulse)[0]-1-np.shape(goodTimeS)[0]} out of {np.shape(self.monitorPerPulse)[0]-1}')
|
||||||
logging.info(f' with {np.shape(filter_e)[0]-np.shape(self.tof_e)[0]} events due to low beam current')
|
logging.info(f' with {np.shape(filter_e)[0]-np.shape(self.tof_e)[0]} events')
|
||||||
logging.info(f' average counts per pulse = {np.shape(self.tof_e)[0] / np.shape(goodTimeS[goodTimeS!=0])[0]:7.1f}')
|
logging.info(f' average counts per pulse = {np.shape(self.tof_e)[0] / np.shape(goodTimeS[goodTimeS!=0])[0]:7.1f}')
|
||||||
|
|
||||||
def filter_qz_range(self, norm):
|
def filter_qz_range(self, norm):
|
||||||
if self.config.qzRange[1]<0.3 and not norm:
|
if self.config.qzRange[1]<0.5 and not norm:
|
||||||
self.mask_e = np.logical_and(self.mask_e,
|
self.mask_e = np.logical_and(self.mask_e,
|
||||||
(self.config.qzRange[0]<=self.qz_e) & (self.qz_e<=self.config.qzRange[1]))
|
(self.config.qzRange[0]<=self.qz_e) & (self.qz_e<=self.config.qzRange[1]))
|
||||||
self.detZ_e = self.detZ_e[self.mask_e]
|
self.detZ_e = self.detZ_e[self.mask_e]
|
||||||
@@ -305,7 +309,8 @@ class AmorData:
|
|||||||
|
|
||||||
def calculate_derived_properties(self):
|
def calculate_derived_properties(self):
|
||||||
self.lamdaMax = const.lamdaCut+1.e13*self.tau*const.hdm/(self.chopperDetectorDistance+124.)
|
self.lamdaMax = const.lamdaCut+1.e13*self.tau*const.hdm/(self.chopperDetectorDistance+124.)
|
||||||
if nb_helpers:
|
#if nb_helpers:
|
||||||
|
if False:
|
||||||
self.lamda_e, self.qz_e, self.mask_e = nb_helpers.calculate_derived_properties_focussing(
|
self.lamda_e, self.qz_e, self.mask_e = nb_helpers.calculate_derived_properties_focussing(
|
||||||
self.tof_e, self.detXdist_e, self.delta_e, self.mask_e,
|
self.tof_e, self.detXdist_e, self.delta_e, self.mask_e,
|
||||||
self.config.lambdaRange[0], self.config.lambdaRange[1], self.nu, self.mu,
|
self.config.lambdaRange[0], self.config.lambdaRange[1], self.nu, self.mu,
|
||||||
@@ -355,8 +360,9 @@ class AmorData:
|
|||||||
# define mask and filter y range
|
# define mask and filter y range
|
||||||
self.mask_e = (self.config.yRange[0]<=detY_e) & (detY_e<=self.config.yRange[1])
|
self.mask_e = (self.config.yRange[0]<=detY_e) & (detY_e<=self.config.yRange[1])
|
||||||
|
|
||||||
def merge_frames(self):
|
# TODO: - handle each neutron pulse individually, - associate with correct monitor also for slow neutrons
|
||||||
total_offset = self.tofCut+self.tau*self.config.chopperPhaseOffset/180.
|
def merge_time_frames(self):
|
||||||
|
total_offset = self.tofCut + self.tau * (self.ch1TriggerPhase + self.chopperPhase/2)/180
|
||||||
if nb_helpers:
|
if nb_helpers:
|
||||||
self.tof_e = nb_helpers.merge_frames(self.tof_e, self.tofCut, self.tau, total_offset)
|
self.tof_e = nb_helpers.merge_frames(self.tof_e, self.tofCut, self.tau, total_offset)
|
||||||
else:
|
else:
|
||||||
@@ -369,26 +375,7 @@ class AmorData:
|
|||||||
self.pixelID_e = self.pixelID_e[filter_e]
|
self.pixelID_e = self.pixelID_e[filter_e]
|
||||||
self.wallTime_e = self.wallTime_e[filter_e]
|
self.wallTime_e = self.wallTime_e[filter_e]
|
||||||
if np.shape(filter_e)[0]-np.shape(self.tof_e)[0]>0.5:
|
if np.shape(filter_e)[0]-np.shape(self.tof_e)[0]>0.5:
|
||||||
logging.warning(f'# strange times: {np.shape(filter_e)[0]-np.shape(self.tof_e)[0]}')
|
logging.warning(f' strange times: {np.shape(filter_e)[0]-np.shape(self.tof_e)[0]}')
|
||||||
|
|
||||||
def read_event_stream(self):
|
|
||||||
self.tof_e = np.array(self.hdf['/entry1/Amor/detector/data/event_time_offset'][:])/1.e9
|
|
||||||
self.pixelID_e = np.array(self.hdf['/entry1/Amor/detector/data/event_id'][:], dtype=np.int64)
|
|
||||||
self.dataPacket_p = np.array(self.hdf['/entry1/Amor/detector/data/event_index'][:], dtype=np.uint64)
|
|
||||||
self.dataPacketTime_p = np.array(self.hdf['/entry1/Amor/detector/data/event_time_zero'][:], dtype=np.int64)
|
|
||||||
if self.config.monitorType in ['auto', 'p']:
|
|
||||||
try:
|
|
||||||
self.currentTime = np.array(self.hdf['entry1/Amor/detector/proton_current/time'][:], dtype=np.int64)
|
|
||||||
self.current = np.array(self.hdf['entry1/Amor/detector/proton_current/value'][:,0], dtype=float)
|
|
||||||
if len(self.current)>4:
|
|
||||||
self.config.monitorType = 'p'
|
|
||||||
else:
|
|
||||||
self.config.monitorType = 't'
|
|
||||||
except(KeyError, IndexError):
|
|
||||||
self.config.monitorType = 't'
|
|
||||||
else:
|
|
||||||
self.config.monitorType = 't'
|
|
||||||
#TODO: protonMonitor
|
|
||||||
|
|
||||||
def read_individual_header(self):
|
def read_individual_header(self):
|
||||||
self.chopperDistance = float(np.take(self.hdf['entry1/Amor/chopper/pair_separation'], 0))
|
self.chopperDistance = float(np.take(self.hdf['entry1/Amor/chopper/pair_separation'], 0))
|
||||||
@@ -396,19 +383,39 @@ class AmorData:
|
|||||||
self.chopperDetectorDistance = self.detectorDistance-float(np.take(self.hdf['entry1/Amor/chopper/distance'], 0))
|
self.chopperDetectorDistance = self.detectorDistance-float(np.take(self.hdf['entry1/Amor/chopper/distance'], 0))
|
||||||
self.tofCut = const.lamdaCut*self.chopperDetectorDistance/const.hdm*1.e-13
|
self.tofCut = const.lamdaCut*self.chopperDetectorDistance/const.hdm*1.e-13
|
||||||
|
|
||||||
|
polarizationConfigs = ['undefined', 'unpolarized', 'po', 'mo', 'op', 'pp', 'mp', 'om', 'pm', 'mm']
|
||||||
try:
|
try:
|
||||||
self.mu = float(np.take(self.hdf['/entry1/Amor/master_parameters/mu/value'], 0))
|
self.mu = float(np.take(self.hdf['/entry1/Amor/instrument_control_parameters/mu'], 0))
|
||||||
self.nu = float(np.take(self.hdf['/entry1/Amor/master_parameters/nu/value'], 0))
|
self.nu = float(np.take(self.hdf['/entry1/Amor/instrument_control_parameters/nu'], 0))
|
||||||
self.kap = float(np.take(self.hdf['/entry1/Amor/master_parameters/kap/value'], 0))
|
self.kap = float(np.take(self.hdf['/entry1/Amor/instrument_control_parameters/kap'], 0))
|
||||||
self.kad = float(np.take(self.hdf['/entry1/Amor/master_parameters/kad/value'], 0))
|
self.kad = float(np.take(self.hdf['/entry1/Amor/instrument_control_parameters/kad'], 0))
|
||||||
self.div = float(np.take(self.hdf['/entry1/Amor/master_parameters/div/value'], 0))
|
self.div = float(np.take(self.hdf['/entry1/Amor/instrument_control_parameters/div'], 0))
|
||||||
self.chopperSpeed = float(np.take(self.hdf['/entry1/Amor/chopper/rotation_speed/value'], 0))
|
self.ch1TriggerPhase = float(np.take(self.hdf['/entry1/Amor/chopper/ch1_trigger_phase'], 0))
|
||||||
self.chopperPhase = float(np.take(self.hdf['/entry1/Amor/chopper/phase/value'], 0))
|
self.ch2TriggerPhase = float(np.take(self.hdf['/entry1/Amor/chopper/ch2_trigger_phase'], 0))
|
||||||
|
try:
|
||||||
|
chopperTriggerTime = float(self.hdf['entry1/Amor/chopper/ch2_trigger/event_time_zero'][2])\
|
||||||
|
- float(self.hdf['entry1/Amor/chopper/ch2_trigger/event_time_zero'][1])
|
||||||
|
self.tau = int(1e-6*chopperTriggerTime/2+0.5)*(1e-3)
|
||||||
|
self.chopperSpeed = 30/self.tau
|
||||||
|
chopperTriggerTimeDiff = float(self.hdf['entry1/Amor/chopper/ch2_trigger/event_time_offset'][2])
|
||||||
|
chopperTriggerPhase = 180e-9*chopperTriggerTimeDiff/self.tau
|
||||||
|
#TODO: check the next line
|
||||||
|
self.chopperPhase = chopperTriggerPhase + self.ch1TriggerPhase - self.ch2TriggerPhase
|
||||||
|
except(KeyError, IndexError):
|
||||||
|
logging.debug(' chopper speed and phase taken from .hdf file')
|
||||||
|
self.chopperSpeed = float(np.take(self.hdf['/entry1/Amor/chopper/rotation_speed'], 0))
|
||||||
|
self.chopperPhase = float(np.take(self.hdf['/entry1/Amor/chopper/phase'], 0))
|
||||||
|
self.tau = 30/self.chopperSpeed
|
||||||
|
try:
|
||||||
|
polarizationConfigLabel = int(self.hdf['/entry1/Amor/polarization/configuration/value'][0])
|
||||||
|
except(KeyError, IndexError):
|
||||||
|
polarizationConfigLabel = 0
|
||||||
|
self.polarizationConfig = polarizationConfigs[polarizationConfigLabel]
|
||||||
|
logging.debug(f' polarization configuration: {self.polarizationConfig} (index {polarizationConfigLabel} (index {polarizationConfigLabel}))')
|
||||||
except(KeyError, IndexError):
|
except(KeyError, IndexError):
|
||||||
logging.warning(" using parameters from nicos cache")
|
logging.warning(" using parameters from nicos cache")
|
||||||
year_date = str(self.start_date).replace('-', '/', 1)
|
year_date = str(self.start_date).replace('-', '/', 1)
|
||||||
#cachePath = '/home/amor/nicosdata/amor/cache/'
|
# TODO: check new cache pathes
|
||||||
#cachePath = '/home/nicos/amorcache/'
|
|
||||||
cachePath = '/home/amor/cache/'
|
cachePath = '/home/amor/cache/'
|
||||||
value = str(subprocess.getoutput(f'/usr/bin/grep "value" {cachePath}nicos-mu/{year_date}')).split('\t')[-1]
|
value = str(subprocess.getoutput(f'/usr/bin/grep "value" {cachePath}nicos-mu/{year_date}')).split('\t')[-1]
|
||||||
self.mu = float(value)
|
self.mu = float(value)
|
||||||
@@ -422,21 +429,35 @@ class AmorData:
|
|||||||
self.div = float(value)
|
self.div = float(value)
|
||||||
value = str(subprocess.getoutput(f'/usr/bin/grep "value" {cachePath}nicos-ch1_speed/{year_date}')).split('\t')[-1]
|
value = str(subprocess.getoutput(f'/usr/bin/grep "value" {cachePath}nicos-ch1_speed/{year_date}')).split('\t')[-1]
|
||||||
self.chopperSpeed = float(value)
|
self.chopperSpeed = float(value)
|
||||||
self.chopperPhase = self.config.chopperPhase
|
value = str(subprocess.getoutput(f'/usr/bin/grep "value" {cachePath}nicos-chopper_phase/{year_date}')).split('\t')[-1]
|
||||||
self.tau = 30. / self.chopperSpeed
|
self.chopperPhase = float(value)
|
||||||
|
value = str(subprocess.getoutput(f'/usr/bin/grep "value" {cachePath}nicos-ch1_trigger_phase/{year_date}')).split('\t')[-1]
|
||||||
|
self.ch1TriggerPhase = float(value)
|
||||||
|
value = str(subprocess.getoutput(f'/usr/bin/grep "value" {cachePath}nicos-ch2_trigger_phase/{year_date}')).split('\t')[-1]
|
||||||
|
self.ch2TriggerPhase = float(value)
|
||||||
|
|
||||||
|
self.tau = 30. / self.chopperSpeed
|
||||||
|
|
||||||
|
logging.debug(f' tau = {self.tau} s')
|
||||||
if self.config.muOffset:
|
if self.config.muOffset:
|
||||||
|
logging.debug(f' set muOffset = {self.config.muOffset}')
|
||||||
self.mu += self.config.muOffset
|
self.mu += self.config.muOffset
|
||||||
if self.config.mu:
|
if self.config.mu:
|
||||||
|
logging.debug(f' replaced mu = {self.mu} with {self.config.mu}')
|
||||||
self.mu = self.config.mu
|
self.mu = self.config.mu
|
||||||
if self.config.nu:
|
if self.config.nu:
|
||||||
|
logging.debug(f' replaced nu = {self.nu} with {self.config.nu}')
|
||||||
self.nu = self.config.nu
|
self.nu = self.config.nu
|
||||||
|
if self.config.chopperPhaseOffset:
|
||||||
|
logging.debug(f' replaced ch1TriggerPhase = {self.ch1TriggerPhase} with {self.config.chopperPhaseOffset}')
|
||||||
|
self.ch1TriggerPhase = self.config.chopperPhaseOffset
|
||||||
|
|
||||||
# extract start time as unix time, adding UTC offset of 1h to time string
|
# extract start time as unix time, adding UTC offset of 1h to time string
|
||||||
self.fileDate = datetime.fromisoformat( self.hdf['/entry1/start_time'][0].decode('utf-8')+"+01:00" )
|
dz = datetime.fromisoformat(self.hdf['/entry1/start_time'][0].decode('utf-8'))
|
||||||
self.startTime = np.int64( (self.fileDate.timestamp() ) * 1e9 )
|
self.fileDate=dz.replace(tzinfo=AMOR_LOCAL_TIMEZONE)
|
||||||
if self.seriesStartTime is None:
|
#self.startTime = np.int64( (self.fileDate.timestamp() ) * 1e9 )
|
||||||
self.seriesStartTime = self.startTime
|
#if self.seriesStartTime is None:
|
||||||
|
# self.seriesStartTime = self.startTime
|
||||||
|
|
||||||
def read_header_info(self):
|
def read_header_info(self):
|
||||||
# read general information and first data set
|
# read general information and first data set
|
||||||
|
|||||||
@@ -29,10 +29,10 @@ class Defaults:
|
|||||||
thetaRange = [-12., 12.]
|
thetaRange = [-12., 12.]
|
||||||
thetaRangeR = [-0.75, 0.75]
|
thetaRangeR = [-0.75, 0.75]
|
||||||
yRange = [11, 41]
|
yRange = [11, 41]
|
||||||
qzRange = [0.005, 0.30]
|
qzRange = [0.005, 0.51]
|
||||||
chopperSpeed = 500
|
chopperSpeed = 500
|
||||||
chopperPhase = -13.5
|
chopperPhase = 0.0
|
||||||
chopperPhaseOffset = 7
|
chopperPhaseOffset = -9.1
|
||||||
muOffset = 0
|
muOffset = 0
|
||||||
mu = 0
|
mu = 0
|
||||||
nu = 0
|
nu = 0
|
||||||
@@ -52,6 +52,7 @@ class ReaderConfig:
|
|||||||
class ExperimentConfig:
|
class ExperimentConfig:
|
||||||
incidentAngle: str
|
incidentAngle: str
|
||||||
chopperPhase: float
|
chopperPhase: float
|
||||||
|
chopperSpeed: float
|
||||||
yRange: Tuple[float, float]
|
yRange: Tuple[float, float]
|
||||||
lambdaRange: Tuple[float, float]
|
lambdaRange: Tuple[float, float]
|
||||||
qzRange: Tuple[float, float]
|
qzRange: Tuple[float, float]
|
||||||
|
|||||||
@@ -22,7 +22,7 @@ class AmorReduction:
|
|||||||
self.header = Header()
|
self.header = Header()
|
||||||
self.header.reduction.call = config.call_string()
|
self.header.reduction.call = config.call_string()
|
||||||
|
|
||||||
self.monitorUnit = {'n': 'cnts', 'p': 'mC', 't': 's'}
|
self.monitorUnit = {'n': 'cnts', 'p': 'mC', 't': 's', 'auto': 'pulses'}
|
||||||
|
|
||||||
def reduce(self):
|
def reduce(self):
|
||||||
if not os.path.exists(f'{self.output_config.outputPath}'):
|
if not os.path.exists(f'{self.output_config.outputPath}'):
|
||||||
@@ -178,11 +178,11 @@ class AmorReduction:
|
|||||||
interval = self.reduction_config.timeSlize[0]
|
interval = self.reduction_config.timeSlize[0]
|
||||||
try:
|
try:
|
||||||
start = self.reduction_config.timeSlize[1]
|
start = self.reduction_config.timeSlize[1]
|
||||||
except:
|
except IndexError:
|
||||||
start = 0
|
start = 0
|
||||||
try:
|
try:
|
||||||
stop = self.reduction_config.timeSlize[2]
|
stop = self.reduction_config.timeSlize[2]
|
||||||
except:
|
except IndexError:
|
||||||
stop = wallTime_e[-1]
|
stop = wallTime_e[-1]
|
||||||
# make overwriting log lines possible by removing newline at the end
|
# make overwriting log lines possible by removing newline at the end
|
||||||
#logging.StreamHandler.terminator = "\r"
|
#logging.StreamHandler.terminator = "\r"
|
||||||
@@ -195,10 +195,16 @@ class AmorReduction:
|
|||||||
detZ_e = self.file_reader.detZ_e[filter_e]
|
detZ_e = self.file_reader.detZ_e[filter_e]
|
||||||
filter_m = np.where((time<pulseTimeS) & (pulseTimeS<time+interval), True, False)
|
filter_m = np.where((time<pulseTimeS) & (pulseTimeS<time+interval), True, False)
|
||||||
self.monitor = np.sum(self.file_reader.monitorPerPulse[filter_m])
|
self.monitor = np.sum(self.file_reader.monitorPerPulse[filter_m])
|
||||||
logging.info(f' {ti:<4d} {time:5.0f} {self.monitor:7.2f} {self.monitorUnit[self.experiment_config.monitorType]}')
|
logging.info(f' {ti:<4d} {time:6.0f} {self.monitor:7.2f} {self.monitorUnit[self.experiment_config.monitorType]}')
|
||||||
|
|
||||||
qz_lz, qx_lz, ref_lz, err_lz, res_lz, lamda_lz, theta_lz, int_lz, mask_lz = self.project_on_lz(
|
qz_lz, qx_lz, ref_lz, err_lz, res_lz, lamda_lz, theta_lz, int_lz, mask_lz = self.project_on_lz(
|
||||||
self.file_reader, self.norm_lz, self.normAngle, lamda_e, detZ_e)
|
self.file_reader, self.norm_lz, self.normAngle, lamda_e, detZ_e)
|
||||||
|
try:
|
||||||
|
ref_lz *= self.reduction_config.scale[i]
|
||||||
|
err_lz *= self.reduction_config.scale[i]
|
||||||
|
except IndexError:
|
||||||
|
ref_lz *= self.reduction_config.scale[-1]
|
||||||
|
err_lz *= self.reduction_config.scale[-1]
|
||||||
q_q, R_q, dR_q, dq_q = self.project_on_qz(qz_lz, ref_lz, err_lz, res_lz, self.norm_lz, mask_lz)
|
q_q, R_q, dR_q, dq_q = self.project_on_qz(qz_lz, ref_lz, err_lz, res_lz, self.norm_lz, mask_lz)
|
||||||
|
|
||||||
filter_q = np.where((self.experiment_config.qzRange[0]<q_q) & (q_q<self.experiment_config.qzRange[1]),
|
filter_q = np.where((self.experiment_config.qzRange[0]<q_q) & (q_q<self.experiment_config.qzRange[1]),
|
||||||
@@ -265,7 +271,7 @@ class AmorReduction:
|
|||||||
scale = 1.
|
scale = 1.
|
||||||
R_q /= scale
|
R_q /= scale
|
||||||
dR_q /= scale
|
dR_q /= scale
|
||||||
logging.debug(f' scaling factor = {scale}')
|
logging.info(f' scaling factor = {1/scale}')
|
||||||
|
|
||||||
return R_q, dR_q
|
return R_q, dR_q
|
||||||
|
|
||||||
@@ -345,19 +351,23 @@ class AmorReduction:
|
|||||||
lamda_e = fromHDF.lamda_e
|
lamda_e = fromHDF.lamda_e
|
||||||
detZ_e = fromHDF.detZ_e
|
detZ_e = fromHDF.detZ_e
|
||||||
self.normMonitor = np.sum(fromHDF.monitorPerPulse)
|
self.normMonitor = np.sum(fromHDF.monitorPerPulse)
|
||||||
self.norm_lz, bins_l, bins_z = np.histogram2d(lamda_e, detZ_e, bins = (self.grid.lamda(), self.grid.z()))
|
norm_lz, bins_l, bins_z = np.histogram2d(lamda_e, detZ_e, bins = (self.grid.lamda(), self.grid.z()))
|
||||||
self.norm_lz = np.where(self.norm_lz>2, self.norm_lz, np.nan)
|
norm_lz = np.where(norm_lz>2, norm_lz, np.nan)
|
||||||
# correct for the SM reflectivity
|
if self.reduction_config.normalisationMethod == 'd':
|
||||||
lamda_l = self.grid.lamda()
|
# direct reference => invert map vertically
|
||||||
theta_z = self.normAngle + fromHDF.delta_z
|
self.norm_lz = np.flip(norm_lz, 1)
|
||||||
lamda_lz = (self.grid.lz().T*lamda_l[:-1]).T
|
else:
|
||||||
theta_lz = self.grid.lz()*theta_z
|
# correct for reference sm reflectivity
|
||||||
qz_lz = 4.0*np.pi * np.sin(np.deg2rad(theta_lz)) / lamda_lz
|
lamda_l = self.grid.lamda()
|
||||||
Rsm_lz = np.ones(np.shape(qz_lz))
|
theta_z = self.normAngle + fromHDF.delta_z
|
||||||
Rsm_lz = np.where(qz_lz>0.0217, 1-(qz_lz-0.0217)*(0.0625/0.0217), Rsm_lz)
|
lamda_lz = (self.grid.lz().T*lamda_l[:-1]).T
|
||||||
# TODO: introduce variable for `m` and propably for the decay
|
theta_lz = self.grid.lz()*theta_z
|
||||||
Rsm_lz = np.where(qz_lz>0.0217*5, np.nan, Rsm_lz)
|
qz_lz = 4.0*np.pi * np.sin(np.deg2rad(theta_lz)) / lamda_lz
|
||||||
self.norm_lz = self.norm_lz / Rsm_lz
|
# TODO: introduce variable for `m` and propably for the slope
|
||||||
|
Rsm_lz = np.ones(np.shape(qz_lz))
|
||||||
|
Rsm_lz = np.where(qz_lz>0.0217, 1-(qz_lz-0.0217)*(0.0625/0.0217), Rsm_lz)
|
||||||
|
Rsm_lz = np.where(qz_lz>0.0217*5, np.nan, Rsm_lz)
|
||||||
|
self.norm_lz = norm_lz / Rsm_lz
|
||||||
|
|
||||||
if len(lamda_e) > 1e6:
|
if len(lamda_e) > 1e6:
|
||||||
with open(n_path, 'wb') as fh:
|
with open(n_path, 'wb') as fh:
|
||||||
@@ -372,21 +382,20 @@ class AmorReduction:
|
|||||||
# projection on lambda-z-grid
|
# projection on lambda-z-grid
|
||||||
lamda_l = self.grid.lamda()
|
lamda_l = self.grid.lamda()
|
||||||
alphaF_z = fromHDF.nu - fromHDF.mu + fromHDF.delta_z
|
alphaF_z = fromHDF.nu - fromHDF.mu + fromHDF.delta_z
|
||||||
|
# TODO: implement various methods to obtain alpha_i.
|
||||||
#if self.experiment_config.incidentAngle == 'alphaF':
|
#if self.experiment_config.incidentAngle == 'alphaF':
|
||||||
|
# # for specular reflectometry with a highly divergent beam
|
||||||
# alphaF_z = fromHDF.nu - fromHDF.mu + fromHDF.delta_z
|
# alphaF_z = fromHDF.nu - fromHDF.mu + fromHDF.delta_z
|
||||||
#elif self.experiment_config.incidentAngle == 'nu':
|
#elif self.experiment_config.incidentAngle == 'nu':
|
||||||
|
# # for specular reflectometry, using kappa nad nu but ignoring mu
|
||||||
# alphaF_z = (fromHDF.nu + fromHDF.delta_z + fromHDF.kap + fromHDF.kad) / 2.
|
# alphaF_z = (fromHDF.nu + fromHDF.delta_z + fromHDF.kap + fromHDF.kad) / 2.
|
||||||
#else:
|
#else:
|
||||||
|
# # using kappa, for a collimated incoming beam
|
||||||
# pass
|
# pass
|
||||||
lamda_lz = (self.grid.lz().T*lamda_l[:-1]).T
|
lamda_lz = (self.grid.lz().T*lamda_l[:-1]).T
|
||||||
alphaF_lz = self.grid.lz()*alphaF_z
|
alphaF_lz = self.grid.lz()*alphaF_z
|
||||||
|
|
||||||
thetaN_z = fromHDF.delta_z + normAngle
|
|
||||||
thetaN_lz = np.ones(np.shape(norm_lz))*thetaN_z
|
|
||||||
thetaN_lz = np.where(np.absolute(thetaN_lz)>5e-3, thetaN_lz, np.nan)
|
|
||||||
|
|
||||||
mask_lz = np.where(np.isnan(norm_lz), False, True)
|
mask_lz = np.where(np.isnan(norm_lz), False, True)
|
||||||
mask_lz = np.logical_and(mask_lz, np.where(np.absolute(thetaN_lz)>5e-3, True, False))
|
|
||||||
mask_lz = np.logical_and(mask_lz, np.where(np.absolute(alphaF_lz)>5e-3, True, False))
|
mask_lz = np.logical_and(mask_lz, np.where(np.absolute(alphaF_lz)>5e-3, True, False))
|
||||||
if self.reduction_config.thetaRangeR[1]<12:
|
if self.reduction_config.thetaRangeR[1]<12:
|
||||||
t0 = fromHDF.nu - fromHDF.mu
|
t0 = fromHDF.nu - fromHDF.mu
|
||||||
@@ -424,21 +433,24 @@ class AmorReduction:
|
|||||||
|
|
||||||
if self.reduction_config.normalisationMethod == 'o':
|
if self.reduction_config.normalisationMethod == 'o':
|
||||||
logging.debug(' assuming an overilluminated sample and correcting for the angle of incidence')
|
logging.debug(' assuming an overilluminated sample and correcting for the angle of incidence')
|
||||||
|
thetaN_z = fromHDF.delta_z + normAngle
|
||||||
|
thetaN_lz = np.ones(np.shape(norm_lz))*thetaN_z
|
||||||
|
thetaN_lz = np.where(np.absolute(thetaN_lz)>5e-3, thetaN_lz, np.nan)
|
||||||
|
mask_lz = np.logical_and(mask_lz, np.where(np.absolute(thetaN_lz)>5e-3, True, False))
|
||||||
ref_lz = (int_lz * np.absolute(thetaN_lz)) / (norm_lz * np.absolute(thetaF_lz))
|
ref_lz = (int_lz * np.absolute(thetaN_lz)) / (norm_lz * np.absolute(thetaF_lz))
|
||||||
elif self.reduction_config.normalisationMethod == 'u':
|
elif self.reduction_config.normalisationMethod == 'u':
|
||||||
logging.debug(' assuming an underilluminated sample and ignoring the angle of incidence')
|
logging.debug(' assuming an underilluminated sample and ignoring the angle of incidence')
|
||||||
ref_lz = (int_lz / norm_lz)
|
ref_lz = (int_lz / norm_lz)
|
||||||
elif self.reduction_config.normalisationMethod == 'd':
|
elif self.reduction_config.normalisationMethod == 'd':
|
||||||
logging.debug(' assuming direct beam for normalisation and ignoring the angle of incidence')
|
logging.debug(' assuming direct beam for normalisation and ignoring the angle of incidence')
|
||||||
norm_lz = np.flip(norm_lz,1)
|
|
||||||
ref_lz = (int_lz / norm_lz)
|
ref_lz = (int_lz / norm_lz)
|
||||||
else:
|
else:
|
||||||
logging.error('unknown normalisation method! Use [u], [o] or [d]')
|
logging.error('unknown normalisation method! Use [u]nder, [o]ver or [d]irect illumination')
|
||||||
ref_lz = (int_lz * np.absolute(thetaN_lz)) / (norm_lz * np.absolute(thetaF_lz))
|
ref_lz = (int_lz / norm_lz)
|
||||||
if self.monitor > 1e-6 :
|
if self.monitor > 1e-6 :
|
||||||
ref_lz *= self.normMonitor / self.monitor
|
ref_lz *= self.normMonitor / self.monitor
|
||||||
else:
|
else:
|
||||||
logging.warning(' too small monitor value for normalisation -> ignoring monitors')
|
logging.info(' too small monitor value for normalisation -> ignoring monitors')
|
||||||
err_lz = ref_lz * np.sqrt( 1/(int_lz+.1) + 1/norm_lz )
|
err_lz = ref_lz * np.sqrt( 1/(int_lz+.1) + 1/norm_lz )
|
||||||
|
|
||||||
# TODO: allow for non-ideal Delta lambda / lambda (rather than 2.2%)
|
# TODO: allow for non-ideal Delta lambda / lambda (rather than 2.2%)
|
||||||
|
|||||||
@@ -1,716 +0,0 @@
|
|||||||
__version__ = '2024-03-30'
|
|
||||||
|
|
||||||
import os
|
|
||||||
import sys
|
|
||||||
import subprocess
|
|
||||||
import h5py
|
|
||||||
import glob
|
|
||||||
import numpy as np
|
|
||||||
import argparse
|
|
||||||
import matplotlib.pyplot as plt
|
|
||||||
import matplotlib as mpl
|
|
||||||
import time
|
|
||||||
import logging
|
|
||||||
from datetime import datetime
|
|
||||||
|
|
||||||
#==============================================================================
|
|
||||||
#==============================================================================
|
|
||||||
class Detector:
|
|
||||||
def __init__(self):
|
|
||||||
self.nBlades = 14 # number of active blades in the detector
|
|
||||||
angle = np.deg2rad( 5.1 ) # deg angle of incidence of the beam on the blades (def: 5.1)
|
|
||||||
self.dZ = 4.0 * np.sin(angle) # mm height-distance of neighboring pixels on one blade
|
|
||||||
self.dX = 4.0 * np.cos(angle) # mm depth-distance of neighboring pixels on one blace
|
|
||||||
self.bladeZ = 10.7 # mm distance between detector blades (consistent with nu!)
|
|
||||||
self.zero = 0.5 * self.nBlades * self.bladeZ # mm vertical center of the detector
|
|
||||||
|
|
||||||
#==============================================================================
|
|
||||||
def pixel2quantity():
|
|
||||||
det = Detector()
|
|
||||||
nPixel = 64 * 32 * det.nBlades
|
|
||||||
pixelID = np.arange(nPixel)
|
|
||||||
(bladeNr, bPixel) = np.divmod(pixelID, 64*32)
|
|
||||||
(bZ, bY) = np.divmod(bPixel, 64)
|
|
||||||
z = det.zero - bladeNr * det.bladeZ - bZ * det.dZ
|
|
||||||
x = (31 - bZ) * det.dX
|
|
||||||
bladeAngle = np.rad2deg( 2. * np.arcsin(0.5*det.bladeZ / detectorDistance) )
|
|
||||||
delta = (det.nBlades/2. - bladeNr) * bladeAngle - np.rad2deg( np.arctan(bZ*det.dZ / ( detectorDistance + bZ * det.dX) ) )
|
|
||||||
dZ = bladeNr * 32 + bZ
|
|
||||||
quantity = np.vstack((dZ.T, bY.T, delta.T, x.T)).T
|
|
||||||
|
|
||||||
return quantity
|
|
||||||
|
|
||||||
#==============================================================================
|
|
||||||
def analyse_ev(event_e, tof_e, yMin, yMax, thetaMin, thetaMax):
|
|
||||||
|
|
||||||
data_e = np.zeros((len(event_e), 9), dtype=float)
|
|
||||||
|
|
||||||
# data_e column description:
|
|
||||||
# 0: wall time / s
|
|
||||||
# 1: pixelID
|
|
||||||
# 2: z on detector
|
|
||||||
# 3: y on detector
|
|
||||||
# 4: delta / deg = angle on detector
|
|
||||||
# 5: path within detector / mm
|
|
||||||
# 6: lambda / angstrom
|
|
||||||
# 7: theta / deg
|
|
||||||
# 8: q_z / angstrom^-1
|
|
||||||
|
|
||||||
data_e[:,0] = tof_e[:]
|
|
||||||
data_e[:,1] = event_e[:]
|
|
||||||
|
|
||||||
# filter 'strange' tof times > 2 tau
|
|
||||||
if True:
|
|
||||||
filter_e = (data_e[:,0] <= 2*tau)
|
|
||||||
#print(event_e[~filter_e])
|
|
||||||
#print(data_e[~filter_e,0])
|
|
||||||
data_e = data_e[filter_e,:]
|
|
||||||
if np.shape(filter_e)[0]-np.shape(data_e)[0] > 0.5 and verbous:
|
|
||||||
logging.warning(f'## strange times: {np.shape(filter_e)[0]-np.shape(data_e)[0]}')
|
|
||||||
|
|
||||||
pixelLookUp = pixel2quantity()
|
|
||||||
data_e[:,2:6] = pixelLookUp[np.int_(data_e[:,1])-1,:]
|
|
||||||
|
|
||||||
#================================
|
|
||||||
|
|
||||||
# filter y range
|
|
||||||
filter_e = (yMin <= data_e[:,3]) & (data_e[:,3] <= yMax)
|
|
||||||
data_e = data_e[filter_e,:]
|
|
||||||
|
|
||||||
# correct tof for beam size effect at chopper
|
|
||||||
data_e[:,0] -= ( data_e[:,4] / 180. ) * tau
|
|
||||||
|
|
||||||
# effective flight path length
|
|
||||||
#data_e[:,5] = chopperDetectorDistance + data_e[:,5]
|
|
||||||
|
|
||||||
# calculate lambda
|
|
||||||
hdm = 6.626176e-34/1.674928e-27 # h / m
|
|
||||||
data_e[:,6] = 1.e13 * data_e[:,0] * hdm / ( chopperDetectorDistance + data_e[:,5] )
|
|
||||||
|
|
||||||
# theta
|
|
||||||
data_e[:,7] = nu - mu + data_e[:,4]
|
|
||||||
|
|
||||||
# gravity compensation
|
|
||||||
data_e[:,7] += np.rad2deg( np.arctan( 3.07e-10 * ( detectorDistance + data_e[:,5]) * data_e[:,6] * data_e[:,6] ) )
|
|
||||||
|
|
||||||
# filter theta range
|
|
||||||
filter_l = (thetaMin <= data_e[:,7]) & (data_e[:,7] <= thetaMax)
|
|
||||||
data_e = data_e[filter_l,:]
|
|
||||||
|
|
||||||
# q_z
|
|
||||||
data_e[:,8] = 4*np.pi * np.sin( np.deg2rad( data_e[:,7] ) ) / data_e[:,6]
|
|
||||||
|
|
||||||
# filter q_z range
|
|
||||||
#filter_e = (qMin < data_e[:,6]) & (data_e[:,6] < qMax)
|
|
||||||
#data_e = data_e[filter_e,:]
|
|
||||||
|
|
||||||
return data_e
|
|
||||||
|
|
||||||
#==============================================================================
|
|
||||||
class Meta:
|
|
||||||
# AMOR hdf dataset with associated properties from metadata
|
|
||||||
def __init__(self, fileName):
|
|
||||||
self.fileName = fileName
|
|
||||||
|
|
||||||
fh = h5py.File(fileName, 'r', swmr=True)
|
|
||||||
|
|
||||||
# for processing
|
|
||||||
|
|
||||||
self.chopperDistance = float(np.take(fh['/entry1/Amor/chopper/pair_separation'], 0)) # mm
|
|
||||||
# the following is the distance from the sample to the detector entry window, not to the center of rotation
|
|
||||||
self.detectorDistance = float(np.take(fh['/entry1/Amor/detector/transformation/distance'], 0)) # mm
|
|
||||||
self.chopperDetectorDistance = self.detectorDistance - float(np.take(fh['entry1/Amor/chopper/distance'], 0)) # mm
|
|
||||||
|
|
||||||
self.lamdaCut = 2.5 # Aa
|
|
||||||
|
|
||||||
startDate = str(fh['/entry1/start_time'][0].decode('utf-8'))
|
|
||||||
self.startDate = datetime.strptime(startDate, '%Y-%m-%d %H:%M:%S')
|
|
||||||
startDate = datetime.timestamp(self.startDate)
|
|
||||||
self.countingTime = float(np.take(fh['/entry1/Amor/detector/data/event_time_zero'], -1))/1e9 - startDate
|
|
||||||
# not exact for low rates
|
|
||||||
|
|
||||||
ka0 = 0.245 # given inclination of the beam after the Selene guide
|
|
||||||
|
|
||||||
year_date = str(datetime.today()).split(' ')[0].replace("-", "/", 1)
|
|
||||||
|
|
||||||
# deside from where to take the control paralemters
|
|
||||||
try:
|
|
||||||
self.mu = float(np.take(fh['/entry1/Amor/master_parameters/mu/value'], 0))
|
|
||||||
self.nu = float(np.take(fh['/entry1/Amor/master_parameters/nu/value'], 0))
|
|
||||||
self.kap = float(np.take(fh['/entry1/Amor/master_parameters/kap/value'], 0))
|
|
||||||
self.kad = float(np.take(fh['/entry1/Amor/master_parameters/kad/value'], 0))
|
|
||||||
self.div = float(np.take(fh['/entry1/Amor/master_parameters/div/value'], 0))
|
|
||||||
chSp = float(np.take(fh['/entry1/Amor/chopper/rotation_speed/value'], 0))
|
|
||||||
self.chPh = float(np.take(fh['/entry1/Amor/chopper/phase/value'], 0))
|
|
||||||
except (KeyError, IndexError):
|
|
||||||
logging.warning(f" using parameters from nicos cache")
|
|
||||||
#cachePath = '/home/amor/nicosdata/amor/cache/'
|
|
||||||
cachePath = '/home/nicos/amorcache/'
|
|
||||||
value = str(subprocess.getoutput('/usr/bin/grep "value" '+cachePath+'nicos-mu/'+year_date)).split('\t')[-1]
|
|
||||||
self.mu = float(value)
|
|
||||||
value = str(subprocess.getoutput('/usr/bin/grep "value" '+cachePath+'nicos-nu/'+year_date)).split('\t')[-1]
|
|
||||||
self.nu = float(value)
|
|
||||||
value = str(subprocess.getoutput('/usr/bin/grep "value" '+cachePath+'nicos-kap/'+year_date)).split('\t')[-1]
|
|
||||||
self.kap = float(value)
|
|
||||||
value = str(subprocess.getoutput('/usr/bin/grep "value" '+cachePath+'nicos-kad/'+year_date)).split('\t')[-1]
|
|
||||||
self.kad = float(value)
|
|
||||||
value = str(subprocess.getoutput('/usr/bin/grep "value" '+cachePath+'nicos-div/'+year_date)).split('\t')[-1]
|
|
||||||
self.div = float(value)
|
|
||||||
value = str(subprocess.getoutput('/usr/bin/grep "value" '+cachePath+'nicos-ch1_speed/'+year_date)).split('\t')[-1]
|
|
||||||
chSp = float(value)
|
|
||||||
self.chPh = np.nan
|
|
||||||
|
|
||||||
if chSp:
|
|
||||||
self.tau = 30. / chSp
|
|
||||||
else:
|
|
||||||
self.tau = 0
|
|
||||||
|
|
||||||
try: # not yet correctly implemented in nexus template
|
|
||||||
spin = str(fh['/entry1/polarizer/spin_flipper/spin'][0].decode('utf-8'))
|
|
||||||
if spin == "b'p'":
|
|
||||||
self.spin = 'p'
|
|
||||||
elif spin == "b'm'":
|
|
||||||
self.spin = 'm'
|
|
||||||
elif spin == "b'up'":
|
|
||||||
self.spin = 'p'
|
|
||||||
elif spin == "b'down'":
|
|
||||||
self.spin = 'm'
|
|
||||||
elif spin == '?':
|
|
||||||
self.spin = '?'
|
|
||||||
else:
|
|
||||||
self.spin = 'n'
|
|
||||||
except (KeyError, IndexError):
|
|
||||||
self.spin = '?'
|
|
||||||
|
|
||||||
fh.close()
|
|
||||||
|
|
||||||
#==============================================================================
|
|
||||||
def resolveNumber(dataPath, ident):
|
|
||||||
if ident == '0' or '-' in ident[0]:
|
|
||||||
try:
|
|
||||||
nnr = int(ident)
|
|
||||||
except:
|
|
||||||
logging.error("ERROR: '{}' is no valid file identifier!".format(ident))
|
|
||||||
fileNames = glob.glob(dataPath+'/*.hdf')
|
|
||||||
fileNames.sort()
|
|
||||||
fileName = fileNames[nnr-1]
|
|
||||||
fileName = fileName.split('/')[-1]
|
|
||||||
fileNumber = fileName.split('n')[1].split('.')[0].lstrip('0')
|
|
||||||
else:
|
|
||||||
fileNumber = ident
|
|
||||||
|
|
||||||
return fileNumber
|
|
||||||
|
|
||||||
#==============================================================================
|
|
||||||
def fileNameCreator(dataPath, ident):
|
|
||||||
clas = commandLineArgs()
|
|
||||||
ident=str(ident)
|
|
||||||
try:
|
|
||||||
nnr = int(ident)
|
|
||||||
except:
|
|
||||||
logging.error("ERROR: '{}' is no valid file identifier!".format(ident))
|
|
||||||
|
|
||||||
if nnr <= 0 :
|
|
||||||
fileName = glob.glob(dataPath+'/*.hdf')[nnr-1]
|
|
||||||
fileName = fileName.split('/')[-1]
|
|
||||||
else:
|
|
||||||
fileName = f'amor{clas.year}n{ident:>06s}'
|
|
||||||
|
|
||||||
fileName = fileName.split('.')[0]
|
|
||||||
fileName = fileName+'.hdf'
|
|
||||||
fileName = dataPath+fileName
|
|
||||||
|
|
||||||
fileNumber = fileName.split('n')[-1].split('.')[0].lstrip('0')
|
|
||||||
|
|
||||||
return fileName, fileNumber
|
|
||||||
|
|
||||||
#==============================================================================
|
|
||||||
class PlotSelection:
|
|
||||||
|
|
||||||
def headline(self, fileNumber, totalCounts):
|
|
||||||
headLine = "#{} \u03bc={:>1.2f} \u03bd={:>1.2f} {:>12,} cts {:>8.1f} s".format(fileNumber, mu+5e-3, nu+5e-3, totalCounts, countingTime)
|
|
||||||
return headLine
|
|
||||||
|
|
||||||
# grids
|
|
||||||
|
|
||||||
def y_grid(self):
|
|
||||||
y_grid = np.arange(yMin, yMax+1, 1)
|
|
||||||
return y_grid
|
|
||||||
|
|
||||||
def lamda_grid(self):
|
|
||||||
dldl = 0.005 # Delta lambda / lambda
|
|
||||||
lMin = max(2, lamdaMin)
|
|
||||||
lamda_grid = lMin*(1+dldl)**np.arange(int(np.log(lamdaMax/lMin)/np.log(1+dldl)+1))
|
|
||||||
return lamda_grid
|
|
||||||
|
|
||||||
def theta_grid(self):
|
|
||||||
det = Detector()
|
|
||||||
|
|
||||||
bladeAngle = np.rad2deg( 2. * np.arcsin(0.5*det.bladeZ / detectorDistance) )
|
|
||||||
blade_grid = np.arctan( np.arange(33) * det.dZ / ( detectorDistance + np.arange(33) * det.dX) )
|
|
||||||
blade_grid = np.rad2deg(blade_grid)
|
|
||||||
stepWidth = blade_grid[1] - blade_grid[0]
|
|
||||||
blade_grid = blade_grid - 0.2 * stepWidth
|
|
||||||
|
|
||||||
delta_grid = []
|
|
||||||
for b in np.arange(det.nBlades-1):
|
|
||||||
delta_grid = np.concatenate((delta_grid, blade_grid), axis=None)
|
|
||||||
blade_grid = blade_grid + bladeAngle
|
|
||||||
delta_grid = delta_grid[delta_grid<blade_grid[0]-0.5*stepWidth]
|
|
||||||
delta_grid = np.concatenate((delta_grid, blade_grid), axis=None)
|
|
||||||
|
|
||||||
theta_grid = nu - mu - np.flip(delta_grid) + 0.5*det.nBlades * bladeAngle
|
|
||||||
|
|
||||||
theta_grid = theta_grid[theta_grid>=thetaMin]
|
|
||||||
theta_grid = theta_grid[theta_grid<=thetaMax]
|
|
||||||
|
|
||||||
return theta_grid
|
|
||||||
|
|
||||||
def q_grid(self):
|
|
||||||
dqdq = 0.010 # Delta q_z / q_z
|
|
||||||
q_grid = qMin*(1.+dqdq)**np.arange(int(np.log(qMax/qMin)/np.log(1+dqdq)))
|
|
||||||
return q_grid
|
|
||||||
|
|
||||||
# create PNG with several plots
|
|
||||||
|
|
||||||
def all(self, fileNumber, arg, data_e):
|
|
||||||
#cmap='gist_earth'
|
|
||||||
cmap = mpl.cm.gnuplot(np.arange(256))
|
|
||||||
cmap[:1, :] = np.array([256/256, 255/256, 236/256, 1])
|
|
||||||
cmap = mpl.colors.ListedColormap(cmap, name='myColorMap', N=cmap.shape[0])
|
|
||||||
I_yt, bins_y, bins_t = np.histogram2d(data_e[:,3], data_e[:,7], bins = (self.y_grid(), self.theta_grid()))
|
|
||||||
I_lt, bins_l, bins_t = np.histogram2d(data_e[:,6], data_e[:,7], bins = (self.lamda_grid(), self.theta_grid()))
|
|
||||||
I_q, bins_q = np.histogram(data_e[:,8], bins = self.q_grid())
|
|
||||||
q_lim = 4*np.pi*np.array([ max( np.sin(self.theta_grid()[0]*np.pi/180.)/self.lamda_grid()[-1] , 1e-4 ),
|
|
||||||
min( np.sin(self.theta_grid()[-1]*np.pi/180.)/self.lamda_grid()[0] , 0.03 )])
|
|
||||||
if arg == 'lin':
|
|
||||||
#vmin = min(np.min(I_lt), np.min(I_yt))
|
|
||||||
vmin = 0
|
|
||||||
vmax = max(5, np.max(I_lt), np.max(I_yt))
|
|
||||||
else:
|
|
||||||
vmin = 0
|
|
||||||
vmax = max(1, np.log(np.max(I_lt)+.1)/np.log(10)*1.05, np.log(np.max(I_yt)+.1)/np.log(10)*1.05)
|
|
||||||
# I(y, theta)
|
|
||||||
fig = plt.figure()
|
|
||||||
axs = fig.add_gridspec(2,3)
|
|
||||||
myt = fig.add_subplot(axs[0,0])
|
|
||||||
myt.set_title('detector area')
|
|
||||||
myt.set_xlabel('$y ~/~ \\mathrm{bins}$')
|
|
||||||
myt.set_ylabel('$\\theta ~/~ \\mathrm{deg}$')
|
|
||||||
if arg == 'lin':
|
|
||||||
myt.pcolormesh(bins_y, bins_t, I_yt.T, cmap=cmap, vmin=vmin, vmax=vmax)
|
|
||||||
else:
|
|
||||||
myt.pcolormesh(bins_y, bins_t, (np.log(I_yt + 5.e-1) / np.log(10.)).T, cmap=cmap, vmin=vmin, vmax=vmax)
|
|
||||||
# I(lambda, theta)
|
|
||||||
mlt = fig.add_subplot(axs[0,1:])
|
|
||||||
mlt.set_title('angle- and energy disperse')
|
|
||||||
mlt.set_xlabel('$\\lambda ~/~ \\mathrm{\\AA}$')
|
|
||||||
mlt.axes.get_yaxis().set_visible(False)
|
|
||||||
if arg == 'lin':
|
|
||||||
cb = mlt.pcolormesh(bins_l, bins_t, I_lt.T, cmap=cmap, vmin=vmin, vmax=vmax)
|
|
||||||
else:
|
|
||||||
cb = mlt.pcolormesh(bins_l, bins_t, (np.log(I_lt + 5.e-1) / np.log(10.)).T, cmap=cmap, vmin=vmin, vmax=vmax)
|
|
||||||
# I(q_z)
|
|
||||||
lqz = fig.add_subplot(axs[1,:])
|
|
||||||
lqz.set_title('$I(q_z)$')
|
|
||||||
lqz.set_ylabel('$\\log_{10}(\\mathrm{cnts})$')
|
|
||||||
lqz.set_xlabel('$q_z~/~\\mathrm{\\AA}^{-1}$')
|
|
||||||
lqz.set_xlim(q_lim)
|
|
||||||
if arg == 'lin':
|
|
||||||
plt.plot(bins_q[:-1], I_q, color='blue', linewidth=0.5)
|
|
||||||
else:
|
|
||||||
err_q = np.sqrt(I_q+1)
|
|
||||||
low_q = np.where(I_q-err_q>0, I_q-err_q, 0.1)
|
|
||||||
plt.fill_between(bins_q[:-1], np.log(low_q)/np.log(10), np.log(I_q+err_q/2)/np.log(10), color='lightgrey')
|
|
||||||
plt.plot(bins_q[:-1], np.log(I_q+5e-1)/np.log(10), color='blue', linewidth=0.5)
|
|
||||||
lw = I_q[ ((q_lim[0] < bins_q[:-1]) & (bins_q[:-1] < q_lim[1])) ].min()
|
|
||||||
plt.ylim(max(-0.1, np.log(lw+.1)/np.log(10)-0.1), )
|
|
||||||
#
|
|
||||||
headline = self.headline(fileNumber, np.shape(data_e)[0])
|
|
||||||
plt.title(headline, loc='left', y=2.8, c='r')
|
|
||||||
fig.colorbar(cb, ax=mlt)
|
|
||||||
plt.subplots_adjust(hspace=0.6, wspace=0.1)
|
|
||||||
plt.savefig(output, format='png', dpi=150)
|
|
||||||
|
|
||||||
# create PNG with one plot
|
|
||||||
|
|
||||||
def Iyz(self, fileNumber, arg, data_e):
|
|
||||||
det = Detector()
|
|
||||||
cmap = mpl.cm.gnuplot(np.arange(256))
|
|
||||||
cmap[:1, :] = np.array([256/256, 255/256, 236/256, 1])
|
|
||||||
cmap = mpl.colors.ListedColormap(cmap, name='myColorMap', N=cmap.shape[0])
|
|
||||||
z_grid = np.arange(det.nBlades*32)
|
|
||||||
I_yz, bins_y, bins_z = np.histogram2d(data_e[:,3], data_e[:,2], bins = (self.y_grid(), z_grid))
|
|
||||||
if arg == 'log':
|
|
||||||
vmin = 0
|
|
||||||
vmax = max(1, np.log(np.max(I_yt)+.1)/np.log(10)*1.05)
|
|
||||||
plt.pcolormesh(bins_y[:],bins_z[:],(np.log(I_yz+6e-1)/np.log(10)).T, cmap=cmap, vmin=vmin, vmax=vmax)
|
|
||||||
else:
|
|
||||||
plt.pcolormesh(bins_y[:],bins_z[:],I_yz.T, cmap=cmap)
|
|
||||||
plt.xlabel('$y ~/~ \\mathrm{bins}$')
|
|
||||||
plt.ylabel('$z ~/~ \\mathrm{bins}$')
|
|
||||||
headline = self.headline(fileNumber, np.shape(data_e)[0])
|
|
||||||
plt.title(headline, loc='left', y=1.0, c='r')
|
|
||||||
plt.colorbar()
|
|
||||||
plt.savefig(output, format='png', dpi=150)
|
|
||||||
|
|
||||||
def Ilt(self, fileNumber, arg, data_e) :
|
|
||||||
cmap = mpl.cm.gnuplot(np.arange(256))
|
|
||||||
cmap[:1, :] = np.array([256/256, 255/256, 236/256, 1])
|
|
||||||
cmap = mpl.colors.ListedColormap(cmap, name='myColorMap', N=cmap.shape[0])
|
|
||||||
I_lt, bins_l, bins_t = np.histogram2d(data_e[:,6], data_e[:,7], bins = (self.lamda_grid(), self.theta_grid()))
|
|
||||||
if arg == 'log':
|
|
||||||
vmax = max(1, np.log(np.max(I_lt)+.1)/np.log(10)*1.05 )
|
|
||||||
plt.pcolormesh(bins_l, bins_t, (np.log(I_lt+I_lt[I_lt>0].min()/2)/np.log(10.)).T, cmap=cmap, vmin=0, vmax=vmax)
|
|
||||||
else :
|
|
||||||
vmax = max(np.max(I_lt), 5)
|
|
||||||
plt.pcolormesh(bins_l, bins_t, I_lt.T, cmap=cmap, vmin=0, vmax=vmax)
|
|
||||||
plt.xlim(0,)
|
|
||||||
#if np.min(bins_t) > 0.01 :
|
|
||||||
# plt.ylim(bottom=0)
|
|
||||||
#else:
|
|
||||||
# plt.ylim(bottom=np.min(bins_t))
|
|
||||||
#if np.max(bins_t) < -0.01:
|
|
||||||
# plt.ylim(top=0)
|
|
||||||
#else:
|
|
||||||
# plt.ylim(top=np.max(bins_t))
|
|
||||||
plt.xlim(lamdaMin, lamdaMax)
|
|
||||||
plt.ylim(thetaMin, thetaMax)
|
|
||||||
plt.xlabel('$\\lambda ~/~ \\mathrm{\\AA}$')
|
|
||||||
plt.ylabel('$\\theta ~/~ \\mathrm{deg}$')
|
|
||||||
headline = self.headline(fileNumber, np.shape(data_e)[0])
|
|
||||||
plt.title(headline, loc='left', y=1.0, c='r')
|
|
||||||
plt.colorbar()
|
|
||||||
plt.savefig(output, format='png', dpi=150)
|
|
||||||
|
|
||||||
def Itz(self, fileNumber, arg, data_e):
    """Plot the intensity map I(t, z) — time of flight versus detector pixel
    row — and save it to the global `output`.

    Parameters
    ----------
    fileNumber : identifier of the raw file, used for the plot headline
    arg        : 'log' for a log10 colour scale, anything else for linear
    data_e     : event array; column 0 is tof / s, column 2 is the z pixel row
    """
    det = Detector()
    # gnuplot colormap with the lowest bin forced to a near-white background
    cmap = mpl.cm.gnuplot(np.arange(256))
    cmap[:1, :] = np.array([256/256, 255/256, 236/256, 1])
    cmap = mpl.colors.ListedColormap(cmap, name='myColorMap', N=cmap.shape[0])
    # one chopper period in 0.5 ms time bins; one bin per detector pixel row
    time_grid = np.arange(0, tau, 0.0005)
    z_grid = np.arange(det.nBlades*32+1)
    I_tz, bins_t, bins_z = np.histogram2d(data_e[:,0], data_e[:,2], bins = (time_grid, z_grid))
    if arg == 'log':
        vmax = max(2., np.log(np.max(I_tz)+.1)/np.log(10)*1.05 )
        plt.pcolormesh(bins_t, bins_z, (np.log(I_tz+5.e-1)/np.log(10.)).T, cmap=cmap, vmin=0, vmax=vmax)
    else :
        vmax = max(np.max(I_tz), 5)
        plt.pcolormesh(bins_t, bins_z, I_tz.T, cmap=cmap, vmin=0, vmax=vmax)
    # (previously wrapped in a dead `if True:` guard)
    plt.xlim(0,)
    plt.ylim(0,)
    plt.xlabel('$t ~/~ \\mathrm{s}$')
    plt.ylabel('$z$ pixel row')
    headline = self.headline(fileNumber, np.shape(data_e)[0])
    plt.title(headline, loc='left', y=1.0, c='r')
    plt.colorbar()
    plt.savefig(output, format='png', dpi=150)
|
|
||||||
|
|
||||||
def Iq(self, fileNumber, arg, data_e):
    """Plot the reflected intensity I(q_z) with its statistical error band.

    Parameters
    ----------
    fileNumber : identifier of the raw file, used for the plot headline
    arg        : 'log' for a log10 intensity axis, anything else for linear
    data_e     : event array; column 8 is q_z / 1/Aa
    """
    I_q, bins_q = np.histogram(data_e[:,8], bins = self.q_grid())
    # Poisson-like error estimate; the +1 avoids zero-width bands for empty bins
    err_q = np.sqrt(I_q+1)
    # q range actually reachable with the present theta and lambda grids,
    # clipped to 4*pi * [1e-4, 0.03]
    q_lim = 4*np.pi*np.array([ max( np.sin(self.theta_grid()[0]*np.pi/180.)/self.lamda_grid()[-1] , 1e-4 ),
                               min( np.sin(self.theta_grid()[-1]*np.pi/180.)/self.lamda_grid()[0] , 0.03 )])
    if arg == 'log':
        # lower edge of the error band, floored at 0.1 to keep log() finite
        low_q = np.where(I_q-err_q>0, I_q-err_q, 0.1)
        plt.fill_between(bins_q[:-1], np.log(low_q)/np.log(10), np.log(I_q+err_q/2)/np.log(10), color='lightgrey')
        plt.plot(bins_q[:-1], np.log(I_q+5e-1)/np.log(10), color='blue', linewidth=0.5)
        # lift the lower y limit to just below the smallest count inside the
        # q window; guard against an empty selection, where .min() would raise
        in_window = I_q[ ((q_lim[0] < bins_q[:-1]) & (bins_q[:-1] < q_lim[1])) ]
        if in_window.size:
            plt.ylim(max(-0.1, np.log(in_window.min()+.1)/np.log(10)-0.1), )
        plt.ylabel('$\\log_{10}(\\mathrm{cnts})$')
    else:
        plt.plot(bins_q[:-1], I_q, color='blue', linewidth=0.5)
        # fix: the linear plot was previously labelled log10(cnts)
        plt.ylabel('$\\mathrm{cnts}$')
    plt.xlabel('$q_z ~/~ \\mathrm{\\AA}^{-1}$')
    plt.xlim(q_lim)
    headline = self.headline(fileNumber, np.shape(data_e)[0])
    plt.title(headline, loc='left', y=1.0, c='r')
    plt.savefig(output, format='png', dpi=150)
|
|
||||||
|
|
||||||
def Il(self, fileNumber, arg, data_e):
    """Plot the wavelength spectrum I(lambda) and save it to the global `output`.

    Parameters
    ----------
    fileNumber : identifier of the raw file, used for the plot headline
    arg        : 'lin' for a linear intensity axis, anything else for log10
    data_e     : event array; column 6 is lambda / Aa
    """
    counts, edges = np.histogram(data_e[:,6], bins = self.lamda_grid())
    if arg == 'lin':
        curve = counts
        label = '$I ~/~ \\mathrm{cnts}$'
    else:
        # +0.5 keeps the logarithm finite for empty bins
        curve = np.log(counts+5.e-1)/np.log(10.)
        label = '$\\log_{10} I ~/~ \\mathrm{cnts}$'
    plt.plot(edges[:-1], curve)
    plt.ylabel(label)
    plt.xlabel('$\\lambda ~/~ \\mathrm{\\AA}$')
    headline = self.headline(fileNumber, np.shape(data_e)[0])
    plt.title(headline, loc='left', y=1.0, c='r')
    plt.savefig(output, format='png', dpi=150)
|
|
||||||
|
|
||||||
def It(self, fileNumber, arg, data_e):
    """Plot counts versus theta (theta on the vertical axis) and save the
    figure to the global `output`.

    Parameters
    ----------
    fileNumber : identifier of the raw file, used for the plot headline
    arg        : unused for this plot type
    data_e     : event array; column 7 is theta / deg
    """
    counts, edges = np.histogram(data_e[:,7], bins = self.theta_grid())
    # counts on x, theta on y, so the orientation matches the 2D maps
    plt.plot(counts, edges[:-1])
    plt.xlabel('$\\mathrm{cnts}$')
    plt.ylabel('$\\theta ~/~ \\mathrm{deg}$')
    headline = self.headline(fileNumber, np.shape(data_e)[0])
    plt.title(headline, loc='left', y=1.0, c='r')
    plt.savefig(output, format='png', dpi=150)
|
|
||||||
|
|
||||||
def tof(self, fileNumber, arg, data_e):
    """Plot the raw time-of-flight spectrum, drawn three times shifted by
    0, 1 and 2 chopper periods, and save it to the global `output`.

    Parameters
    ----------
    fileNumber : identifier of the raw file, used for the plot headline
    arg        : 'lin' for a linear intensity axis, anything else for log10
    data_e     : event array; column 0 is tof / s
    """
    # 0.5 ms bins over 1.3 chopper periods
    time_grid = np.arange(0, 1.3*tau, 0.0005)
    I_t, bins_t = np.histogram(data_e[:,0], bins = time_grid)
    if arg == 'lin':
        y = I_t
        # fix: the linear plot was previously labelled 'log(counts)'
        plt.ylabel('counts')
    else:
        # +0.5 keeps the logarithm finite for empty bins
        y = np.log(I_t+5.e-1)/np.log(10.)
        plt.ylabel('log(counts)')
    # same spectrum at +tau, 0 and +2*tau, in the original drawing order
    # (preserves the per-trace colour assignment)
    plt.plot(bins_t[:-1]+tau, y)
    plt.plot(bins_t[:-1], y)
    plt.plot(bins_t[:-1]+2*tau, y)
    plt.xlabel('time / s')
    headline = self.headline(fileNumber, np.shape(data_e)[0])
    plt.title(headline, loc='left', y=1.0, c='r')
    plt.savefig(output, format='png')
|
|
||||||
|
|
||||||
#==============================================================================
|
|
||||||
def process(dataPath, ident, clas):
    """Read one raw event file, derive per-event quantities and produce the
    requested plot.

    Parameters
    ----------
    dataPath : directory containing the .hdf raw files
    ident    : file number or offset used to resolve the file name
    clas     : parsed command-line arguments (argparse.Namespace)

    Side effects: sets several module-level globals (wavelength/theta/q/y
    ranges, mu, nu, tau, distances, counting time, the open hdf handle `ev`)
    that the plotting methods read, and writes the plot to the file named by
    the global `output`.
    """
    #================================
    # constants
    hdm = 6.626176e-34/1.674928e-27 # h / m  (Planck constant / neutron mass)
    #================================
    # instrument specific parameters
    #================================
    global lamdaMin, lamdaMax, qMin, qMax, thetaMin, thetaMax, yMin, yMax
    # defaults
    lamdaCut = 2.5 # Aa used to reshuffle tof
    # data filtering and folding

    #================================
    # copy the command-line ranges into the module-level globals
    if clas.lambdaRange:
        lamdaMin = clas.lambdaRange[0]
        lamdaMax = clas.lambdaRange[1]
    else:
        # lamdaMax is derived further down from tau once it is known
        lamdaMin = lamdaCut

    chopperPhase = clas.chopperPhase
    # NOTE(review): this CLI value is overwritten below by the value derived
    # from tau and chopperPhase — confirm whether -T is meant to have effect
    tofOffset = clas.TOFOffset
    thetaMin = clas.thetaRange[0]
    thetaMax = clas.thetaRange[1]
    yMin = clas.yRange[0]
    yMax = clas.yRange[1]
    qMin = clas.qRange[0]
    qMax = clas.qRange[1]

    #================================
    # find and open input file
    global ev

    # seed row of zeros; NOTE(review): this spurious first row stays in
    # data_eSum after the append below
    data_eSum = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0]])
    sumTime = 0

    number = resolveNumber(dataPath, ident)
    fileName, fileNumber = fileNameCreator(dataPath, str(number))

    if verbous:
        logging.info('life_histogrammer processing file ->\033[1m {} \033[0m<-'.format(fileNumber))

    # retry up to 6 times (2 s apart) until the SWMR event stream has data
    for i in range(6):
        ev = h5py.File(fileName, 'r', swmr=True)
        try:
            # probe: raises until at least one event packet has been written
            ev['/entry1/Amor/detector/data/event_time_zero'][-1]
            break
        except (KeyError, IndexError):
            ev.close()
            if i < 5:
                if verbous:
                    print("no data yet, retrying ({}) ".format(10-2*i), end='\r')
                time.sleep(2)
                continue
            else:
                if verbous:
                    print("# time-out: no longer waiting for data!\a")
                return

    # get and process data
    meta = Meta(fileName)

    global mu, nu, tau

    # 99. is the CLI sentinel for "not given" — fall back to the file metadata
    if clas.mu < 98.:
        mu = clas.mu
    else:
        mu = meta.mu + clas.muOffset

    if clas.nu < 98.:
        nu = clas.nu
    else:
        nu = meta.nu

    # chopper period tau / s, either from the CLI speed (rpm) or from metadata
    if clas.chopperSpeed:
        tau = 30./ clas.chopperSpeed
    else:
        tau = meta.tau

    # NOTE(review): chPh is a local name never bound before this point, so the
    # lookup always raises and chPh always comes from meta — confirm whether a
    # global or CLI override was intended here
    try:
        chPh
    except NameError:
        chPh = meta.chPh
    spin = meta.spin

    global countingTime, detectorDistance, chopperDetectorDistance
    detectorDistance = meta.detectorDistance
    chopperDetectorDistance = meta.chopperDetectorDistance
    countingTime = meta.countingTime

    if verbous:
        logging.info(" mu = {:>4.2f} deg, nu = {:>4.2f} deg".format(mu, nu))
        if spin == 'u':
            logging.info(' spin <+|')
        elif spin == 'd':
            logging.info(' spin <-|')

    # if no -l range was given, derive lamdaMax from the chopper period
    try: lamdaMax
    except NameError: lamdaMax = lamdaMin + tau * hdm/chopperDetectorDistance * 1e13

    tofOffset = tau * chopperPhase / 180. # mismatch of chopper pulse and time-zero
    tofCut = lamdaCut * chopperDetectorDistance / hdm * 1.e-13 # tof of frame start

    # event-wise tof / s (file stores ns), shifted by the chopper offset
    tof_e = np.array(ev['/entry1/Amor/detector/data/event_time_offset'][:], dtype=np.uint64)/1.e9 + tofOffset # tof

    detPixelID_e = np.array(ev['/entry1/Amor/detector/data/event_id'][:], dtype=np.uint64) # pixel index

    # NOTE(review): dataPacket_p is read but never used below
    dataPacket_p = np.array(ev['/entry1/Amor/detector/data/event_index'][:], dtype=np.uint64) # data packet index

    # fold the tof values into the frame [tofCut, tau+tofCut]
    tof_e = np.where(tof_e<tofCut, tof_e+2.*tau, tof_e)
    tof_e = np.where(tof_e>tau+tofCut, tof_e-tau, tof_e)

    # per-event derived quantities (y, z, lambda, theta, q, ...) with filtering
    data_e = analyse_ev(detPixelID_e, tof_e, yMin, yMax, thetaMin, thetaMax)

    ev.close()

    data_eSum = np.append(data_eSum, data_e, axis=0)
    sumTime += countingTime

    if verbous:
        logging.info(" total counts = {} in {:6.1f} s".format(np.shape(data_e)[0], sumTime))

    #================================
    # plotting data
    # first -p token selects the plot method, optional second token its mode
    plotType = clas.plot[0]
    try:
        arg = clas.plot[1]
    except IndexError:
        arg = 'def'
    plott = PlotSelection()
    try:
        # dispatch to the PlotSelection method named like the plot type
        plotFunction = getattr(plott, plotType)
        plotFunction(fileNumber, arg, data_e)
        plt.close()
    except Exception as e:
        # NOTE(review): this also swallows genuine plotting errors, not only
        # unknown plot names — the original error is logged for diagnosis
        logging.error(f"ERROR: '{plotType}' is no known output format!")
        logging.error(f" original error: {e}")
|
|
||||||
|
|
||||||
#==============================================================================
|
|
||||||
def commandLineArgs():
    """Define and parse the command-line interface.

    Returns
    -------
    argparse.Namespace with all histogrammer options (same flags, defaults
    and help texts as before).
    """
    description = ("events2histogram reads the eventstream from an hdf raw file and "
                   "creates various histogrammed outputs or plots.")
    parser = argparse.ArgumentParser(description=description)

    parser.add_argument("-c", "--chopperSpeed", type=float,
                        help="chopper speed in rpm")
    parser.add_argument("-d", "--dataPath",
                        help="relative path to directory with .hdf files")
    parser.add_argument("-f", "--fileIdent", default='0',
                        help="file number or offset (if negative)")
    parser.add_argument("-l", "--lambdaRange", nargs=2, type=float,
                        help="wavelength range to be used")
    parser.add_argument("-M", "--muOffset", default=0., type=float,
                        help="mu offset")
    parser.add_argument("-m", "--mu", default=99., type=float,
                        help="value of mu")
    parser.add_argument("-n", "--nu", default=99., type=float,
                        help="value of nu")
    parser.add_argument("-P", "--chopperPhase", default=-5., type=float,
                        help="chopper phase offset")
    parser.add_argument("-p", "--plot", default=['all', 'def'], nargs='+',
                        help="select what to plot or write")
    parser.add_argument("-q", "--qRange", default=[0.005, 0.30], nargs=2, type=float,
                        help="q_z range")
    parser.add_argument("-T", "--TOFOffset", default=0.0, type=float,
                        help="TOF zero offset")
    parser.add_argument("-t", "--thetaRange", default=[-12., 12.], nargs=2, type=float,
                        help="theta range to be used")
    parser.add_argument("-Y", "--year", default=str(datetime.today()).split('-')[0],
                        help="year, the measurement was performed")
    parser.add_argument("-y", "--yRange", default=[0, 63], nargs=2, type=int,
                        help="detector y range to be used")

    return parser.parse_args()
|
|
||||||
|
|
||||||
#==============================================================================
|
|
||||||
def get_dataPath(clas):
    """Resolve the directory holding the .hdf raw files.

    Preference order: the -d command-line path, then ./raw, then ../raw.
    Exits the program with a message when nothing usable is found.
    """
    if clas.dataPath:
        path = clas.dataPath + '/'
        if not os.path.exists(path):
            sys.exit('# *** the directory "'+path+'" does not exist ***')
        return path

    # fall back to the conventional raw-data locations
    for candidate in ('./raw', '../raw'):
        if os.path.exists(candidate):
            return candidate + '/'

    sys.exit('# *** please provide the path to the .hdf data files (-d <path>, default is "./raw")')
|
|
||||||
|
|
||||||
#==============================================================================
|
|
||||||
def get_directDataPath(clas):
    """Derive the NICOS data directory for today's proposal on the AMOR host.

    Greps the current proposal number out of the nicos-exp cache file for
    today's date and builds the absolute data path from it. Exits when the
    resulting directory does not exist. Site-specific: relies on fixed
    /home/amor/... paths and /usr/bin/grep being present.
    """
    #dataPath = clas.dataPath + '/'
    # e.g. '2024' and '2024/05-17' from today's ISO date
    year = str(datetime.today()).split('-')[0]
    year_date = str(datetime.today()).split(' ')[0].replace("-", "/", 1)
    # NOTE(review): the [-1] takes only the LAST CHARACTER of the grep output
    # before splitting on quotes; splitting that single character can hardly
    # yield index 1 — presumably the [-1] was meant to select the last line.
    # Verify against the actual cache-file format.
    pNr = str(subprocess.getoutput('/usr/bin/grep "proposal\t" /home/amor/nicosdata/amor/cache/nicos-exp/'+year_date)[-1]).split('\'')[1]
    dataPath = '/home/amor/nicosdata/amor/data/'+year+'/'+pNr+'/'
    if not os.path.exists(dataPath):
        sys.exit('# *** the directory "'+dataPath+'" does not exist ***')

    return dataPath
|
|
||||||
|
|
||||||
#==============================================================================
|
|
||||||
def main():
    """Script entry point: parse arguments, configure logging and globals,
    then histogram and plot the selected raw file."""
    # module-level state read by process() and the PlotSelection methods
    global verbous, output, dataPath

    clas = commandLineArgs()

    # may sys.exit() with a message when no data directory can be found
    dataPath = get_dataPath(clas)
    logging.basicConfig(level=logging.INFO, format='# %(message)s')
    verbous = True
    # all plot methods write to this fixed file name
    output = 'life_plot.png'
    process(dataPath, clas.fileIdent, clas)
|
|
||||||
|
|
||||||
#==============================================================================
|
|
||||||
#==============================================================================
|
|
||||||
# run the live histogrammer when invoked as a script
if __name__ == "__main__":
    main()
|
|
||||||
|
|
||||||
|
|
||||||
@@ -1,4 +1,11 @@
|
|||||||
# -*- mode: python ; coding: utf-8 -*-
|
# -*- mode: python ; coding: utf-8 -*-
|
||||||
|
from PyInstaller.utils.hooks import collect_all
|
||||||
|
|
||||||
|
datas = []
|
||||||
|
binaries = []
|
||||||
|
hiddenimports = []
|
||||||
|
tmp_ret = collect_all('tzdata')
|
||||||
|
datas += tmp_ret[0]; binaries += tmp_ret[1]; hiddenimports += tmp_ret[2]
|
||||||
|
|
||||||
|
|
||||||
a = Analysis(
|
a = Analysis(
|
||||||
|
|||||||
Reference in New Issue
Block a user