All actions from the old file_reader are now implemented as new-style event transformations
This commit is contained in:
149
libeos/event_analysis.py
Normal file
149
libeos/event_analysis.py
Normal file
@@ -0,0 +1,149 @@
|
||||
"""
|
||||
Define an event data format that performs reduction actions, such as wavelength calculation, on a per-event basis.
|
||||
"""
|
||||
import numpy as np
|
||||
import logging
|
||||
|
||||
from typing import Tuple
|
||||
|
||||
from . import const
|
||||
from .file_reader import AmorEventData, EVENT_TYPE
|
||||
from .event_handling import EventDataAction
|
||||
from .helpers import filter_project_x
|
||||
from .instrument import Detector
|
||||
from .options import IncidentAngle
|
||||
from .header import Header
|
||||
|
||||
|
||||
# Structured datatypes used for event streams.
# ANA_EVENT_TYPE extends the raw event fields (tof/pixelID/wallTime) with
# per-event detector geometry and a validity mask.
ANA_EVENT_TYPE = np.dtype([
    ('tof', np.float64),
    ('pixelID', np.uint32),
    ('wallTime', np.int64),
    ('detZ', np.float64),
    ('detXdist', np.float64),
    ('delta', np.float64),
    ('mask', bool),
])

# FINAL_EVENT_TYPE adds the derived physical quantities (wavelength and
# momentum transfer components) to every analyzed event.  Built from
# ANA_EVENT_TYPE's field description so the shared fields stay in sync.
FINAL_EVENT_TYPE = np.dtype(ANA_EVENT_TYPE.descr + [
    ('lamda', np.float64),
    ('qz', np.float64),
    ('qx', np.float64),
])
|
||||
|
||||
|
||||
class AnalyzedEventData(EventDataAction):
    """Convert raw events (EVENT_TYPE) into analyzed events (ANA_EVENT_TYPE).

    Resolves each event's pixelID into detector coordinates (detZ, detXdist)
    and scattering-angle offset delta, and attaches a validity mask based on
    the accepted y-pixel range.
    """

    def __init__(self, yRange: Tuple[int, int]):
        # inclusive y-pixel window; events outside it are masked out by
        # filter_project_x
        self.yRange = yRange

    def perform_action(self, dataset: AmorEventData) ->None:
        """Replace dataset.data.events with an ANA_EVENT_TYPE recarray.

        Raises ValueError if the events are not raw EVENT_TYPE (i.e. another
        transformation already ran).  Also stores the per-blade delta angles
        on dataset.geometry.delta_z as a side effect.
        """
        d = dataset.data
        if d.events.dtype != EVENT_TYPE:
            raise ValueError("AnalyzeEventData only works on raw AmorEventData, this dataset has already been altered")
        pixelLookUp = self.resolve_pixels()
        # TODO: change numba implementation to use native pixelID type
        (detZ, detXdist, delta, mask) = filter_project_x(
                pixelLookUp, d.events.pixelID.astype(np.int64), self.yRange[0], self.yRange[1]
                )
        ana_events = np.recarray(d.events.shape, dtype=ANA_EVENT_TYPE)
        # copy old data (tof, pixelID, wallTime) field by field
        for field in d.events.dtype.fields.keys():
            ana_events[field] = d.events[field]
        # add analysis per event
        ana_events.detZ = detZ
        ana_events.detXdist = detXdist
        ana_events.delta = delta
        ana_events.mask = mask
        d.events = ana_events
        # resolve_pixels() above set self.delta_z; publish it on the geometry
        dataset.geometry.delta_z = self.delta_z

    def resolve_pixels(self):
        """Determine spatial coordinates and angles from pixel number.

        Returns an (nPixel, 4) array with columns (detYi, detZi, detX, delta):
        y index, z index, x position, and angular offset per pixelID.
        Side effect: stores delta for the detYi==1 stripe in self.delta_z.
        """
        nPixel = Detector.nWires * Detector.nStripes * Detector.nBlades
        pixelID = np.arange(nPixel)
        # pixelIDs are laid out blade-major: blade number, then wire, then stripe
        (bladeNr, bPixel) = np.divmod(pixelID, Detector.nWires * Detector.nStripes)
        (bZi, detYi) = np.divmod(bPixel, Detector.nStripes) # z index on blade, y index on detector
        detZi = bladeNr * Detector.nWires + bZi # z index on detector
        detX = bZi * Detector.dX # x position in detector
        # detZ = Detector.zero - bladeNr * Detector.bladeZ - bZi * Detector.dZ # z position on detector
        # angle subtended by one blade as seen from the sample position
        bladeAngle = np.rad2deg( 2. * np.arcsin(0.5*Detector.bladeZ / Detector.distance) )
        # angular offset of each pixel: blade contribution minus in-blade wire tilt
        delta = (Detector.nBlades/2. - bladeNr) * bladeAngle \
                - np.rad2deg( np.arctan(bZi*Detector.dZ / ( Detector.distance + bZi * Detector.dX) ) )
        # one delta value per z position (picked from the detYi==1 stripe)
        self.delta_z = delta[detYi==1]
        return np.vstack((detYi.T, detZi.T, detX.T, delta.T)).T
|
||||
|
||||
class TofTimeCorrection(EventDataAction):
    """Shift event time-of-flight to compensate for the chopper opening.

    Requires analyzed events (ANA_EVENT_TYPE); the correction uses either the
    per-event angular offset delta or the global kad angle.
    """

    def __init__(self, correct_chopper_opening: bool = True):
        # True: per-event correction via delta; False: single kad-based shift
        self.correct_chopper_opening = correct_chopper_opening

    def perform_action(self, dataset: AmorEventData) -> None:
        events = dataset.data.events
        if events.dtype != ANA_EVENT_TYPE:
            raise ValueError("TofTimeCorrection requires dataset with analyzed events, perform AnalyzedEventData first")

        if self.correct_chopper_opening:
            events.tof -= (events.delta / 180.) * dataset.timing.tau
        else:
            events.tof -= (dataset.geometry.kad / 180.) * dataset.timing.tau
|
||||
|
||||
class WavelengthAndQ(EventDataAction):
    """Compute per-event wavelength and momentum transfer (qz, and qx for
    the angle-dispersive mode), producing FINAL_EVENT_TYPE events.

    Requires analyzed events (ANA_EVENT_TYPE), i.e. AnalyzedEventData and
    typically the tof corrections must have run first.
    """

    def __init__(self, lambdaRange: Tuple[float, float], incidentAngle: IncidentAngle):
        # accepted wavelength window; events outside it are masked out
        self.lambdaRange = lambdaRange
        # selects how the final scattering angle is derived (alphaF / nu / other)
        self.incidentAngle = incidentAngle

    def perform_action(self, dataset: AmorEventData) ->None:
        """Replace dataset.data.events with a FINAL_EVENT_TYPE recarray
        carrying lamda, qz (and qx in the fallback branch)."""
        d = dataset.data
        if d.events.dtype != ANA_EVENT_TYPE:
            raise ValueError("WavelengthAndQ requires dataset with analyzed events, perform AnalyzedEventData first")

        # NOTE(review): lamdaMax is stored on self but not used in this
        # method — presumably read elsewhere (e.g. header/plotting); confirm.
        self.lamdaMax = const.lamdaCut+1.e13*dataset.timing.tau*const.hdm/(dataset.geometry.chopperDetectorDistance+124.)

        # lambda from tof and flight path (chopper-detector distance plus
        # per-event depth into the detector); 1e13 converts units via hdm
        lamda = (1.e13*const.hdm)*d.events.tof/(dataset.geometry.chopperDetectorDistance+d.events.detXdist)

        final_events = np.recarray(d.events.shape, dtype=FINAL_EVENT_TYPE)
        # copy old data field by field from the analyzed events
        for field in d.events.dtype.fields.keys():
            final_events[field] = d.events[field]
        # add analysis per event
        final_events.lamda = lamda
        final_events.mask &= (self.lambdaRange[0]<=lamda) & (lamda<=self.lambdaRange[1])

        # alpha_f
        # q_z
        # NOTE(review): qx is only assigned in the else branch below; in the
        # first two branches the qx field keeps uninitialized recarray memory
        # — confirm downstream code never reads qx in those modes.
        if self.incidentAngle == IncidentAngle.alphaF:
            alphaF_e = dataset.geometry.nu - dataset.geometry.mu + d.events.delta
            final_events.qz = 4*np.pi*(np.sin(np.deg2rad(alphaF_e))/lamda)
        elif self.incidentAngle == IncidentAngle.nu:
            alphaF_e = (dataset.geometry.nu + d.events.delta + dataset.geometry.kap + dataset.geometry.kad) / 2.
            final_events.qz = 4*np.pi*(np.sin(np.deg2rad(alphaF_e))/lamda)
        else:
            # general case: separate incident and final angles, giving both
            # qz and qx components
            alphaF_e = dataset.geometry.nu - dataset.geometry.mu + d.events.delta
            alphaI = dataset.geometry.kap + dataset.geometry.kad + dataset.geometry.mu
            final_events.qz = 2*np.pi * ((np.sin(np.deg2rad(alphaF_e)) + np.sin(np.deg2rad(alphaI)))/lamda)
            final_events.qx = 2*np.pi * ((np.cos(np.deg2rad(alphaF_e)) - np.cos(np.deg2rad(alphaI)))/lamda)

        dataset.data.events = final_events

    def update_header(self, header: Header):
        """Record the measurement scheme implied by the incident-angle mode."""
        if self.incidentAngle == IncidentAngle.alphaF:
            header.measurement_scheme = 'angle- and energy-dispersive'
        else:
            header.measurement_scheme = 'energy-dispersive'
|
||||
|
||||
class FilterQzRange(EventDataAction):
    """Mask events whose qz falls outside the accepted range.

    Requires fully analyzed events (FINAL_EVENT_TYPE).
    """

    def __init__(self, qzRange: Tuple[float, float]):
        self.qzRange = qzRange

    def perform_action(self, dataset: AmorEventData) -> None:
        events = dataset.data.events
        if events.dtype != FINAL_EVENT_TYPE:
            raise ValueError("FilterQzRange requires dataset with fully analyzed events, perform WavelengthAndQ first")

        qz_lo, qz_hi = self.qzRange
        # NOTE(review): an upper bound >= 0.5 appears to mean "no qz filter
        # requested" — confirm this sentinel against the option defaults.
        if qz_hi < 0.5:
            events.mask &= (qz_lo <= events.qz) & (events.qz <= qz_hi)
|
||||
|
||||
class ApplyMask(EventDataAction):
    """Drop every event whose mask flag is False.

    A no-op (with a debug log) when the event stream carries no mask field.
    """

    def perform_action(self, dataset: AmorEventData) -> None:
        data = dataset.data
        if 'mask' not in data.events.dtype.names:
            logging.debug("ApplyMask performed on dataset without mask")
            return

        total = data.events.shape[0]
        rejected = np.logical_not(data.events.mask).sum()
        logging.info(f' number of events: total = {total:7d}, '
                     f'filtered = {rejected:7d}')
        # keep only the events flagged as valid
        data.events = data.events[data.events.mask]
|
||||
@@ -9,6 +9,7 @@ from abc import ABC, abstractmethod
|
||||
from .header import Header
|
||||
from .options import MonitorType
|
||||
from .file_reader import AmorEventData
|
||||
from .helpers import merge_frames
|
||||
|
||||
class EventDataAction(ABC):
|
||||
"""
|
||||
@@ -16,8 +17,12 @@ class EventDataAction(ABC):
|
||||
Each action can optionally modify the header information.
|
||||
"""
|
||||
|
||||
def __call__(self, dataset: AmorEventData)->None:
|
||||
logging.debug(f" Enter action {self.__class__.__name__} on {dataset!r}")
|
||||
self.perform_action(dataset)
|
||||
|
||||
@abstractmethod
|
||||
def __call__(self, dataset: AmorEventData)->None: ...
|
||||
def perform_action(self, dataset: AmorEventData)->None: ...
|
||||
|
||||
def update_header(self, header:Header)->None:
|
||||
if hasattr(self, 'action_name'):
|
||||
@@ -27,7 +32,7 @@ class CorrectSeriesTime(EventDataAction):
|
||||
def __init__(self, seriesStartTime):
|
||||
self.seriesStartTime = np.int64(seriesStartTime)
|
||||
|
||||
def __call__(self, dataset: AmorEventData)->None:
|
||||
def perform_action(self, dataset: AmorEventData)->None:
|
||||
dataset.data.pulses.time -= self.seriesStartTime
|
||||
dataset.data.events.wallTime -= self.seriesStartTime
|
||||
dataset.data.proton_current.time -= self.seriesStartTime
|
||||
@@ -40,7 +45,7 @@ class AssociatePulseWithMonitor(EventDataAction):
|
||||
self.monitorType = monitorType
|
||||
self.lowCurrentThreshold = lowCurrentThreshold
|
||||
|
||||
def __call__(self, dataset: AmorEventData)->None:
|
||||
def perform_action(self, dataset: AmorEventData)->None:
|
||||
logging.debug(f' using monitor type {self.monitorType}')
|
||||
if self.monitorType in [MonitorType.proton_charge or MonitorType.debug]:
|
||||
monitorPerPulse = self.get_current_per_pulse(dataset.data.pulses.time,
|
||||
@@ -56,6 +61,23 @@ class AssociatePulseWithMonitor(EventDataAction):
|
||||
else: # pulses
|
||||
dataset.data.pulses.monitor = 1
|
||||
|
||||
if self.monitorType == MonitorType.debug:
|
||||
cpp, t_bins = np.histogram(dataset.data.events.wallTime, dataset.data.pulses.time)
|
||||
np.savetxt('tme.hst', np.vstack((dataset.data.pulses.time[:-1], cpp, dataset.data.pulses.monitor[:-1])).T)
|
||||
|
||||
if self.monitorType in [MonitorType.proton_charge or MonitorType.debug]:
|
||||
goodTimeS = dataset.data.pulses.time[dataset.data.pulses.monitor!=0]
|
||||
filter_e = np.isin(dataset.data.events.wallTime, goodTimeS)
|
||||
dataset.data.events = dataset.data.events[filter_e]
|
||||
logging.info(f' low-beam (<{self.lowCurrentThreshold} mC) rejected pulses: '
|
||||
f'{dataset.data.pulses.monitor.shape[0]-goodTimeS.shape[0]} '
|
||||
f'out of {dataset.data.pulses.monitor.shape[0]}')
|
||||
logging.info(f' with {filter_e.shape[0]-dataset.data.events.shape[0]} events')
|
||||
if goodTimeS.shape[0]:
|
||||
logging.info(f' average counts per pulse = {dataset.data.events.shape[0] / goodTimeS.shape[0]:7.1f}')
|
||||
else:
|
||||
logging.info(f' average counts per pulse = undefined')
|
||||
|
||||
@staticmethod
|
||||
def get_current_per_pulse(pulseTimeS, currentTimeS, currents):
|
||||
# add currents for early pulses and current time value after last pulse (j+1)
|
||||
@@ -69,9 +91,19 @@ class AssociatePulseWithMonitor(EventDataAction):
|
||||
pulseCurrentS[i] = currents[j]
|
||||
return pulseCurrentS
|
||||
|
||||
|
||||
class FilterStrangeTimes(EventDataAction):
|
||||
def __call__(self, dataset: AmorEventData)->None:
|
||||
def perform_action(self, dataset: AmorEventData)->None:
|
||||
filter_e = (dataset.data.events.tof<=2*dataset.timing.tau)
|
||||
dataset.data.events = dataset.data.events[filter_e]
|
||||
if filter_e.any():
|
||||
logging.warning(f' strange times: {filter_e.sum()}')
|
||||
if not filter_e.all():
|
||||
logging.warning(f' strange times: {np.logical_not(filter_e).sum()}')
|
||||
|
||||
class MergeFrames(EventDataAction):
    """Fold event times-of-flight into a single chopper frame."""

    def __init__(self, tofCut: float):
        # tof value below which events belong to the following frame
        self.tofCut = tofCut

    def perform_action(self, dataset: AmorEventData) -> None:
        timing = dataset.timing
        # frame offset: tof cut plus the chopper trigger/phase contribution
        total_offset = (self.tofCut +
                        timing.tau * (timing.ch1TriggerPhase + timing.chopperPhase/2)/180)
        events = dataset.data.events
        events.tof = merge_frames(events.tof, self.tofCut, timing.tau, total_offset)
|
||||
|
||||
@@ -2,6 +2,7 @@ import h5py
|
||||
import numpy as np
|
||||
import platform
|
||||
import logging
|
||||
import subprocess
|
||||
|
||||
from datetime import datetime
|
||||
from dataclasses import dataclass
|
||||
@@ -42,11 +43,7 @@ class AmorGeometry:
|
||||
detectorDistance: float
|
||||
chopperDetectorDistance: float
|
||||
|
||||
# Structured datatypes used for event streams
|
||||
EVENT_TYPE = np.dtype([('tof', np.float64),('pixelID', np.uint32), ('wallTime', np.int64)])
|
||||
PACKET_TYPE = np.dtype([('start_index', np.uint32), ('Time', np.int64)])
|
||||
PULSE_TYPE = np.dtype([('time', np.int64), ('monitor', np.float32)])
|
||||
PC_TYPE = np.dtype([('current', np.float32), ('time', np.int64)])
|
||||
delta_z: Optional[float] = None
|
||||
|
||||
@dataclass
|
||||
class AmorTiming:
|
||||
@@ -56,6 +53,12 @@ class AmorTiming:
|
||||
chopperPhase: float
|
||||
tau: float
|
||||
|
||||
# Structured datatypes used for event streams
|
||||
EVENT_TYPE = np.dtype([('tof', np.float64),('pixelID', np.uint32), ('wallTime', np.int64)])
|
||||
PACKET_TYPE = np.dtype([('start_index', np.uint32), ('Time', np.int64)])
|
||||
PULSE_TYPE = np.dtype([('time', np.int64), ('monitor', np.float32)])
|
||||
PC_TYPE = np.dtype([('current', np.float32), ('time', np.int64)])
|
||||
|
||||
@dataclass
|
||||
class AmorEventStream:
|
||||
events: np.recarray # EVENT_TYPE
|
||||
@@ -175,9 +178,21 @@ class AmorEventData:
|
||||
div = self._replace_if_missing('instrument_control_parameters/div', 'div', float)
|
||||
ch1TriggerPhase = self._replace_if_missing('chopper/ch1_trigger_phase', 'ch1_trigger_phase', float)
|
||||
ch2TriggerPhase = self._replace_if_missing('chopper/ch2_trigger_phase', 'ch2_trigger_phase', float)
|
||||
chopperSpeed = self._replace_if_missing('chopper/rotation_speed', 'chopper_phase', float)
|
||||
chopperPhase = self._replace_if_missing('chopper/phase', 'chopper_phase', float)
|
||||
tau = 30/chopperSpeed
|
||||
try:
|
||||
chopperTriggerTime = (float(self.hdf['entry1/Amor/chopper/ch2_trigger/event_time_zero'][7]) \
|
||||
-float(self.hdf['entry1/Amor/chopper/ch2_trigger/event_time_zero'][0])) \
|
||||
/7
|
||||
chopperTriggerTimeDiff = float(self.hdf['entry1/Amor/chopper/ch2_trigger/event_time_offset'][2])
|
||||
except (KeyError, IndexError):
|
||||
logging.debug(' chopper speed and phase taken from .hdf file')
|
||||
chopperSpeed = self._replace_if_missing('chopper/rotation_speed', 'chopper_phase', float)
|
||||
chopperPhase = self._replace_if_missing('chopper/phase', 'chopper_phase', float)
|
||||
tau = 30/chopperSpeed
|
||||
else:
|
||||
tau = int(1e-6*chopperTriggerTime/2+0.5)*(1e-3)
|
||||
chopperTriggerPhase = 180e-9*chopperTriggerTimeDiff/tau
|
||||
chopperSpeed = 30/tau
|
||||
chopperPhase = chopperTriggerPhase+ch1TriggerPhase-ch2TriggerPhase
|
||||
|
||||
self.geometry = AmorGeometry(mu, nu, kap, kad, div,
|
||||
chopperSeparation, detectorDistance, chopperDetectorDistance)
|
||||
@@ -186,17 +201,6 @@ class AmorEventData:
|
||||
polarizationConfigLabel = self._replace_if_missing('polarization/configuration/value', 'polarizer_config_label', int)
|
||||
polarizationConfig = fileio.Polarization(polarizationConfigs[polarizationConfigLabel])
|
||||
logging.debug(f' polarization configuration: {polarizationConfig} (index {polarizationConfigLabel})')
|
||||
# try:
|
||||
# chopperTriggerTime = (float(self.hdf['entry1/Amor/chopper/ch2_trigger/event_time_zero'][7]) \
|
||||
# -float(self.hdf['entry1/Amor/chopper/ch2_trigger/event_time_zero'][0])) \
|
||||
# /7
|
||||
# self.tau = int(1e-6*chopperTriggerTime/2+0.5)*(1e-3)
|
||||
# self.chopperSpeed = 30/self.tau
|
||||
# chopperTriggerTimeDiff = float(self.hdf['entry1/Amor/chopper/ch2_trigger/event_time_offset'][2])
|
||||
# chopperTriggerPhase = 180e-9*chopperTriggerTimeDiff/self.tau
|
||||
# # TODO: check the next line
|
||||
# except(KeyError, IndexError):
|
||||
# logging.debug(' chopper speed and phase taken from .hdf file')
|
||||
|
||||
|
||||
self.instrument_settings = fileio.InstrumentSettings(
|
||||
|
||||
Reference in New Issue
Block a user