Evaluating ToF to extract sub-frames and assign to correct neutron pulse not chopper pulse (#4)
All checks were successful
All checks were successful
Interpolate neutron times between chopper pulses and assign by ToF. If ToF too short (long wavelengths from previous pulse), assign them to the previous sub-pulse. Reviewed with Jochen Reviewed-on: #4 Co-authored-by: Artur Glavic <artur.glavic@psi.ch> Co-committed-by: Artur Glavic <artur.glavic@psi.ch>
This commit was merged in pull request #4.
This commit is contained in:
@@ -2,5 +2,5 @@
|
||||
Package to handle data reduction at AMOR instrument to be used by __main__.py script.
|
||||
"""
|
||||
|
||||
__version__ = '3.2.5'
|
||||
__version__ = '3.2.6'
|
||||
__date__ = '2026-03-10'
|
||||
|
||||
@@ -9,7 +9,7 @@ from typing import Tuple
|
||||
|
||||
from . import const
|
||||
from .event_data_types import EventDataAction, EventDatasetProtocol, append_fields, EVENT_BITMASKS
|
||||
from .helpers import filter_project_x, merge_frames, extract_walltime, add_log_to_pulses
|
||||
from .helpers import filter_project_x, merge_frames, extract_walltime, add_log_to_pulses, merge_frames_w_index
|
||||
from .instrument import Detector
|
||||
from .options import IncidentAngle
|
||||
from .header import Header
|
||||
@@ -25,8 +25,9 @@ class ExtractWalltime(EventDataAction):
|
||||
dataset.data.events = new_events
|
||||
|
||||
class MergeFrames(EventDataAction):
|
||||
def __init__(self, lamdaCut=None):
|
||||
def __init__(self, lamdaCut=None, extractNeutronPulses=False):
|
||||
self.lamdaCut=lamdaCut
|
||||
self.extractNeutronPulses=extractNeutronPulses
|
||||
|
||||
def perform_action(self, dataset: EventDatasetProtocol)->None:
|
||||
if self.lamdaCut is None:
|
||||
@@ -36,7 +37,34 @@ class MergeFrames(EventDataAction):
|
||||
tofCut = lamdaCut*dataset.geometry.chopperDetectorDistance/const.hdm*1e-13
|
||||
total_offset = (tofCut +
|
||||
dataset.timing.tau * (dataset.timing.ch1TriggerPhase + dataset.timing.chopperPhase/2)/180)
|
||||
dataset.data.events.tof = merge_frames(dataset.data.events.tof, tofCut, dataset.timing.tau, total_offset)
|
||||
if self.extractNeutronPulses and 'wallTime' in dataset.data.events.dtype.names:
|
||||
d = dataset.data
|
||||
# put events into precise sub-frame
|
||||
d.events.tof, subframes = merge_frames_w_index(d.events.tof, tofCut, dataset.timing.tau, total_offset)
|
||||
subframes = subframes.astype(int)
|
||||
# add a sub-pulse time 1-tau before and after each existing time
|
||||
utimes, uidxs = np.unique(d.events.wallTime, return_inverse=True)
|
||||
inter_times = np.empty(2*utimes.shape[0]+1, dtype=d.events.wallTime.dtype)
|
||||
tauns = dataset.timing.tau*1e9
|
||||
inter_times[0] = utimes[0]-tauns
|
||||
inter_times[1::2] = utimes
|
||||
inter_times[2::2] = utimes+tauns
|
||||
# use subframe indices to sort existing events into new times
|
||||
d.events.wallTime = inter_times[2*uidxs+subframes+1]
|
||||
# expand pulses array with additional sub-frames
|
||||
new_pulses = np.recarray(2*d.pulses.shape[0]+1, dtype=d.pulses.dtype)
|
||||
new_pulses[0] = d.pulses[0]
|
||||
new_pulses[1::2] = d.pulses
|
||||
new_pulses[2::2] = d.pulses
|
||||
new_pulses.time[0] = d.pulses.time[0]-tauns
|
||||
new_pulses.time[2::2] = d.pulses.time+tauns
|
||||
new_pulses.monitor /= 2.0 # ~preserve total monitor counts
|
||||
d.pulses = new_pulses
|
||||
else:
|
||||
if self.extractNeutronPulses:
|
||||
logging.error(" Trying to separate neutron pulses while wallTime is not extracted, yet!")
|
||||
dataset.data.events.tof = merge_frames(dataset.data.events.tof, tofCut, dataset.timing.tau, total_offset)
|
||||
|
||||
|
||||
|
||||
class AnalyzePixelIDs(EventDataAction):
|
||||
|
||||
@@ -46,6 +46,8 @@ EVENT_BITMASKS = {
|
||||
}
|
||||
|
||||
def append_fields(input: np.recarray, new_fields: List[Tuple[str, np.dtype]]):
|
||||
# TODO: This action is used often and time consuming as it runs len(flds) times over all indices.
|
||||
# Could only be faster if array is allocated in the beginning with all fields, less flexible.
|
||||
# add one or more fields to a recarray, numpy functions seem to fail
|
||||
flds = [(name, dtypei[0]) for name, dtypei in input.dtype.fields.items()]
|
||||
flds += new_fields
|
||||
|
||||
@@ -165,7 +165,9 @@ class ApplyMask(EventDataAction):
|
||||
self.bitmask_filter = bitmask_filter
|
||||
|
||||
def perform_action(self, dataset: EventDatasetProtocol) ->None:
|
||||
# TODO: why is this action time consuming?
|
||||
# TODO: Most time in test examples is spend here.
|
||||
# While the actions here are very simple, they act on a large array,
|
||||
# so even just comparison and indexing become time consuming.
|
||||
d = dataset.data
|
||||
pre_filter = d.events.shape[0]
|
||||
if logging.getLogger().level <= logging.DEBUG:
|
||||
|
||||
@@ -6,9 +6,13 @@ import numpy as np
|
||||
from .event_data_types import EventDatasetProtocol, append_fields
|
||||
|
||||
try:
|
||||
from .helpers_numba import merge_frames, extract_walltime, filter_project_x, calculate_derived_properties_focussing
|
||||
from .helpers_numba import merge_frames, extract_walltime, filter_project_x, \
|
||||
calculate_derived_properties_focussing, merge_frames_w_index
|
||||
except ImportError:
|
||||
from .helpers_fallback import merge_frames, extract_walltime, filter_project_x, calculate_derived_properties_focussing
|
||||
import logging
|
||||
logging.warning('Cannot import numba enhanced functions, is it installed?')
|
||||
from .helpers_fallback import merge_frames, extract_walltime, filter_project_x, \
|
||||
calculate_derived_properties_focussing, merge_frames_w_index
|
||||
|
||||
def add_log_to_pulses(key, dataset: EventDatasetProtocol):
|
||||
"""
|
||||
|
||||
@@ -8,6 +8,17 @@ def merge_frames(tof_e, tofCut, tau, total_offset):
|
||||
# tof shifted to 1 frame
|
||||
return np.remainder(tof_e-(tofCut-tau), tau)+total_offset
|
||||
|
||||
def merge_frames_w_index(tof_e, tofCut, tau, total_offset):
    """
    Variant of merge_frames that additionally reports, per event, which
    neutron pulse the event belongs to relative to its measured frame:
     0 - belongs to the frame it was measured in
    -1 - arrived in this frame but belongs to the previous neutron pulse
     1 - belongs to the second neutron pulse of the original frame
    """
    # merged ToF is identical to the plain merge_frames result
    merged_tof = merge_frames(tof_e, tofCut, tau, total_offset)
    # integer number of whole frames between the event's raw ToF and the cut
    pulse_index = (tof_e - tofCut) // tau
    return merged_tof, pulse_index
|
||||
|
||||
def extract_walltime(tof_e, dataPacket_p, dataPacketTime_p):
|
||||
output = np.empty(np.shape(tof_e)[0], dtype=np.int64)
|
||||
for i in range(len(dataPacket_p)-1):
|
||||
|
||||
@@ -11,6 +11,22 @@ def merge_frames(tof_e, tofCut, tau, total_offset):
|
||||
tof_e_out[ti] = ((tof_e[ti]-dt)%tau)+total_offset # tof shifted to 1 frame
|
||||
return tof_e_out
|
||||
|
||||
@nb.jit(nb.float64[:,:](nb.float64[:], nb.float64, nb.float64, nb.float64),
        nopython=True, parallel=True, cache=True)
def merge_frames_w_index(tof_e, tofCut, tau, total_offset):
    """
    Version of merge frames that also returns a frame index for each pulse:
    0 - belongs to the frame it was measured in
    -1 - arrived in this frame but belongs to the previous neutron pulse
    1 - belongs to the second neutron pulse of the original frame

    Returns a (2, N) float64 array: row 0 holds the merged ToF values,
    row 1 the frame indices (stored as floats to satisfy the jit signature;
    callers are expected to cast row 1 to int).
    """
    tof_idx_out = np.empty((2, tof_e.shape[0]), dtype=np.float64)
    dt = (tofCut-tau)
    for ti in nb.prange(tof_e.shape[0]):
        tof_idx_out[0, ti] = ((tof_e[ti]-dt)%tau)+total_offset # tof shifted to 1 frame
        tof_idx_out[1, ti] = ((tof_e[ti]-tofCut) // tau) # whole frames between raw ToF and cut => sub-pulse index
    return tof_idx_out
|
||||
|
||||
@nb.jit(nb.int64[:](nb.float64[:], nb.uint32[:], nb.int64[:]),
|
||||
nopython=True, parallel=True, cache=True)
|
||||
def extract_walltime(tof_e, dataPacket_p, dataPacketTime_p):
|
||||
|
||||
@@ -480,6 +480,16 @@ class ReflectivityReductionConfig(ArgParsable):
|
||||
},
|
||||
)
|
||||
|
||||
extractNeutronPulses: bool = field(
|
||||
default=False,
|
||||
metadata={
|
||||
'short': 'np',
|
||||
'group': 'data manicure',
|
||||
'help': 're-assign events to actual neutron pulses => '
|
||||
'2xbetter filter resolution but extra computation (experimental)',
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
class OutputFomatOption(StrEnum):
|
||||
Rqz_ort = "Rqz.ort"
|
||||
|
||||
@@ -65,7 +65,7 @@ class ReflectivityReduction:
|
||||
# the filtering only makes sense if using actual monitor data, not time
|
||||
self.dataevent_actions |= eh.FilterMonitorThreshold(self.config.experiment.lowCurrentThreshold)
|
||||
self.dataevent_actions |= eh.FilterStrangeTimes()
|
||||
self.dataevent_actions |= ea.MergeFrames()
|
||||
self.dataevent_actions |= ea.MergeFrames(extractNeutronPulses=self.config.reduction.extractNeutronPulses)
|
||||
self.dataevent_actions |= ea.AnalyzePixelIDs(self.config.experiment.yRange)
|
||||
self.dataevent_actions |= eh.TofTimeCorrection(self.config.experiment.incidentAngle==IncidentAngle.alphaF)
|
||||
self.dataevent_actions |= ea.CalculateWavelength(self.config.experiment.lambdaRange)
|
||||
|
||||
@@ -46,7 +46,7 @@ class MockEventData:
|
||||
# list of data packets containing previous events
|
||||
packets = np.recarray((1000,), dtype=PACKET_TYPE)
|
||||
packets.start_index = np.linspace(0, events.shape[0]-1, packets.shape[0], dtype=np.uint32)
|
||||
packets.time = np.linspace(1700000000000000000, 1700000000000000000+3_600_000_000,
|
||||
packets.time = np.linspace(1700000000000000000, 1700000000000000000+3_600_000_000_000,
|
||||
packets.shape[0], dtype=np.int64)
|
||||
|
||||
# chopper pulses within the measurement time
|
||||
@@ -58,7 +58,7 @@ class MockEventData:
|
||||
proton_current = np.recarray((50,), dtype=PC_TYPE)
|
||||
proton_current.current = 1500.0
|
||||
proton_current[np.random.randint(0, proton_current.shape[0]-1, 10)] = 0. # random time with no current
|
||||
proton_current.time = np.linspace(1700000000000000300, 1700000000000000000+3_600_000_000,
|
||||
proton_current.time = np.linspace(1700000000000000300, 1700000000000000000+3_600_000_000_000,
|
||||
proton_current.shape[0], dtype=np.int64)
|
||||
|
||||
self.data = AmorEventStream(events, packets, pulses, proton_current)
|
||||
@@ -420,20 +420,30 @@ class TestSimpleActions(TestCase):
|
||||
dtype=np.int32))
|
||||
|
||||
def test_merge_frames(self):
|
||||
action = MergeFrames(lamdaCut=0.0)
|
||||
action = MergeFrames(lamdaCut=0.0, extractNeutronPulses=False)
|
||||
action.perform_action(self.d)
|
||||
self.assertEqual(self.d.data.events.tof.shape, self.d.orig_data.events.tof.shape)
|
||||
np.testing.assert_array_compare(lambda x,y: x<=y, self.d.data.events.tof, self.d.orig_data.events.tof)
|
||||
self.assertTrue((-self.d.timing.tau<=self.d.data.events.tof).all())
|
||||
np.testing.assert_array_less(self.d.data.events.tof, self.d.timing.tau)
|
||||
|
||||
action = MergeFrames(lamdaCut=2.0)
|
||||
action = MergeFrames(lamdaCut=2.0, extractNeutronPulses=False)
|
||||
self.d.data.events.tof = self.d.orig_data.events.tof[:]
|
||||
action.perform_action(self.d)
|
||||
tofCut = 2.0*self.d.geometry.chopperDetectorDistance/const.hdm*1e-13
|
||||
self.assertTrue((tofCut-self.d.timing.tau<=self.d.data.events.tof).all())
|
||||
self.assertTrue((self.d.data.events.tof<=tofCut+self.d.timing.tau).all())
|
||||
|
||||
def test_merge_frames_splitting(self):
    # Exercise MergeFrames with neutron-pulse extraction enabled; this path
    # requires wallTime, hence the preceding _extract_walltime() call.
    action = MergeFrames(lamdaCut=0.0, extractNeutronPulses=True)
    self._extract_walltime()
    action.perform_action(self.d)
    # event count is preserved, only tof values shift (never upwards)
    self.assertEqual(self.d.data.events.tof.shape, self.d.orig_data.events.tof.shape)
    np.testing.assert_array_compare(lambda x,y: x<=y, self.d.data.events.tof, self.d.orig_data.events.tof)
    # each chopper pulse is split into two sub-pulses plus one leading pulse
    self.assertEqual(self.d.data.pulses.shape[0], self.d.orig_data.pulses.shape[0]*2+1)
    # pulse times must remain strictly increasing before and after splitting
    np.testing.assert_array_less(self.d.orig_data.pulses.time[:-1], self.d.orig_data.pulses.time[1:])
    np.testing.assert_array_less(self.d.data.pulses.time[:-1], self.d.data.pulses.time[1:])
|
||||
|
||||
def test_analyze_pixel_ids(self):
|
||||
action = AnalyzePixelIDs((1000, 1001))
|
||||
action.perform_action(self.d)
|
||||
|
||||
Reference in New Issue
Block a user