9 Commits

Author SHA1 Message Date
67727adde5 Remove version from commands, some powershell syntax issues 2026-03-12 16:21:34 +01:00
860249299b Include release tag extraction needed for last step 2026-03-12 16:13:53 +01:00
c7e92f7086 Test if workflow runs without cached environment 2026-03-12 15:55:23 +01:00
4ffb988bac Activate environment before running python commands 2026-03-12 15:47:22 +01:00
36c43f7ecb Retry ignore 2026-03-12 15:38:01 +01:00
931bb1ba48 Ignore exit code of env remove in case it doesn't exist 2026-03-12 15:33:24 +01:00
461a20e184 Update release to micromamba and renew conda environment to latest conda-forge resources 2026-03-12 14:56:39 +01:00
334e29d0d6 Reactivate window build action
All checks were successful
Unit Testing / test (3.10) (push) Successful in 55s
Unit Testing / test (3.11) (push) Successful in 1m0s
Unit Testing / test (3.12) (push) Successful in 58s
Unit Testing / test (3.8) (push) Successful in 1m0s
Unit Testing / test (3.9) (push) Successful in 58s
2026-03-11 13:09:01 +01:00
5e96a20f23 Evaluating ToF to extract sub-frames and assign to correct neutron pulse not chopper pulse (#4)
All checks were successful
Unit Testing / test (3.10) (push) Successful in 54s
Unit Testing / test (3.11) (push) Successful in 49s
Unit Testing / test (3.8) (push) Successful in 50s
Unit Testing / test (3.12) (push) Successful in 55s
Unit Testing / test (3.9) (push) Successful in 54s
Interpolate neutron times between chopper pulses and assign by ToF. If ToF too short (long wavelengths from previous pulse), assign them to the previous sub-pulse.

Reviewed with Jochen

Reviewed-on: #4
Co-authored-by: Artur Glavic <artur.glavic@psi.ch>
Co-committed-by: Artur Glavic <artur.glavic@psi.ch>
2026-03-10 15:00:10 +01:00
13 changed files with 250 additions and 81 deletions

View File

@@ -23,7 +23,6 @@ on:
jobs: jobs:
test: test:
runs-on: ubuntu-latest runs-on: ubuntu-latest
strategy: strategy:
matrix: matrix:
@@ -107,31 +106,57 @@ jobs:
files: |- files: |-
dist/amor*.tar.gz dist/amor*.tar.gz
# build-windows: build-windows:
# needs: [test] needs: [test]
# runs-on: windows-latest runs-on: windows-latest
# if: ${{ (github.event_name != 'workflow_dispatch') || (contains(fromJson('["all", "windows", "all_incl_release"]'), github.event.inputs.build-items)) }} if: ${{ (github.event_name != 'workflow_dispatch') || (contains(fromJson('["all", "windows", "all_incl_release"]'), github.event.inputs.build-items)) }}
#
# steps: steps:
# - uses: actions/checkout@v4 - uses: actions/checkout@v4
# - name: Set up Python - name: Set up Python
# uses: actions/setup-python@v5 uses: actions/setup-python@v5
# with: with:
# python-version: 3.12 python-version: 3.12
# - name: Install dependencies - name: Ensure build environment
# run: | shell: powershell
# C:\Miniconda\condabin\conda.bat env update --file conda_windows.yml --name base run: |
# C:\Miniconda\condabin\conda.bat init powershell $envName="eos_build"
# - name: Build with pyinstaller $envFile="conda_windows.yml"
# run: | #$hashFile="$env:TEMP\$envName.hash"
# pyinstaller windows_build.spec
# cd dist\eos #$newHash = (Get-FileHash $envFile).Hash
# Compress-Archive -Path .\* -Destination ..\..\eos.zip
# - name: Update Release #if (!(Test-Path $hashFile)) {
# if: ${{ (github.event_name != 'workflow_dispatch') || (contains(fromJson('["all_incl_release"]'), github.event.inputs.build-items)) }} # $rebuild = $true
# uses: actions/gitea-release-action@v1 #} else {
# with: # $oldHash = Get-Content $hashFile
# name: "Amor-Eos ${{ env.RELEASE_TAG }}" # $rebuild = $oldHash -ne $newHash
# tag_name: "${{ env.RELEASE_TAG }}" #}
# files: |-
# eos.zip #if ($rebuild) {
# Write-Host "Environment changed → rebuilding"
# if (micromamba env list | Select-String $envName) {
# micromamba env remove -n $envName -y 2>$null
# }
micromamba create -n $envName -f $envFile -y
# $newHash | Out-File $hashFile
#} else {
# Write-Host "Environment unchanged → using cached env"
#}
- name: Build with pyinstaller
run: |
micromamba activate eos_build
$env:RELEASE_TAG = (python -c "import eos;print('v'+eos.__version__)")
echo "RELEASE_TAG=$env:RELEASE_TAG" >> $env:GITHUB_ENV
pyinstaller windows_build.spec
cd dist\eos
Compress-Archive -Path .\* -Destination ..\..\eos.zip
- name: Update Release
if: ${{ (github.event_name != 'workflow_dispatch') || (contains(fromJson('["all_incl_release"]'), github.event.inputs.build-items)) }}
uses: actions/gitea-release-action@v1
with:
name: "Amor-Eos ${{ env.RELEASE_TAG }}"
tag_name: "${{ env.RELEASE_TAG }}"
files: |-
eos.zip

View File

@@ -1,44 +1,68 @@
name: eos_build name: eos_build
channels: channels:
- defaults - conda-forge
dependencies: dependencies:
- altgraph=0.17.3=py312haa95532_0 - altgraph=0.17.5=pyhd8ed1ab_0
- blas=1.0=mkl - bzip2=1.0.8=h0ad9c76_9
- bzip2=1.0.8=h2bbff1b_6 - ca-certificates=2026.2.25=h4c7d964_0
- ca-certificates=2024.11.26=haa95532_0 - cached-property=1.5.2=hd8ed1ab_1
- expat=2.6.3=h5da7b33_0 - cached_property=1.5.2=pyha770c72_1
- h5py=3.12.1=py312h3b2c811_0 - flexcache=0.3=pyhd8ed1ab_1
- hdf5=1.12.1=h51c971a_3 - flexparser=0.4=pyhd8ed1ab_1
- icc_rt=2022.1.0=h6049295_2 - future=1.0.0=pyhd8ed1ab_2
- intel-openmp=2023.1.0=h59b6b97_46320 - h5py=3.15.1=nompi_py312h03cd2ba_101
- libffi=3.4.4=hd77b12b_1 - hdf5=1.14.6=nompi_hae35d4c_106
- llvmlite=0.43.0=py312hf2fb9eb_0 - icu=78.2=h637d24d_0
- mkl=2023.1.0=h6b88ed4_46358 - importlib-metadata=8.7.0=pyhe01879c_1
- mkl-service=2.4.0=py312h2bbff1b_1 - krb5=1.22.2=h0ea6238_0
- mkl_fft=1.3.11=py312h827c3e9_0 - libaec=1.1.5=haf901d7_0
- mkl_random=1.2.8=py312h0158946_0 - libblas=3.11.0=5_hf2e6a31_mkl
- numba=0.60.0=py312h0158946_0 - libcblas=3.11.0=5_h2a3cdd5_mkl
- numpy=1.26.4=py312hfd52020_0 - libcurl=8.19.0=h8206538_0
- numpy-base=1.26.4=py312h4dde369_0 - libexpat=2.7.4=hac47afa_0
- openssl=3.0.15=h827c3e9_0 - libffi=3.5.2=h3d046cb_0
- packaging=24.1=py312haa95532_0 - libhwloc=2.12.2=default_h4379cf1_1000
- pefile=2023.2.7=py312haa95532_0 - libiconv=1.18=hc1393d2_2
- pip=24.2=py312haa95532_0 - liblapack=3.11.0=5_hf9ab0e9_mkl
- pyinstaller=6.9.0=py312h0416ee5_0 - liblzma=5.8.2=hfd05255_0
- pyinstaller-hooks-contrib=2024.7=py312haa95532_0 - libsqlite=3.52.0=hf5d6505_0
- python=3.12.7=h14ffc60_0 - libssh2=1.11.1=h9aa295b_0
- pywin32-ctypes=0.2.2=py312haa95532_0 - libwinpthread=12.0.0.r4.gg4f2fc60ca=h57928b3_10
- setuptools=75.1.0=py312haa95532_0 - libxml2=2.15.2=h779ef1b_0
- sqlite=3.45.3=h2bbff1b_0 - libxml2-16=2.15.2=h3cfd58e_0
- tbb=2021.8.0=h59b6b97_0 - libzlib=1.3.1=h2466b09_2
- tk=8.6.14=h0416ee5_0 - llvm-openmp=22.1.0=h4fa8253_0
- tzdata=2024b=h04d1e81_0 - llvmlite=0.46.0=py312hdb9728c_0
- vc=14.40=h2eaa2aa_1 - mkl=2025.3.0=hac47afa_455
- vs2015_runtime=14.40.33807=h98bb1dd_1 - numba=0.64.0=py312h560f1c9_0
- wheel=0.44.0=py312haa95532_0 - numpy=2.4.2=py312ha72d056_1
- xz=5.4.6=h8cc25b3_1 - openssl=3.6.1=hf411b9b_1
- zlib=1.2.13=h8cc25b3_1 - packaging=26.0=pyhcf101f3_0
- pefile=2023.2.7=pyhd8ed1ab_0
- pint=0.25.2=pyhcf101f3_0
- pip=26.0.1=pyh8b19718_0
- platformdirs=4.9.4=pyhcf101f3_0
- pyinstaller=6.19.0=py312hf4647a0_1
- pyinstaller-hooks-contrib=2026.2=pyhd8ed1ab_0
- python=3.12.13=h0159041_0_cpython
- python_abi=3.12=8_cp312
- pywin32=311=py312h829343e_1
- pywin32-ctypes=0.2.3=py312h2e8e312_3
- pyyaml=6.0.3=py312h05f76fc_1
- setuptools=82.0.1=pyh332efcf_0
- tabulate=0.10.0=pyhcf101f3_0
- tbb=2022.3.0=h3155e25_2
- tk=8.6.13=h6ed50ae_3
- typing-extensions=4.15.0=h396c80c_0
- typing_extensions=4.15.0=pyhcf101f3_0
- ucrt=10.0.26100.0=h57928b3_0
- vc=14.3=h41ae7f8_34
- vc14_runtime=14.44.35208=h818238b_34
- vcomp14=14.44.35208=h818238b_34
- wheel=0.46.3=pyhd8ed1ab_0
- yaml=0.2.5=h6a83c73_3
- zipp=3.23.0=pyhcf101f3_1
- zstd=1.5.7=h534d264_6
- pip: - pip:
- orsopy==1.2.1 - orsopy==1.2.2
- pyyaml==6.0.2
- tzdata - tzdata

View File

@@ -2,5 +2,5 @@
Package to handle data redction at AMOR instrument to be used by __main__.py script. Package to handle data redction at AMOR instrument to be used by __main__.py script.
""" """
__version__ = '3.2.5' __version__ = '3.2.6'
__date__ = '2026-03-10' __date__ = '2026-03-10'

View File

@@ -9,7 +9,7 @@ from typing import Tuple
from . import const from . import const
from .event_data_types import EventDataAction, EventDatasetProtocol, append_fields, EVENT_BITMASKS from .event_data_types import EventDataAction, EventDatasetProtocol, append_fields, EVENT_BITMASKS
from .helpers import filter_project_x, merge_frames, extract_walltime, add_log_to_pulses from .helpers import filter_project_x, merge_frames, extract_walltime, add_log_to_pulses, merge_frames_w_index
from .instrument import Detector from .instrument import Detector
from .options import IncidentAngle from .options import IncidentAngle
from .header import Header from .header import Header
@@ -25,8 +25,9 @@ class ExtractWalltime(EventDataAction):
dataset.data.events = new_events dataset.data.events = new_events
class MergeFrames(EventDataAction): class MergeFrames(EventDataAction):
def __init__(self, lamdaCut=None): def __init__(self, lamdaCut=None, extractNeutronPulses=False):
self.lamdaCut=lamdaCut self.lamdaCut=lamdaCut
self.extractNeutronPulses=extractNeutronPulses
def perform_action(self, dataset: EventDatasetProtocol)->None: def perform_action(self, dataset: EventDatasetProtocol)->None:
if self.lamdaCut is None: if self.lamdaCut is None:
@@ -36,7 +37,34 @@ class MergeFrames(EventDataAction):
tofCut = lamdaCut*dataset.geometry.chopperDetectorDistance/const.hdm*1e-13 tofCut = lamdaCut*dataset.geometry.chopperDetectorDistance/const.hdm*1e-13
total_offset = (tofCut + total_offset = (tofCut +
dataset.timing.tau * (dataset.timing.ch1TriggerPhase + dataset.timing.chopperPhase/2)/180) dataset.timing.tau * (dataset.timing.ch1TriggerPhase + dataset.timing.chopperPhase/2)/180)
dataset.data.events.tof = merge_frames(dataset.data.events.tof, tofCut, dataset.timing.tau, total_offset) if self.extractNeutronPulses and 'wallTime' in dataset.data.events.dtype.names:
d = dataset.data
# put events into precise sub-frame
d.events.tof, subframes = merge_frames_w_index(d.events.tof, tofCut, dataset.timing.tau, total_offset)
subframes = subframes.astype(int)
# add a sub-pulse time 1-tau before and after each existing time
utimes, uidxs = np.unique(d.events.wallTime, return_inverse=True)
inter_times = np.empty(2*utimes.shape[0]+1, dtype=d.events.wallTime.dtype)
tauns = dataset.timing.tau*1e9
inter_times[0] = utimes[0]-tauns
inter_times[1::2] = utimes
inter_times[2::2] = utimes+tauns
# use subframe indices to sort existing events into new times
d.events.wallTime = inter_times[2*uidxs+subframes+1]
# expand pulses array with additional sub-frames
new_pulses = np.recarray(2*d.pulses.shape[0]+1, dtype=d.pulses.dtype)
new_pulses[0] = d.pulses[0]
new_pulses[1::2] = d.pulses
new_pulses[2::2] = d.pulses
new_pulses.time[0] = d.pulses.time[0]-tauns
new_pulses.time[2::2] = d.pulses.time+tauns
new_pulses.monitor /= 2.0 # ~preserve total monitor counts
d.pulses = new_pulses
else:
if self.extractNeutronPulses:
logging.error(" Trying to separate neutron pulses while wallTime is not extracted, yet!")
dataset.data.events.tof = merge_frames(dataset.data.events.tof, tofCut, dataset.timing.tau, total_offset)
class AnalyzePixelIDs(EventDataAction): class AnalyzePixelIDs(EventDataAction):

View File

@@ -46,6 +46,8 @@ EVENT_BITMASKS = {
} }
def append_fields(input: np.recarray, new_fields: List[Tuple[str, np.dtype]]): def append_fields(input: np.recarray, new_fields: List[Tuple[str, np.dtype]]):
# TODO: This action is used often and time consuming as it runs len(flds) times over all indices.
# Could only be faster if array is allocated in the beginning with all fields, less flexible.
# add one ore more fields to a recarray, numpy functions seems to fail # add one ore more fields to a recarray, numpy functions seems to fail
flds = [(name, dtypei[0]) for name, dtypei in input.dtype.fields.items()] flds = [(name, dtypei[0]) for name, dtypei in input.dtype.fields.items()]
flds += new_fields flds += new_fields

View File

@@ -165,7 +165,9 @@ class ApplyMask(EventDataAction):
self.bitmask_filter = bitmask_filter self.bitmask_filter = bitmask_filter
def perform_action(self, dataset: EventDatasetProtocol) ->None: def perform_action(self, dataset: EventDatasetProtocol) ->None:
# TODO: why is this action time consuming? # TODO: Most time in test examples is spend here.
# While the actions here are very simple, they act on a large array,
# so even just comparison and indexing become time consuming.
d = dataset.data d = dataset.data
pre_filter = d.events.shape[0] pre_filter = d.events.shape[0]
if logging.getLogger().level <= logging.DEBUG: if logging.getLogger().level <= logging.DEBUG:

View File

@@ -6,9 +6,13 @@ import numpy as np
from .event_data_types import EventDatasetProtocol, append_fields from .event_data_types import EventDatasetProtocol, append_fields
try: try:
from .helpers_numba import merge_frames, extract_walltime, filter_project_x, calculate_derived_properties_focussing from .helpers_numba import merge_frames, extract_walltime, filter_project_x, \
calculate_derived_properties_focussing, merge_frames_w_index
except ImportError: except ImportError:
from .helpers_fallback import merge_frames, extract_walltime, filter_project_x, calculate_derived_properties_focussing import logging
logging.warning('Cannot import numba enhanced functions, is it installed?')
from .helpers_fallback import merge_frames, extract_walltime, filter_project_x, \
calculate_derived_properties_focussing, merge_frames_w_index
def add_log_to_pulses(key, dataset: EventDatasetProtocol): def add_log_to_pulses(key, dataset: EventDatasetProtocol):
""" """

View File

@@ -8,6 +8,17 @@ def merge_frames(tof_e, tofCut, tau, total_offset):
# tof shifted to 1 frame # tof shifted to 1 frame
return np.remainder(tof_e-(tofCut-tau), tau)+total_offset return np.remainder(tof_e-(tofCut-tau), tau)+total_offset
def merge_frames_w_index(tof_e, tofCut, tau, total_offset):
"""
Version of merge frames that also returns a frame index for each pulse:
0 - belongs to the frame it was measured in
-1 - arrived in this frame but belongs to the previous neutron pulse
1 - belongs to the second neutron pulse of the original frame
"""
new_tof = merge_frames(tof_e, tofCut, tau, total_offset)
frame_idx = np.floor_divide(tof_e-tofCut, tau)
return new_tof, frame_idx
def extract_walltime(tof_e, dataPacket_p, dataPacketTime_p): def extract_walltime(tof_e, dataPacket_p, dataPacketTime_p):
output = np.empty(np.shape(tof_e)[0], dtype=np.int64) output = np.empty(np.shape(tof_e)[0], dtype=np.int64)
for i in range(len(dataPacket_p)-1): for i in range(len(dataPacket_p)-1):

View File

@@ -11,6 +11,22 @@ def merge_frames(tof_e, tofCut, tau, total_offset):
tof_e_out[ti] = ((tof_e[ti]-dt)%tau)+total_offset # tof shifted to 1 frame tof_e_out[ti] = ((tof_e[ti]-dt)%tau)+total_offset # tof shifted to 1 frame
return tof_e_out return tof_e_out
@nb.jit(nb.float64[:,:](nb.float64[:], nb.float64, nb.float64, nb.float64),
nopython=True, parallel=True, cache=True)
def merge_frames_w_index(tof_e, tofCut, tau, total_offset):
"""
Version of merge frames that also returns a frame index for each pulse:
0 - belongs to the frame it was measured in
-1 - arrived in this frame but belongs to the previous neutron pulse
1 - belongs to the second neutron pulse of the original frame
"""
tof_idx_out = np.empty((2, tof_e.shape[0]), dtype=np.float64)
dt = (tofCut-tau)
for ti in nb.prange(tof_e.shape[0]):
tof_idx_out[0, ti] = ((tof_e[ti]-dt)%tau)+total_offset # tof shifted to 1 frame
tof_idx_out[1, ti] = ((tof_e[ti]-tofCut) // tau) # tof shifted to 1 frame
return tof_idx_out
@nb.jit(nb.int64[:](nb.float64[:], nb.uint32[:], nb.int64[:]), @nb.jit(nb.int64[:](nb.float64[:], nb.uint32[:], nb.int64[:]),
nopython=True, parallel=True, cache=True) nopython=True, parallel=True, cache=True)
def extract_walltime(tof_e, dataPacket_p, dataPacketTime_p): def extract_walltime(tof_e, dataPacket_p, dataPacketTime_p):

View File

@@ -480,6 +480,16 @@ class ReflectivityReductionConfig(ArgParsable):
}, },
) )
extractNeutronPulses: bool = field(
default=False,
metadata={
'short': 'np',
'group': 'data manicure',
'help': 're-assign events to actual neutron pulses => '
'2xbetter filter resolution but extra computation (experimental)',
},
)
class OutputFomatOption(StrEnum): class OutputFomatOption(StrEnum):
Rqz_ort = "Rqz.ort" Rqz_ort = "Rqz.ort"

View File

@@ -65,7 +65,7 @@ class ReflectivityReduction:
# the filtering only makes sense if using actual monitor data, not time # the filtering only makes sense if using actual monitor data, not time
self.dataevent_actions |= eh.FilterMonitorThreshold(self.config.experiment.lowCurrentThreshold) self.dataevent_actions |= eh.FilterMonitorThreshold(self.config.experiment.lowCurrentThreshold)
self.dataevent_actions |= eh.FilterStrangeTimes() self.dataevent_actions |= eh.FilterStrangeTimes()
self.dataevent_actions |= ea.MergeFrames() self.dataevent_actions |= ea.MergeFrames(extractNeutronPulses=self.config.reduction.extractNeutronPulses)
self.dataevent_actions |= ea.AnalyzePixelIDs(self.config.experiment.yRange) self.dataevent_actions |= ea.AnalyzePixelIDs(self.config.experiment.yRange)
self.dataevent_actions |= eh.TofTimeCorrection(self.config.experiment.incidentAngle==IncidentAngle.alphaF) self.dataevent_actions |= eh.TofTimeCorrection(self.config.experiment.incidentAngle==IncidentAngle.alphaF)
self.dataevent_actions |= ea.CalculateWavelength(self.config.experiment.lambdaRange) self.dataevent_actions |= ea.CalculateWavelength(self.config.experiment.lambdaRange)

View File

@@ -46,7 +46,7 @@ class MockEventData:
# list of data packates containing previous events # list of data packates containing previous events
packets = np.recarray((1000,), dtype=PACKET_TYPE) packets = np.recarray((1000,), dtype=PACKET_TYPE)
packets.start_index = np.linspace(0, events.shape[0]-1, packets.shape[0], dtype=np.uint32) packets.start_index = np.linspace(0, events.shape[0]-1, packets.shape[0], dtype=np.uint32)
packets.time = np.linspace(1700000000000000000, 1700000000000000000+3_600_000_000, packets.time = np.linspace(1700000000000000000, 1700000000000000000+3_600_000_000_000,
packets.shape[0], dtype=np.int64) packets.shape[0], dtype=np.int64)
# chopper pulses within the measurement time # chopper pulses within the measurement time
@@ -58,7 +58,7 @@ class MockEventData:
proton_current = np.recarray((50,), dtype=PC_TYPE) proton_current = np.recarray((50,), dtype=PC_TYPE)
proton_current.current = 1500.0 proton_current.current = 1500.0
proton_current[np.random.randint(0, proton_current.shape[0]-1, 10)] = 0. # random time with no current proton_current[np.random.randint(0, proton_current.shape[0]-1, 10)] = 0. # random time with no current
proton_current.time = np.linspace(1700000000000000300, 1700000000000000000+3_600_000_000, proton_current.time = np.linspace(1700000000000000300, 1700000000000000000+3_600_000_000_000,
proton_current.shape[0], dtype=np.int64) proton_current.shape[0], dtype=np.int64)
self.data = AmorEventStream(events, packets, pulses, proton_current) self.data = AmorEventStream(events, packets, pulses, proton_current)
@@ -420,20 +420,30 @@ class TestSimpleActions(TestCase):
dtype=np.int32)) dtype=np.int32))
def test_merge_frames(self): def test_merge_frames(self):
action = MergeFrames(lamdaCut=0.0) action = MergeFrames(lamdaCut=0.0, extractNeutronPulses=False)
action.perform_action(self.d) action.perform_action(self.d)
self.assertEqual(self.d.data.events.tof.shape, self.d.orig_data.events.tof.shape) self.assertEqual(self.d.data.events.tof.shape, self.d.orig_data.events.tof.shape)
np.testing.assert_array_compare(lambda x,y: x<=y, self.d.data.events.tof, self.d.orig_data.events.tof) np.testing.assert_array_compare(lambda x,y: x<=y, self.d.data.events.tof, self.d.orig_data.events.tof)
self.assertTrue((-self.d.timing.tau<=self.d.data.events.tof).all()) self.assertTrue((-self.d.timing.tau<=self.d.data.events.tof).all())
np.testing.assert_array_less(self.d.data.events.tof, self.d.timing.tau) np.testing.assert_array_less(self.d.data.events.tof, self.d.timing.tau)
action = MergeFrames(lamdaCut=2.0) action = MergeFrames(lamdaCut=2.0, extractNeutronPulses=False)
self.d.data.events.tof = self.d.orig_data.events.tof[:] self.d.data.events.tof = self.d.orig_data.events.tof[:]
action.perform_action(self.d) action.perform_action(self.d)
tofCut = 2.0*self.d.geometry.chopperDetectorDistance/const.hdm*1e-13 tofCut = 2.0*self.d.geometry.chopperDetectorDistance/const.hdm*1e-13
self.assertTrue((tofCut-self.d.timing.tau<=self.d.data.events.tof).all()) self.assertTrue((tofCut-self.d.timing.tau<=self.d.data.events.tof).all())
self.assertTrue((self.d.data.events.tof<=tofCut+self.d.timing.tau).all()) self.assertTrue((self.d.data.events.tof<=tofCut+self.d.timing.tau).all())
def test_merge_frames_splitting(self):
action = MergeFrames(lamdaCut=0.0, extractNeutronPulses=True)
self._extract_walltime()
action.perform_action(self.d)
self.assertEqual(self.d.data.events.tof.shape, self.d.orig_data.events.tof.shape)
np.testing.assert_array_compare(lambda x,y: x<=y, self.d.data.events.tof, self.d.orig_data.events.tof)
self.assertEqual(self.d.data.pulses.shape[0], self.d.orig_data.pulses.shape[0]*2+1)
np.testing.assert_array_less(self.d.orig_data.pulses.time[:-1], self.d.orig_data.pulses.time[1:])
np.testing.assert_array_less(self.d.data.pulses.time[:-1], self.d.data.pulses.time[1:])
def test_analyze_pixel_ids(self): def test_analyze_pixel_ids(self):
action = AnalyzePixelIDs((1000, 1001)) action = AnalyzePixelIDs((1000, 1001))
action.perform_action(self.d) action.perform_action(self.d)

View File

@@ -40,10 +40,47 @@ exe = EXE(
codesign_identity=None, codesign_identity=None,
entitlements_file=None, entitlements_file=None,
) )
a2 = Analysis(
['eos/ls.py'],
pathex=[],
binaries=[],
datas=[],
hiddenimports=[],
hookspath=[],
hooksconfig={},
runtime_hooks=[],
excludes=[],
noarchive=False,
optimize=1,
)
pyz2 = PYZ(a2.pure)
exe2 = EXE(
pyz2,
a2.scripts,
[],
exclude_binaries=True,
name='eosls',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
console=True,
disable_windowed_traceback=False,
argv_emulation=False,
target_arch=None,
codesign_identity=None,
entitlements_file=None,
)
coll = COLLECT( coll = COLLECT(
exe, exe,
a.binaries, a.binaries,
a.datas, a.datas,
exe2,
a2.binaries,
a2.datas,
strip=False, strip=False,
upx=True, upx=True,
upx_exclude=[], upx_exclude=[],