nothing but formatting

This commit is contained in:
2024-08-09 10:02:43 +02:00
9 changed files with 147 additions and 120 deletions

46
eos.py Normal file
View File

@@ -0,0 +1,46 @@
#!/usr/bin/env python
"""
eos reduces measurements performed on Amor@SINQ, PSI
Author: Jochen Stahn (algorithms, python draft),
Artur Glavic (structuring and optimisation of code)
conventions (not strictly followed, yet):
- array names end with the suffix '_x[y]' with the meaning
_e = events
_tof
_l = lambda
_t = theta
_z = detector z
_lz = (lambda, detector z)
_q = q_z
"""
import logging
from libeos.command_line import command_line_options
from libeos.logconfig import setup_logging
from libeos.reduction import AmorReduction
#=====================================================================================================
# TODO:
# - calculate resolution using the chopperPhase
# - deal with background correction
# - format of 'call' + add '-Y' if not supplied
#=====================================================================================================
def main():
    """Entry point: run the complete Amor data reduction.

    Configures logging, parses the command line into the configuration
    object, then hands that configuration to the reducer and executes it.
    """
    setup_logging()
    logging.warning('######## eos - data reduction for Amor ########')
    # Parse command line arguments into the classes holding the configuration parameters.
    options = command_line_options()
    # Build the reduction engine from the configuration and perform the actual reduction.
    AmorReduction(options).reduce()
    logging.info('######## eos - finished ########')


if __name__ == '__main__':
    main()

View File

@@ -156,7 +156,8 @@ class Meta:
except (KeyError, IndexError):
logging.warning(f" using parameters from nicos cache")
#cachePath = '/home/amor/nicosdata/amor/cache/'
cachePath = '/home/nicos/amorcache/'
#cachePath = '/home/nicos/amorcache/'
cachePath = '/home/amor/cache/'
value = str(subprocess.getoutput('/usr/bin/grep "value" '+cachePath+'nicos-mu/'+year_date)).split('\t')[-1]
self.mu = float(value)
value = str(subprocess.getoutput('/usr/bin/grep "value" '+cachePath+'nicos-nu/'+year_date)).split('\t')[-1]
@@ -522,7 +523,7 @@ class PlotSelection:
return
elif arg == 'log':
vmin = 0
vmax = max(1, np.log(np.max(I_yt)+.1)/np.log(10)*1.05)
vmax = max(1, np.log(np.max(I_yz)+.1)/np.log(10)*1.05)
plt.pcolormesh(bins_y[:],bins_z[:],(np.log(I_yz+6e-1)/np.log(10)).T, cmap=cmap, vmin=vmin, vmax=vmax)
else:
plt.pcolormesh(bins_y[:],bins_z[:],I_yz.T, cmap=cmap)
@@ -953,7 +954,7 @@ def commandLineArgs():
type=float,
help ="value of nu")
clas.add_argument("-P", "--chopperPhase",
default=-5.,
default=7.5,
type=float,
help ="chopper phase offset")
clas.add_argument("-p", "--plot",

View File

@@ -27,10 +27,14 @@ def commandLineArgs():
input_data.add_argument("-nm", "--normalisationMethod",
default = Defaults.normalisationMethod,
help = "normalisation method: overilumination, underillumination, direct_beam")
input_data.add_argument("--raw",
type = str,
default = Defaults.raw,
help = "relative path to directory with .hdf files")
input_data.add_argument("-d", "--dataPath",
type = str,
default = Defaults.dataPath,
help = "relative path to directory with .hdf files")
help = "relative path for output")
input_data.add_argument("-Y", "--year",
default = Defaults.year,
type = int,
@@ -46,9 +50,10 @@ def commandLineArgs():
nargs = '+',
default = Defaults.outputFormat,
help = "one of [Rqz.ort, Rlt.ort]")
output.add_argument("--offSpecular",
type = bool,
default = Defaults.offSpecular,
output.add_argument("-ai", "--incidentAngle",
type = str,
default = Defaults.incidentAngle,
help = "calulate alpha_i from [alphaF, mu, nu]",
)
output.add_argument("-r", "--qResolution",
default = Defaults.qResolution,
@@ -168,6 +173,7 @@ def command_line_options():
reader_config = ReaderConfig(
year = clas.year,
raw = clas.raw,
dataPath = clas.dataPath
)
experiment_config = ExperimentConfig(
@@ -177,13 +183,14 @@ def command_line_options():
yRange = clas.yRange,
lambdaRange = clas.lambdaRange,
qzRange = clas.qzRange,
offSpecular = clas.offSpecular,
incidentAngle = clas.incidentAngle,
mu = clas.mu,
nu = clas.nu,
muOffset = clas.muOffset
)
reduction_config = ReductionConfig(
qResolution = clas.qResolution,
qzRange = clas.qzRange,
autoscale = clas.autoscale,
thetaRange = clas.thetaRange,
thetaRangeR = clas.thetaRangeR,

View File

@@ -78,21 +78,35 @@ class AmorData:
self.lamda_e = _lamda_e
self.wallTime_e = _wallTime_e
#-------------------------------------------------------------------------------------------------
#def path_generator(self, number):
# fileName = f'amor{self.reader_config.year}n{number:06d}.hdf'
# if os.path.exists(os.path.join(self.reader_config.dataPath,fileName)):
# path = self.reader_config.dataPath
# elif os.path.exists(fileName):
# path = '.'
# elif os.path.exists(os.path.join('.','raw', fileName)):
# path = os.path.join('.','raw')
# elif os.path.exists(os.path.join('..','raw', fileName)):
# path = os.path.join('..','raw')
# elif os.path.exists(f'/afs/psi.ch/project/sinqdata/{self.reader_config.year}/amor/{int(number/1000)}/{fileName}'):
# path = f'/afs/psi.ch/project/sinqdata/{self.reader_config.year}/amor/{int(number/1000)}'
# else:
# sys.exit(f'# ERROR: the file {fileName} is nowhere to be found!')
# return os.path.join(path, fileName)
#-------------------------------------------------------------------------------------------------
def path_generator(self, number):
fileName = f'amor{self.reader_config.year}n{number:06d}.hdf'
if os.path.exists(os.path.join(self.reader_config.dataPath,fileName)):
path = self.reader_config.dataPath
elif os.path.exists(fileName):
path = '.'
elif os.path.exists(os.path.join('.','raw', fileName)):
path = os.path.join('.','raw')
elif os.path.exists(os.path.join('..','raw', fileName)):
path = os.path.join('..','raw')
elif os.path.exists(f'/afs/psi.ch/project/sinqdata/{self.reader_config.year}/amor/{int(number/1000)}/{fileName}'):
path = f'/afs/psi.ch/project/sinqdata/{self.reader_config.year}/amor/{int(number/1000)}'
else:
sys.exit(f'# ERROR: the file {fileName} is nowhere to be found!')
path = ''
for rawd in self.reader_config.raw:
if os.path.exists(os.path.join(rawd,fileName)):
path = rawd
break
if not path:
if os.path.exists(f'/afs/psi.ch/project/sinqdata/{self.reader_config.year}/amor/{int(number/1000)}/{fileName}'):
path = f'/afs/psi.ch/project/sinqdata/{self.reader_config.year}/amor/{int(number/1000)}'
else:
sys.exit(f'# ERROR: the file {fileName} can not be found in {self.reader_config.raw}!')
return os.path.join(path, fileName)
#-------------------------------------------------------------------------------------------------
def expand_file_list(self, short_notation):
@@ -169,9 +183,11 @@ class AmorData:
self.filter_project_x()
# correct tof for beam size effect at chopper: t_cor = (delta / 180 deg) * tau
# TODO: check for correctness
if not self.config.offSpecular:
if self.config.incidentAngle == 'alphaF':
self.tof_e -= ( self.delta_e / 180. ) * self.tau
else:
# TODO: check sign of correction
self.tof_e -= ( self.kad / 180. ) * self.tau
self.calculate_derived_properties()
@@ -189,7 +205,7 @@ class AmorData:
def calculate_derived_properties(self):
self.lamdaMax = const.lamdaCut+1.e13*self.tau*const.hdm/(self.chopperDetectorDistance+124.)
if nb_helpers and not self.config.offSpecular:
if nb_helpers:
self.lamda_e, self.qz_e, self.mask_e = nb_helpers.calculate_derived_properties_focussing(
self.tof_e, self.detXdist_e, self.delta_e, self.mask_e,
self.config.lambdaRange[0], self.config.lambdaRange[1], self.nu, self.mu,
@@ -201,17 +217,23 @@ class AmorData:
self.mask_e = np.logical_and(self.mask_e, (self.config.lambdaRange[0]<=self.lamda_e) & (
self.lamda_e<=self.config.lambdaRange[1]))
# alpha_f
alphaF_e = self.nu-self.mu+self.delta_e
# q_z
if self.config.offSpecular:
alphaI = self.kap+self.kad+self.mu
self.qz_e = 2*np.pi*((np.sin(np.deg2rad(alphaF_e))+np.sin(np.deg2rad(alphaI)))/self.lamda_e)
self.qx_e = 2*np.pi*((np.cos(np.deg2rad(alphaF_e))-np.cos(np.deg2rad(alphaI)))/self.lamda_e)
self.header.measurement_scheme = 'energy-dispersive',
else:
if self.config.incidentAngle == 'alphaF':
alphaF_e = self.nu - self.mu + self.delta_e
self.qz_e = 4*np.pi*(np.sin(np.deg2rad(alphaF_e))/self.lamda_e)
# qx_e = 0.
# qx_e = 0.
self.header.measurement_scheme = 'angle- and energy-dispersive'
elif self.config.incidentAngle == 'nu':
alphaF_e = (self.nu + self.delta_e + self.kap + self.kad) / 2.
self.qz_e = 4*np.pi*(np.sin(np.deg2rad(alphaF_e))/self.lamda_e)
# qx_e = 0.
self.header.measurement_scheme = 'energy-dispersive'
else:
alphaF_e = self.nu - self.mu + self.delta_e
alphaI = self.kap + self.kad + self.mu
self.qz_e = 2*np.pi * ((np.sin(np.deg2rad(alphaF_e)) + np.sin(np.deg2rad(alphaI)))/self.lamda_e)
self.qx_e = 2*np.pi * ((np.cos(np.deg2rad(alphaF_e)) - np.cos(np.deg2rad(alphaI)))/self.lamda_e)
self.header.measurement_scheme = 'energy-dispersive'
def filter_project_x(self):
pixelLookUp = self.resolve_pixels()
@@ -279,7 +301,8 @@ class AmorData:
logging.warning(" using parameters from nicos cache")
year_date = str(self.start_date).replace('-', '/', 1)
#cachePath = '/home/amor/nicosdata/amor/cache/'
cachePath = '/home/nicos/amorcache/'
#cachePath = '/home/nicos/amorcache/'
cachePath = '/home/amor/cache/'
value = str(subprocess.getoutput(f'/usr/bin/grep "value" {cachePath}nicos-mu/{year_date}')).split('\t')[-1]
self.mu = float(value)
value = str(subprocess.getoutput(f'/usr/bin/grep "value" {cachePath}nicos-nu/{year_date}')).split('\t')[-1]

View File

@@ -20,10 +20,11 @@ class Detector:
class Grid:
def __init__(self, qResolution):
def __init__(self, qResolution, qzRange):
self.lamdaCut = const.lamdaCut
self.dldl = 0.005 # Delta lambda / lambda
self.qResolution = qResolution
self.qzRange = qzRange
def q(self):
resolutions = [0.005, 0.01, 0.02, 0.025, 0.04, 0.05, 0.1, 1]
@@ -35,7 +36,8 @@ class Grid:
# linear up to qq
q_grid = np.arange(0, qq, qq*dqdq)
# exponential from qq on
q_grid = np.append(q_grid, qq*(1.+dqdq)**np.arange(int(np.log(0.3/qq)/np.log(1+dqdq))))
q_grid = np.append(q_grid, qq*(1.+dqdq)**np.arange(int(np.log(self.qzRange[1]/qq)/np.log(1+dqdq))))
q_grid = q_grid[q_grid>=self.qzRange[0]]
return q_grid
def lamda(self):

View File

@@ -9,7 +9,7 @@ def merge_frames(tof_e, tofCut, tau, total_offset):
dt = (tofCut-tau)
for ti in nb.prange(tof_e.shape[0]):
tof_e_out[ti] = ((tof_e[ti]-dt)%tau)+total_offset # tof shifted to 1 frame
return tof_e
return tof_e_out
@nb.jit(nb.float64[:](nb.float64[:], nb.uint64[:], nb.float64[:]),
nopython=True, parallel=True, cache=True)
@@ -62,4 +62,4 @@ def calculate_derived_properties_focussing(tof_e, detXdist_e, delta_e, mask_e,
mask_e_out[i] = mask_e[i] & ((lmin<=lamda_e[i]) & (lamda_e[i]<=lmax))
alphaF_e[i] = nu-mu+delta_e[i]
qz_e[i] = 4*np.pi*np.sin(np.deg2rad(alphaF_e[i]))/lamda_e[i]
return lamda_e, qz_e, mask_e
return lamda_e, qz_e, mask_e

View File

@@ -7,19 +7,20 @@ from datetime import datetime
class Defaults:
#fileIdentifier
# fileIdentifier
normalisationFileIdentifier = []
normalisationMethod = 'overilumination'
dataPath = '.'
raw = ['.', './raw', '../raw', '../../raw']
year = datetime.now().year
#subtract
# subtract
outputName = "fromEOS"
outputFormat = ['Rqz.ort']
offSpecular = False
incidentAngle = 'alphaF'
qResolution = 0.01
#timeSlize
scale = [1]
#autoscale
# autoscale
lambdaRange = [2., 15.]
thetaRange = [-12., 12.]
thetaRangeR = [-0.7, 0.7]
@@ -27,7 +28,7 @@ class Defaults:
qzRange = [0.005, 0.30]
chopperSpeed = 500
chopperPhase = -13.5
chopperPhaseOffset = -5
chopperPhaseOffset = 7
muOffset = 0
mu = 0
nu = 0
@@ -40,10 +41,12 @@ class Defaults:
class ReaderConfig:
year: int
dataPath: str
raw: Tuple[str]
startTime: Optional[float] = 0
@dataclass
class ExperimentConfig:
incidentAngle: str
chopperPhase: float
yRange: Tuple[float, float]
lambdaRange: Tuple[float, float]
@@ -54,11 +57,11 @@ class ExperimentConfig:
mu: Optional[float] = None
nu: Optional[float] = None
muOffset: Optional[float] = None
offSpecular: bool = False
@dataclass
class ReductionConfig:
qResolution: float
qzRange: Tuple[float, float]
thetaRange: Tuple[float, float]
thetaRangeR: Tuple[float, float]
@@ -102,6 +105,8 @@ class EOSConfig:
inpt += f' -Y {datetime.now().year}'
if self.reader_config.dataPath != '.':
inpt += f' --dataPath {self.reader_config.dataPath}'
if self.reader_config.raw != '.':
inpt = f' --rawd {self.reader_config.raw}'
if self.reduction_config.subtract:
inpt += f' -subtract {self.reduction_config.subtract}'
if self.reduction_config.normalisationFileIdentifier:
@@ -153,10 +158,6 @@ class EOSConfig:
if self.reduction_config.timeSlize:
acts += f' --timeSlize {" ".join(str(ff) for ff in self.reduction_config.timeSlize)}'
# TODO: experiment_config = ExperimentConfig(
# offSpecular = clas.offSpecular,
# )
mlst = base + inpt + otpt
if mask:
mlst += mask

View File

@@ -17,7 +17,7 @@ class AmorReduction:
self.reader_config = config.reader
self.reduction_config = config.reduction
self.output_config = config.output
self.grid = Grid(config.reduction.qResolution)
self.grid = Grid(config.reduction.qResolution, config.experiment.qzRange)
self.header = Header()
self.header.reduction.call = EOSConfig.call_string(self)
@@ -87,15 +87,16 @@ class AmorReduction:
headerRqz = self.header.orso_header()
headerRqz.data_set = f'Nr {i} : mu = {self.file_reader.mu:6.3f} deg'
# projection on q-grid
# projection on qz-grid
q_q, R_q, dR_q, dq_q = self.project_on_qz(qz_lz, ref_lz, err_lz, res_lz, self.norm_lz, self.mask_lz)
filter_q = np.where((self.experiment_config.qzRange[0]<q_q) & (q_q<self.experiment_config.qzRange[1]),
True, False)
q_q = q_q[filter_q]
R_q = R_q[filter_q]
dR_q = dR_q[filter_q]
dq_q = dq_q[filter_q]
# The filtering is now done by restricting the qz-grid
#filter_q = np.where((self.experiment_config.qzRange[0]>q_q) & (q_q>self.experiment_config.qzRange[1]),
# False, True)
#q_q = q_q[filter_q]
#R_q = R_q[filter_q]
#dR_q = dR_q[filter_q]
#dq_q = dq_q[filter_q]
if self.reduction_config.autoscale:
if i==0:
@@ -307,11 +308,6 @@ class AmorReduction:
n_path = os.path.join(dataPath, f'{name}.norm')
if os.path.exists(n_path):
logging.warning(f'normalisation matrix: found and using {n_path}')
#self.norm_lz = np.loadtxt(f'{dataPath}/{name}.norm')
#with open(n_path, 'r') as fh:
# fh.readline()
# self.normFileList = fh.readline().split('[')[1].split(']')[0].replace('\'', '').split(', ')
# self.normAngle = float(fh.readline().split('= ')[1])
with open(n_path, 'rb') as fh:
self.normFileList = np.load(fh, allow_pickle=True)
self.normAngle = np.load(fh, allow_pickle=True)
@@ -329,7 +325,7 @@ class AmorReduction:
lamda_e = fromHDF.lamda_e
detZ_e = fromHDF.detZ_e
self.norm_lz, bins_l, bins_z = np.histogram2d(lamda_e, detZ_e, bins = (self.grid.lamda(), self.grid.z()))
self.norm_lz = np.where(self.norm_lz>0, self.norm_lz, np.nan)
self.norm_lz = np.where(self.norm_lz>2, self.norm_lz, np.nan)
# correct for the SM reflectivity
lamda_l = self.grid.lamda()
theta_z = self.normAngle + fromHDF.delta_z
@@ -338,23 +334,14 @@ class AmorReduction:
qz_lz = 4.0*np.pi * np.sin(np.deg2rad(theta_lz)) / lamda_lz
Rsm_lz = np.ones(np.shape(qz_lz))
Rsm_lz = np.where(qz_lz>0.0217, 1-(qz_lz-0.0217)*(0.0625/0.0217), Rsm_lz)
# TODO: introduce variable for `m` and propably for the decay
Rsm_lz = np.where(qz_lz>0.0217*5, np.nan, Rsm_lz)
self.norm_lz = self.norm_lz / Rsm_lz
#if len(lamda_e) > 1e6:
# head = ('normalisation matrix based on the measurements\n'
# f'{fromHDF.file_list}\n'
# f'nu - mu = {self.normAngle}\n'
# f'shape= {np.shape(self.norm_lz)} (lambda, z)\n'
# f'measured at mu = {fromHDF.mu:6.3f} deg\n'
# f'N(l_lambda, z) = theta(z) / sum_i=-1..1 I(l_lambda+i, z)')
# head = head.replace('../', '')
# head = head.replace('./', '')
# head = head.replace('raw/', '')
# np.savetxt(f'{dataPath}/{name}.norm', self.norm_lz, header = head)
if len(lamda_e) > 1e6:
with open(n_path, 'wb') as fh:
np.save(fh, np.array(fromHDF.file_list), allow_pickle=False)
np.save(fh, np.array(fromHDF.mu), allow_pickle=False)
np.save(fh, np.array(self.normAngle), allow_pickle=False)
np.save(fh, self.norm_lz, allow_pickle=False)
self.normFileList = fromHDF.file_list
self.header.reduction.corrections.append('normalisation with \'additional files\'')
@@ -362,7 +349,12 @@ class AmorReduction:
def project_on_lz(self, fromHDF, norm_lz, normAngle, lamda_e, detZ_e):
# projection on lambda-z-grid
lamda_l = self.grid.lamda()
theta_z = fromHDF.nu - fromHDF.mu + fromHDF.delta_z
if self.experiment_config.incidentAngle == 'alphaF':
theta_z = fromHDF.nu - fromHDF.mu + fromHDF.delta_z
elif self.experiment_config.incidentAngle == 'nu':
theta_z = (fromHDF.nu + fromHDF.delta_z + fromHDF.kap + fromHDF.kad) / 2.
else:
pass
lamda_lz = (self.grid.lz().T*lamda_l[:-1]).T
theta_lz = self.grid.lz()*theta_z
@@ -393,7 +385,7 @@ class AmorReduction:
int_lz, bins_l, bins_z = np.histogram2d(lamda_e, detZ_e, bins = (lamda_l, self.grid.z()))
# cut normalisation sample horizon
int_lz = np.where(mask_lz, int_lz, np.nan)
thetaF_lz = np.where(mask_lz, theta_lz, np.nan)
thetaF_lz = np.where(mask_lz, theta_lz, np.nan)
ref_lz = (int_lz * np.absolute(thetaN_lz)) / (norm_lz * np.absolute(thetaF_lz))
err_lz = ref_lz * np.sqrt( 1/(int_lz+.1) + 1/norm_lz )

46
neos.py
View File

@@ -1,46 +0,0 @@
#!/usr/bin/env python
"""
eos reduces measurements performed on Amor@SINQ, PSI
Author: Jochen Stahn (algorithms, python draft),
Artur Glavic (structuring and optimisation of code)
conventions (not strictly followed, yet):
- array names end with the suffix '_x[y]' with the meaning
_e = events
_tof
_l = lambda
_t = theta
_z = detector z
_lz = (lambda, detector z)
_q = q_z
"""
import logging
from libeos.command_line import command_line_options
from libeos.logconfig import setup_logging
from libeos.reduction import AmorReduction
#=====================================================================================================
# TODO:
# - calculate resolution using the chopperPhase
# - deal with background correction
# - format of 'call' + add '-Y' if not supplied
#=====================================================================================================
def main():
setup_logging()
logging.warning('######## eos - data reduction for Amor ########')
# read command line arguments and generate classes holding configuration parameters
config = command_line_options()
# Create reducer with these arguments
reducer = AmorReduction(config)
# Perform actual reduction
reducer.reduce()
logging.info('######## eos - finished ########')
if __name__ == '__main__':
main()

1
neos.py Symbolic link
View File

@@ -0,0 +1 @@
eos.py