Move all reduction code to AmorReducer class and options to dataclasses, all command line handling separated
This commit is contained in:
@@ -1,6 +1,9 @@
|
||||
import argparse
|
||||
from datetime import datetime
|
||||
|
||||
from .logconfig import update_loglevel
|
||||
from .options import DataReaderConfig, OutputConfig, ReductionConfig
|
||||
|
||||
|
||||
def commandLineArgs():
|
||||
"""
|
||||
@@ -151,5 +154,40 @@ def output_format_list(outputFormat):
|
||||
format_list.append('Rqz.orb')
|
||||
if 'orb' in outputFormat or 'Rlt.orb' in outputFormat or 'Rlt' in outputFormat:
|
||||
format_list.append('Rlt.orb')
|
||||
|
||||
return sorted(format_list, reverse=True)
|
||||
|
||||
def command_line_options():
    """Parse the command line and bundle the values into config objects.

    Applies the requested log level, then returns a tuple
    (DataReaderConfig, ReductionConfig, OutputConfig) built from the
    parsed arguments.
    """
    args = commandLineArgs()
    update_loglevel(args.verbose, args.debug)

    # collect the keyword arguments per config class, then build all three
    reader_kwargs = dict(
        year=args.year,
        dataPath=args.dataPath,
        sampleModel=args.sampleModel,
        chopperPhase=args.chopperPhase,
        chopperPhaseOffset=args.chopperPhaseOffset,
        yRange=args.yRange,
        lambdaRange=args.lambdaRange,
        qzRange=args.qzRange,
        offSpecular=args.offSpecular,
        mu=args.mu,
        nu=args.nu,
        muOffset=args.muOffset,
    )
    reduction_kwargs = dict(
        qResolution=args.qResolution,
        autoscale=args.autoscale,
        thetaRange=args.thetaRange,
        thetaRangeR=args.thetaRangeR,
        lambdaRange=args.lambdaRange,
        fileIdentifier=args.fileIdentifier,
        scale=args.scale,
        subtract=args.subtract,
        normalisationFileIdentifier=args.normalisationFileIdentifier,
        timeSlize=args.timeSlize,
    )
    output_kwargs = dict(
        outputFormats=output_format_list(args.outputFormat),
        outputName=args.outputName,
    )

    return (
        DataReaderConfig(**reader_kwargs),
        ReductionConfig(**reduction_kwargs),
        OutputConfig(**output_kwargs),
    )
|
||||
@@ -4,8 +4,6 @@ import platform
|
||||
import subprocess
|
||||
import sys
|
||||
from datetime import datetime
|
||||
from dataclasses import dataclass
|
||||
from typing import Optional, Tuple
|
||||
|
||||
import h5py
|
||||
import numpy as np
|
||||
@@ -13,23 +11,8 @@ from orsopy import fileio
|
||||
|
||||
from . import __version__, const
|
||||
from .instrument import Detector
|
||||
from .options import DataReaderConfig
|
||||
|
||||
@dataclass
class DataReaderConfig:
    """Configuration for the raw-data reader.

    NOTE(review): this definition duplicates ``DataReaderConfig`` in
    ``libeos/options.py`` (also imported above) — one of the two should
    be removed to avoid the two copies drifting apart.
    """
    # required settings (no defaults)
    year: int
    dataPath: str
    sampleModel: str

    chopperPhase: float
    yRange: Tuple[float, float]
    lambdaRange: Tuple[float, float]
    qzRange: Tuple[float, float]

    # optional settings
    chopperPhaseOffset: float = 0.0
    mu: Optional[float] = None
    nu: Optional[float] = None
    muOffset: Optional[float] = None
    offSpecular: bool = False
|
||||
|
||||
class Header:
|
||||
"""orso compatible output file header content"""
|
||||
|
||||
@@ -3,6 +3,7 @@ Setup for the logging of eos.
|
||||
"""
|
||||
import sys
|
||||
import logging
|
||||
import logging.handlers
|
||||
|
||||
def setup_logging():
|
||||
logger = logging.getLogger() # logging.getLogger('quicknxs')
|
||||
|
||||
43
libeos/options.py
Normal file
43
libeos/options.py
Normal file
@@ -0,0 +1,43 @@
|
||||
"""
|
||||
Classes for storing various configurations needed for reduction.
|
||||
"""
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Optional, Tuple
|
||||
|
||||
|
||||
@dataclass
class DataReaderConfig:
    """Settings that control how raw measurement data is read.

    Field names mirror the command-line options of the same names (see
    ``command_line_options``); instances are passed to the data reader.
    """
    # required settings (no defaults)
    year: int
    dataPath: str  # directory holding the raw data and derived files (e.g. cached *.norm)
    sampleModel: str

    chopperPhase: float
    yRange: Tuple[float, float]
    lambdaRange: Tuple[float, float]  # wavelength window
    qzRange: Tuple[float, float]      # q_z window

    # optional settings
    chopperPhaseOffset: float = 0.0
    mu: Optional[float] = None        # presumably overrides for the instrument angles — TODO confirm
    nu: Optional[float] = None
    muOffset: Optional[float] = None
    offSpecular: bool = False
|
||||
|
||||
@dataclass
class ReductionConfig:
    """Settings that control the reduction step (used by AmorReduction)."""
    qResolution: float               # q_z grid resolution, passed to Grid
    # NOTE(review): AmorReduction.autoscale uses this as a numeric q-window
    # (autoscale[0] <= q_q <= autoscale[1]); the declared bool pair looks
    # wrong — confirm the intended type.
    autoscale: Tuple[bool, bool]
    thetaRange: Tuple[float, float]
    thetaRangeR: Tuple[float, float]  # theta range relative to nu - mu
    lambdaRange: Tuple[float, float]

    fileIdentifier: list = field(default_factory=lambda: ["0"])
    # per file scaling, if less elements than files use the last one
    scale: list = field(default_factory=lambda: [1])

    subtract: Optional[str] = None    # name of an R(q_z) file to subtract as background
    normalisationFileIdentifier: Optional[list] = None
    # [interval, start, stop]; NOTE: the 'Slize' spelling is part of the public name
    timeSlize: Optional[list] = None
    # NOTE(review): AmorReduction.reduce also reads reduction_config.qzRange,
    # which is not declared here — confirm whether qzRange belongs in this
    # class or whether that access should use data_reader_config.qzRange.
|
||||
|
||||
@dataclass
class OutputConfig:
    """Settings for the reduced-data output files."""
    outputFormats: list  # normalised format list, see command_line.output_format_list
    outputName: str      # basename of the output files, e.g. '<outputName>.Rqz.ort'
|
||||
@@ -1,158 +1,382 @@
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
|
||||
import numpy as np
|
||||
from orsopy import fileio
|
||||
|
||||
from libeos.command_line import expand_file_list
|
||||
from libeos.dataset import AmorData
|
||||
from .command_line import expand_file_list
|
||||
from .dataset import AmorData, Header
|
||||
from .options import DataReaderConfig, ReductionConfig, OutputConfig
|
||||
from .instrument import Grid
|
||||
|
||||
class AmorReduction:
|
||||
def __init__(self,
             data_reader_config: DataReaderConfig,
             reduction_config: ReductionConfig,
             output_config: OutputConfig):
    """Store the three configuration objects consumed by reduce()."""
    self.data_reader_config = data_reader_config
    self.reduction_config = reduction_config
    self.output_config = output_config
|
||||
|
||||
def normalisation_map(short_notation, header, grid, dataPath):
    """Load a cached normalisation matrix or build one from reference files.

    If '<dataPath>/<name>.norm' exists (name built from the file numbers in
    *short_notation*) it is loaded; otherwise the reference measurement(s)
    are read, histogrammed onto the (lambda, z) grid, corrected for the
    supermirror reflectivity and — for large event counts — cached to disk.

    Returns (norm_lz, normAngle, normFileList).
    """
    fromHDF = AmorData()
    normalisation_list = expand_file_list(short_notation)
    # cache-file name joins all file numbers, e.g. '123_124.norm'
    name = str(normalisation_list[0])
    for i in range(1, len(normalisation_list), 1):
        name = f'{name}_{normalisation_list[i]}'
    if os.path.exists(f'{dataPath}/{name}.norm'):
        logging.info(f'# normalisation matrix: found and using {dataPath}/{name}.norm')
        norm_lz = np.loadtxt(f'{dataPath}/{name}.norm')
        # re-read the header lines for the file list and the angle; a context
        # manager guarantees the handle is closed even on a parse error
        with open(f'{dataPath}/{name}.norm', 'r') as fh:
            fh.readline()
            normFileList = fh.readline().split('[')[1].split(']')[0].replace('\'', '').split(', ')
            normAngle = float(fh.readline().split('= ')[1])
        for i, entry in enumerate(normFileList):
            normFileList[i] = entry.split('/')[-1]
        header.measurement_additional_files = normFileList
    else:
        logging.info(f'# normalisation matrix: using the files {normalisation_list}')
        fromHDF.read_data(short_notation, norm=True)
        normAngle = fromHDF.nu - fromHDF.mu
        lamda_e = fromHDF.lamda_e
        detZ_e = fromHDF.detZ_e
        norm_lz, bins_l, bins_z = np.histogram2d(lamda_e, detZ_e, bins = (grid.lamda(), grid.z()))
        # NaN marks empty pixels so later divisions propagate "no data"
        norm_lz = np.where(norm_lz>0, norm_lz, np.nan)
        # correct for the SM reflectivity
        lamda_l = grid.lamda()
        theta_z = normAngle + fromHDF.delta_z
        lamda_lz = (grid.lz().T*lamda_l[:-1]).T
        theta_lz = grid.lz()*theta_z
        qz_lz = 4.0*np.pi * np.sin(np.deg2rad(theta_lz)) / lamda_lz
        Rsm_lz = np.ones(np.shape(qz_lz))
        # linear decay above q_c = 0.0217, undefined above 5*q_c — TODO confirm coefficients
        Rsm_lz = np.where(qz_lz>0.0217, 1-(qz_lz-0.0217)*(0.0625/0.0217), Rsm_lz)
        Rsm_lz = np.where(qz_lz>0.0217*5, np.nan, Rsm_lz)
        norm_lz = norm_lz / Rsm_lz
        if len(lamda_e) > 1e6:
            # only cache maps with sufficient statistics
            head = ('normalisation matrix based on the measurements\n'
                    f'{fromHDF.file_list}\n'
                    f'nu - mu = {normAngle}\n'
                    f'shape= {np.shape(norm_lz)} (lambda, z)\n'
                    f'measured at mu = {fromHDF.mu:6.3f} deg\n'
                    f'N(l_lambda, z) = theta(z) / sum_i=-1..1 I(l_lambda+i, z)')
            head = head.replace('../', '')
            head = head.replace('./', '')
            head = head.replace('raw/', '')
            np.savetxt(f'{dataPath}/{name}.norm', norm_lz, header = head)
        normFileList = fromHDF.file_list
    return norm_lz, normAngle, normFileList
|
||||
def reduce(self):
|
||||
self.grid = Grid(self.reduction_config.qResolution)
|
||||
self.header = Header()
|
||||
self.startTime = 0
|
||||
if not os.path.exists(f'{self.data_reader_config.dataPath}'):
|
||||
os.system(f'mkdir {self.data_reader_config.dataPath}')
|
||||
fromHDF = AmorData(self.startTime, header=self.header, config=self.data_reader_config)
|
||||
logging.warning('\n######## eos - data reduction for Amor ########')
|
||||
|
||||
|
||||
def project_on_lz(fromHDF, norm_lz, normAngle, lamda_e, detZ_e, grid, thetaRange, thetaRangeR, lambdaRange):
    """Histogram the events onto the (lambda, z) grid and normalise them.

    Builds a validity mask (normalisation defined, angles not numerically
    zero, optional theta/lambda windows), applies a gravity correction to
    the angles, and divides the event histogram by the normalisation map.

    Returns (qz_lz, ref_lz, err_lz, res_lz, lamda_lz, theta_lz, int_lz, mask_lz).
    """
    # projection on lambda-z-grid
    lamda_l = grid.lamda()
    theta_z = fromHDF.nu - fromHDF.mu + fromHDF.delta_z
    lamda_lz = (grid.lz().T*lamda_l[:-1]).T
    theta_lz = grid.lz()*theta_z

    # angles the normalisation measurement was taken at
    thetaN_z = fromHDF.delta_z + normAngle
    thetaN_lz = np.ones(np.shape(norm_lz))*thetaN_z
    thetaN_lz = np.where(np.absolute(thetaN_lz)>5e-3, thetaN_lz, np.nan)

    # mask: valid normalisation and angles away from zero
    mask_lz = np.where(np.isnan(norm_lz), False, True)
    mask_lz = np.logical_and(mask_lz, np.where(np.absolute(thetaN_lz)>5e-3, True, False))
    mask_lz = np.logical_and(mask_lz, np.where(np.absolute(theta_lz)>5e-3, True, False))
    # the <12 / <15 guards skip the window filter when the full default
    # range is requested — presumably sentinel defaults; TODO confirm
    if thetaRange[1]<12:
        mask_lz = np.logical_and(mask_lz, np.where(theta_lz >= thetaRange[0], True, False))
        mask_lz = np.logical_and(mask_lz, np.where(theta_lz <= thetaRange[1], True, False))
    if thetaRangeR[1]<12:
        # window relative to the specular angle nu - mu
        t0 = fromHDF.nu - fromHDF.mu
        mask_lz = np.logical_and(mask_lz, np.where(theta_lz-t0 >= thetaRangeR[0], True, False))
        mask_lz = np.logical_and(mask_lz, np.where(theta_lz-t0 <= thetaRangeR[1], True, False))
    if lambdaRange[1]<15:
        mask_lz = np.logical_and(mask_lz, np.where(lamda_lz >= lambdaRange[0], True, False))
        mask_lz = np.logical_and(mask_lz, np.where(lamda_lz <= lambdaRange[1], True, False))

    # gravity correction
    #theta_lz += np.rad2deg( np.arctan( 3.07e-10 * (fromHDF.detectorDistance + detXdist_e) * lamda_lz**2 ) )
    theta_lz += np.rad2deg( np.arctan( 3.07e-10 * fromHDF.detectorDistance * lamda_lz**2 ) )

    z_z = enumerate(theta_z)  # NOTE(review): never used — dead assignment
    qz_lz = 4.0*np.pi * np.sin(np.deg2rad(theta_lz)) / lamda_lz
    int_lz, bins_l, bins_z = np.histogram2d(lamda_e, detZ_e, bins = (lamda_l, grid.z()))
    # cut normalisation sample horizon
    int_lz = np.where(mask_lz, int_lz, np.nan)
    thetaF_lz = np.where(mask_lz, theta_lz, np.nan)

    # reflectivity: intensity over normalisation, weighted by the ratio of
    # the two measurement angles
    ref_lz = (int_lz * np.absolute(thetaN_lz)) / (norm_lz * np.absolute(thetaF_lz))
    err_lz = ref_lz * np.sqrt( 1/(int_lz+.1) + 1/norm_lz )

    # q_z resolution: constant relative term plus angular term — TODO confirm coefficients
    res_lz = np.ones((np.shape(lamda_l[:-1])[0], np.shape(theta_z)[0])) * 0.022**2
    res_lz = res_lz + (0.008/theta_lz)**2
    res_lz = qz_lz * np.sqrt(res_lz)

    return qz_lz, ref_lz, err_lz, res_lz, lamda_lz, theta_lz, int_lz, mask_lz
|
||||
|
||||
|
||||
def project_on_qz(q_lz, R_lz, dR_lz, dq_lz, norm_lz, mask_lz, grid):
    """Project masked (lambda, z) reflectivity maps onto the q_z grid.

    Only pixels with mask_lz True contribute; per-bin values are averaged
    with the normalisation matrix as weight.

    Returns (q_q, R_q, dR_q, dq_q): bin centres, reflectivity, its
    uncertainty and the q resolution per bin.
    """
    q_edges = grid.q()

    # flatten the maps and keep only the masked-in pixels
    keep = mask_lz.flatten()
    q_f = q_lz.flatten()[keep]
    R_f = R_lz.flatten()[keep]
    dR_f = dR_lz.flatten()[keep]
    dq_f = dq_lz.flatten()[keep]
    w_f = norm_lz.flatten()[keep]

    def _hist(weights):
        # histogram of the kept pixels on the q grid with the given weights
        return np.histogram(q_f, bins = q_edges, weights = weights)[0]

    # total weight per bin; NaN marks empty bins
    weight_q = _hist(w_f)
    weight_q = np.where(weight_q > 0, weight_q, np.nan)

    # weighted mean reflectivity and its propagated uncertainty
    R_q = _hist(w_f * R_f) / weight_q
    dR_q = np.sqrt(_hist((w_f * dR_f)**2)) / weight_q

    # TODO: different error propagations for dR and dq!
    weight2_q = _hist(w_f**2)
    weight2_q = np.where(weight2_q > 0, weight2_q, np.nan)
    dq_q = np.sqrt(_hist((w_f * dq_f)**2) / weight2_q)

    # bin edges -> bin centres; drop the wrap-around first element
    centre_q = 0.5 * (q_edges + np.roll(q_edges, 1))

    return centre_q[1:], R_q, dR_q, dq_q
|
||||
|
||||
|
||||
def autoscale(q_q, R_q, dR_q, autoscale, pR_q=[], pdR_q=[]):
|
||||
if len(pR_q) == 0:
|
||||
filter_q = np.where((autoscale[0]<=q_q)&(q_q<=autoscale[1]), True, False)
|
||||
filter_q = np.where(dR_q>0, filter_q, False)
|
||||
if len(filter_q[filter_q]) > 0:
|
||||
scale = np.sum(R_q[filter_q]**2/dR_q[filter_q]) / np.sum(R_q[filter_q]/dR_q[filter_q])
|
||||
# load or create normalisation matrix
|
||||
if self.reduction_config.normalisationFileIdentifier:
|
||||
normalise = True
|
||||
norm_lz, normAngle, normFileList = self.normalisation_map(self.reduction_config.normalisationFileIdentifier[0])
|
||||
self.header.reduction.corrections.append('normalisation with \'additional files\'')
|
||||
else:
|
||||
logging.warning(f'# automatic scaling not possible')
|
||||
scale = 1.
|
||||
else:
|
||||
filter_q = np.where(np.isnan(pR_q*R_q), False, True)
|
||||
filter_q = np.where(R_q>0, filter_q, False)
|
||||
filter_q = np.where(pR_q>0, filter_q, False)
|
||||
if len(filter_q[filter_q]) > 0:
|
||||
scale = np.sum(R_q[filter_q]**3 * pR_q[filter_q] / (dR_q[filter_q]**2 * pdR_q[filter_q]**2)) \
|
||||
/ np.sum(R_q[filter_q]**2 * pR_q[filter_q]**2 / (dR_q[filter_q]**2 * pdR_q[filter_q]**2))
|
||||
else:
|
||||
logging.warning(f'# automatic scaling not possible')
|
||||
scale = 1.
|
||||
R_q /= scale
|
||||
dR_q /= scale
|
||||
logging.debug(f'# scaling factor = {scale}')
|
||||
normalise = False
|
||||
norm_lz = self.grid.lz()
|
||||
normAngle = 1.
|
||||
|
||||
logging.warning('normalisation matrix: none requested')
|
||||
|
||||
# load R(q_z) curve to be subtracted:
|
||||
if self.reduction_config.subtract:
|
||||
sq_q, sR_q, sdR_q, sFileName = self.loadRqz(self.reduction_config.subtract)
|
||||
subtract = True
|
||||
logging.warning(f'loaded background file: {sFileName}')
|
||||
self.header.reduction.corrections.append(f'background from \'{sFileName}\' subtracted')
|
||||
else:
|
||||
subtract = False
|
||||
|
||||
# load measurement data and do the reduction
|
||||
datasetsRqz = []
|
||||
datasetsRlt = []
|
||||
for i, short_notation in enumerate(self.reduction_config.fileIdentifier):
|
||||
logging.warning('reading input:')
|
||||
self.header.measurement_data_files = []
|
||||
fromHDF.read_data(short_notation)
|
||||
|
||||
if self.reduction_config.timeSlize:
|
||||
wallTime_e = fromHDF.wallTime_e
|
||||
columns = self.header.columns()+[fileio.Column('time', 's', 'time relative to start of measurement series')]
|
||||
headerRqz = fileio.Orso(self.header.data_source(), self.header.reduction, columns)
|
||||
|
||||
interval = self.reduction_config.timeSlize[0]
|
||||
try:
|
||||
start = self.reduction_config.timeSlize[1]
|
||||
except:
|
||||
start = 0
|
||||
try:
|
||||
stop = self.reduction_config.timeSlize[2]
|
||||
except:
|
||||
stop = wallTime_e[-1]
|
||||
# make overwriting log lines possible by removing newline at the end
|
||||
logging.StreamHandler.terminator = "\r"
|
||||
for i, time in enumerate(np.arange(start, stop, interval)):
|
||||
logging.warning(f' time slize {i:4d}')
|
||||
|
||||
filter_e = np.where((time<wallTime_e) & (wallTime_e<time+interval), True, False)
|
||||
lamda_e = fromHDF.lamda_e[filter_e]
|
||||
detZ_e = fromHDF.detZ_e[filter_e]
|
||||
|
||||
qz_lz, ref_lz, err_lz, res_lz, lamda_lz, theta_lz, int_lz, mask_lz = self.project_on_lz(
|
||||
fromHDF, norm_lz, normAngle, lamda_e, detZ_e)
|
||||
q_q, R_q, dR_q, dq_q = self.project_on_qz(qz_lz, ref_lz, err_lz, res_lz, norm_lz, mask_lz)
|
||||
|
||||
filter_q = np.where((self.data_reader_config.qzRange[0]<q_q) & (q_q<self.data_reader_config.qzRange[1]),
|
||||
True, False)
|
||||
q_q = q_q[filter_q]
|
||||
R_q = R_q[filter_q]
|
||||
dR_q = dR_q[filter_q]
|
||||
dq_q = dq_q[filter_q]
|
||||
|
||||
if self.reduction_config.autoscale:
|
||||
R_q, dR_q = self.autoscale(q_q, R_q, dR_q)
|
||||
|
||||
if subtract:
|
||||
if len(q_q)==len(sq_q):
|
||||
R_q -= sR_q
|
||||
dR_q = np.sqrt(dR_q**2+sdR_q**2)
|
||||
else:
|
||||
subtract = False
|
||||
logging.warning(
|
||||
f'background file {sFileName} not compatible with q_z scale ({len(sq_q)} vs. {len(q_q)})')
|
||||
|
||||
tme_q = np.ones(np.shape(q_q))*time
|
||||
data = np.array([q_q, R_q, dR_q, dq_q, tme_q]).T
|
||||
headerRqz.data_set = f'{i}: time = {time:8.1f} s to {time+interval:8.1f} s'
|
||||
orso_data = fileio.OrsoDataset(headerRqz, data)
|
||||
# make a copy of the header for the next iteration
|
||||
headerRqz = fileio.Orso.from_dict(headerRqz.to_dict())
|
||||
datasetsRqz.append(orso_data)
|
||||
# reset normal logging behavior
|
||||
logging.StreamHandler.terminator = "\n"
|
||||
logging.warning(f' time slize {i:4d}, done')
|
||||
else:
|
||||
lamda_e = fromHDF.lamda_e
|
||||
detZ_e = fromHDF.detZ_e
|
||||
|
||||
qz_lz, ref_lz, err_lz, res_lz, lamda_lz, theta_lz, int_lz, mask_lz = self.project_on_lz(
|
||||
fromHDF, norm_lz, normAngle, lamda_e, detZ_e)
|
||||
try:
|
||||
ref_lz *= self.reduction_config.scale[i]
|
||||
err_lz *= self.reduction_config.scale[i]
|
||||
except IndexError:
|
||||
ref_lz *= self.reduction_config.scale[-1]
|
||||
err_lz *= self.reduction_config.scale[-1]
|
||||
|
||||
if 'Rqz.ort' in self.output_config.outputFormats:
|
||||
headerRqz = fileio.Orso(self.header.data_source(), self.header.reduction, self.header.columns())
|
||||
headerRqz.data_set = f'Nr {i} : mu = {fromHDF.mu:6.3f} deg'
|
||||
|
||||
# projection on q-grid
|
||||
q_q, R_q, dR_q, dq_q = self.project_on_qz(qz_lz, ref_lz, err_lz, res_lz, norm_lz, mask_lz, self.grid)
|
||||
|
||||
filter_q = np.where((self.reduction_config.qzRange[0]<q_q) & (q_q<self.reduction_config.qzRange[1]), True, False)
|
||||
q_q = q_q[filter_q]
|
||||
R_q = R_q[filter_q]
|
||||
dR_q = dR_q[filter_q]
|
||||
dq_q = dq_q[filter_q]
|
||||
|
||||
if self.reduction_config.autoscale:
|
||||
if i==0:
|
||||
R_q, dR_q = self.autoscale(q_q, R_q, dR_q)
|
||||
else:
|
||||
pRq_z = datasetsRqz[i-1].data[:, 1]
|
||||
pdRq_z = datasetsRqz[i-1].data[:, 2]
|
||||
R_q, dR_q = self.autoscale(q_q, R_q, dR_q, pRq_z, pdRq_z)
|
||||
|
||||
if subtract:
|
||||
if len(q_q)==len(sq_q):
|
||||
R_q -= sR_q
|
||||
dR_q = np.sqrt(dR_q**2+sdR_q**2)
|
||||
else:
|
||||
logging.warning(
|
||||
f'backgroung file {sFileName} not compatible with q_z scale ({len(sq_q)} vs. {len(q_q)})')
|
||||
|
||||
data = np.array([q_q, R_q, dR_q, dq_q]).T
|
||||
orso_data = fileio.OrsoDataset(headerRqz, data)
|
||||
headerRqz = fileio.Orso(**headerRqz.to_dict())
|
||||
datasetsRqz.append(orso_data)
|
||||
|
||||
if 'Rlt.ort' in self.output_config.outputFormats:
|
||||
columns = [
|
||||
fileio.Column('Qz', '1/angstrom', 'normal momentum transfer'),
|
||||
fileio.Column('R', '', 'specular reflectivity'),
|
||||
fileio.ErrorColumn(error_of='R', error_type='uncertainty', value_is='sigma'),
|
||||
fileio.ErrorColumn(error_of='Qz', error_type='resolution', value_is='sigma'),
|
||||
fileio.Column('lambda', 'angstrom', 'wavelength'),
|
||||
fileio.Column('alpha_f', 'deg', 'final angle'),
|
||||
fileio.Column('l', '', 'index of lambda-bin'),
|
||||
fileio.Column('t', '', 'index of theta bin'),
|
||||
fileio.Column('intensity', '', 'filtered neutron events per pixel'),
|
||||
fileio.Column('norm', '', 'normalisation matrix'),
|
||||
fileio.Column('mask', '', 'pixels used for calculating R(q_z)'),
|
||||
]
|
||||
# data_source = fromHDF.data_source
|
||||
headerRlt = fileio.Orso(self.header.data_source, self.header.reduction, columns)
|
||||
|
||||
ts, zs = ref_lz.shape
|
||||
lindex_lz = np.tile(np.arange(1, ts+1), (zs, 1)).T
|
||||
tindex_lz = np.tile(np.arange(1, zs+1), (ts, 1))
|
||||
|
||||
j = 0
|
||||
for item in zip(
|
||||
qz_lz.T,
|
||||
ref_lz.T,
|
||||
err_lz.T,
|
||||
res_lz.T,
|
||||
lamda_lz.T,
|
||||
theta_lz.T,
|
||||
lindex_lz.T,
|
||||
tindex_lz.T,
|
||||
int_lz.T,
|
||||
norm_lz.T,
|
||||
np.where(mask_lz, 1, 0).T
|
||||
):
|
||||
data = np.array(list(item)).T
|
||||
headerRlt = fileio.Orso(**headerRlt.to_dict())
|
||||
headerRlt.data_set = f'dataset_{i}_{j+1} : alpha_f = {theta_lz[0, j]:6.3f} deg'
|
||||
orso_data = fileio.OrsoDataset(headerRlt, data)
|
||||
datasetsRlt.append(orso_data)
|
||||
j += 1
|
||||
|
||||
# output
|
||||
logging.warning('output:')
|
||||
|
||||
if 'Rqz.ort' in self.output_config.outputFormats:
|
||||
logging.warning(f' {self.data_reader_config.dataPath}/{self.output_config.outputName}.Rqz.ort')
|
||||
theSecondLine = f' {self.header.experiment.title} | {self.header.experiment.start_date} | sample {self.header.sample.name} | R(q_z)'
|
||||
fileio.save_orso(datasetsRqz, f'{self.data_reader_config.dataPath}/{self.output_config.outputName}.Rqz.ort', data_separator='\n',
|
||||
comment=theSecondLine)
|
||||
|
||||
if 'Rlt.ort' in self.output_config.outputFormats:
|
||||
logging.warning(f' {self.data_reader_config.dataPath}/{self.output_config.outputName}.Rlt.ort')
|
||||
theSecondLine = f' {self.header.experiment.title} | {self.header.experiment.start_date} | sample {self.header.sample.name} | R(lambda, theta)'
|
||||
fileio.save_orso(datasetsRlt, f'{self.data_reader_config.dataPath}/{self.output_config.outputName}.Rlt.ort', data_separator='\n',
|
||||
comment=theSecondLine)
|
||||
|
||||
|
||||
def autoscale(self, q_q, R_q, dR_q, pR_q=(), pdR_q=()):
    """Scale R_q and dR_q and return them (the arrays are modified in place).

    With no previous curve (pR_q empty), the scale is a weighted average of
    R within the q-window self.reduction_config.autoscale, i.e. the curve
    is normalised towards 1 there.  With a previous curve, the scale is an
    error-weighted match to the overlap region of (pR_q, pdR_q).  Falls
    back to scale = 1 with a warning when no usable points exist.
    """
    # mutable list defaults replaced by immutable tuples; only len() is
    # taken on the defaults, so behaviour is unchanged
    autoscale = self.reduction_config.autoscale
    if len(pR_q) == 0:
        filter_q = np.where((autoscale[0]<=q_q)&(q_q<=autoscale[1]), True, False)
        filter_q = np.where(dR_q>0, filter_q, False)
        if len(filter_q[filter_q]) > 0:
            scale = np.sum(R_q[filter_q]**2/dR_q[filter_q]) / np.sum(R_q[filter_q]/dR_q[filter_q])
        else:
            logging.warning('# automatic scaling not possible')
            scale = 1.
    else:
        # use only points where both curves are defined and positive
        filter_q = np.where(np.isnan(pR_q*R_q), False, True)
        filter_q = np.where(R_q>0, filter_q, False)
        filter_q = np.where(pR_q>0, filter_q, False)
        if len(filter_q[filter_q]) > 0:
            scale = np.sum(R_q[filter_q]**3 * pR_q[filter_q] / (dR_q[filter_q]**2 * pdR_q[filter_q]**2)) \
                    / np.sum(R_q[filter_q]**2 * pR_q[filter_q]**2 / (dR_q[filter_q]**2 * pdR_q[filter_q]**2))
        else:
            logging.warning('# automatic scaling not possible')
            scale = 1.
    R_q /= scale
    dR_q /= scale
    logging.debug(f'# scaling factor = {scale}')

    return R_q, dR_q
|
||||
|
||||
def project_on_qz(self, q_lz, R_lz, dR_lz, dq_lz, norm_lz, mask_lz):
    """Project the masked (lambda, z) maps onto the q_z grid.

    Only pixels with mask_lz True contribute; per-bin values are averaged
    with the normalisation matrix as weight.

    Returns (q_q, R_q, dR_q, dq_q): bin centres, reflectivity, its
    uncertainty and the q resolution per bin.
    """
    q_q = self.grid.q()
    # flatten the maps and keep only the masked-in pixels
    mask_lzf = mask_lz.flatten()
    q_lzf = q_lz.flatten()[mask_lzf]
    R_lzf = R_lz.flatten()[mask_lzf]
    dR_lzf = dR_lz.flatten()[mask_lzf]
    dq_lzf = dq_lz.flatten()[mask_lzf]
    norm_lzf = norm_lz.flatten()[mask_lzf]

    # total weight per q-bin; NaN marks empty bins
    N_q = np.histogram(q_lzf, bins = q_q, weights = norm_lzf )[0]
    N_q = np.where(N_q > 0, N_q, np.nan)

    # weighted mean reflectivity
    R_q = np.histogram(q_lzf, bins = q_q, weights = norm_lzf * R_lzf )[0]
    R_q = R_q / N_q

    # propagated uncertainty of the weighted mean
    dR_q = np.histogram(q_lzf, bins = q_q, weights = (norm_lzf * dR_lzf)**2 )[0]
    dR_q = np.sqrt( dR_q ) / N_q

    # TODO: different error propagations for dR and dq!
    N_q = np.histogram(q_lzf, bins = q_q, weights = norm_lzf**2 )[0]
    N_q = np.where(N_q > 0, N_q, np.nan)
    dq_q = np.histogram(q_lzf, bins = q_q, weights = (norm_lzf * dq_lzf)**2 )[0]
    dq_q = np.sqrt( dq_q / N_q )

    # bin edges -> bin centres; drop the wrap-around first element
    q_q = 0.5 * (q_q + np.roll(q_q, 1))

    return q_q[1:], R_q, dR_q, dq_q
|
||||
|
||||
def loadRqz(self, name):
    """Load a reduced R(q_z) curve from the data path.

    Tries '<dataPath>/<name>' first, then '<dataPath>/<name>.Rqz.ort';
    exits the program if neither exists.

    Returns (q_q, Sq_q, dS_q, fileName).
    """
    base = f'{self.data_reader_config.dataPath}/{name}'
    for fileName in (base, f'{base}.Rqz.ort'):
        if os.path.exists(fileName):
            break
    else:
        sys.exit(f'### the background file \'{base}\' does not exist! => stopping')

    q_q, Sq_q, dS_q = np.loadtxt(fileName, usecols=(0, 1, 2), comments='#', unpack=True)

    return q_q, Sq_q, dS_q, fileName
|
||||
|
||||
def normalisation_map(self, short_notation):
    """Load a cached normalisation matrix or build one from reference files.

    If '<dataPath>/<name>.norm' exists (name built from the file numbers in
    *short_notation*) it is loaded; otherwise the reference measurement(s)
    are read, histogrammed onto the (lambda, z) grid, corrected for the
    supermirror reflectivity and — for large event counts — cached to disk.

    Returns (norm_lz, normAngle, normFileList).
    """
    dataPath = self.data_reader_config.dataPath
    fromHDF = AmorData(self.startTime, self.header, self.data_reader_config)
    normalisation_list = expand_file_list(short_notation)
    # cache-file name joins all file numbers, e.g. '123_124.norm'
    name = str(normalisation_list[0])
    for i in range(1, len(normalisation_list), 1):
        name = f'{name}_{normalisation_list[i]}'
    if os.path.exists(f'{dataPath}/{name}.norm'):
        logging.info(f'# normalisation matrix: found and using {dataPath}/{name}.norm')
        norm_lz = np.loadtxt(f'{dataPath}/{name}.norm')
        # re-read the header lines for the file list and the angle; a context
        # manager guarantees the handle is closed even on a parse error
        with open(f'{dataPath}/{name}.norm', 'r') as fh:
            fh.readline()
            normFileList = fh.readline().split('[')[1].split(']')[0].replace('\'', '').split(', ')
            normAngle = float(fh.readline().split('= ')[1])
        for i, entry in enumerate(normFileList):
            normFileList[i] = entry.split('/')[-1]
        self.header.measurement_additional_files = normFileList
    else:
        logging.info(f'# normalisation matrix: using the files {normalisation_list}')
        fromHDF.read_data(short_notation, norm=True)
        normAngle = fromHDF.nu - fromHDF.mu
        lamda_e = fromHDF.lamda_e
        detZ_e = fromHDF.detZ_e
        norm_lz, bins_l, bins_z = np.histogram2d(lamda_e, detZ_e, bins = (self.grid.lamda(), self.grid.z()))
        # NaN marks empty pixels so later divisions propagate "no data"
        norm_lz = np.where(norm_lz>0, norm_lz, np.nan)
        # correct for the SM reflectivity
        lamda_l = self.grid.lamda()
        theta_z = normAngle + fromHDF.delta_z
        lamda_lz = (self.grid.lz().T*lamda_l[:-1]).T
        theta_lz = self.grid.lz()*theta_z
        qz_lz = 4.0*np.pi * np.sin(np.deg2rad(theta_lz)) / lamda_lz
        Rsm_lz = np.ones(np.shape(qz_lz))
        # linear decay above q_c = 0.0217, undefined above 5*q_c — TODO confirm coefficients
        Rsm_lz = np.where(qz_lz>0.0217, 1-(qz_lz-0.0217)*(0.0625/0.0217), Rsm_lz)
        Rsm_lz = np.where(qz_lz>0.0217*5, np.nan, Rsm_lz)
        norm_lz = norm_lz / Rsm_lz
        if len(lamda_e) > 1e6:
            # only cache maps with sufficient statistics
            head = ('normalisation matrix based on the measurements\n'
                    f'{fromHDF.file_list}\n'
                    f'nu - mu = {normAngle}\n'
                    f'shape= {np.shape(norm_lz)} (lambda, z)\n'
                    f'measured at mu = {fromHDF.mu:6.3f} deg\n'
                    f'N(l_lambda, z) = theta(z) / sum_i=-1..1 I(l_lambda+i, z)')
            head = head.replace('../', '')
            head = head.replace('./', '')
            head = head.replace('raw/', '')
            np.savetxt(f'{dataPath}/{name}.norm', norm_lz, header = head)
        normFileList = fromHDF.file_list
    return norm_lz, normAngle, normFileList
|
||||
|
||||
|
||||
def project_on_lz(self, fromHDF, norm_lz, normAngle, lamda_e, detZ_e):
    """Histogram the events onto the (lambda, z) grid and normalise them.

    Builds a validity mask (normalisation defined, angles not numerically
    zero, optional theta/lambda windows from self.reduction_config),
    applies a gravity correction to the angles, and divides the event
    histogram by the normalisation map.

    Returns (qz_lz, ref_lz, err_lz, res_lz, lamda_lz, theta_lz, int_lz, mask_lz).
    """
    # projection on lambda-z-grid
    lamda_l = self.grid.lamda()
    theta_z = fromHDF.nu - fromHDF.mu + fromHDF.delta_z
    lamda_lz = (self.grid.lz().T*lamda_l[:-1]).T
    theta_lz = self.grid.lz()*theta_z

    # angles the normalisation measurement was taken at
    thetaN_z = fromHDF.delta_z + normAngle
    thetaN_lz = np.ones(np.shape(norm_lz))*thetaN_z
    thetaN_lz = np.where(np.absolute(thetaN_lz)>5e-3, thetaN_lz, np.nan)

    # mask: valid normalisation and angles away from zero
    mask_lz = np.where(np.isnan(norm_lz), False, True)
    mask_lz = np.logical_and(mask_lz, np.where(np.absolute(thetaN_lz)>5e-3, True, False))
    mask_lz = np.logical_and(mask_lz, np.where(np.absolute(theta_lz)>5e-3, True, False))
    # the <12 / <15 guards skip the window filter when the full default
    # range is requested — presumably sentinel defaults; TODO confirm
    if self.reduction_config.thetaRange[1]<12:
        mask_lz = np.logical_and(mask_lz, np.where(theta_lz >= self.reduction_config.thetaRange[0], True, False))
        mask_lz = np.logical_and(mask_lz, np.where(theta_lz <= self.reduction_config.thetaRange[1], True, False))
    if self.reduction_config.thetaRangeR[1]<12:
        # window relative to the specular angle nu - mu
        t0 = fromHDF.nu - fromHDF.mu
        mask_lz = np.logical_and(mask_lz, np.where(theta_lz-t0 >= self.reduction_config.thetaRangeR[0], True, False))
        mask_lz = np.logical_and(mask_lz, np.where(theta_lz-t0 <= self.reduction_config.thetaRangeR[1], True, False))
    if self.reduction_config.lambdaRange[1]<15:
        mask_lz = np.logical_and(mask_lz, np.where(lamda_lz >= self.reduction_config.lambdaRange[0], True, False))
        mask_lz = np.logical_and(mask_lz, np.where(lamda_lz <= self.reduction_config.lambdaRange[1], True, False))

    # gravity correction
    #theta_lz += np.rad2deg( np.arctan( 3.07e-10 * (fromHDF.detectorDistance + detXdist_e) * lamda_lz**2 ) )
    theta_lz += np.rad2deg( np.arctan( 3.07e-10 * fromHDF.detectorDistance * lamda_lz**2 ) )

    # (dead assignment `z_z = enumerate(theta_z)` removed — it was never used)
    qz_lz = 4.0*np.pi * np.sin(np.deg2rad(theta_lz)) / lamda_lz
    int_lz, bins_l, bins_z = np.histogram2d(lamda_e, detZ_e, bins = (lamda_l, self.grid.z()))
    # cut normalisation sample horizon
    int_lz = np.where(mask_lz, int_lz, np.nan)
    thetaF_lz = np.where(mask_lz, theta_lz, np.nan)

    # reflectivity: intensity over normalisation, weighted by the ratio of
    # the two measurement angles
    ref_lz = (int_lz * np.absolute(thetaN_lz)) / (norm_lz * np.absolute(thetaF_lz))
    err_lz = ref_lz * np.sqrt( 1/(int_lz+.1) + 1/norm_lz )

    # q_z resolution: constant relative term plus angular term — TODO confirm coefficients
    res_lz = np.ones((np.shape(lamda_l[:-1])[0], np.shape(theta_z)[0])) * 0.022**2
    res_lz = res_lz + (0.008/theta_lz)**2
    res_lz = qz_lz * np.sqrt(res_lz)

    return qz_lz, ref_lz, err_lz, res_lz, lamda_lz, theta_lz, int_lz, mask_lz
|
||||
|
||||
return R_q, dR_q
|
||||
|
||||
247
neos.py
247
neos.py
@@ -16,20 +16,9 @@ conventions (not strictly followed, yet):
|
||||
- to come
|
||||
"""
|
||||
|
||||
|
||||
import os
|
||||
import sys
|
||||
import logging
|
||||
import logging.handlers
|
||||
import numpy as np
|
||||
from orsopy import fileio
|
||||
|
||||
import libeos.reduction
|
||||
from libeos.command_line import commandLineArgs, output_format_list
|
||||
from libeos.dataset import AmorData, DataReaderConfig, Header
|
||||
from libeos.instrument import Grid
|
||||
from libeos.logconfig import setup_logging, update_loglevel
|
||||
from libeos.reduction import autoscale, normalisation_map, project_on_lz, project_on_qz
|
||||
from libeos.command_line import command_line_options
|
||||
from libeos.logconfig import setup_logging
|
||||
from libeos.reduction import AmorReduction
|
||||
|
||||
|
||||
#=====================================================================================================
|
||||
@@ -38,234 +27,16 @@ from libeos.reduction import autoscale, normalisation_map, project_on_lz, projec
|
||||
# - deal with background correction
|
||||
# - format of 'call' + add '-Y' if not supplied
|
||||
#=====================================================================================================
|
||||
def loadRqz(name):
    """Load a reduced R(q_z) curve named *name* (or *name*.Rqz.ort).

    NOTE(review): relies on the module-global ``clas`` set in main();
    exits the program if neither candidate file exists.

    Returns (q_q, Sq_q, dS_q, fileName).
    """

    if os.path.exists(f'{clas.dataPath}/{name}'):
        fileName = f'{clas.dataPath}/{name}'
    elif os.path.exists(f'{clas.dataPath}/{name}.Rqz.ort'):
        fileName = f'{clas.dataPath}/{name}.Rqz.ort'
    else:
        sys.exit(f'### the background file \'{clas.dataPath}/{name}\' does not exist! => stopping')

    q_q, Sq_q, dS_q = np.loadtxt(fileName, usecols=(0, 1, 2), comments='#', unpack=True)

    return q_q, Sq_q, dS_q, fileName
|
||||
|
||||
def main():
|
||||
setup_logging()
|
||||
|
||||
global startTime, grid, clas, header
|
||||
clas = commandLineArgs()
|
||||
update_loglevel(clas.verbose, clas.debug)
|
||||
|
||||
grid = Grid(clas.qResolution)
|
||||
header = Header()
|
||||
startTime = 0
|
||||
if not os.path.exists(f'{clas.dataPath}'):
|
||||
os.system(f'mkdir {clas.dataPath}')
|
||||
fromHDF = AmorData(startTime, header=header, config=DataReaderConfig(
|
||||
year=clas.year,
|
||||
dataPath=clas.dataPath,
|
||||
sampleModel=clas.sampleModel,
|
||||
chopperPhase=clas.chopperPhase,
|
||||
chopperPhaseOffset=clas.chopperPhaseOffset,
|
||||
yRange=clas.yRange,
|
||||
lambdaRange=clas.lambdaRange,
|
||||
qzRange=clas.qzRange,
|
||||
offSpecular=clas.offSpecular,
|
||||
mu=clas.mu,
|
||||
nu=clas.nu,
|
||||
muOffset=clas.muOffset
|
||||
))
|
||||
logging.warning('\n######## eos - data reduction for Amor ########')
|
||||
|
||||
# load or create normalisation matrix
|
||||
if clas.normalisationFileIdentifier:
|
||||
normalise = True
|
||||
norm_lz, normAngle, normFileList = normalisation_map(clas.normalisationFileIdentifier[0],
|
||||
header, grid, clas.dataPath)
|
||||
header.reduction.corrections.append('normalisation with \'additional files\'')
|
||||
else:
|
||||
normalise = False
|
||||
norm_lz = grid.lz()
|
||||
normAngle = 1.
|
||||
|
||||
logging.warning('normalisation matrix: none requested')
|
||||
|
||||
# load R(q_z) curve to be subtracted:
|
||||
if clas.subtract:
|
||||
sq_q, sR_q, sdR_q, sFileName = loadRqz(clas.subtract)
|
||||
subtract = True
|
||||
logging.warning(f'loaded background file: {sFileName}')
|
||||
header.reduction.corrections.append(f'background from \'{sFileName}\' subtracted')
|
||||
else:
|
||||
subtract = False
|
||||
|
||||
# load measurement data and do the reduction
|
||||
datasetsRqz = []
|
||||
datasetsRlt = []
|
||||
for i, short_notation in enumerate(clas.fileIdentifier):
|
||||
logging.warning('reading input:')
|
||||
header.measurement_data_files = []
|
||||
fromHDF.read_data(short_notation)
|
||||
|
||||
if clas.timeSlize:
|
||||
wallTime_e = fromHDF.wallTime_e
|
||||
columns = header.columns() + [fileio.Column('time', 's', 'time relative to start of measurement series')]
|
||||
headerRqz = fileio.Orso(header.data_source(), header.reduction, columns)
|
||||
|
||||
interval = clas.timeSlize[0]
|
||||
try:
|
||||
start = clas.timeSlize[1]
|
||||
except:
|
||||
start = 0
|
||||
try:
|
||||
stop = clas.timeSlize[2]
|
||||
except:
|
||||
stop = wallTime_e[-1]
|
||||
# make overwriting log lines possible by removing newline at the end
|
||||
logging.StreamHandler.terminator="\r"
|
||||
for i, time in enumerate(np.arange(start, stop, interval)):
|
||||
logging.warning(f' time slize {i:4d}')
|
||||
|
||||
filter_e = np.where((time < wallTime_e) & (wallTime_e < time+interval), True, False)
|
||||
lamda_e = fromHDF.lamda_e[filter_e]
|
||||
detZ_e = fromHDF.detZ_e[filter_e]
|
||||
|
||||
qz_lz, ref_lz, err_lz, res_lz, lamda_lz, theta_lz, int_lz, mask_lz = project_on_lz(
|
||||
fromHDF, norm_lz, normAngle, lamda_e, detZ_e,
|
||||
grid, clas.thetaRange, clas.thetaRangeR, clas.lambdaRange)
|
||||
q_q, R_q, dR_q, dq_q = project_on_qz(qz_lz, ref_lz, err_lz, res_lz, norm_lz, mask_lz, grid)
|
||||
|
||||
filter_q = np.where((clas.qzRange[0] < q_q) & (q_q < clas.qzRange[1]), True, False)
|
||||
q_q = q_q[filter_q]
|
||||
R_q = R_q[filter_q]
|
||||
dR_q = dR_q[filter_q]
|
||||
dq_q = dq_q[filter_q]
|
||||
|
||||
if clas.autoscale:
|
||||
R_q, dR_q = autoscale(q_q, R_q, dR_q, clas.autoscale)
|
||||
|
||||
if subtract:
|
||||
if len(q_q) == len(sq_q):
|
||||
R_q -= sR_q
|
||||
dR_q = np.sqrt( dR_q**2 + sdR_q**2 )
|
||||
else:
|
||||
subtract = False
|
||||
logging.warning(f'background file {sFileName} not compatible with q_z scale ({len(sq_q)} vs. {len(q_q)})')
|
||||
|
||||
tme_q = np.ones(np.shape(q_q))*time
|
||||
data = np.array([q_q, R_q, dR_q, dq_q, tme_q]).T
|
||||
headerRqz.data_set = f'{i}: time = {time:8.1f} s to {time+interval:8.1f} s'
|
||||
orso_data = fileio.OrsoDataset(headerRqz, data)
|
||||
# make a copy of the header for the next iteration
|
||||
headerRqz = fileio.Orso.from_dict(headerRqz.to_dict())
|
||||
datasetsRqz.append(orso_data)
|
||||
# reset normal logging behavior
|
||||
logging.StreamHandler.terminator="\n"
|
||||
logging.warning(f' time slize {i:4d}, done')
|
||||
else:
|
||||
lamda_e = fromHDF.lamda_e
|
||||
detZ_e = fromHDF.detZ_e
|
||||
|
||||
qz_lz, ref_lz, err_lz, res_lz, lamda_lz, theta_lz, int_lz, mask_lz = project_on_lz(
|
||||
fromHDF, norm_lz, normAngle, lamda_e, detZ_e,
|
||||
grid, clas.thetaRange, clas.thetaRangeR, clas.lambdaRange
|
||||
)
|
||||
try:
|
||||
ref_lz *= clas.scale[i]
|
||||
err_lz *= clas.scale[i]
|
||||
except:
|
||||
ref_lz *= clas.scale[-1]
|
||||
err_lz *= clas.scale[-1]
|
||||
|
||||
if 'Rqz.ort' in output_format_list(clas.outputFormat):
|
||||
headerRqz = fileio.Orso(header.data_source(), header.reduction, header.columns())
|
||||
headerRqz.data_set = f'Nr {i} : mu = {fromHDF.mu:6.3f} deg'
|
||||
|
||||
# projection on q-grid
|
||||
q_q, R_q, dR_q, dq_q = project_on_qz(qz_lz, ref_lz, err_lz, res_lz, norm_lz, mask_lz, grid)
|
||||
|
||||
filter_q = np.where((clas.qzRange[0] < q_q) & (q_q < clas.qzRange[1]), True, False)
|
||||
q_q = q_q[filter_q]
|
||||
R_q = R_q[filter_q]
|
||||
dR_q = dR_q[filter_q]
|
||||
dq_q = dq_q[filter_q]
|
||||
|
||||
if libeos.reduction.autoscale:
|
||||
if i == 0:
|
||||
R_q, dR_q = autoscale(q_q, R_q, dR_q, clas.autoscale)
|
||||
else:
|
||||
pRq_z = datasetsRqz[i-1].data[:,1]
|
||||
pdRq_z = datasetsRqz[i-1].data[:,2]
|
||||
R_q, dR_q = autoscale(q_q, R_q, dR_q, clas.autoscale, pRq_z, pdRq_z)
|
||||
|
||||
if subtract:
|
||||
if len(q_q) == len(sq_q):
|
||||
R_q -= sR_q
|
||||
dR_q = np.sqrt( dR_q**2 + sdR_q**2 )
|
||||
else:
|
||||
logging.warning(f'backgroung file {sFileName} not compatible with q_z scale ({len(sq_q)} vs. {len(q_q)})')
|
||||
|
||||
data = np.array([q_q, R_q, dR_q, dq_q]).T
|
||||
orso_data = fileio.OrsoDataset(headerRqz, data)
|
||||
headerRqz = fileio.Orso(**headerRqz.to_dict())
|
||||
datasetsRqz.append(orso_data)
|
||||
|
||||
if 'Rlt.ort' in output_format_list(clas.outputFormat):
|
||||
columns = [
|
||||
fileio.Column('Qz', '1/angstrom', 'normal momentum transfer'),
|
||||
fileio.Column('R', '', 'specular reflectivity'),
|
||||
fileio.ErrorColumn(error_of='R', error_type='uncertainty', value_is='sigma'),
|
||||
fileio.ErrorColumn(error_of='Qz', error_type='resolution', value_is='sigma'),
|
||||
fileio.Column('lambda', 'angstrom', 'wavelength'),
|
||||
fileio.Column('alpha_f', 'deg', 'final angle'),
|
||||
fileio.Column('l', '', 'index of lambda-bin'),
|
||||
fileio.Column('t', '', 'index of theta bin'),
|
||||
fileio.Column('intensity', '', 'filtered neutron events per pixel'),
|
||||
fileio.Column('norm', '', 'normalisation matrix'),
|
||||
fileio.Column('mask', '', 'pixels used for calculating R(q_z)'),
|
||||
]
|
||||
#data_source = fromHDF.data_source
|
||||
headerRlt = fileio.Orso(header.data_source, header.reduction, columns)
|
||||
|
||||
ts, zs=ref_lz.shape
|
||||
lindex_lz=np.tile(np.arange(1, ts+1), (zs, 1)).T
|
||||
tindex_lz=np.tile(np.arange(1, zs+1), (ts, 1))
|
||||
|
||||
j = 0
|
||||
for item in zip(
|
||||
qz_lz.T,
|
||||
ref_lz.T,
|
||||
err_lz.T,
|
||||
res_lz.T,
|
||||
lamda_lz.T,
|
||||
theta_lz.T,
|
||||
lindex_lz.T,
|
||||
tindex_lz.T,
|
||||
int_lz.T,
|
||||
norm_lz.T,
|
||||
np.where(mask_lz, 1, 0).T
|
||||
):
|
||||
data = np.array(list(item)).T
|
||||
headerRlt = fileio.Orso(**headerRlt.to_dict())
|
||||
headerRlt.data_set = f'dataset_{i}_{j+1} : alpha_f = {theta_lz[0,j]:6.3f} deg'
|
||||
orso_data = fileio.OrsoDataset(headerRlt, data)
|
||||
datasetsRlt.append(orso_data)
|
||||
j += 1
|
||||
|
||||
# output
|
||||
logging.warning('output:')
|
||||
|
||||
if 'Rqz.ort' in output_format_list(clas.outputFormat):
|
||||
logging.warning(f' {clas.dataPath}/{clas.outputName}.Rqz.ort')
|
||||
theSecondLine = f' {header.experiment.title} | {header.experiment.start_date} | sample {header.sample.name} | R(q_z)'
|
||||
fileio.save_orso(datasetsRqz, f'{clas.dataPath}/{clas.outputName}.Rqz.ort', data_separator='\n', comment=theSecondLine)
|
||||
|
||||
if 'Rlt.ort' in output_format_list(clas.outputFormat):
|
||||
logging.warning(f' {clas.dataPath}/{clas.outputName}.Rlt.ort')
|
||||
theSecondLine = f' {header.experiment.title} | {header.experiment.start_date} | sample {header.sample.name} | R(lambda, theta)'
|
||||
fileio.save_orso(datasetsRlt, f'{clas.dataPath}/{clas.outputName}.Rlt.ort', data_separator='\n', comment=theSecondLine)
|
||||
# read command line arguments and generate classes holding configuration parameters
|
||||
reader_config, reduction_config, output_config = command_line_options()
|
||||
# Create reducer with these arguments
|
||||
reducer = AmorReduction(reader_config, reduction_config, output_config)
|
||||
# Perform actual reduction
|
||||
reducer.reduce()
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
|
||||
Reference in New Issue
Block a user