cleanup: remove scripts which belong to sf-daq-broker

Dmitry Ozerov
2021-08-23 11:51:43 +02:00
committed by Data Backend account
parent ec6050f331
commit 4484d70aea
7 changed files with 0 additions and 931 deletions
@@ -1,44 +0,0 @@
import argparse
import json
import os
import datetime
from shutil import copyfile

PEDESTAL_DIRECTORY = "/sf/jungfrau/data/pedestal"

parser = argparse.ArgumentParser()
parser.add_argument("file_pedestal", type=str)
parser.add_argument("json_run", type=str)
parser.add_argument("detector", type=str)
parser.add_argument("json_stream", type=str)
args = parser.parse_args()

with open(args.json_run, "r") as run_file:
    data = json.load(run_file)

request_time = datetime.datetime.strptime(data["request_time"], '%Y-%m-%d %H:%M:%S.%f')

if not os.path.isdir(f'{PEDESTAL_DIRECTORY}/{args.detector}'):
    os.mkdir(f'{PEDESTAL_DIRECTORY}/{args.detector}')

out_name = f'{PEDESTAL_DIRECTORY}/{args.detector}/{request_time.strftime("%Y%m%d_%H%M%S")}.h5'

copyfile(args.file_pedestal, out_name)
print(f'Copied resulting pedestal file {args.file_pedestal} to {out_name}')

if not os.path.exists(args.json_stream):
    print(f'stream file {args.json_stream} does not exist, exiting')
    exit()

with open(args.json_stream, "r") as stream_file:
    det = json.load(stream_file)

print(f'Changing in stream file {args.json_stream} pedestal from {det["pedestal_file"]} to {out_name}')
det["pedestal_file"] = out_name

with open(args.json_stream, "w") as write_file:
    json.dump(det, write_file, indent=4)
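
# Hypothetical invocation (paths are illustrative only, not from this repo):
#   python copy_pedestal_file.py pedestal_run.res.h5 run_0001.json JF07T32V01 stream-JF07.json
# This copies the pedestal into PEDESTAL_DIRECTORY/<detector>/<request-time>.h5
# and rewrites the "pedestal_file" entry of the stream JSON in place.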
@@ -1,118 +0,0 @@
import argparse
import json
import warnings

import h5py
import numpy as np
import jungfrau_utils as ju

import sys
sys.path.append('/home/dbe/git/sf_daq_buffer/scripts')
import postprocess_raw
import os

parser = argparse.ArgumentParser()
parser.add_argument("file_in", type=str)
parser.add_argument("file_out", type=str)
parser.add_argument("json_run", type=str)
parser.add_argument("json_detector", type=str)
args = parser.parse_args()

with open(args.json_detector, "r") as detector_file:
    data = json.load(detector_file)

detector_name = data["detector_name"]
gain_file = data["gain_file"]
pedestal_file = data["pedestal_file"]

with open(args.json_run, "r") as run_file:
    data = json.load(run_file)

detector_params = data["detectors"][detector_name]

compression = detector_params.get("compression", False)
conversion = detector_params.get("adc_to_energy", False)
disabled_modules = detector_params.get("disabled_modules", [])

if conversion:
    mask = detector_params.get("mask", True)
    mask_double_pixels = detector_params.get("mask_double_pixels", True)
    geometry = detector_params.get("geometry", False)
    gap_pixels = detector_params.get("gap_pixels", True)
    factor = detector_params.get("factor", None)
else:
    mask = False
    mask_double_pixels = False
    geometry = False
    gap_pixels = False
    factor = None

if not mask and mask_double_pixels:
    warnings.warn("mask is False, so mask_double_pixels is forced to False")
    mask_double_pixels = False

file_tmp = args.file_in
if len(disabled_modules) > 0:
    print(f"Will reduce data file, disabled_modules: {disabled_modules}")
    if conversion:
        file_tmp = args.file_out + ".tmp"
    else:
        file_tmp = args.file_out
    postprocess_raw.postprocess_raw(args.file_in, file_tmp, compression=compression, disabled_modules=disabled_modules)

if conversion:
    with ju.File(
        file_tmp,
        gain_file=gain_file,
        pedestal_file=pedestal_file,
        conversion=conversion,
        mask=mask,
        gap_pixels=gap_pixels,
        geometry=geometry,
        parallel=False,
    ) as juf:
        n_input_frames = len(juf["data"])
        good_frames = np.nonzero(juf["is_good_frame"])[0]
        n_output_frames = len(good_frames)

        juf.handler.mask_double_pixels = mask_double_pixels

        juf.export(
            args.file_out,
            index=good_frames,
            roi=None,
            compression=compression,
            factor=factor,
            dtype=None,
            batch_size=35,
        )

    #os.remove(file_tmp)
else:
    with h5py.File(file_tmp, "r") as juf:
        n_input_frames = len(juf[f"data/{detector_name}/data"])
        good_frames = np.nonzero(juf[f"data/{detector_name}/is_good_frame"])[0]
        n_output_frames = len(good_frames)

# Utility info
with h5py.File(args.file_out, "r") as h5f:
    print("daq_rec:", h5f[f"/data/{detector_name}/daq_rec"][0, 0])

    frame_index = h5f[f"/data/{detector_name}/frame_index"][:]
    print("frame_index range:", (np.min(frame_index), np.max(frame_index)))

print("input frames:", n_input_frames)
print("bad frames:", n_input_frames - n_output_frames)
print("output frames:", n_output_frames)

print("gain_file:", gain_file)
print("pedestal_file:", pedestal_file)

print("conversion:", conversion)
print("mask:", mask)
print("mask_double_pixels:", mask_double_pixels)
print("geometry:", geometry)
print("gap_pixels:", gap_pixels)
print("compression:", compression)
print("factor:", factor)
@@ -1,225 +0,0 @@
import argparse
import sys
import os

import numpy as np
import h5py
import logging

ch = logging.StreamHandler()
ch.setFormatter(logging.Formatter('[%(levelname)s] %(message)s'))
log = logging.getLogger("create_pedestals")
log.addHandler(ch)


def h5_printname(name):
    print(" {}".format(name))


def forcedGainValue(i, n0, n1, n2, n3):
    if i <= n0 - 1:
        return 0
    if i <= (n0 + n1) - 1:
        return 1
    if i <= (n0 + n1 + n2) - 1:
        return 3
    if i <= (n0 + n1 + n2 + n3) - 1:
        return 4
    return 2
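
# Example of the forced-gain mapping (a sketch, assuming a call with
# n0=1000, n1=100, n2=100, n3=0): frames 0-999 -> 0 (G0),
# 1000-1099 -> 1 (G1), 1100-1199 -> 3 (the raw 2-bit code for G2),
# any later frame -> 2 (undefined). A non-zero n3 block returns 4,
# matching the HG0 slot of the 5-entry accumulators used below.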

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--verbosity", default=None, help="log verbosity level INFO/DEBUG/WARN/ERROR/CRITICAL")
    parser.add_argument("--filename", default="pedestal.h5", help="pedestal file")
    parser.add_argument("--X_test_pixel", type=int, default=0, help="x position of the test pixel")
    parser.add_argument("--Y_test_pixel", type=int, default=0, help="y position of the test pixel")
    parser.add_argument("--nFramesPede", type=int, default=1000, help="number of pedestal frames to average pedestal value")
    parser.add_argument("--frames_G0", type=int, default=0, help="force to treat the pedestal run as: first frames_G0 frames taken in gain0, then frames_G1 in gain1, then frames_G2 in gain2, then frames_HG0 in HG0")
    parser.add_argument("--frames_G1", type=int, default=0, help="see --frames_G0")
    parser.add_argument("--frames_G2", type=int, default=0, help="see --frames_G0")
    parser.add_argument("--frames_HG0", type=int, default=0, help="see --frames_G0")
    parser.add_argument("--number_frames", type=int, default=1000000, help="analyze only the first number_frames frames")
    parser.add_argument("--frames_average", type=int, default=1000, help="for the pedestal in each gain, average over the last frames_average frames, reducing the weight of earlier ones")
    parser.add_argument("--directory", default="./", help="output directory for the pixel mask and gain file")
    parser.add_argument("--gain_check", type=int, default=1, help="check that the gain setting in each module corresponds to the general gain switch (0 - don't check)")
    parser.add_argument("--add_pixel_mask", default=None, help="add additional masked pixels from an external file")
    parser.add_argument("--number_bad_modules", type=int, default=0, help="number of bad modules in the detector")
    args = parser.parse_args()

    if not (os.path.isfile(args.filename) and os.access(args.filename, os.R_OK)):
        print("Pedestal file {} not found, exiting".format(args.filename))
        exit()

    if args.verbosity:
        log.setLevel(getattr(logging, args.verbosity.upper(), None))

    overwriteGain = False
    if (args.frames_G0 + args.frames_G1 + args.frames_G2) > 0:
        log.info("Treat this run as taken with {} frames in gain0, then {} frames in gain1 and {} frames in gain2".format(args.frames_G0, args.frames_G1, args.frames_G2))
        overwriteGain = True

    f = h5py.File(args.filename, "r")

    #detector_name = (f.get("general/detector_name").value).decode('UTF-8')
    detector_name = (f.get("general/detector_name")[()]).decode('UTF-8')
    #n_bad_modules = f.get("general/n_bad_modules").value
    n_bad_modules = args.number_bad_modules

    data_location = "data/" + detector_name + "/data"
    daq_recs_location = "data/" + detector_name + "/daq_rec"
    is_good_frame_location = "data/" + detector_name + "/is_good_frame"

    numberOfFrames = len(f[data_location])
    (sh_y, sh_x) = f[data_location][0].shape
    nModules = (sh_x * sh_y) // (1024 * 512)
    if (nModules * 1024 * 512) != (sh_x * sh_y):
        log.error(" {} : Data shape {}x{} is not a multiple of the (1024x512) Jungfrau module size".format(detector_name, sh_x, sh_y))
        exit()

    (tX, tY) = (args.X_test_pixel, args.Y_test_pixel)
    if tX < 0 or tX > (sh_x - 1):
        tX = 0
    if tY < 0 or tY > (sh_y - 1):
        tY = 0
    log.debug(" {} : test pixel is ( x y ): {}x{}".format(detector_name, tX, tY))

    log.info(" {} : In pedestal file {} there are {} frames".format(detector_name, args.filename, numberOfFrames))

    # log.debug("Following groups are available:")
    # if args.verbosity >= 3:
    #     f.visit(h5_printname)
    log.debug(" {} : data has the following shape: {}, type: {}, {} modules ({} bad modules)".format(detector_name, f[data_location][0].shape, f[data_location][0].dtype, nModules, n_bad_modules))

    pixelMask = np.zeros((sh_y, sh_x), dtype=int)

    adcValuesN = np.zeros((5, sh_y, sh_x))
    adcValuesNN = np.zeros((5, sh_y, sh_x))

    averagePedestalFrames = args.frames_average

    nMgain = [0] * 5

    gainCheck = -1
    highG0Check = 0
    printFalseGain = False
    nGoodFrames = 0
    nGoodFramesGain = 0

    analyzeFrames = min(numberOfFrames, args.number_frames)

    for n in range(analyzeFrames):
        if not f[is_good_frame_location][n]:
            continue
        nGoodFrames += 1

        daq_rec = (f[daq_recs_location][n])[0]

        image = f[data_location][n][:]
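        # Each 16-bit pixel word packs a 14-bit ADC value (bits 0-13) and a
        # 2-bit gain code (bits 14-15); the two masks below split the fields.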
        frameData = (np.bitwise_and(image, 0b0011111111111111))
        gainData = np.bitwise_and(image, 0b1100000000000000) >> 14
        trueGain = forcedGainValue(n, args.frames_G0, args.frames_G1, args.frames_G2, args.frames_HG0) if overwriteGain else ((daq_rec & 0b11000000000000) >> 12)
        highG0 = (daq_rec & 0b1)

        gainGoodAllModules = True
        daq_recs = f[daq_recs_location][n]
        if args.gain_check > 0:
            for i in range(len(daq_recs)):
                if trueGain != ((daq_recs[i] & 0b11000000000000) >> 12) or highG0 != (daq_recs[i] & 0b1):
                    gainGoodAllModules = False

        if highG0 == 1 and trueGain != 0:
            gainGoodAllModules = False
            log.info(" {} : Jungfrau is in the high G0 mode ({}), but the gain setting is strange: {}".format(detector_name, highG0, trueGain))

        nFramesGain = np.sum(gainData == trueGain)
        if nFramesGain < (nModules - 0.5 - n_bad_modules) * (1024 * 512):  # make sure that most of the modules are in the correct gain
            gainGoodAllModules = False
            log.debug(" {} : Too many bad pixels, skip the frame {}, true gain: {}(highG0: {}) ({}); gain0 : {}; gain1 : {}; gain2 : {}; undefined gain : {}".format(detector_name, n, trueGain, highG0, nFramesGain, np.sum(gainData == 0), np.sum(gainData == 1), np.sum(gainData == 3), np.sum(gainData == 2)))

        if not gainGoodAllModules:
            log.debug(" {} : In Frame Number {} : mismatch in modules and general settings, Gain: {} vs {}; HighG0: {} vs {} (or too many bad pixels)".format(detector_name, n, trueGain, ((daq_recs & 0b11000000000000) >> 12), highG0, (daq_recs & 0b1)))
            continue
        nGoodFramesGain += 1

        if gainData[tY][tX] != trueGain:
            if not printFalseGain:
                log.info(" {} : Gain wrong for channel ({}x{}) should be {}, but {}. Frame {}. {} {}".format(detector_name, tX, tY, trueGain, gainData[tY][tX], n, trueGain, daq_rec))
                printFalseGain = True
        else:
            if gainCheck != -1 and printFalseGain:
                log.info(" {} : Gain was wrong for channel ({}x{}) in previous frames, but is now correct : {}. Frame {}.".format(detector_name, tX, tY, gainData[tY, tX], n))
                printFalseGain = False

        if gainData[tY][tX] != gainCheck or highG0Check != highG0:
            log.info(" {} : Gain changed for ({}x{}) channel {} -> {} (highG0 setting: {} -> {}), frame number {}, match: {}".format(detector_name, tX, tY, gainCheck, gainData[tY][tX], highG0Check, highG0, n, gainData[tY][tX] == trueGain))
            gainCheck = gainData[tY][tX]
            highG0Check = highG0

        if gainGoodAllModules:
            pixelMask[gainData != trueGain] |= (1 << (trueGain + 4 * highG0))

            trueGain += 4 * highG0

            nMgain[trueGain] += 1
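
            # Running average with bounded memory: once more than
            # averagePedestalFrames frames were seen in this gain, first decay
            # the accumulated sums by 1/averagePedestalFrames, then add the
            # new frame, i.e. an exponentially weighted sum approximating an
            # average over the last averagePedestalFrames frames.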
            if nMgain[trueGain] > averagePedestalFrames:
                adcValuesN[trueGain] -= adcValuesN[trueGain] / averagePedestalFrames
                adcValuesNN[trueGain] -= adcValuesNN[trueGain] / averagePedestalFrames

            adcValuesN[trueGain] += frameData
            adcValuesNN[trueGain] += np.float_power(frameData, 2)

    log.info(" {} : {} frames analyzed, {} good frames, {} frames without settings mismatch. Gain frames distribution (0,1,2,3,HG0) : ({})".format(detector_name, analyzeFrames, nGoodFrames, nGoodFramesGain, nMgain))

    if args.add_pixel_mask is not None:
        if os.path.isfile(args.add_pixel_mask) and os.access(args.add_pixel_mask, os.R_OK):
            additional_pixel_mask_file = h5py.File(args.add_pixel_mask, "r")
            additional_pixel_mask = np.array(additional_pixel_mask_file["pixel_mask"])
            log.info("Will add additional masked pixels from file %s , number %d " % (args.add_pixel_mask, np.sum(additional_pixel_mask == 1)))
            if additional_pixel_mask.shape == pixelMask.shape:
                pixelMask[additional_pixel_mask == 1] |= (1 << 5)
            else:
                log.error(" shape of the additional pixel mask ({}) doesn't match the current one ({})".format(additional_pixel_mask.shape, pixelMask.shape))
        else:
            log.error(" Specified additional pixel mask file {} not found or not readable".format(args.add_pixel_mask))

    fileNameIn = os.path.splitext(os.path.basename(args.filename))[0]
    full_fileNameOut = args.directory + "/" + fileNameIn + ".res.h5"
    log.info(" {} : Output file with pedestal corrections in: {}".format(detector_name, full_fileNameOut))

    outFile = h5py.File(full_fileNameOut, "w")

    gains = [None] * 4
    gainsRMS = [None] * 4

    for gain in range(5):
        numberFramesAverage = max(1, min(averagePedestalFrames, nMgain[gain]))
        mean = adcValuesN[gain] / float(numberFramesAverage)
        mean2 = adcValuesNN[gain] / float(numberFramesAverage)
        variance = mean2 - np.float_power(mean, 2)
        stdDeviation = np.sqrt(variance)
        log.debug(" {} : gain {} results for pixel ({},{}) : mean {} rms {}".format(detector_name, gain, tY, tX, mean[tY][tX], stdDeviation[tY][tX]))
        if gain != 2:
            g = gain if gain < 3 else (gain - 1)
            gains[g] = mean
            gainsRMS[g] = stdDeviation
            pixelMask[np.isclose(stdDeviation, 0)] |= (1 << (6 + g))

    dset = outFile.create_dataset('pixel_mask', data=pixelMask)
    dset = outFile.create_dataset('gains', data=gains)
    dset = outFile.create_dataset('gainsRMS', data=gainsRMS)

    outFile.close()

    log.info(" {} : Number of good pixels: {} from {} in total ({} bad pixels)".format(detector_name, np.sum(pixelMask == 0), sh_x * sh_y, (sh_x * sh_y - np.sum(pixelMask == 0))))


if __name__ == "__main__":
    main()
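
# Hypothetical invocation (paths are illustrative only):
#   python jungfrau_create_pedestals.py --filename pedestal_run.h5 \
#       --directory /sf/jungfrau/data/pedestal/JF07T32V01 --verbosity DEBUG
# The result <input-basename>.res.h5 holds the per-gain pedestal means
# ("gains"), their RMS ("gainsRMS"), and the accumulated "pixel_mask".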
@@ -1,145 +0,0 @@
import argparse
import numpy as np
import os
import h5py
import json


def is_it_dark(laser_mode, detector_rate, pulseid):
    dark_rate = 1
    if 100 / detector_rate == int(100 / detector_rate):
        dark_rate = 100 / detector_rate

    if laser_mode == 11:      # 50/50 mode
        dark_rate *= 2
    elif laser_mode == 41:    # 4 lights, 1 dark sequence
        dark_rate *= 5
    elif laser_mode == 111:   # 11 lights, 1 dark sequence
        dark_rate *= 12
    elif laser_mode == 191:   # 19 lights, 1 dark sequence
        dark_rate *= 20

    dark = True
    if laser_mode == 0:
        dark = True
    elif laser_mode == 1:
        dark = False
    elif laser_mode == 13:
        if (pulseid % int(100 / detector_rate * 4)) == 0:
            dark = False
    else:
        if (pulseid + int(100 / detector_rate)) % dark_rate == 0:
            dark = True
        else:
            dark = False

    return dark


def which_dark(laser_mode, detector_rate, pulseid):
    dark_mode = -1
    if laser_mode != 13:
        dark_mode = 0
    else:
        for m in range(1, 4):
            if ((pulseid - m * int(100 / detector_rate)) % int(100 / detector_rate * 4)) == 0:
                dark_mode = m
    return dark_mode
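
# Worked example (a sketch): at detector_rate=100 Hz dark_rate starts at 1, so
# in laser_mode 11 (50/50) it becomes 2 and every pulse_id with
# (pulse_id + 1) % 2 == 0 is classified as dark, i.e. dark and light frames
# alternate. For laser_mode 13, which_dark() further splits the dark frames
# into modes 1-3 by their offset within the 4-pulse cycle.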

parser = argparse.ArgumentParser()
parser.add_argument("data_file", type=str)
parser.add_argument("run_info", type=str)
parser.add_argument("detector", type=str)
args = parser.parse_args()

data_file = args.data_file
run_info_file = args.run_info
detector = args.detector

try:
    with open(run_info_file) as json_file:
        parameters = json.load(json_file)
except Exception:
    print(f"Can't read provided run file {run_info_file}, maybe it is not JSON?")
    exit()

laser_mode = parameters.get("laser_mode", 0)
rate_multiplicator = parameters.get("rate_multiplicator", 1)
detector_rate = 100 // rate_multiplicator

print("Laser mode: ", laser_mode, ", detector runs at ", detector_rate, "Hz")

try:
    f = h5py.File(data_file, "r")
except Exception:
    print(f"Can't open {data_file}")
    exit()

pulseids = f[f'/data/{detector}/pulse_id'][:]
n_pulse_id = len(pulseids)

if f'/data/{detector}/is_good_frame' in f.keys():
    is_good_frame = f[f'/data/{detector}/is_good_frame'][:]
else:
    is_good_frame = [1] * n_pulse_id

nGoodFrames = 0
nProcessedFrames = 0

index_dark = []
index_light = []
index_dark_mode = {}

for i in range(len(pulseids)):
    if not is_good_frame[i]:
        continue
    nGoodFrames += 1

    p = pulseids[i]
    nProcessedFrames += 1

    if is_it_dark(laser_mode, detector_rate, p):
        index_dark.append(i)
        if laser_mode == 13:
            dark_mode = which_dark(laser_mode, detector_rate, p)
            if dark_mode not in index_dark_mode:
                index_dark_mode[dark_mode] = []
            index_dark_mode[dark_mode].append(i)
    else:
        index_light.append(i)

f.close()

print("Total number of frames: %s, number of good frames : %s, processed frames: %s, output frames: %s(dark) %s(light) " % (len(pulseids), nGoodFrames, nProcessedFrames, len(index_dark), len(index_light)))

delim = '//'

if len(index_dark) > 0:
    file_dark = data_file[:-3] + ".dark.lst"
    if laser_mode == -1:
        file_dark = data_file[:-3] + ".undefined.lst"
    print(f"List of dark frames : {file_dark} , {len(index_dark)} frames")
    f_list = open(file_dark, "w")
    for frame_number in index_dark:
        print(f'{data_file} {delim}{frame_number}', file=f_list)
    f_list.close()

if len(index_light) > 0:
    file_light = data_file[:-3] + ".light.lst"
    print(f"List of light frames : {file_light} , {len(index_light)} frames")
    f_list = open(file_light, "w")
    for frame_number in index_light:
        print(f'{data_file} {delim}{frame_number}', file=f_list)
    f_list.close()

for m in index_dark_mode:
    if len(index_dark_mode[m]) > 0:
        file_dark = f'{data_file[:-3]}.dark{m}.lst'
        print(f"List of dark{m} frames : {file_dark} , {len(index_dark_mode[m])} frames")
        f_list = open(file_dark, "w")
        for frame_number in index_dark_mode[m]:
            print(f'{data_file} {delim}{frame_number}', file=f_list)
        f_list.close()
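
# Hypothetical invocation (this script is presumably the make_crystfel_list.py
# referenced by the retrieve script below; file names are illustrative):
#   python make_crystfel_list.py run_0001.JF06T08V02.h5 run_0001.json JF06T08V02
# It writes CrystFEL-style event lists ("<file> //<frame>") next to the data
# file: .dark.lst / .light.lst and, for laser_mode 13, .dark1.lst-.dark3.lst.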
@@ -1,140 +0,0 @@
import os
import struct

import bitshuffle
import h5py
import numpy as np
from bitshuffle.h5 import H5_COMPRESS_LZ4, H5FILTER  # pylint: disable=no-name-in-module

# bitshuffle hdf5 filter params
BLOCK_SIZE = 2048
compargs = {"compression": H5FILTER, "compression_opts": (BLOCK_SIZE, H5_COMPRESS_LZ4)}

# limit bitshuffle omp to a single thread
# a better fix would be to use bitshuffle compiled without omp support
os.environ["OMP_NUM_THREADS"] = "1"

DTYPE = np.dtype(np.uint16)
DTYPE_SIZE = DTYPE.itemsize

MODULE_SIZE_X = 1024
MODULE_SIZE_Y = 512


def postprocess_raw(
    source, dest, disabled_modules=(), index=None, compression=False, batch_size=100
):
    # a function for 'visititems' should have the args (name, object)
    def _visititems(name, obj):
        if isinstance(obj, h5py.Group):
            h5_dest.create_group(name)

        elif isinstance(obj, h5py.Dataset):
            dset_source = h5_source[name]

            # process all but the raw data
            if name != data_dset:
                if name.startswith("data"):
                    # datasets with data per image, so indexing should be applied
                    if index is None:
                        data = dset_source[:]
                    else:
                        data = dset_source[index, :]
                    args = {"shape": data.shape}
                    h5_dest.create_dataset_like(name, dset_source, data=data, **args)
                else:
                    h5_dest.create_dataset_like(name, dset_source, data=dset_source)

        else:
            raise TypeError(f"Unknown h5py object type {obj}")

        # copy group/dataset attributes if it's not a dataset with the actual data
        if name != data_dset:
            for key, value in h5_source[name].attrs.items():
                h5_dest[name].attrs[key] = value

    with h5py.File(source, "r") as h5_source, h5py.File(dest, "w") as h5_dest:
        detector_name = h5_source["general/detector_name"][()].decode()
        data_dset = f"data/{detector_name}/data"

        # traverse the source file and copy/index all datasets, except the raw data
        h5_source.visititems(_visititems)

        # now process the raw data
        dset = h5_source[data_dset]

        args = dict()
        if index is None:
            n_images = dset.shape[0]
        else:
            index = np.array(index)
            n_images = len(index)

        n_modules = dset.shape[1] // MODULE_SIZE_Y
        out_shape = (MODULE_SIZE_Y * (n_modules - len(disabled_modules)), MODULE_SIZE_X)

        args["shape"] = (n_images, *out_shape)
        args["maxshape"] = (n_images, *out_shape)
        args["chunks"] = (1, *out_shape)

        if compression:
            args.update(compargs)

        h5_dest.create_dataset_like(data_dset, dset, **args)

        # calculate and save module_map
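        # (disabled modules get -1; enabled modules get consecutive indices
        # into the reduced output array, so readers can map a raw module
        # position to its row block in the smaller dataset)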
        module_map = []
        tmp = 0
        for ind in range(n_modules):
            if ind in disabled_modules:
                module_map.append(-1)
            else:
                module_map.append(tmp)
                tmp += 1

        h5_dest[f"data/{detector_name}/module_map"] = np.tile(module_map, (n_images, 1))

        # prepare buffers to be reused for every batch
        read_buffer = np.empty((batch_size, *dset.shape[1:]), dtype=DTYPE)
        out_buffer = np.zeros((batch_size, *out_shape), dtype=DTYPE)

        # process and write data in batches
        for batch_start_ind in range(0, n_images, batch_size):
            batch_range = range(batch_start_ind, min(batch_start_ind + batch_size, n_images))
            if index is None:
                batch_ind = np.array(batch_range)
            else:
                batch_ind = index[batch_range]

            # TODO: avoid unnecessary buffers
            read_buffer_view = read_buffer[: len(batch_ind)]
            out_buffer_view = out_buffer[: len(batch_ind)]

            # Avoid a stride-bottleneck, see https://github.com/h5py/h5py/issues/977
            if np.sum(np.diff(batch_ind)) == len(batch_ind) - 1:
                # consecutive index values
                dset.read_direct(read_buffer_view, source_sel=np.s_[batch_ind])
            else:
                for i, j in enumerate(batch_ind):
                    dset.read_direct(read_buffer_view, source_sel=np.s_[j], dest_sel=np.s_[i])

            for i, m in enumerate(module_map):
                if m == -1:
                    continue

                read_slice = read_buffer_view[:, i * MODULE_SIZE_Y : (i + 1) * MODULE_SIZE_Y, :]
                out_slice = out_buffer_view[:, m * MODULE_SIZE_Y : (m + 1) * MODULE_SIZE_Y, :]
                out_slice[:] = read_slice

            bytes_num_elem = struct.pack(">q", out_shape[0] * out_shape[1] * DTYPE_SIZE)
            bytes_block_size = struct.pack(">i", BLOCK_SIZE * DTYPE_SIZE)
            header = bytes_num_elem + bytes_block_size

            for pos, im in zip(batch_range, out_buffer_view):
                if compression:
                    byte_array = header + bitshuffle.compress_lz4(im, BLOCK_SIZE).tobytes()
                else:
                    byte_array = im.tobytes()

                h5_dest[data_dset].id.write_direct_chunk((pos, 0, 0), byte_array)
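
# Hypothetical usage of the function above (arguments as defined in its
# signature; values are illustrative):
#   postprocess_raw("run_raw.h5", "run_reduced.h5", disabled_modules=(3,), compression=True)
# Note on the direct chunk write: each compressed chunk is prefixed with the
# 12-byte header built in the batch loop (8-byte big-endian uncompressed size,
# then the 4-byte block size in bytes), which is the framing the bitshuffle
# HDF5 filter expects when the dataset is read back.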
@@ -1,199 +0,0 @@
#!/bin/bash
#cores for the extraction from detector buffer
coreAssociated="9,10,11,12,13,14,15,16,17"
#max number of simultaneously running extraction from detector buffer processes
NUMBER_BUFFER_EXTRACT=9
#cores used for conversion
coreAssociatedConversion="35,34,33,32,31,30,29,28,27,26,25,24,23,22,21,20,19,18"
#max number of simultaneously running conversion processes
NUMBER_CONVERSION_PROCESSES=15
if [ $# -lt 3 ]
then
echo "Usage : $0 detector_name start_pulse_id end_pulse_id "
echo "Example : $0 JF07T32V01 11709404000 11709405000 "
echo "Optional parameters: output_file_name rate_multiplicator jf_conversion run_file raw_file"
exit
fi
DETECTOR=$1
START_PULSE_ID=$2
STOP_PULSE_ID=$3
PULSE_ID_STEP=1 # by default assume 100Hz
JF_CONVERSION=0 # by default don't call ju_export
RUN_FILE=None
RAW_FILE=None
echo "Request to retrieve : $@ "
echo "Started : "`date`
date1=$(date +%s)
if [ $# -ge 4 ]
then
OUTFILE=$4
else
OUTFILE=/gpfs/photonics/swissfel/buffer/test.${START_PULSE_ID}-${STOP_PULSE_ID}.h5
fi
if [ $# -ge 5 ]
then
PULSE_ID_STEP=$5
fi
if [ $# -ge 6 ]
then
JF_CONVERSION=$6
if [ $# -ge 7 ]
then
RUN_FILE=$7
fi
if [ $# -eq 8 ]
then
RAW_FILE=$8
fi
fi
NM=`echo ${DETECTOR} | cut -c 6-7`
DET_CONFIG_FILE=/gpfs/photonics/swissfel/buffer/config/${DETECTOR}.json
touch /tmp/detector_retrieve.log
cd /gpfs/photonics/swissfel/buffer/
PREVIOUS_STILL_RUN=0
while [ ${PREVIOUS_STILL_RUN} == 0 ]
do
sleep 15 # sleep at least this long to make sure we don't read from the CURRENT buffer file
n=`ps -fe | grep "bin/sf_writer " | grep -v grep | grep sf_writer | wc -l`
if [ ${n} -lt ${NUMBER_BUFFER_EXTRACT} ]
then
PREVIOUS_STILL_RUN=1
fi
done
date2=$(date +%s)
echo -n "Waited Time : "
echo $((date2-date1)) | awk '{print int($1/60)":"int($1%60)}'
echo "Started actual retrieve : "`date`
if [ ${JF_CONVERSION} == 0 ]
then
OUTFILE_RAW=${OUTFILE}
else
if [ ${RAW_FILE} != "None" ]
then
OUTFILE_RAW=${RAW_FILE}
D1=`dirname ${OUTFILE_RAW}`
mkdir -p ${D1}
else
RUN_NUMBER=`basename ${RUN_FILE} | awk -F '.' '{print $1}'`
D1=`dirname ${RUN_FILE}`
D2=`dirname ${D1}`
OUTFILE_RAW=${D2}/RAW_DATA/${RUN_NUMBER}.${DETECTOR}.h5
mkdir -p ${D2}/RAW_DATA/
fi
fi
taskset -c ${coreAssociated} /home/dbe/bin/sf_writer ${OUTFILE_RAW} /gpfs/photonics/swissfel/buffer/${DETECTOR} ${NM} ${START_PULSE_ID} ${STOP_PULSE_ID} ${PULSE_ID_STEP} >> /tmp/detector_retrieve.log &
wait
#TODO: calculate this number from coreAssociatedConversion
#export NUMBA_NUM_THREADS=18
#not clear why, but bitshuffle doesn't respect OMP_NUM_THREADS set in jungfrau_utils anymore, thus we set it here
export OMP_NUM_THREADS=1
date3=$(date +%s)
echo "Finished : "`date`
echo -n "Retrieve Time : "
echo $((date3-date2)) | awk '{print int($1/60)":"int($1%60)}'
if [ ${JF_CONVERSION} == 0 ]
then
echo "File is written in raw format, no compression"
dir_name=`dirname ${OUTFILE_RAW}`
base_name=`basename ${dir_name}`
if [ ${base_name} == "JF_pedestals" ]
then
echo "Pedestal run will make conversion"
source /home/dbe/miniconda3/etc/profile.d/conda.sh
conda deactivate
conda activate sf-daq
if [ ${DETECTOR} == "JF07T32V01" ]
then
time taskset -c ${coreAssociatedConversion} python /home/dbe/git/sf_daq_buffer/scripts/jungfrau_create_pedestals.py --filename ${OUTFILE_RAW} --directory ${dir_name} --verbosity DEBUG --add_pixel_mask /sf/bernina/config/jungfrau/pixel_mask/JF07T32V01/pixel_mask_13_full.h5
elif [ ${DETECTOR} == "JF03T01V02" ]
then
time taskset -c ${coreAssociatedConversion} python /home/dbe/git/sf_daq_buffer/scripts/jungfrau_create_pedestals.py --filename ${OUTFILE_RAW} --directory ${dir_name} --verbosity DEBUG --add_pixel_mask /sf/bernina/config/jungfrau/pixel_mask/JF03T01V02/pixel_mask_half_chip.h5
# time taskset -c ${coreAssociatedConversion} python /home/dbe/git/sf_daq_buffer/scripts/jungfrau_create_pedestals.py --filename ${OUTFILE_RAW} --directory ${dir_name} --verbosity DEBUG
elif [ ${DETECTOR} == "JF02T09V02" ]
then
time taskset -c ${coreAssociatedConversion} python /home/dbe/git/sf_daq_buffer/scripts/jungfrau_create_pedestals.py --filename ${OUTFILE_RAW} --directory ${dir_name} --verbosity DEBUG --number_bad_modules=1
elif [ ${DETECTOR} == "JF06T08V02" ]
then
time taskset -c ${coreAssociatedConversion} python /home/dbe/git/sf_daq_buffer/scripts/jungfrau_create_pedestals.py --filename ${OUTFILE_RAW} --directory ${dir_name} --verbosity DEBUG --add_pixel_mask /sf/alvra/config/jungfrau/pixel_mask/JF06T08V01/mask_2lines_module3.h5
# time taskset -c ${coreAssociatedConversion} python /home/dbe/git/sf_daq_buffer/scripts/jungfrau_create_pedestals.py --filename ${OUTFILE_RAW} --directory ${dir_name} --verbosity DEBUG --add_pixel_mask /sf/alvra/config/jungfrau/pixel_mask/JF06T08V01/mask_2lines_module3.asics_lines.h5
# elif [ ${DETECTOR} == "JF06T32V02" ]
# then
# time taskset -c ${coreAssociatedConversion} python /home/dbe/git/sf_daq_buffer/scripts/jungfrau_create_pedestals.py --filename ${OUTFILE_RAW} --directory ${dir_name} --verbosity DEBUG --add_pixel_mask /sf/alvra/config/jungfrau/pixel_mask/JF06T32V02/mask_noise_in_28.h5
elif [ ${DETECTOR} == "JF13T01V01" ]
then
time taskset -c ${coreAssociatedConversion} python /home/dbe/git/sf_daq_buffer/scripts/jungfrau_create_pedestals.py --filename ${OUTFILE_RAW} --directory ${dir_name} --verbosity DEBUG --add_pixel_mask /sf/bernina/config/jungfrau/pixel_mask/JF13T01V01/pixel_mask_bad_rb_22.09.2020.h5
elif [ ${DETECTOR} == "JF11T04V01" ]
then
time taskset -c ${coreAssociatedConversion} python /home/dbe/git/sf_daq_buffer/scripts/jungfrau_create_pedestals.py --filename ${OUTFILE_RAW} --directory ${dir_name} --verbosity DEBUG --number_bad_modules=2
elif [ ${DETECTOR} == "JF10T01V01" ]
then
time taskset -c ${coreAssociatedConversion} python /home/dbe/git/sf_daq_buffer/scripts/jungfrau_create_pedestals.py --filename ${OUTFILE_RAW} --directory ${dir_name} --verbosity DEBUG --number_bad_modules=1
else
time taskset -c ${coreAssociatedConversion} python /home/dbe/git/sf_daq_buffer/scripts/jungfrau_create_pedestals.py --filename ${OUTFILE_RAW} --directory ${dir_name} --verbosity DEBUG
fi
PEDESTAL_FILE=`echo ${OUTFILE_RAW} | sed 's/.h5/.res.h5/'`
taskset -c ${coreAssociatedConversion} python /home/dbe/git/sf_daq_buffer/scripts/copy_pedestal_file.py ${PEDESTAL_FILE} ${RUN_FILE} ${DETECTOR} ${DET_CONFIG_FILE}
fi
else
echo "Will call compression/convertion ${OUTFILE_RAW} --> ${OUTFILE}"
PREVIOUS_STILL_RUN=0
while [ ${PREVIOUS_STILL_RUN} == 0 ]
do
sleep $(( (RANDOM % 10) + NUMBER_CONVERSION_PROCESSES ))s # sleep a random time to spread out process starts
n=`ps -fe | grep "scripts/export_file.py " | grep -v grep | grep export | wc -l`
if [ ${n} -lt ${NUMBER_CONVERSION_PROCESSES} ]
then
PREVIOUS_STILL_RUN=1
fi
done
date4=$(date +%s)
echo -n "Sleep Time : "
echo $((date4-date3)) | awk '{print int($1/60)":"int($1%60)}'
source /home/dbe/miniconda3/etc/profile.d/conda.sh
conda deactivate
conda activate sf-daq
time taskset -c ${coreAssociatedConversion} python /home/dbe/git/sf_daq_buffer/scripts/export_file.py ${OUTFILE_RAW} ${OUTFILE} ${RUN_FILE} ${DET_CONFIG_FILE}
# if [ ${DETECTOR} == "JF06T32V02" ] || [ ${DETECTOR} == "JF06T08V02" ]
# then
# taskset -c ${coreAssociatedConversion} python /home/dbe/git/sf_daq_buffer/scripts/make_crystfel_list.py ${OUTFILE} ${RUN_FILE} ${DETECTOR}
# fi
date5=$(date +%s)
echo "Finished : "`date`
echo -n "Conversion Time : "
echo $((date5-date4)) | awk '{print int($1/60)":"int($1%60)}'
fi
@@ -1,60 +0,0 @@
#!/bin/bash
export PATH=/home/dbe/miniconda3/bin:$PATH
source /home/dbe/miniconda3/etc/profile.d/conda.sh
conda deactivate
conda activate sf-daq
export OMP_NUM_THREADS=1
#export NUMBA_NUM_THREADS=$1
#OUTDIR=/sf/alvra/data/p18674/raw/run_info/003000/CONVERSION-PAR-${NUMBA_NUM_THREADS}
OUTDIR=/sf/alvra/data/p18674/raw/run_info/003000/CONVERSION-NEW.21-daq1
#coreAssociatedBuffer=(35 34 33 32 31 30 29 28 27 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 8 7 6 5 4 3 2 1 0)
#coreAssociatedBuffer=(35 34 33 32 31 30 29 28 27 18 19 20 21 22 23 24 25 26 9 10 11 12 13 14 15 16 17 8 7 6 5 4 3 2 1 0)
coreAssociatedBuffer=(35 26 34 25 33 24 32 23 31 22 30 21 29 20 28 19 27 18 17 8 16 7 15 6 14 5 13 4 12 3 11 2 10 1 9)
coreAssociated="35,26,34,25,33,24,32,23,31,22,30,21,29,20,28,19,27,18"
#coreAssociated="35,34,33,32,31,30,29,28,27"
#coreAssociated="26,25,24,23,22,21,20,19,18"
#for N in 1 3 5 7 9 11 13 15 17 19 21 23 25 27 29 31 33 35
for N in 10 12 14 16 18 20 1 2 4 6 8 22 24 26 28 30 32 35
do
for n in `seq -f %02g 1 $N`
do
rm -rf /sf/alvra/data/p18674/raw/run_info/003000/conversion_0030${n}.log
sleep 0.1
# c=`echo $n - 1 | bc`
# echo process : $n cores : ${coreAssociatedBuffer[10#${c}]}
# taskset -c ${coreAssociatedBuffer[10#${c}]} python /home/dbe/git/sf_daq_buffer/scripts/export_file.py /sf/alvra/data/p18674/raw//RAW_DATA/test_16M/run_0030${n}.JF06T32V02.h5 /sf/alvra/data/p18674/raw/test_16M/run_0030${n}.JF06T32V02.h5 /sf/alvra/data/p18674/raw/run_info/003000/run_0030${n}.json /gpfs/photonics/swissfel/buffer/config/stream-JF06.json > /sf/alvra/data/p18674/raw/run_info/003000/conversion_0030${n}.log &
echo process : $n cores : ${coreAssociated}
taskset -c ${coreAssociated} python /home/dbe/git/sf_daq_buffer/scripts/export_file.py /sf/alvra/data/p18674/raw//RAW_DATA/test_16M/run_0030${n}.JF06T32V02.h5 /sf/alvra/data/p18674/raw/test_16M/run_0030${n}.JF06T32V02.h5 /sf/alvra/data/p18674/raw/run_info/003000/run_0030${n}.json /gpfs/photonics/swissfel/buffer/config/stream-JF06.json > /sf/alvra/data/p18674/raw/run_info/003000/conversion_0030${n}.log &
done
echo Submitted
A=0
while [ $A -lt 30 ]
do
sleep 30
A=`grep read /sf/alvra/data/p18674/raw/run_info/003000/conversion_003001.log | wc -l`
echo Number of cycles passed $A
done
K=`ps -fe | grep export | grep -v grep | awk '{print $2}' | xargs`
echo Killing `ps -fe | grep export | grep -v grep | awk '{print $2}' | wc -l` processes ${K}
kill -9 ${K}
sleep 2
mkdir -p ${OUTDIR}/${N}
mv /sf/alvra/data/p18674/raw/run_info/003000/conversion_0030* ${OUTDIR}/${N}/.
done