diff --git a/scripts/export_file.py b/scripts/export_file.py
index 4aa0388..52d6224 100644
--- a/scripts/export_file.py
+++ b/scripts/export_file.py
@@ -7,6 +7,12 @@
 import numpy as np
 import jungfrau_utils as ju
 
+import sys
+sys.path.append('/home/dbe/git/sf_daq_buffer/scripts')
+import postprocess_raw
+
+import os
+
 parser = argparse.ArgumentParser()
 parser.add_argument("file_in", type=str)
 
@@ -27,8 +33,9 @@ with open(args.json_run, "r") as run_file:
     data = json.load(run_file)
 
 detector_params = data["detectors"][detector_name]
-compression = detector_params.get("compression", True)
-conversion = detector_params.get("adc_to_energy", True)
+compression = detector_params.get("compression", False)
+conversion = detector_params.get("adc_to_energy", False)
+disabled_modules = detector_params.get("disabled_modules", [])
 if conversion:
     mask = detector_params.get("mask", True)
     mask_double_pixels = detector_params.get("mask_double_pixels", True)
@@ -46,30 +53,51 @@ if not mask and mask_double_pixels:
     warnings.warn("mask_double_pixels set to False")
     mask_double_pixels = False
 
-with ju.File(
-    args.file_in,
-    gain_file=gain_file,
-    pedestal_file=pedestal_file,
-    conversion=conversion,
-    mask=mask,
-    gap_pixels=gap_pixels,
-    geometry=geometry,
-    parallel=False,
-) as juf:
-    n_input_frames = len(juf["data"])
-    good_frames = np.nonzero(juf["is_good_frame"])[0]
-    n_output_frames = len(good_frames)
-
-    juf.handler.mask_double_pixels = mask_double_pixels
-    juf.export(
-        args.file_out,
-        index=good_frames,
-        roi=None,
-        compression=compression,
-        factor=factor,
-        dtype=None,
-        batch_size=35,
-    )
+file_tmp = args.file_in
+if len(disabled_modules)>0:
+    print(f"Will reduce data file, disabled_modules: {disabled_modules}")
+    if conversion:
+        file_tmp = args.file_out+".tmp"
+    else:
+        file_tmp = args.file_out
+    postprocess_raw.postprocess_raw(args.file_in, file_tmp, compression=compression, disabled_modules=disabled_modules)
+
+if conversion:
+
+    with ju.File(
+        file_tmp,
+        gain_file=gain_file,
+        pedestal_file=pedestal_file,
+        conversion=conversion,
+        mask=mask,
+        gap_pixels=gap_pixels,
+        geometry=geometry,
+        parallel=False,
+    ) as juf:
+        n_input_frames = len(juf["data"])
+        good_frames = np.nonzero(juf["is_good_frame"])[0]
+        n_output_frames = len(good_frames)
+
+        juf.handler.mask_double_pixels = mask_double_pixels
+        juf.export(
+            args.file_out,
+            index=good_frames,
+            roi=None,
+            compression=compression,
+            factor=factor,
+            dtype=None,
+            batch_size=35,
+        )
+    # Guard the cleanup: when disabled_modules is empty, file_tmp is still
+    # args.file_in — removing it unconditionally would delete the raw input.
+    if file_tmp != args.file_in:
+        os.remove(file_tmp)
+
+else:
+    with h5py.File(file_tmp, "r") as juf:
+        n_input_frames = len(juf[f"data/{detector_name}/data"])
+        good_frames = np.nonzero(juf[f"data/{detector_name}/is_good_frame"])[0]
+        n_output_frames = len(good_frames)
 
 # Utility info
 with h5py.File(args.file_out, "r") as h5f: