import os
from array import array
from multiprocessing import Pool

import h5py
import numpy as np
from ROOT import TH1D, TH2D, TCanvas, TF1

import aare

_cfg = {}                 # global config dict, populated by init()
_aduToKev3DMap = None     # global calibration lookup table (cfg['caliFile']): [aduBin, y, x] -> keV
_pedestalAduFrame = None  # global per-pixel pedestal (mean ADU), set by getPedestalAndNoise*()
_noiseEneFrame = None     # global per-pixel noise in keV, set by getPedestalAndNoise*()
nChunks = 16              # number of frame chunks processed in parallel


def init(cfg):
    """Store the run configuration and calibration map at module level.

    cfg must contain at least 'caliFile' (3-D ADU->keV lookup table, or None
    to disable calibration) and 'Roi'; 'NChunks' optionally overrides the
    default chunk count of 16.
    """
    global _cfg, _aduToKev3DMap, nChunks
    _cfg = cfg
    _aduToKev3DMap = cfg['caliFile']
    nChunks = _cfg.get('NChunks', 16)


def convertAduFrameToEnergyFrame(aduFrame):
    """Convert a pedestal-subtracted ADU frame to an energy frame (keV).

    Performs per-pixel linear interpolation in the calibration table
    _aduToKev3DMap[aduBin, y, x] (10 ADU per bin).  Two table layouts are
    supported:
      * 1640 bins: indexed by |ADU // 10|, with the sign restored afterwards;
      * any other size: indexed by ADU // 10 shifted by 100 bins, i.e. the
        table covers ADU values from -1000 upwards directly.
    Returns aduFrame unchanged when no calibration table is loaded.
    An optional cfg['energyScalingFactor'] is applied at the end.
    """
    if _aduToKev3DMap is None:
        return aduFrame

    nBins = _aduToKev3DMap.shape[0]
    NY, NX = _aduToKev3DMap.shape[1:3]
    rowIdx = np.arange(NY).reshape(-1, 1)
    colIdx = np.arange(NX)

    if nBins == 1640:
        # Legacy layout: bin index from the magnitude, sign reapplied at the end.
        idxFrame = np.abs(aduFrame // 10).astype(np.int32)
        np.minimum(idxFrame, nBins - 2, out=idxFrame)  # keep idxFrame+1 in range
        e0 = _aduToKev3DMap[idxFrame, rowIdx, colIdx]
        e1 = _aduToKev3DMap[idxFrame + 1, rowIdx, colIdx]
        energyFrame = e0 + (e1 - e0) / 10 * (np.abs(aduFrame) - idxFrame * 10)
        energyFrame *= np.sign(aduFrame)
    else:
        aduPerBin = 10
        nExtraBins = 1000 // aduPerBin  # the table starts at -1000 ADU
        idxFrame = (aduFrame // aduPerBin).astype(np.int32) + nExtraBins
        # FIX: clamp on BOTH sides.  The original only clamped the upper bound,
        # so ADU values below -1000 produced negative indices that silently
        # wrapped around to the far end of the calibration table.
        np.clip(idxFrame, 0, nBins - 2, out=idxFrame)
        e0 = _aduToKev3DMap[idxFrame, rowIdx, colIdx]
        e1 = _aduToKev3DMap[idxFrame + 1, rowIdx, colIdx]
        energyFrame = e0 + (e1 - e0) / aduPerBin * (aduFrame - (idxFrame - nExtraBins) * aduPerBin)

    if 'energyScalingFactor' in _cfg:
        energyFrame *= _cfg['energyScalingFactor']
    return energyFrame


def bookHistograms(energy, suffix='', energyBinWidth=0.1, isMC=False):
    """Create the set of analysis histograms, detached from any ROOT directory.

    energy          -- nominal photon energy in keV; spectra range up to 5.5x it
    suffix          -- appended to every histogram name (e.g. a per-chunk id)
    energyBinWidth  -- bin width of the 1-D energy spectra, in keV
    isMC            -- also book the MC-only charge-collection-efficiency histogram
    Returns a dict mapping short names to histogram objects.
    """
    histMaxEnergy = energy * 5.5
    nEnergyBins = int(histMaxEnergy // energyBinWidth)
    roi_x0, roi_x1, roi_y0, roi_y1 = _cfg['Roi'][0], _cfg['Roi'][1], _cfg['Roi'][2], _cfg['Roi'][3]
    roi_width = roi_x1 - roi_x0
    roi_height = roi_y1 - roi_y0
    interpolationBins = 10  # sub-pixel division for the interpolated maps

    histograms = {}
    histograms['h1_ClusterEnergy'] = TH1D(
        f'h1_ClusterEnergy{suffix}', f'h1_ClusterEnergy{suffix}',
        nEnergyBins, -1, histMaxEnergy)
    histograms['h1_PixelEnergy'] = TH1D(
        f'h1_PixelEnergy{suffix}', f'h1_PixelEnergy{suffix}',
        nEnergyBins, -1, histMaxEnergy)
    histograms['h1_CenterPixelEnergy'] = TH1D(
        f'h1_CenterPixelEnergy{suffix}', f'h1_CenterPixelEnergy{suffix}',
        nEnergyBins, -1, histMaxEnergy)
    histograms['h2_Qc'] = TH2D(
        f'h2_Qc{suffix}', f'h2_Qc{suffix}',
        201, 0, 1, 201, 0, 1)
    histograms['h2_counts'] = TH2D(
        f'h2_counts{suffix}', f'h2_counts{suffix}',
        roi_width, roi_x0, roi_x1, roi_height, roi_y0, roi_y1)
    histograms['h2_HitsSumFrame'] = TH2D(
        f'h2_HitsSumFrame{suffix}', f'h2_HitsSumFrame{suffix}',
        roi_width, roi_x0, roi_x1, roi_height, roi_y0, roi_y1)
    histograms['h2_interpolated'] = TH2D(
        f'h2_interpolated{suffix}', f'h2_interpolated{suffix}',
        roi_width * interpolationBins, roi_x0, roi_x1,
        roi_height * interpolationBins, roi_y0, roi_y1)
    histograms['h2_interpolatedSubpixel'] = TH2D(
        f'h2_interpolatedSubpixel{suffix}', f'h2_interpolatedSubpixel{suffix}',
        interpolationBins, 0, 1, interpolationBins, 0, 1)

    # Detach from the current ROOT directory so ownership stays with Python.
    for hist in histograms.values():
        hist.SetDirectory(0)

    histograms['h1_ClusterEnergy'].SetTitle('Cluster Energy;Energy [keV];Counts')
    histograms['h1_PixelEnergy'].SetTitle('Pixel Energy;Energy [keV];Counts')
    histograms['h1_CenterPixelEnergy'].SetTitle('Center Pixel Energy;Energy [keV];Counts')
    histograms['h2_Qc'].SetTitle('Charge weighted center;#eta_x;#eta_y;Counts')

    if isMC:
        histograms['h1_ChargeCollectionEfficiency'] = TH1D(
            f'h1_ChargeCollectionEfficiency{suffix}',
            f'h1_ChargeCollectionEfficiency{suffix}', 400, 0.8, 1.01)
        histograms['h1_ChargeCollectionEfficiency'].SetDirectory(0)

    # Replace every histogram with a clone.  NOTE(review): purpose unclear --
    # presumably to guarantee independent objects per call; confirm whether the
    # clones also need SetDirectory(0), since TH1::Clone may re-attach them to
    # the current ROOT directory.
    for key, hist in list(histograms.items()):
        histograms[key] = hist.Clone()
    return histograms


def _processFrames(idxChunk):
    """Process one chunk of frames (covers both single and double photon events).

    Reads raw ADU frames from disk, subtracts the pedestal, converts to energy,
    runs the cluster finder on the ROI and fills a private set of histograms,
    which is returned for merging in process().  Optionally writes clusters to
    an HDF5 file when cfg['writeClusters'] is True.
    """
    startFrame = _cfg['NFrame'] * idxChunk // nChunks
    endFrame = min(_cfg['NFrame'] * (idxChunk + 1) // nChunks, _cfg['NFrame'])
    print(f'Processing frames from {startFrame} to {endFrame}...')

    Energy = _cfg['energy']
    signalFileNames = _cfg['signalFileNames']
    NX = _cfg['NX']
    NY = _cfg['NY']
    headerSize = _cfg['headerSize']
    Roi = _cfg['Roi']
    # NOTE(review): the hard-coded 56 presumably equals cfg['headerSize'] (in
    # 16-bit words) -- confirm; a different headerSize would make this count wrong.
    nFramePerFile = os.path.getsize(f'{signalFileNames[0]}') // (NX * NY + 56) // 2

    _hists = bookHistograms(Energy, f'_{idxChunk}')

    # Cluster finder over the ROI: arg1 = frame size, arg2 = seed threshold.
    nxROI = Roi[1] - Roi[0]
    nyROI = Roi[3] - Roi[2]
    CF = aare.VarClusterFinder((nxROI, nyROI), 5)
    CF.set_peripheralThresholdFactor(2)
    CF.set_noiseMap(_noiseEneFrame)

    writeClusters = _cfg.get('writeClusters') == True
    if writeClusters:
        h5_file = h5py.File(f'clusters_chunk{idxChunk}.h5', 'w')
        # Create extensible datasets, grown in blocks as clusters accumulate.
        dset_clusters = h5_file.create_dataset(
            'clusters', (0, 3, 3), maxshape=(None, 3, 3), dtype='f4',
            chunks=True, compression='gzip')
        dset_refs = h5_file.create_dataset(
            'referencePoint', (0, 2), maxshape=(None, 2), dtype='i4', chunks=True)
        cluster_list = []
        refpoint_list = []

        def flush_h5_buffer():
            # Append the buffered clusters to the datasets and clear the buffers.
            if not cluster_list:
                return
            old_len = dset_clusters.shape[0]
            new_len = old_len + len(cluster_list)
            dset_clusters.resize((new_len, 3, 3))
            dset_refs.resize((new_len, 2))
            dset_clusters[old_len:new_len] = cluster_list
            dset_refs[old_len:new_len] = refpoint_list
            cluster_list.clear()
            refpoint_list.clear()
        # TODO(review): nothing below ever appends to cluster_list /
        # refpoint_list, so the output file stays empty -- the cluster-filling
        # code appears to be missing.

    try:
        for iFrame in range(startFrame, endFrame):
            if iFrame % 10000 == 0:
                print(f'Processing frame {iFrame}...')
            # Split the global frame index into (file index, index within file).
            idxFile, idxFrame = divmod(iFrame, nFramePerFile)
            try:
                if _cfg.get('nFiber') == 2:
                    # Two fibers: each file holds half of the rows of a frame.
                    offset = (headerSize + idxFrame * (NX * NY // 2 + headerSize)) * 2
                    fileName1 = signalFileNames[idxFile]
                    fileName2 = fileName1.replace('d0', 'd1')
                    _half1 = np.fromfile(f'{fileName1}', dtype=np.uint16, offset=offset,
                                         count=NX * NY // 2).astype(np.int32).reshape(NY // 2, NX)
                    _half2 = np.fromfile(f'{fileName2}', dtype=np.uint16, offset=offset,
                                         count=NX * NY // 2).astype(np.int32).reshape(NY // 2, NX)
                    signalAduFrame = np.concatenate([_half1, _half2], axis=0)
                    del _half1, _half2
                else:
                    offset = (headerSize + idxFrame * (NX * NY + headerSize)) * 2
                    # NOTE(review): reshape(NX, NY) here vs (NY//2, NX) above --
                    # consistent only when NX == NY; confirm the intended layout.
                    signalAduFrame = np.fromfile(f'{signalFileNames[idxFile]}',
                                                 dtype=np.uint16, offset=offset,
                                                 count=NX * NY).astype(np.int32).reshape(NX, NY)
            except Exception as e:
                print(f'Error reading frame {idxFrame} from file {signalFileNames[idxFile]}: {e}, stop processing this chunk ({startFrame}-{endFrame})')
                return _hists  # the finally block still flushes/closes the h5 file

            signalAduFrame = signalAduFrame - _pedestalAduFrame
            signalEneFrame = convertAduFrameToEnergyFrame(signalAduFrame)

            CF.find_clusters_X(signalEneFrame[Roi[2]:Roi[3], Roi[0]:Roi[1]])
            clusters = CF.hits()
            for idxCluster in range(len(clusters)):
                energy = clusters['energy'][idxCluster]
                _hists['h1_ClusterEnergy'].Fill(energy)
                if energy < 5:  # skip clusters below 5 keV for pixel-level plots
                    continue
                xs = clusters['cols'][idxCluster]
                ys = clusters['rows'][idxCluster]
                enes = clusters['enes'][idxCluster]
                for x, y, ene in zip(xs, ys, enes):
                    _hists['h1_PixelEnergy'].Fill(ene)
                    # Map ROI-local pixel coordinates back to detector coordinates.
                    _hists['h2_HitsSumFrame'].Fill(x + Roi[0], y + Roi[2], ene)
    finally:
        # FIX: previously the h5 file was flushed/closed only on the normal exit
        # path, leaking the handle (and any buffered clusters) on early return.
        if writeClusters:
            flush_h5_buffer()
            h5_file.close()

    return _hists


def process():
    """Run _processFrames over all chunks in a process pool and merge results.

    The merged histograms are published in the module-level 'hists' dict.
    """
    with Pool(_cfg['NThread'], maxtasksperchild=1) as p:
        results = p.map(_processFrames, range(nChunks))

    global hists
    hists = bookHistograms(_cfg['energy'])
    for h in hists.values():
        h.SetDirectory(0)

    mergedKeys = ('h1_ClusterEnergy', 'h1_PixelEnergy', 'h1_CenterPixelEnergy',
                  'h2_Qc', 'h2_interpolated', 'h2_interpolatedSubpixel',
                  'h2_counts', 'h2_HitsSumFrame')
    for _hists in results:
        for key in mergedKeys:
            hists[key].Add(_hists[key])


def getPedestalAndNoise():
    """Compute per-pixel pedestal (ADU) and noise (keV) from the pedestal run.

    Fills the module-level _pedestalAduFrame and _noiseEneFrame.  Every
    pedestal frame is converted to energy individually (in a worker pool) so
    the noise is measured in keV after the non-linear ADU->keV calibration.
    """
    NX, NY = _cfg['NX'], _cfg['NY']
    pedestalFileName = _cfg['pedestalFileName']
    if _cfg.get('nFiber') == 2:
        # Two fibers: each file holds half of the rows; strip the 56-word header.
        pedestalFileName2 = pedestalFileName.replace('d0', 'd1')
        _frames1 = np.fromfile(f'{pedestalFileName}', dtype=np.uint16).astype(np.int32)
        _frames1 = _frames1.reshape(-1, NX * NY // 2 + 56)
        _frames1 = _frames1[:, 56:].reshape(-1, NY // 2, NX)
        _frames2 = np.fromfile(f'{pedestalFileName2}', dtype=np.uint16).astype(np.int32)
        _frames2 = _frames2.reshape(-1, NX * NY // 2 + 56)
        _frames2 = _frames2[:, 56:].reshape(-1, NY // 2, NX)
        pedestalAduFrames = np.concatenate([_frames1, _frames2], axis=1)
        del _frames1, _frames2
    else:
        pedestalAduFrames = np.fromfile(f'{pedestalFileName}', dtype=np.uint16).astype(np.int32)
        pedestalAduFrames = pedestalAduFrames.reshape(-1, NX * NY + 56)
        # NOTE(review): reshape(-1, NX, NY) vs (-1, NY//2, NX) in the two-fiber
        # branch -- consistent only when NX == NY; confirm the intended layout.
        pedestalAduFrames = pedestalAduFrames[:, 56:].reshape(-1, NX, NY)
    pedestalAduFrames = pedestalAduFrames[pedestalAduFrames.shape[0] // 10:]  # skip the first 10% frames

    global _pedestalAduFrame, _noiseEneFrame
    _pedestalAduFrame = np.mean(pedestalAduFrames, axis=0)
    noiseAduFrame = np.std(pedestalAduFrames, axis=0, ddof=1)
    with Pool(16) as p:  # NOTE(review): pool size hard-coded; cfg['NThread'] is used elsewhere
        pedestalEneFrames = p.map(convertAduFrameToEnergyFrame, pedestalAduFrames)
    _noiseEneFrame = np.std(pedestalEneFrames, axis=0, ddof=1)
    print(f'Average noise = {np.mean(_noiseEneFrame):.3f} keV; {np.mean(noiseAduFrame):.3f} ADU')
    del pedestalAduFrames, pedestalEneFrames


def getPedestalAndNoise_simplified():
    """Faster pedestal/noise estimate (single-fiber layout only).

    Unlike getPedestalAndNoise(), the per-pixel noise is computed in ADU and
    then passed once through the ADU->keV conversion instead of converting
    every frame -- an approximation, since the calibration is non-linear.
    Fills the module-level _pedestalAduFrame and _noiseEneFrame.
    """
    NX, NY = _cfg['NX'], _cfg['NY']
    pedestalFileName = _cfg['pedestalFileName']
    pedestalAduFrames = np.fromfile(f'{pedestalFileName}', dtype=np.uint16).astype(np.int32)
    pedestalAduFrames = pedestalAduFrames.reshape(-1, NX * NY + 56)  # 56-word header per frame
    pedestalAduFrames = pedestalAduFrames[:, 56:].reshape(-1, NX, NY)
    pedestalAduFrames = pedestalAduFrames[pedestalAduFrames.shape[0] // 10:]  # skip the first 10% frames

    global _pedestalAduFrame, _noiseEneFrame
    _pedestalAduFrame = np.mean(pedestalAduFrames, axis=0)
    noiseAduFrame = np.std(pedestalAduFrames, axis=0, ddof=1)
    _noiseEneFrame = convertAduFrameToEnergyFrame(noiseAduFrame)
    print(f'Average noise = {np.mean(_noiseEneFrame):.3f} keV; {np.mean(noiseAduFrame):.3f} ADU')
    del pedestalAduFrames