Reduce RAM usage; format outputs

+52 -42
@@ -13,44 +13,52 @@ torch.backends.cudnn.deterministic = True
 torch.backends.cudnn.benchmark = False
 
 configs = {}
-configs['SiemenStar'] = {
-    'dataFiles': [f'/home/xie_x1/MLXID/DataProcess/Samples/Clusters_2Photon_chunk{i}.h5' for i in range(16)],
-    'modelVersion': '251001_2',
+configs['SiemenStarLowerLeft'] = {
+    'dataFiles': [f'/home/xie_x1/MLXID/DataProcess/Samples/SiemenStarLowerLeft_old/Clusters_2Photon_CS7_chunk{i}.h5' for i in range(200)],
+    'modelVersion': '251124',
     'roi': [140, 230, 120, 210], # x_min, x_max, y_min, y_max
-    'noise': 0.13 # in keV
+    'nSize': 7,
 }
 
+configs['SiemenStarLowerRight'] = {
+    'dataFiles': [f'/home/xie_x1/MLXID/DataProcess/Samples/SiemenStarLowerRight/2Photon_CS7_chunk{i}.h5' for i in range(320)], ### 320 files
+    'modelVersion': '251124',
+    'roi': [235, 345, 110, 220], # x_min, x_max, y_min, y_max
+    'nSize': 7,
+}
+
+task = 'SiemenStarLowerRight'
+config = configs[task]
+
 BinningFactor = 10
-Roi = configs['SiemenStar']['roi']
+Roi = configs[task]['roi']
 X_st, X_ed, Y_st, Y_ed = Roi
 mlSuperFrame = np.zeros(((Y_ed-Y_st)*BinningFactor, (X_ed-X_st)*BinningFactor))
 countFrame = np.zeros((Y_ed-Y_st, X_ed-X_st))
 subpixelDistribution = np.zeros((BinningFactor, BinningFactor))
 
 if __name__ == "__main__":
-    task = 'SiemenStar'
-    config = configs[task]
-
     model = models.get_double_photon_model_class(config['modelVersion'])().cuda()
-    model.load_state_dict(torch.load(f'/home/xie_x1/MLXID/DeepLearning/Models/doublePhoton{config["modelVersion"]}_15.3keV_Noise0.13keV_E300.pth', weights_only=True))
-    predictions = []
-    referencePoints = []
-    nChunks = len(config['dataFiles']) // 16
+    model.load_state_dict(torch.load(f'/home/xie_x1/MLXID/DeepLearning/Models/doublePhoton{config["modelVersion"]}_15keV_Noise0.13keV_E300.pth', weights_only=True))
+    nChunks = np.ceil(len(config['dataFiles']) / 16).astype(int)
     for idxChunk in range(nChunks):
+        predictions = []
+        referencePoints = []
         stFileIdx = idxChunk * 16
         edFileIdx = min((idxChunk + 1) * 16, len(config['dataFiles']))
         sampleFiles = config['dataFiles'][stFileIdx : edFileIdx]
         print(f'Processing files {stFileIdx} to {edFileIdx}...')
         dataset = doublePhotonInferenceDataset(
             sampleFiles,
-            sampleRatio=1,
+            sampleRatio=1.,
             datasetName='Inference',
-            # noiseKeV=0.13
-        )
+            nSize=config['nSize']
+        )
         dataLoader = torch.utils.data.DataLoader(
             dataset,
             batch_size=8192,
             shuffle=False,
-            num_workers=32,
+            num_workers=16,
             pin_memory=True,
         )
 
@@ -63,42 +71,44 @@ if __name__ == "__main__":
             outputs = model(inputs).view(-1, 2) # 2B x 2
             predictions.append(outputs.cpu())
 
-    predictions = torch.cat(predictions, dim=0)
-    print(f'mean x = {torch.mean(predictions[:, 0])}, std x = {torch.std(predictions[:, 0])}')
-    print(f'mean y = {torch.mean(predictions[:, 1])}, std y = {torch.std(predictions[:, 1])}')
-    referencePoints = np.concatenate(referencePoints, axis=0)
-    ### duplicate reference points for 2-photon clusters
-    referencePoints = np.repeat(referencePoints, 2, axis=0)
-    absolutePositions = predictions.numpy() + referencePoints
-    print(absolutePositions[:5, 0] - Roi[0], absolutePositions[:5, 1] - Roi[2])
-
-    hit_x = np.floor((absolutePositions[:, 0] - Roi[0]) * BinningFactor).astype(int)
-    hit_x = np.clip(hit_x, 0, mlSuperFrame.shape[1]-1)
-    hit_y = np.floor((absolutePositions[:, 1] - Roi[2]) * BinningFactor).astype(int)
-    hit_y = np.clip(hit_y, 0, mlSuperFrame.shape[0]-1)
-    print(hit_x[:5], hit_y[:5])
-    np.add.at(mlSuperFrame, (hit_y, hit_x), 1)
+        predictions = torch.cat(predictions, dim=0)
+        predictions += torch.tensor([config['nSize']/2., config['nSize']/2.]).unsqueeze(0) # adjust back to original coordinate system
+        print(f'mean x = {torch.mean(predictions[:, 0])}, std x = {torch.std(predictions[:, 0])}')
+        print(f'mean y = {torch.mean(predictions[:, 1])}, std y = {torch.std(predictions[:, 1])}')
+        referencePoints = np.concatenate(referencePoints, axis=0) ### the lower-left corner of the cluster in absolute coordinates
+        ### duplicate reference points for 2-photon clusters
+        referencePoints = np.repeat(referencePoints, 2, axis=0)
+        absolutePositions = predictions.numpy() + referencePoints
+
+        hit_x = np.floor((absolutePositions[:, 0] - Roi[0]) * BinningFactor).astype(int)
+        hit_x = np.clip(hit_x, 0, mlSuperFrame.shape[1]-1)
+        hit_y = np.floor((absolutePositions[:, 1] - Roi[2]) * BinningFactor).astype(int)
+        hit_y = np.clip(hit_y, 0, mlSuperFrame.shape[0]-1)
+        np.add.at(mlSuperFrame, (hit_y, hit_x), 1)
 
-    np.add.at(countFrame, ((referencePoints[:, 1] - Roi[2]).astype(int),
-                           (referencePoints[:, 0] - Roi[0]).astype(int)), 1)
+        np.add.at(countFrame, ((referencePoints[:, 1] - Roi[2]).astype(int),
+                               (referencePoints[:, 0] - Roi[0]).astype(int)), 1)
 
-    np.add.at(subpixelDistribution,
-              (np.floor((absolutePositions[:, 1] % 1) * BinningFactor).astype(int),
-               np.floor((absolutePositions[:, 0] % 1) * BinningFactor).astype(int)), 1)
+        np.add.at(subpixelDistribution,
+                  (np.floor((absolutePositions[:, 1] % 1) * BinningFactor).astype(int),
+                   np.floor((absolutePositions[:, 0] % 1) * BinningFactor).astype(int)), 1)
 
+    import os
+    os.makedirs(f'InferenceResults/{task}', exist_ok=True)
+
     plt.imshow(mlSuperFrame, origin='lower', extent=[Y_st, Y_ed, X_st, X_ed])
     plt.colorbar()
-    plt.savefig('InferenceResults/SiemenStar_ML_2Photon_superFrame.png', dpi=300)
-    np.save('InferenceResults/SiemenStar_ML_2Photon_superFrame.npy', mlSuperFrame)
+    plt.savefig(f'InferenceResults/{task}/ML_2Photon_superFrame.png', dpi=300)
+    np.save(f'InferenceResults/{task}/ML_2Photon_superFrame.npy', mlSuperFrame)
     plt.clf()
 
     plt.imshow(countFrame, origin='lower', extent=[Y_st, Y_ed, X_st, X_ed])
     plt.colorbar()
-    plt.savefig('InferenceResults/SiemenStar_count_2Photon_Frame.png', dpi=300)
-    np.save('InferenceResults/SiemenStar_count_2Photon_Frame.npy', countFrame)
+    plt.savefig(f'InferenceResults/{task}/count_2Photon_Frame.png', dpi=300)
+    np.save(f'InferenceResults/{task}/count_2Photon_Frame.npy', countFrame)
 
     plt.clf()
     plt.imshow(subpixelDistribution, origin='lower')
     plt.colorbar()
-    plt.savefig('InferenceResults/SiemenStar_subpixel_2Photon_Distribution.png', dpi=300)
-    np.save('InferenceResults/SiemenStar_subpixel_2Photon_Distribution.npy', subpixelDistribution)
+    plt.savefig(f'InferenceResults/{task}/subpixel_2Photon_Distribution.png', dpi=300)
+    np.save(f'InferenceResults/{task}/subpixel_2Photon_Distribution.npy', subpixelDistribution)
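Note: the RAM saving in both scripts comes from the same restructuring. Instead of collecting predictions from all data files and histogramming once at the end, each loop iteration now processes a slice of 16 files and folds its hits into the fixed-size accumulator frames immediately, so peak memory is bounded by one chunk. The switch from len(dataFiles) // 16 to np.ceil(len(dataFiles) / 16) also stops a trailing partial chunk of files from being dropped. A minimal sketch of the pattern, with infer_positions and the shapes as illustrative stand-ins rather than code from this commit:

    import numpy as np

    # Hypothetical stand-in for per-chunk inference; the real scripts run the
    # network over a DataLoader built from the chunk's sample files.
    def infer_positions(files):
        rng = np.random.default_rng(len(files))
        return rng.uniform(140, 230, size=(1000, 2))  # absolute (x, y) hits

    def process_in_chunks(dataFiles, roi, binningFactor=10, chunkSize=16):
        x_min, x_max, y_min, y_max = roi
        # Fixed-size accumulator: memory no longer grows with the file count.
        superFrame = np.zeros(((y_max - y_min) * binningFactor,
                               (x_max - x_min) * binningFactor))
        nChunks = int(np.ceil(len(dataFiles) / chunkSize))
        for idxChunk in range(nChunks):
            st = idxChunk * chunkSize
            ed = min((idxChunk + 1) * chunkSize, len(dataFiles))
            positions = infer_positions(dataFiles[st:ed])  # one chunk in RAM
            hit_x = np.clip(np.floor((positions[:, 0] - x_min) * binningFactor).astype(int),
                            0, superFrame.shape[1] - 1)
            hit_y = np.clip(np.floor((positions[:, 1] - y_min) * binningFactor).astype(int),
                            0, superFrame.shape[0] - 1)
            np.add.at(superFrame, (hit_y, hit_x), 1)  # histogram, then drop the chunk
        return superFrame

    frame = process_in_chunks([f'chunk{i}.h5' for i in range(200)],
                              roi=[140, 230, 120, 210])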
+77 -64
@@ -15,15 +15,26 @@ torch.backends.cudnn.deterministic = True
 torch.backends.cudnn.benchmark = False
 
 configs = {}
-configs['SiemenStar'] = {
-    'dataFiles': [f'/mnt/sls_det_storage/moench_data/MLXID/Samples/Measurement/2504_SOLEIL_SiemenStarClusters_MOENCH040_150V/clusters_chunk{i}.h5' for i in range(200)],
+configs['SiemenStarLowerLeft'] = {
+    'dataFiles': [f'/mnt/sls_det_storage/moench_data/MLXID/Samples/Measurement/2504_SOLEIL_SiemenStarClusters_MOENCH040_150V/SiemenStarLowerLeft/clusters_chunk{i}.h5' for i in range(200)], # 200 files
     'modelVersion': '251022',
     'roi': [140, 230, 120, 210], # x_min, x_max, y_min, y_max
-    'noise': 0.13 # in keV
+    'noise': 0.13, # keV; for the model selection
 }
 
+configs['SiemenStarLowerRight'] = {
+    'dataFiles': [f'/home/xie_x1/MLXID/DataProcess/Samples/SiemenStarLowerRight/1Photon_CS3_chunk{i}.h5' for i in range(320)], # 320 files
+    'modelVersion': '251022',
+    'roi': [235, 345, 110, 220], # x_min, x_max, y_min, y_max
+    'noise': 0.13, # keV
+}
+
+task = 'SiemenStarLowerRight'
+config = configs[task]
+
 BinningFactor = 10
-numberOfAugOps = 6
-Roi = configs['SiemenStar']['roi']
+numberOfAugOps = 8
+Roi = config['roi']
 X_st, X_ed, Y_st, Y_ed = Roi
 mlSuperFrame = np.zeros(((Y_ed-Y_st)*BinningFactor, (X_ed-X_st)*BinningFactor))
 countFrame = np.zeros((Y_ed-Y_st, X_ed-X_st))
@@ -58,22 +69,19 @@ def apply_inverse_transforms(predictions: torch.Tensor, numberOfAugOps: int) ->
     return corrected.mean(dim=1)
 
 if __name__ == "__main__":
-    task = 'SiemenStar'
-    config = configs[task]
-
     model = models.get_model_class(config['modelVersion'])().cuda()
-    model.load_state_dict(torch.load(f'/home/xie_x1/MLXID/DeepLearning/Models/singlePhoton{config["modelVersion"]}_15.3keV_Noise{config["noise"]}keV_E500_aug8.pth', weights_only=True))
-    predictions = []
-    referencePoints = []
-    nChunks = len(config['dataFiles']) // 32 + 1
+    model.load_state_dict(torch.load(f'/home/xie_x1/MLXID/DeepLearning/Models/singlePhoton{config["modelVersion"]}_15keV_Noise{config["noise"]}keV_E300_aug8.pth', weights_only=True))
+    nChunks = np.ceil(len(config['dataFiles']) / 16).astype(int)
     for idxChunk in range(nChunks):
-        stFileIdx = idxChunk * 32
-        edFileIdx = min((idxChunk + 1) * 32, len(config['dataFiles']))
+        predictions = []
+        referencePoints = []
+        stFileIdx = idxChunk * 16
+        edFileIdx = min((idxChunk + 1) * 16, len(config['dataFiles']))
         sampleFiles = config['dataFiles'][stFileIdx : edFileIdx]
         print(f'Processing files {stFileIdx} to {edFileIdx}...')
         dataset = singlePhotonDataset(
             sampleFiles,
-            sampleRatio=1.0,
+            sampleRatio=1,
             datasetName='Inference',
             numberOfAugOps=numberOfAugOps
         )
@@ -81,7 +89,7 @@ if __name__ == "__main__":
             dataset,
             batch_size=8192,
             shuffle=False,
-            num_workers=32,
+            num_workers=16,
             pin_memory=True,
         )
 
@@ -95,67 +103,72 @@ if __name__ == "__main__":
            outputs = model(inputs_cuda)[:, :2].cpu() # only x and y
            _chunk_predictions.append(outputs)
        predictions.extend(_chunk_predictions)
 
        ### save samples and inferred positions
-        _h5_file = h5py.File(f'InferredSamples/Chunk{idxChunk}.h5', 'w')
-        dset_1Photon_clusters = _h5_file.create_dataset(
-            'clusters', (0, 5, 5), maxshape=(None, 5, 5), dtype='f4',
-            chunks=True, compression='gzip'
-        )
-        dset_1photon_label = _h5_file.create_dataset(
-            'labels', (0, 4), maxshape=(None, 4), dtype='f4',
-            chunks=True
-        )
-        _len = dataset.samples.shape[0]
-        dset_1Photon_clusters.resize((_len, 5, 5))
-        dset_1photon_label.resize((_len, 4))
-        _chunk_samples = np.zeros(( _len, 5, 5), dtype=np.float32)
-        _chunk_samples[:, 1:-1, 1:-1] = dataset.samples[:, 0, :, :]
-        dset_1Photon_clusters[:] = _chunk_samples
+        # _h5_file = h5py.File(f'InferredSamples/Chunk{idxChunk}.h5', 'w')
+        # dset_1Photon_clusters = _h5_file.create_dataset(
+        #     'clusters', (0, 5, 5), maxshape=(None, 5, 5), dtype='f4',
+        #     chunks=True, compression='gzip'
+        # )
+        # dset_1photon_label = _h5_file.create_dataset(
+        #     'labels', (0, 4), maxshape=(None, 4), dtype='f4',
+        #     chunks=True
+        # )
+        # _len = dataset.samples.shape[0]
+        # dset_1Photon_clusters.resize((_len, 5, 5))
+        # dset_1photon_label.resize((_len, 4))
+        # _chunk_samples = np.zeros(( _len, 5, 5), dtype=np.float32)
+        # _chunk_samples[:, 1:-1, 1:-1] = dataset.samples[:, 0, :, :]
+        # dset_1Photon_clusters[:] = _chunk_samples
 
-        _chunk_predictions = torch.cat(_chunk_predictions, dim=0)
-        _chunk_predictions = apply_inverse_transforms(_chunk_predictions, numberOfAugOps)
-        _chunk_labels = np.zeros((_len, 4), dtype=np.float32)
-        _chunk_labels[:, :2] = _chunk_predictions.numpy()
-        dset_1photon_label[:] = _chunk_labels
-        _h5_file.close()
+        # _chunk_predictions = torch.cat(_chunk_predictions, dim=0)
+        # _chunk_predictions = apply_inverse_transforms(_chunk_predictions, numberOfAugOps)
+        # _chunk_labels = np.zeros((_len, 4), dtype=np.float32)
+        # _chunk_labels[:, :2] = _chunk_predictions.numpy()
+        # dset_1photon_label[:] = _chunk_labels
+        # _h5_file.close()
 
-        np.savez(f'InferredSamples/Chunk{idxChunk}.npz', samples=_chunk_samples, labels=_chunk_labels)
+        # np.savez(f'InferredSamples/Chunk{idxChunk}.npz', samples=_chunk_samples, labels=_chunk_labels)
 
-    predictions = torch.cat(predictions, dim=0)
-    predictions = apply_inverse_transforms(predictions, numberOfAugOps)
-    predictions += torch.tensor([1.5, 1.5]).unsqueeze(0) # adjust back to original coordinate system
-    referencePoints = np.concatenate(referencePoints, axis=0)
-    print(f'mean x = {torch.mean(predictions[:, 0])}, std x = {torch.std(predictions[:, 0])}')
-    print(f'mean y = {torch.mean(predictions[:, 1])}, std y = {torch.std(predictions[:, 1])}')
-    absolutePositions = predictions.numpy() + referencePoints[:, :2] - 1
-
-    hit_x = np.floor((absolutePositions[:, 0] - Roi[0]) * BinningFactor).astype(int)
-    hit_x = np.clip(hit_x, 0, mlSuperFrame.shape[1]-1)
-    hit_y = np.floor((absolutePositions[:, 1] - Roi[2]) * BinningFactor).astype(int)
-    hit_y = np.clip(hit_y, 0, mlSuperFrame.shape[0]-1)
-    np.add.at(mlSuperFrame, (hit_y, hit_x), 1)
+        predictions = torch.cat(predictions, dim=0)
+        predictions = apply_inverse_transforms(predictions, numberOfAugOps)
+        predictions += torch.tensor([1.5, 1.5]).unsqueeze(0) # adjust back to original coordinate system
+        referencePoints = np.concatenate(referencePoints, axis=0)
+        print(f'mean x = {torch.mean(predictions[:, 0])}, std x = {torch.std(predictions[:, 0])}')
+        print(f'mean y = {torch.mean(predictions[:, 1])}, std y = {torch.std(predictions[:, 1])}')
+        absolutePositions = predictions.numpy() + referencePoints[:, :2] - 1
+
+        hit_x = np.floor((absolutePositions[:, 0] - Roi[0]) * BinningFactor).astype(int)
+        hit_x = np.clip(hit_x, 0, mlSuperFrame.shape[1]-1)
+        hit_y = np.floor((absolutePositions[:, 1] - Roi[2]) * BinningFactor).astype(int)
+        hit_y = np.clip(hit_y, 0, mlSuperFrame.shape[0]-1)
+        np.add.at(mlSuperFrame, (hit_y, hit_x), 1)
 
-    np.add.at(countFrame, ((referencePoints[:, 1] - Roi[2]).astype(int),
-                           (referencePoints[:, 0] - Roi[0]).astype(int)), 1)
+        np.add.at(countFrame, ((referencePoints[:, 1] - Roi[2]).astype(int),
+                               (referencePoints[:, 0] - Roi[0]).astype(int)), 1)
 
-    np.add.at(subpixelDistribution,
-              (np.floor((absolutePositions[:, 1] % 1) * BinningFactor).astype(int),
-               np.floor((absolutePositions[:, 0] % 1) * BinningFactor).astype(int)), 1)
+        np.add.at(subpixelDistribution,
+                  (np.floor((absolutePositions[:, 1] % 1) * BinningFactor).astype(int),
+                   np.floor((absolutePositions[:, 0] % 1) * BinningFactor).astype(int)), 1)
 
+    import os
+    os.makedirs(f'InferenceResults/{task}', exist_ok=True)
+
+    plt.clf()
     plt.imshow(mlSuperFrame, origin='lower', extent=[Y_st, Y_ed, X_st, X_ed])
     plt.colorbar()
-    plt.savefig('InferenceResults/SiemenStar_ML_superFrame.png', dpi=300)
-    np.save('InferenceResults/SiemenStar_ML_superFrame.npy', mlSuperFrame)
-    plt.clf()
+    plt.savefig(f'InferenceResults/{task}/ML_1Photon_superFrame.png', dpi=300)
+    np.save(f'InferenceResults/{task}/ML_1Photon_superFrame.npy', mlSuperFrame)
 
     plt.clf()
     plt.imshow(countFrame, origin='lower', extent=[Y_st, Y_ed, X_st, X_ed])
     plt.colorbar()
-    plt.savefig('InferenceResults/SiemenStar_count_Frame.png', dpi=300)
-    np.save('InferenceResults/SiemenStar_count_Frame.npy', countFrame)
+    plt.savefig(f'InferenceResults/{task}/count_Frame.png', dpi=300)
+    np.save(f'InferenceResults/{task}/count_Frame.npy', countFrame)
 
     plt.clf()
-    plt.imshow(subpixelDistribution, origin='lower')
+    plt.imshow(subpixelDistribution, origin='lower', extent=[0, BinningFactor, 0, BinningFactor])
     plt.colorbar()
-    plt.savefig('InferenceResults/SiemenStar_subpixel_Distribution.png', dpi=300)
-    np.save('InferenceResults/SiemenStar_subpixel_Distribution.npy', subpixelDistribution)
+    plt.savefig(f'InferenceResults/{task}/subpixel_1Photon_Distribution.png', dpi=300)
+    np.save(f'InferenceResults/{task}/subpixel_1Photon_Distribution.npy', subpixelDistribution)
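Note: both scripts accumulate hits with np.add.at rather than fancy-indexed +=. With many photons landing in the same super-resolution bin, frame[hit_y, hit_x] += 1 is buffered and counts repeated indices only once, while the unbuffered np.add.at counts every occurrence; the same holds for the 10x10 subpixel histogram built from the fractional parts (absolutePositions % 1). A quick check of the difference:

    import numpy as np

    frame = np.zeros(4)
    idx = np.array([1, 1, 1, 2])

    frame[idx] += 1            # buffered: duplicate indices count only once
    print(frame)               # [0. 1. 1. 0.]

    frame[:] = 0
    np.add.at(frame, idx, 1)   # unbuffered: every occurrence counts
    print(frame)               # [0. 3. 1. 0.]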