with pedestal and noise
ClusterFinding.ipynb (new file, 204 lines)
File diff suppressed because one or more lines are too long
EtaGeneration.ipynb (new file, 439 lines)
File diff suppressed because one or more lines are too long
moench_g4_hg.json (new file, 10 lines)
@@ -0,0 +1,10 @@
{
    "sigma": 10.0,
    "pixel_size": 25.0,
    "grid_size": 3,
    "resolution": 400,
    "gain": 150.0,
    "noise": 25.0,
    "pedestal": 1000,
    "image_size": [10,10]
}
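A minimal usage sketch (not part of the diff) of loading this configuration, assuming Generator has been imported from the generate module shown below; keys not present in the file, such as photon_energy and device, fall back to the defaults in __init__:

    import json

    with open('moench_g4_hg.json') as f:
        cfg = json.load(f)
    gen = Generator(**cfg)    # equivalent to Generator.fromJSON('moench_g4_hg.json')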
@@ -1,2 +1,2 @@
from .generate import Generator
from .plotting import plot_gaussian
from .plotting import plot_gaussian, charge_and_pixel_plot, plot_cluster_finding
@@ -1,41 +1,98 @@
import torch
import math
import numpy as np


def sum_pixels(t, grid):
    """
    Given a charge density as a torch tensor, sum it into pixels.
    """
    if t.ndim == 2:
        resolution = t.shape[0]
        pixels = np.zeros((grid, grid))

        step = resolution // grid
        for i in range(grid):
            for j in range(grid):
                pixels[i, j] = t[i*step:(i+1)*step, j*step:(j+1)*step].sum()
        return pixels

    elif t.ndim == 3:
        resolution = t.shape[1]
        step = resolution // grid
        pixels = np.zeros((t.shape[0], grid, grid))
        for i in range(grid):
            for j in range(grid):
                pixels[:, i, j] = t[:, i*step:(i+1)*step, j*step:(j+1)*step].sum(axis=1).sum(axis=1)

        return pixels

import time
import hdf5plugin
import h5py
import json
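A quick, illustrative call of sum_pixels (values made up). Note that with resolution = 400 and grid = 3 the integer step is 133, so the last row and column of samples fall outside the summed region:

    t = torch.rand(400, 400)   # stand-in charge map at the configured resolution
    p = sum_pixels(t, 3)       # numpy array of shape (3, 3), one entry per pixel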
class Generator:
    def __init__(self, sigma, pixel_size, grid_size, resolution, device = 'cpu'):
    def __init__(self,
                 sigma,
                 pixel_size,
                 grid_size,
                 resolution,
                 gain = 150,            # ADU/keV
                 noise = 25,            # ADU sigma
                 photon_energy = 8.0,   # keV
                 pedestal = 1000,       # ADU
                 image_size = (10,10),
                 device = 'cpu'):
        self.sigma = sigma
        self.pixel_size = pixel_size
        self.grid_size = grid_size
        self.resolution = resolution
        self.device = device
        self.gain = gain                    # ADU/keV
        self.photon_energy = photon_energy  # keV
        self.noise = noise                  # ADU, TODO! scale with gain
        self.pedestal = pedestal
        self.image_size = tuple(it for it in image_size)

    @staticmethod
    def fromJSON(fname):
        with open(fname) as f:
            j = json.load(f)
        return Generator(**j)
    def _sum_pixels(self, t):
        """
        Given a charge density as a torch tensor, sum it into pixels.
        """
        if t.ndim == 2:
            pixels = np.zeros((self.grid_size, self.grid_size))

            step = self.resolution // self.grid_size
            for i in range(self.grid_size):
                for j in range(self.grid_size):
                    pixels[i, j] = t[i*step:(i+1)*step, j*step:(j+1)*step].sum()
            return pixels

        elif t.ndim == 3:
            step = self.resolution // self.grid_size
            pixels = np.zeros((t.shape[0], self.grid_size, self.grid_size))
            for i in range(self.grid_size):
                for j in range(self.grid_size):
                    pixels[:, i, j] = t[:, i*step:(i+1)*step, j*step:(j+1)*step].sum(axis=1).sum(axis=1).cpu()

            return pixels
    def _apply_gain(self, pixels):
        # Normalise the cluster to one photon, convert keV to ADU and add Gaussian readout noise
        if pixels.ndim == 2:
            pixels = (pixels * self.photon_energy/pixels.sum()*self.gain)
            pixels = np.random.normal(pixels, self.noise)
            pixels = pixels.astype(np.int32)
        elif pixels.ndim == 3:
            s = pixels.sum(axis = 1).sum(axis = 1).mean()
            pixels = (pixels * self.photon_energy/s*self.gain)
            pixels = np.random.normal(pixels, self.noise)
            pixels = pixels.astype(np.int32)
        else:
            raise NotImplementedError(f"Cannot apply gain for {pixels.shape}")
        return pixels
    def dark(self):
        """Generate a dark image for pedestal calculation"""
        # Add the pedestal before casting so that negative noise samples
        # do not wrap around in the unsigned conversion.
        return np.random.normal(loc = self.pedestal, scale = self.noise, size = self.image_size).astype(np.uint16)
    def sparse(self, n_photons):
        """Generate an image with sparse photon hits"""
        noise = self.noise
        self.noise = 0
        mx, my, pixels = self.uniform_hits(n_photons)
        self.noise = noise
        mx = mx/self.pixel_size - 1
        my = my/self.pixel_size - 1

        # Pick random interior pixels (one pixel margin) to place the 3x3 clusters in
        hit_pixels = [(row, col) for row, col in zip(np.random.randint(1, self.image_size[0]-1, n_photons),
                                                     np.random.randint(1, self.image_size[0]-1, n_photons))]
        image = self.dark()
        for i in range(n_photons):
            s = (slice(hit_pixels[i][1]-1, hit_pixels[i][1]+2, 1), slice(hit_pixels[i][0]-1, hit_pixels[i][0]+2, 1))
            image[s] += pixels[i].astype(np.uint16)
            mx[i] += hit_pixels[i][0]
            my[i] += hit_pixels[i][1]
        hits = np.vstack((mx[:,0,0], my[:,0,0])).T
        return image, hits
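An illustrative sketch (not in the commit) of how dark() and sparse() could be combined for a pedestal-corrected frame; g stands for an already configured Generator and the frame counts are arbitrary:

    darks = np.stack([g.dark() for _ in range(100)])      # 100 dark frames
    pedestal_map = darks.mean(axis = 0)                    # per-pixel pedestal estimate
    image, hits = g.sparse(5)                              # frame with 5 photon hits
    corrected = image.astype(np.float64) - pedestal_map    # pedestal-subtracted frame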
    def hit(self, mx, my):
        x = torch.linspace(0, self.pixel_size*self.grid_size,
                           self.resolution, device = self.device)
@@ -43,9 +100,11 @@ class Generator:
        t = 1 / (2*math.pi*self.sigma**2) * \
            torch.exp(-((x - my)**2 / (2*self.sigma**2) + (y - mx)**2 / (2*self.sigma**2)))

        p = sum_pixels(t, self.grid_size)
        p = self._sum_pixels(t)
        if self.device != 'cpu':
            t = t.cpu()

        p = self._apply_gain(p)
        return t, p
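For reference, the charge density built in hit() is the normalised 2D Gaussian centred on the hit position, up to the axis convention used for x and y:

    \rho(x, y) = \frac{1}{2\pi\sigma^2} \exp\!\left(-\frac{(x - m_x)^2 + (y - m_y)^2}{2\sigma^2}\right)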
    def dx(self):
@@ -81,11 +140,11 @@ class Generator:


        # Sum signal in pixels for all N depositions
        step = self.resolution//self.grid_size
        pixels = torch.zeros((n_hits, self.grid_size, self.grid_size))
        for i in range(self.grid_size):
            for j in range(self.grid_size):
                pixels[:,i,j] = ts[:,i*step:(i+1)*step, j*step:(j+1)*step].sum(axis = 1).sum(axis = 1)
        pixels = self._sum_pixels(ts)
        pixels = self._apply_gain(pixels)

        mx = mx.cpu()
        my = my.cpu()

        return mx, my, pixels

@@ -106,9 +165,6 @@ class Generator:
        low = self.pixel_size*(self.grid_size//2)
        high = low + self.pixel_size

        # mx = torch.rand(n_hits,1,1, device = self.device) * (high-low) + low
        # my = torch.rand(n_hits,1,1, device = self.device) * (high-low) + low

        mx = torch.rand(n_hits,1,1, device = self.device)
        my = torch.rand(n_hits,1,1, device = self.device)
        mask = (mx + my > 1)
@@ -130,5 +186,46 @@ class Generator:

        return mx, my, pixels

    def write_clusters_for_eta(self, n_hits, n_frames, tag = ""):
        # Cluster file has the format:
        #   int32_t  frame_number
        #   uint32_t number_of_clusters
        #   (int16_t x, int16_t y, int32_t data[9]) * number_of_clusters
        t0 = time.perf_counter()
        frame_number = np.int32(1)
        number_of_clusters = np.uint32(n_hits)

        with open(f'.data/test_{self.grid_size}x{self.grid_size}{tag}.clust', 'wb') as f:

            # Set up an hdf5 file for the ground truth
            hf = h5py.File(f'.data/test_{self.grid_size}x{self.grid_size}{tag}.h5', 'w')
            hf.create_dataset("x", (n_frames, n_hits), dtype = np.float32)
            hf.create_dataset("y", (n_frames, n_hits), dtype = np.float32)

            for i in range(n_frames):
                print(f'{i}', end = '\r')
                # Generate a new set of n_hits hits
                mx, my, pixels = self.uniform_hits(n_hits)
                # Convert clusters to a numpy structured array
                clusters = np.zeros(n_hits, dtype = [('row', np.int16), ('col', np.int16),
                                                     ('data', np.int32, (self.grid_size, self.grid_size))])
                clusters['data'] = pixels*1000
                clusters['row'] = 1
                clusters['col'] = 1

                # Write one frame
                frame_number.tofile(f)
                number_of_clusters.tofile(f)
                clusters.tofile(f)

                # Write ground truth to hdf5
                hf['x'][frame_number-1] = mx[:,0,0].cpu()
                hf['y'][frame_number-1] = my[:,0,0].cpu()

                frame_number += 1
            print()
            hf.close()
        t = time.perf_counter()-t0
        print(f'Duration: {t:.3f}s, FPS: {n_frames/t:.2f}, hits/s: {n_frames*n_hits/t:.2}')
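A sketch (not in the commit) of reading one frame back from the .clust file written above, assuming grid_size = 3 so that each cluster carries the 3x3 int32 data block described in the comment; the field names simply mirror the writer:

    cluster_dt = np.dtype([('row', np.int16), ('col', np.int16), ('data', np.int32, (3, 3))])
    with open('.data/test_3x3.clust', 'rb') as f:
        frame_number = np.fromfile(f, dtype = np.int32, count = 1)[0]
        n_clusters = int(np.fromfile(f, dtype = np.uint32, count = 1)[0])
        clusters = np.fromfile(f, dtype = cluster_dt, count = n_clusters)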
@@ -1,12 +1,15 @@
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import aare
import seaborn as sns

def plot_gaussian(t, pixel_size, grid_size, ax = None):
    print(f'{t.shape=}')

    resolution = t.shape[0]
    xa = np.linspace(0, grid_size*pixel_size, resolution)
    ticks = [tick for tick in range(0, pixel_size*grid_size+1, pixel_size)]
    ticks = [tick for tick in np.arange(0, pixel_size*grid_size+1, pixel_size)]

    if ax is None:
        fig, ax = plt.subplots(figsize = (7,7))
@@ -25,4 +28,68 @@ def plot_gaussian(t, pixel_size, grid_size, ax = None):

    ax.set_xlabel(r'Position x [$\mu$m]')
    ax.set_ylabel(r'Position y [$\mu$m]')
    return fig, ax, mesh
    return fig, ax, mesh

def plot_cluster_finding(image, clusters, hits, interpolated_hits):
    # We pass in an aare.ClusterVector, so make a numpy array from it
    arr = np.array(clusters)


    fig, ax = plt.subplots(figsize = (8,8))
    im = ax.pcolormesh(image)
    ax.set_aspect('equal')
    aare.add_colorbar(ax, im)

    # Three loops in case we have fewer found clusters than photons
    for i, hit in enumerate(hits):
        ax.plot(*hit, 'x', color = 'red', ms = 7, mew = 1.5)

    for i, hit in enumerate(interpolated_hits):
        ax.plot(hit['x'], hit['y'], 'o', color = 'cyan', ms = 8, mew = 1.5, fillstyle = 'none')

    for i, cluster in enumerate(arr):
        box = mpl.patches.Rectangle((cluster['x']-1, cluster['y']-1), 3, 3, fill = None, ec = 'white', lw = 1.5)
        ax.add_patch(box)


    legend_elements = [mpl.lines.Line2D([0], [0], color='red', lw=0, marker = 'x', label='Hit'),
                       mpl.lines.Line2D([0], [0], color='cyan', lw=0, marker = 'o', fillstyle = 'none', label='Interpolated'),
                       mpl.patches.Patch(fill=None, edgecolor='white', label='Cluster')]

    ax.legend(handles=legend_elements)
    ax.set_xlim(0, image.shape[1])
    ax.set_ylim(0, image.shape[0])
    return fig, ax
def charge_and_pixel_plot(charge, pixels, pos, pixel_size, grid_size):
    fig, axs = plt.subplots(1, 2, figsize = (10,5), constrained_layout=True)
    _, ax, mesh = plot_gaussian(charge, pixel_size=pixel_size, grid_size = grid_size, ax = axs[0])
    axs[0].plot(*pos, 'x', color = 'red', label = 'hit')
    axs[0].set_title('charge')
    axs[0].legend()
    axs[0].set_aspect('equal')
    aare.add_colorbar(axs[0], mesh)
    im = axs[1].imshow(pixels, origin = 'lower')

    labels = [i for i in range(grid_size)]
    print(labels)
    sns.heatmap(pixels, annot=True, fmt="d", linewidths=.5, ax=axs[1], cmap = 'viridis',
                cbar = False, xticklabels = labels, yticklabels = labels)
    axs[1].set_xticks([i for i in range(grid_size)])
    axs[1].set_aspect('equal')
    axs[1].set_title('pixel response')

    # Put tick labels at the pixel centres
    ticks = [0.5 + i for i in range(grid_size)]
    labels = [i for i in range(grid_size)]
    axs[1].set_xticks(ticks)
    axs[1].set_xticklabels(labels)
    axs[1].set_yticks(ticks)
    axs[1].set_yticklabels(labels)
    axs[1].set_xlim(0, grid_size)
    axs[1].set_ylim(0, grid_size)
    axs[1].set_xlabel('col')
    axs[1].set_ylabel('row')

    aare.add_colorbar(axs[1], axs[1].collections[0])
    fig.set_constrained_layout_pads(w_pad=0.40, h_pad=0.05)  # More space for labels
    return fig, axs
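A possible end-to-end usage sketch (not part of the diff): generate a single hit at the centre of the middle pixel and show the charge cloud next to the pixel response. It assumes hit() takes the hit position in micrometres within the grid_size x grid_size super-pixel, as the commented-out lines in uniform_hits suggest:

    g = Generator.fromJSON('moench_g4_hg.json')    # 25 um pixels, 3x3 grid, sigma = 10
    t, p = g.hit(37.5, 37.5)                       # charge map and pixel values in ADU
    fig, axs = charge_and_pixel_plot(t, p, (37.5, 37.5),
                                     pixel_size = g.pixel_size, grid_size = g.grid_size)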