added image analysis tools and small updates throughout
This commit is contained in:
File diff suppressed because one or more lines are too long
@@ -19,3 +19,4 @@ finally:
|
||||
from . import utils
|
||||
from . import plot
|
||||
from . import analysis
|
||||
from . import image
|
||||
|
||||
@@ -30,7 +30,7 @@ def setup_cachedirs(pgroup=None, cachedir=None):
|
||||
if cachedir is not None:
|
||||
# explicit directory given, use this choice
|
||||
memory = Memory(cachedir, verbose=0, compress=2)
|
||||
return
|
||||
return memory
|
||||
|
||||
try:
|
||||
if pgroup is None:
|
||||
@@ -48,6 +48,8 @@ def setup_cachedirs(pgroup=None, cachedir=None):
|
||||
except PermissionError as e:
|
||||
cachedir = "/tmp"
|
||||
memory = Memory(cachedir, verbose=0, compress=2)
|
||||
|
||||
return memory
|
||||
|
||||
|
||||
setup_cachedirs()
|
||||
@@ -177,6 +179,80 @@ def get_contrast_images(
|
||||
)
|
||||
|
||||
|
||||
@memory.cache(ignore=["batch_size"])  # batch_size only changes chunking, never the result
def perform_image_stack_sum(
    fileset,
    channel="JF16T03V01",
    alignment_channels=None,
    batch_size=10,
    roi: Optional[ROI] = None,
    preview=False,
    lower_cutoff_threshold=None,
):
    """
    Sums all images of an image channel from a fileset
    (e.g. "run0352/data/acq0001.*.h5" or step.fnames from a SFScanInfo object),
    optionally restricted to a region of interest (roi).

    Allows alignment, i.e. reducing only to a common subset with the given
    alignment_channels.

    The summation is performed in batches of ``batch_size`` images to reduce
    maximum memory requirements.

    If ``preview`` is True, only the first batch is summed and returned.

    If ``lower_cutoff_threshold`` is not None, pixel values below the
    threshold are clipped up to it before summing (e.g. to suppress
    detector noise).

    Returns the summed image as a 2D numpy array.
    """
    with SFDataFiles(*fileset) as data:
        if alignment_channels is not None:
            channels = [channel] + list(alignment_channels)
        else:
            channels = [channel]

        # reduce to pulses present in all requested channels
        subset = data[channels]
        subset.drop_missing()

        images = subset[channel]

        # create an empty accumulator with the right (ROI) shape,
        # determined from the first image of the stack
        # NOTE(review): assumes images[0] yields a 3D (pulse, row, col)
        # slab like the batches below — confirm against sfdata
        im = images[0]
        if roi is None:
            im_roi = im[:]
        else:
            im_roi = im[:, roi.rows, roi.cols]

        summed = np.zeros(im_roi[0].shape)

        for index_slice, im in images.in_batches(batch_size):
            if roi is None:
                im_roi = im[:]
            else:
                im_roi = im[:, roi.rows, roi.cols]

            if lower_cutoff_threshold is not None:
                im_roi = np.clip(im_roi, lower_cutoff_threshold, np.inf)

            # accumulate over the pulse axis of this batch
            summed = summed + np.sum(im_roi, axis=0)

            if preview:
                # only the first batch was requested
                break

    return summed
|
||||
|
||||
|
||||
def fit_2d_gaussian(image, roi: Optional[ROI] = None, plot=False):
|
||||
"""
|
||||
2D Gaussian fit using LMFit for a given image and an optional region of interest.
|
||||
|
||||
46
src/cristallina/image.py
Normal file
46
src/cristallina/image.py
Normal file
@@ -0,0 +1,46 @@
|
||||
import numpy as np
|
||||
|
||||
import matplotlib
|
||||
from matplotlib import pyplot as plt
|
||||
|
||||
from skimage import exposure
|
||||
from skimage.filters import rank
|
||||
from skimage.morphology import disk
|
||||
|
||||
|
||||
def plot_image(image, norm='linear', cmap=plt.cm.viridis, title="Image", ax=None):
    """ Plots an image (array-like or PIL image) using some default settings.

    Parameters
    ----------
    image : array-like or PIL image
        Image data passed on to ``imshow``.
    norm : str or matplotlib.colors.Normalize, optional
        Color scaling, e.g. 'linear' or 'log'.
    cmap : matplotlib colormap, optional
        Colormap used for the plot.
    title : str, optional
        Title set on the axes.
    ax : matplotlib.axes.Axes, optional
        Axes to draw into; a new figure is created if None.
    """

    if ax is None:
        fig, ax = plt.subplots(constrained_layout=True, dpi=150)

    # bug fix: cmap was previously ignored (plt.cm.viridis was hard-coded)
    ax.imshow(image, origin='lower', cmap=cmap, norm=norm)
    ax.set_title(title)
|
||||
|
||||
|
||||
def enhance_image(image, algorithm='autolevel', radius=5):
    """ Enhances a given image (2d array) using one out of
        'equalize_hist'
        'entropy'
        'autolevel'
        'global_equalize'
    algorithms with a given (pixel) radius.

    Raises
    ------
    ValueError
        If ``algorithm`` is not one of the supported names
        (previously this failed with an UnboundLocalError).
    """

    # clip negative values, then normalize by the clipped value range
    # bug fix: the clipped array was previously discarded and the raw
    # image was normalized instead
    arr_norm = np.clip(image, 0, np.max(image))
    arr_norm = arr_norm / (np.max(arr_norm) - np.min(arr_norm))

    if algorithm == 'equalize_hist':
        img_algo = exposure.equalize_adapthist(arr_norm, kernel_size=radius, clip_limit=0.99)

    elif algorithm == 'entropy':
        img_algo = rank.entropy(arr_norm, footprint=disk(radius*2))

    elif algorithm == 'autolevel':
        img_algo = rank.autolevel(arr_norm, disk(radius*2))

    elif algorithm == 'global_equalize':
        img_algo = rank.equalize(arr_norm, footprint=disk(radius*2))

    else:
        raise ValueError(f"unknown enhancement algorithm: {algorithm!r}")

    return img_algo
|
||||
@@ -95,14 +95,15 @@ def print_run_info(
|
||||
|
||||
print(f"Total file size: {total_size/(1024*1024*1024):.1f} GB\n")
|
||||
|
||||
try:
|
||||
for step in scan:
|
||||
ch = step.channels
|
||||
print("\n".join([str(c) for c in ch]))
|
||||
# print only channels for first step
|
||||
break
|
||||
except sfdata.errors.NoUsableFileError:
|
||||
logger.warning("Cannot access files on /sf...")
|
||||
if print_channels:
|
||||
try:
|
||||
for step in scan:
|
||||
ch = step.channels
|
||||
print("\n".join([str(c) for c in ch]))
|
||||
# print only channels for first step
|
||||
break
|
||||
except sfdata.errors.NoUsableFileError:
|
||||
logger.warning("Cannot access files on /sf...")
|
||||
|
||||
|
||||
def number_of_steps(scan_number_or_scan):
|
||||
|
||||
Reference in New Issue
Block a user