Startup
This commit is contained in:
1032
script/___Lib/diffutils.py
Normal file
1032
script/___Lib/diffutils.py
Normal file
File diff suppressed because it is too large
Load Diff
750
script/___Lib/ijutils.py
Normal file
750
script/___Lib/ijutils.py
Normal file
@@ -0,0 +1,750 @@
|
||||
####################################################################################################
|
||||
# Facade to ImageJ functionality
|
||||
####################################################################################################
|
||||
|
||||
#More information on:
|
||||
# Image: https://imagej.nih.gov/ij/docs/guide/146-28.html#toc-Section-28
|
||||
# Process: https://imagej.nih.gov/ij/docs/guide/146-29.html#toc-Section-29
|
||||
# Analyze: https://imagej.nih.gov/ij/docs/guide/146-30.html#toc-Section-30
|
||||
|
||||
import ch.psi.utils.Convert as Convert
|
||||
import ch.psi.pshell.imaging.Utils as Utils
|
||||
from startup import get_context
|
||||
import java.awt.image.BufferedImage as BufferedImage
|
||||
import jarray
|
||||
|
||||
import ij.IJ as IJ
|
||||
import ij.ImageJ as ImageJ
|
||||
import ij.WindowManager as WindowManager
|
||||
import ij.ImagePlus as ImagePlus
|
||||
import ij.Prefs as Prefs
|
||||
import ij.io.FileSaver as FileSaver
|
||||
import ij.io.Opener as Opener
|
||||
|
||||
import ij.process.ImageProcessor as ImageProcessor
|
||||
import ij.process.ByteProcessor as ByteProcessor
|
||||
import ij.process.ShortProcessor as ShortProcessor
|
||||
import ij.process.ColorProcessor as ColorProcessor
|
||||
import ij.process.FloatProcessor as FloatProcessor
|
||||
import ij.process.ImageConverter as ImageConverter
|
||||
import ij.process.AutoThresholder as AutoThresholder
|
||||
import ij.process.LUT as LUT
|
||||
import ij.measure.Measurements as Measurements
|
||||
import ij.measure.ResultsTable as ResultsTable
|
||||
import ij.plugin.filter.Analyzer as Analyzer
|
||||
import ij.plugin.filter.GaussianBlur as GaussianBlur
|
||||
import ij.plugin.filter.Filters as Filters
|
||||
import ij.plugin.filter.FFTFilter as FFTFilter
|
||||
import ij.plugin.filter.BackgroundSubtracter as BackgroundSubtracter
|
||||
import ij.plugin.filter.EDM as EDM
|
||||
import ij.plugin.filter.Shadows as Shadows
|
||||
import ij.plugin.filter.UnsharpMask as UnsharpMask
|
||||
import ij.plugin.filter.MaximumFinder as MaximumFinder
|
||||
import ij.plugin.filter.EDM as EDM
|
||||
import ij.plugin.filter.Shadows as Shadows
|
||||
import ij.plugin.filter.UnsharpMask as UnsharpMask
|
||||
import ij.plugin.filter.RankFilters as RankFilters
|
||||
import ij.plugin.filter.Convolver as Convolver
|
||||
import ij.plugin.filter.ParticleAnalyzer as ParticleAnalyzer
|
||||
|
||||
import ij.plugin.ContrastEnhancer as ContrastEnhancer
|
||||
import ij.plugin.Thresholder as Thresholder
|
||||
import ij.plugin.ImageCalculator as ImageCalculator
|
||||
import ij.plugin.FFT as FFT
|
||||
import ij.plugin.Concatenator as Concatenator
|
||||
|
||||
#ImageJ customizations
|
||||
import ch.psi.pshell.imaging.ij.FFTMath as FFTMath
|
||||
import ch.psi.pshell.imaging.ij.FFTFilter as FFTFilter
|
||||
import ch.psi.pshell.imaging.ij.Binary as Binary
|
||||
import ch.psi.pshell.imaging.ij.Slicer as Slicer
|
||||
|
||||
|
||||
#This eliminates the error messages due to the bug on ij.gui.ImageWindow row 555 (ij is null)
|
||||
if not "_image_j" in globals().keys():
|
||||
_image_j = ImageJ(None, ImageJ.NO_SHOW)
|
||||
|
||||
###################################################################################################
|
||||
#Image creation, copying & saving
|
||||
###################################################################################################
|
||||
def load_image(image, title = "img"):
    """
    Create an ImagePlus from a file name or a BufferedImage.

    image: file name (string) or BufferedImage
    title: title of the returned ImagePlus
    """
    if isinstance(image, str):
        #Fix: 'file' was unbound if expandPath raised, causing a NameError below.
        #Fall back to the raw name when no context setup is available.
        file = image
        try:
            file = get_context().setup.expandPath(image)
        except:
            pass
        try:
            image = Utils.newImage(file)
        except:
            #try loading from assembly
            image = get_context().setup.getAssemblyImage(image)
    return ImagePlus(title, image)
|
||||
|
||||
|
||||
|
||||
def load_array(array, width=None, height=None, title = "img"):
|
||||
"""
|
||||
array: 1d array if width and height defined , or else 2d array to be flattened.
|
||||
"""
|
||||
#2D
|
||||
if (width==None) and (height==None):
|
||||
if array.typecode == '[B': proc = ByteProcessor(len(array[0]), len(array), Convert.flatten(array))
|
||||
elif array.typecode == '[S': proc = ShortProcessor(len(array[0]), len(array), Convert.flatten(array), None)
|
||||
elif array.typecode in ['[I','[F', '[D']: proc = FloatProcessor(len(array[0]), len(array), Convert.flatten(array))
|
||||
else: raise Exception("Invalid array type")
|
||||
#1D
|
||||
else:
|
||||
if (len(array) > width*height):
|
||||
array = array[:(width*height)]
|
||||
if array.typecode == 'b': proc = ByteProcessor(width, height, array)
|
||||
elif array.typecode == 'h': proc = ShortProcessor(width, height, array, None)
|
||||
elif array.typecode in ['i','f','d']: proc = FloatProcessor(width, height, array)
|
||||
else: raise Exception("Invalid array type")
|
||||
return ImagePlus(title, proc)
|
||||
|
||||
def save_image(ip, path=None, format = None):
|
||||
"""
|
||||
Saves image or stack
|
||||
If parameters omitted, saves image again in same location, with same format.
|
||||
"""
|
||||
fs = FileSaver(ip)
|
||||
if path == None: fs.save()
|
||||
else:
|
||||
try:
|
||||
path = get_context().setup.expandPath(path)
|
||||
except:
|
||||
pass
|
||||
if format == "bmp": fs.saveAsBmp(path)
|
||||
elif format == "fits": fs.saveAsFits(path)
|
||||
elif format == "gif": fs.saveAsGif(path)
|
||||
elif format == "jpeg": fs.saveAsJpeg(path)
|
||||
elif format == "lut": fs.saveAsLut(path)
|
||||
elif format == "pgm": fs.saveAsPgm(path)
|
||||
elif format == "png": fs.saveAsPng(path)
|
||||
elif format == "raw" and ip.getImageStackSize()>1: fs.saveAsRawStack(path)
|
||||
elif format == "raw": fs.saveAsRaw(path)
|
||||
elif format == "txt": fs.saveAsText(path)
|
||||
elif format == "tiff" and ip.getImageStackSize()>1: fs.saveAsTiffStack(path)
|
||||
elif format == "tiff": fs.saveAsTiff(path)
|
||||
elif format == "zip": fs.saveAsZip(path)
|
||||
|
||||
|
||||
def open_image(path):
    """
    Open an image file using ij.io.Opener.

    path: image file name, expanded through the context setup when available.
    Returns an ImagePlus (Opener.openImage returns null/None if the file
    cannot be opened).
    """
    #Path expansion may fail when no full context is available; use the raw path then.
    try:
        path = get_context().setup.expandPath(path)
    except:
        pass
    opener = Opener()
    return opener.openImage(path)
|
||||
|
||||
def new_image(width, height, image_type="byte", title = "img", fill_color = None):
|
||||
"""
|
||||
type = "byte", "short", "color" or "float"
|
||||
"""
|
||||
if image_type == "byte": p=ByteProcessor(width, height)
|
||||
elif image_type == "short": p=ShortProcessor(width, height)
|
||||
elif image_type == "color": p=ColorProcessor(width, height)
|
||||
elif image_type == "float": p=FloatProcessor(width, height)
|
||||
else: raise Exception("Invalid image type " + str(image_type))
|
||||
ret = ImagePlus(title, p)
|
||||
if fill_color is not None:
|
||||
p.setColor(fill_color)
|
||||
p.resetRoi()
|
||||
p.fill()
|
||||
return ret
|
||||
|
||||
def get_ip_array(ip):
|
||||
"""
|
||||
Returns data array of ImagePlus
|
||||
"""
|
||||
if type(ip.getProcessor()) == FloatProcessor:
|
||||
return ip.getProcessor().getFloatArray()
|
||||
else:
|
||||
return ip.getProcessor().getIntArray()
|
||||
|
||||
|
||||
def sub_image(ip, x, y, width, height):
|
||||
"""
|
||||
Returns new ImagePlus
|
||||
"""
|
||||
ip.setRoi(x, y, width, height)
|
||||
p=ip.getProcessor().crop()
|
||||
return ImagePlus(ip.getTitle() + " subimage", p)
|
||||
|
||||
def copy_image(ip):
|
||||
return ip.duplicate()
|
||||
|
||||
def copy_image_to(ip_source, ip_dest, x, y):
|
||||
ip_source.deleteRoi()
|
||||
ip_source.copy()
|
||||
ip_dest.setRoi(x, y, ip_source.getWidth(), ip_source.getHeight())
|
||||
ip_dest.paste()
|
||||
ip_dest.changes = False
|
||||
ip_dest.deleteRoi()
|
||||
|
||||
def pad_image(ip, left=0, right=0, top=0, bottom=0, fill_color = None):
|
||||
p=ip.getProcessor()
|
||||
width = p.getWidth() + left + right
|
||||
height = p.getHeight() + top + bottom
|
||||
image_type = get_image_type(ip)
|
||||
ret = new_image(width, height, image_type, ip.getTitle() + " padded", fill_color)
|
||||
ip.deleteRoi()
|
||||
ip.copy()
|
||||
ret.setRoi(left, top, p.getWidth(), p.getHeight())
|
||||
ret.paste()
|
||||
ret.changes = False
|
||||
ret.deleteRoi()
|
||||
return ret
|
||||
|
||||
def get_image_type(ip):
    """
    Name of the processor type backing an ImagePlus.

    Returns: "byte", "short", "color" or "float"
    (anything that is not Short/Color/Float is reported as "byte")
    """
    type_names = {
        ShortProcessor: "short",
        ColorProcessor: "color",
        FloatProcessor: "float",
    }
    return type_names.get(type(ip.getProcessor()), "byte")
|
||||
|
||||
###################################################################################################
|
||||
#Image type conversion
|
||||
###################################################################################################
|
||||
def grayscale(ip, do_scaling=None, in_place=True):
|
||||
ip = ip if in_place else ip.duplicate()
|
||||
ic = ImageConverter(ip)
|
||||
if do_scaling is not None:
|
||||
ic.setDoScaling(do_scaling)
|
||||
ic.convertToGray8()
|
||||
return ip
|
||||
|
||||
def get_channel(ip, channel):
|
||||
"""
|
||||
Return a channel from a color image as a new ImagePlus.
|
||||
channel: "red", "green","blue", "alpha", "brightness",
|
||||
"""
|
||||
proc = ip.getProcessor()
|
||||
if channel == "red": ret = proc.getChannel(1, None)
|
||||
elif channel == "green": ret = proc.getChannel(2, None)
|
||||
elif channel == "blue": ret = proc.getChannel(3, None)
|
||||
elif channel == "alpha": ret = proc.getChannel(4, None)
|
||||
elif channel == "brightness": ret = proc.getBrightness()
|
||||
else: raise Exception("Invalid channel " + str(channel))
|
||||
return ImagePlus(ip.getTitle() + " channel: " + channel, ret)
|
||||
|
||||
###################################################################################################
|
||||
#Thresholder
|
||||
###################################################################################################
|
||||
def threshold(ip, min_threshold, max_threshold, in_place=True):
|
||||
ip = ip if in_place else ip.duplicate()
|
||||
ip.getProcessor().setThreshold(min_threshold, max_threshold, ImageProcessor.NO_LUT_UPDATE)
|
||||
WindowManager.setTempCurrentImage(ip)
|
||||
Thresholder().run("mask")
|
||||
return ip
|
||||
|
||||
def auto_threshold(ip, dark_background = False, method = AutoThresholder.getMethods()[0], in_place=True):
    """
    Apply an automatic threshold and convert the image to a binary mask.

    method: one of AutoThresholder.getMethods() (defaults to the first entry)
    Returns the thresholded ImagePlus (a duplicate when in_place is False).
    """
    target = ip if in_place else ip.duplicate()
    target.getProcessor().setAutoThreshold(method, dark_background, ImageProcessor.NO_LUT_UPDATE)
    #Thresholder operates on the "current" image, so register ours temporarily
    WindowManager.setTempCurrentImage(target)
    Thresholder().run("mask")
    return target
|
||||
|
||||
###################################################################################################
|
||||
#Binary functions
|
||||
###################################################################################################
|
||||
def binary_op(ip, op, dark_background=False, iterations=1, count=1, in_place=True):
|
||||
"""
|
||||
op = "erode","dilate", "open","close", "outline", "fill holes", "skeletonize"
|
||||
"""
|
||||
ip = ip if in_place else ip.duplicate()
|
||||
binary = Binary(count, iterations, dark_background )
|
||||
binary.setup(op, ip)
|
||||
binary.run(ip.getProcessor())
|
||||
return ip
|
||||
|
||||
def binary_erode(ip, dark_background=False, iterations=1, count=1, in_place=True):
|
||||
return binary_op(ip, "erode", dark_background, iterations, count, in_place)
|
||||
|
||||
def binary_dilate(ip, dark_background=False, iterations=1, count=1, in_place=True):
|
||||
return binary_op(ip, "dilate", dark_background, iterations, count, in_place)
|
||||
|
||||
def binary_open(ip, dark_background=False, iterations=1, count=1, in_place=True):
|
||||
return binary_op(ip, "open", dark_background, iterations, count, in_place)
|
||||
|
||||
def binary_close(ip, dark_background=False, iterations=1, count=1, in_place=True):
    """
    Binary close (dilate then erode). See binary_op for parameters.
    """
    #Fix: forward in_place so in_place=False is honored, like the other binary_* wrappers
    return binary_op(ip, "close", dark_background, iterations, count, in_place)
|
||||
|
||||
def binary_outline(ip, dark_background=False, in_place=True):
|
||||
return binary_op(ip, "outline", dark_background, in_place=in_place)
|
||||
|
||||
def binary_fill_holes(ip, dark_background=False, in_place=True):
|
||||
return binary_op(ip, "fill holes", dark_background, in_place=in_place)
|
||||
|
||||
def binary_skeletonize(ip, dark_background=False, in_place=True):
|
||||
return binary_op(ip, "skeletonize", dark_background, in_place=in_place)
|
||||
|
||||
def analyse_particles(ip, min_size, max_size, fill_holes = True, exclude_edges = True, extra_measurements = 0, \
|
||||
print_table = False, output_image = "outlines", minCirc = 0.0, maxCirc = 1.0):
|
||||
"""
|
||||
Returns: tuple (ResultsTable results_table, ImagePlus output_image)
|
||||
output_image = "outlines", "overlay_outlines", "masks", "overlay_masks", "roi_masks" or None
|
||||
extra_measurements = mask with Measurements.CENTROID, PERIMETER, RECT, MIN_MAX, ELLIPSE, CIRCULARITY, AREA_FRACTION, INTEGRATED_DENSITY, INVERT_Y, FERET, KURTOSIS, MEDIAN, MODE, SKEWNESS, STD_DEV
|
||||
Measurements is a mask of flags: https://imagej.nih.gov/ij/developer/api/ij/measure/Measurements.html.
|
||||
Returned ResultsTable hold public fields: https://imagej.nih.gov/ij/developer/api/ij/measure/ResultsTable.html
|
||||
|
||||
"""
|
||||
rt = ResultsTable()
|
||||
show_summary = False
|
||||
options = ParticleAnalyzer.SHOW_RESULTS | ParticleAnalyzer.CLEAR_WORKSHEET
|
||||
"""
|
||||
ParticleAnalyzer.SHOW_ROI_MASKS | \
|
||||
#ParticleAnalyzer.RECORD_STARTS | \
|
||||
#ParticleAnalyzer.ADD_TO_MANAGER | \
|
||||
#ParticleAnalyzer.FOUR_CONNECTED | \
|
||||
#ParticleAnalyzer.IN_SITU_SHOW | \
|
||||
#ParticleAnalyzer.SHOW_NONE | \
|
||||
"""
|
||||
if show_summary: options = options | ParticleAnalyzer.DISPLAY_SUMMARY
|
||||
if output_image == "outlines": options = options | ParticleAnalyzer.SHOW_OUTLINES
|
||||
elif output_image == "overlay_outlines": options = options | ParticleAnalyzer.SHOW_OVERLAY_OUTLINES
|
||||
elif output_image == "masks": options = options | ParticleAnalyzer.SHOW_MASKS
|
||||
elif output_image == "overlay_masks": options = options | ParticleAnalyzer.SHOW_OVERLAY_MASKS
|
||||
elif output_image == "roi_masks": options = options | ParticleAnalyzer.SHOW_ROI_MASKS
|
||||
#ParticleAnalyzer.SHOW_ROI_MASKS
|
||||
if exclude_edges: options = options | ParticleAnalyzer.EXCLUDE_EDGE_PARTICLES
|
||||
if fill_holes: options = options | ParticleAnalyzer.INCLUDE_HOLES
|
||||
measurements = Measurements.AREA | Measurements.MEAN | Measurements.CENTER_OF_MASS | Measurements.RECT
|
||||
pa = ParticleAnalyzer(options, measurements, rt, min_size, max_size, minCirc, maxCirc)
|
||||
pa.setHideOutputImage(True)
|
||||
pa.setResultsTable(rt)
|
||||
if pa.analyze(ip):
|
||||
if print_table:
|
||||
print rt.getColumnHeadings()
|
||||
for row in range (rt.counter):
|
||||
print rt.getRowAsString(row)
|
||||
return (rt, pa.getOutputImage())
|
||||
|
||||
###################################################################################################
|
||||
#Image operators
|
||||
###################################################################################################
|
||||
def op_image(ip1, ip2, op, float_result=False, in_place=True):
    """
    Combine two images with ij.plugin.ImageCalculator, in place on ip1 by default.

    op = "add","subtract", "multiply","divide", "and", "or", "xor", "min", "max", "average", "difference" or "copy"
    float_result: if True, request a 32-bit float result (adds the "float" modifier)
    Returns ip1 (or its processed duplicate when in_place is False).
    """
    ip1 = ip1 if in_place else ip1.duplicate()
    ic = ImageCalculator()
    pars = op
    if float_result:
        #Fix: the modifier was previously appended to a copy of 'op' that was
        #never passed to run(), so float_result had no effect at all
        pars = pars + " float"
    ic.run(pars, ip1, ip2)
    return ip1
|
||||
|
||||
def op_const(ip, op, val, in_place=True):
    """
    Apply a constant operation to an image, in place by default.

    op = "add","subtract", "multiply","divide", "and", "or", "xor", "min", "max", "gamma", "set" or "log", "exp", "sqr", "sqrt","abs"
    val: constant operand (ignored by the unary ops "log", "exp", "sqr", "sqrt" and "abs")
    Returns the processed ImagePlus.
    Raises Exception on an unknown op; note that "divide" with val==0 and "gamma"
    outside (0.05, 5.0) also fall through to that error, as before.
    """
    ip = ip if in_place else ip.duplicate()
    pr = ip.getProcessor()
    if op == 'add': pr.add(val)
    #Fix: the docstring advertised "subtract" but only 'sub' was accepted; take both
    elif op in ('sub', 'subtract'): pr.subtract(val)
    elif op == 'multiply': pr.multiply(val)
    elif op == 'divide' and val!=0: pr.multiply(1.0/val)
    #'and'/'or' are Python keywords, so the Java methods must be reached via getattr
    elif op == 'and': getattr(pr, "and")(val)
    elif op == 'or': getattr(pr, "or")(val)
    elif op == 'xor': pr.xor(val)
    elif op == 'min': pr.min(val);pr.resetMinAndMax()
    elif op == 'max': pr.max(val);pr.resetMinAndMax()
    elif op == 'gamma' and 0.05 < val < 5.0: pr.gamma(val)
    elif op == 'set': pr.set(val)
    elif op == 'log': pr.log()
    elif op == 'exp': pr.exp()
    elif op == 'sqr': pr.sqr()
    elif op == 'sqrt': pr.sqrt()
    elif op == 'abs': pr.abs();pr.resetMinAndMax()
    else: raise Exception("Invalid operation " + str(op))
    return ip
|
||||
|
||||
def op_fft(ip1, ip2, op, do_inverse = True) :
    """
    Fourier-domain math between two images.

    Images must have same sizes, and multiple of 2 height and width.
    op = "correlate" (complex conjugate multiply), "convolve" (Fourier domain multiply), "deconvolve" (Fourier domain divide)
    """
    op_indexes = {"correlate": 0, "convolve": 1, "deconvolve": 2}
    if op not in op_indexes:
        raise Exception("Invalid operation " + str(op))
    return FFTMath().doMath(ip1, ip2, op_indexes[op], do_inverse)
|
||||
|
||||
def op_rank(ip, op, kernel_radius =1 , dark_outliers = False ,threshold = 50, in_place=True):
|
||||
"""
|
||||
op = "mean", "min", "max", "variance", "median", "close_maxima", "open_maxima", "remove_outliers", "remove_nan", "despeckle"
|
||||
"""
|
||||
if op == "mean": filter_type = RankFilters.MEAN
|
||||
elif op == "min": filter_type = RankFilters.MIN
|
||||
elif op == "max": filter_type = RankFilters.MAX
|
||||
elif op == "variance": filter_type = RankFilters.VARIANCE
|
||||
elif op == "median": filter_type = RankFilters.MEDIAN
|
||||
elif op == "close_maxima": filter_type = RankFilters.CLOSE
|
||||
elif op == "open_maxima": filter_type = RankFilters.OPEN
|
||||
elif op == "remove_outliers": filter_type = RankFilters.OUTLIERS
|
||||
elif op == "remove_nan": filter_type = RankFilters.REMOVE_NAN
|
||||
elif op == "despeckle": filter_type, kernel_radius = RankFilters.MEDIAN, 1
|
||||
else: raise Exception("Invalid operation " + str(op))
|
||||
ip = ip if in_place else ip.duplicate()
|
||||
RankFilters().rank(ip.getProcessor(), kernel_radius, filter_type, RankFilters.DARK_OUTLIERS if dark_outliers else RankFilters.BRIGHT_OUTLIERS ,threshold)
|
||||
return ip
|
||||
|
||||
def op_edm(ip, op="edm", dark_background=False, in_place=True):
|
||||
"""
|
||||
Euclidian distance map & derived operations
|
||||
op ="edm", "watershed","points", "voronoi"
|
||||
"""
|
||||
ip = ip if in_place else ip.duplicate()
|
||||
pr = ip.getProcessor()
|
||||
edm=EDM()
|
||||
Prefs.blackBackground=dark_background
|
||||
if op=="edm":
|
||||
#pr.setPixels(0, edm.makeFloatEDM(pr, 0, False));
|
||||
#pr.resetMinAndMax();
|
||||
if dark_background:
|
||||
pr.invert()
|
||||
edm.toEDM(pr)
|
||||
else:
|
||||
edm.setup(op, ip)
|
||||
edm.run(pr)
|
||||
return ip
|
||||
|
||||
def watershed(ip, dark_background=False, in_place=True):
|
||||
return op_edm(ip, "watershed", dark_background, in_place)
|
||||
|
||||
def ultimate_points(ip, dark_background=False, in_place=True):
|
||||
return op_edm(ip, "points", dark_background, in_place)
|
||||
|
||||
def veronoi(ip, dark_background=False, in_place=True):
    """
    Voronoi partition of a binary image (via EDM).
    NOTE: the function name is a historical misspelling, kept for backward
    compatibility; prefer the 'voronoi' alias below.
    """
    return op_edm(ip, "voronoi", dark_background, in_place)

#Correctly spelled alias
voronoi = veronoi
|
||||
|
||||
def edm(ip, dark_background=False, in_place=True):
|
||||
return op_edm(ip, "edm", dark_background, in_place)
|
||||
|
||||
def op_filter(ip, op, in_place=True):
|
||||
"""
|
||||
This is redundant as just calls processor methods.
|
||||
op ="invert", "smooth", "sharpen", "edge", "add"
|
||||
"""
|
||||
ip = ip if in_place else ip.duplicate()
|
||||
f = Filters()
|
||||
f.setup(op, ip )
|
||||
f.run(ip.getProcessor())
|
||||
return ip
|
||||
|
||||
###################################################################################################
|
||||
#Other operations
|
||||
###################################################################################################
|
||||
def gaussian_blur(ip, sigma_x=3.0, sigma_y=3.0, accuracy = 0.01, in_place=True):
|
||||
ip = ip if in_place else ip.duplicate()
|
||||
GaussianBlur().blurGaussian(ip.getProcessor(), sigma_x, sigma_y, accuracy)
|
||||
return ip
|
||||
|
||||
def find_maxima(ip, tolerance=25, threshold = ImageProcessor.NO_THRESHOLD, output_type=MaximumFinder.IN_TOLERANCE, exclude_on_edges = False, is_edm = False):
|
||||
"""
|
||||
Returns new ImagePlus
|
||||
tolerance: maxima are accepted only if protruding more than this value from the ridge to a higher maximum
|
||||
threshhold: minimum height of a maximum (uncalibrated);
|
||||
output_type = SINGLE_POINTS, IN_TOLERANCE or SEGMENTED. No output image is created for output types POINT_SELECTION, LIST and COUNT.
|
||||
"""
|
||||
byte_processor = MaximumFinder().findMaxima(ip.getProcessor(), tolerance, threshold, output_type, exclude_on_edges, is_edm)
|
||||
return ImagePlus(ip.getTitle() + " maxima", byte_processor)
|
||||
|
||||
|
||||
def get_maxima_points(ip, tolerance=25, exclude_on_edges = False):
|
||||
polygon = MaximumFinder().getMaxima(ip.getProcessor(), tolerance, exclude_on_edges)
|
||||
return (polygon.xpoints, polygon.ypoints)
|
||||
|
||||
def enhance_contrast(ip, equalize_histo = True, saturated_pixels = 0.5, normalize = False, stack_histo = False, in_place=True):
|
||||
ip = ip if in_place else ip.duplicate()
|
||||
ce = ContrastEnhancer()
|
||||
if equalize_histo:
|
||||
ce.equalize(ip.getProcessor());
|
||||
else:
|
||||
ce.stretchHistogram(ip.getProcessor(), saturated_pixels)
|
||||
if normalize:
|
||||
ip.getProcessor().setMinAndMax(0,1.0 if (ip.getProcessor().getBitDepth()==32) else ip.getProcessor().maxValue())
|
||||
return ip
|
||||
|
||||
def shadows(ip, op, in_place=True):
|
||||
"""
|
||||
op ="north","northeast", "east", "southeast","south", "southwest", "west","northwest"
|
||||
"""
|
||||
ip = ip if in_place else ip.duplicate()
|
||||
shadows= Shadows()
|
||||
shadows.setup(op, ip)
|
||||
shadows.run(ip.getProcessor())
|
||||
return ip
|
||||
|
||||
def unsharp_mask(ip, sigma, weight, in_place=True):
    """
    Unsharp-mask sharpening.

    sigma: standard deviation of the Gaussian blur
    weight: mask strength
    Float processor (sharpenFloat operates on float data).
    """
    ip = ip if in_place else ip.duplicate()
    #snapshot taken before sharpening — presumably required by UnsharpMask as the
    #unsharpened source; TODO confirm against the ImageJ API
    ip.getProcessor().snapshot()
    unsharp = UnsharpMask()
    #Fix: the original called the undefined name 'USmask' (NameError at runtime)
    unsharp.setup(" ", ip)
    unsharp.sharpenFloat(ip.getProcessor(), sigma, weight)
    return ip
|
||||
|
||||
def subtract_background(ip, radius = 50, create_background=False, dark_background=False, use_paraboloid =True, do_presmooth = True, correctCorners = True, rgb_brightness=False, in_place=True):
|
||||
ip = ip if in_place else ip.duplicate()
|
||||
if rgb_brightness:
|
||||
BackgroundSubtracter().rollingBallBrightnessBackground(ip.getProcessor(), radius, create_background,not dark_background, use_paraboloid, do_presmooth, correctCorners)
|
||||
else:
|
||||
BackgroundSubtracter().rollingBallBackground(ip.getProcessor(), radius, create_background, not dark_background, use_paraboloid, do_presmooth, correctCorners)
|
||||
return ip
|
||||
|
||||
###################################################################################################
|
||||
#FFT
|
||||
###################################################################################################
|
||||
def image_fft(ip, show = True):
|
||||
WindowManager.setTempCurrentImage(ip)
|
||||
fft = FFT()
|
||||
fft.run("fft")
|
||||
#TODO: how to avoid it to be created?
|
||||
#ret = ImagePlus("FHT of " + ip.getTitle(), WindowManager.getCurrentImage().getProcessor())
|
||||
ret = WindowManager.getCurrentImage()
|
||||
if not show:
|
||||
WindowManager.getCurrentImage().hide()
|
||||
return ret
|
||||
|
||||
|
||||
def image_ffti(ip, show = True):
|
||||
WindowManager.setTempCurrentImage(ip)
|
||||
fft = FFT()
|
||||
fft.run("inverse")
|
||||
#WindowManager.getCurrentImage().hide()
|
||||
#TODO: how to avoid it to be created?
|
||||
#ret = WindowManager.getCurrentImage()
|
||||
#WindowManager.getCurrentImage().hide()
|
||||
#ret = ImagePlus(ip.getTitle() + " ffti", WindowManager.getCurrentImage().getProcessor())
|
||||
ret = WindowManager.getCurrentImage()
|
||||
if not show:
|
||||
WindowManager.getCurrentImage().hide()
|
||||
|
||||
return ret
|
||||
|
||||
def bandpass_filter(ip, small_dia_px, large_dia_px, suppress_stripes = 0, stripes_tolerance_direction = 5.0, autoscale_after_filtering = False, saturate_if_autoscale = False, display_filter = False, in_place=True):
|
||||
"""
|
||||
suppress_stripes = 0 for none, 1 for horizontal, 2 for vertical
|
||||
"""
|
||||
ip = ip if in_place else ip.duplicate()
|
||||
filter= FFTFilter();
|
||||
FFTFilter.filterLargeDia = large_dia_px
|
||||
FFTFilter.filterSmallDia = small_dia_px
|
||||
FFTFilter.choiceIndex = suppress_stripes
|
||||
FFTFilter.toleranceDia = stripes_tolerance_direction
|
||||
FFTFilter.doScalingDia = autoscale_after_filtering
|
||||
FFTFilter.saturateDia = saturate_if_autoscale
|
||||
FFTFilter.displayFilter =display_filter
|
||||
filter.setup(None, ip);
|
||||
filter.run(ip.getProcessor())
|
||||
return ip
|
||||
|
||||
###################################################################################################
|
||||
#Convolution
|
||||
###################################################################################################
|
||||
|
||||
KERNEL_BLUR = [[0.1111, 0.1111, 0.1111], [0.1111, 0.1111, 0.1111], [0.1111, 0.1111, 0.1111]]
|
||||
KERNEL_SHARPEN = [[0.0, -0.75, 0.0], [-0.75, 4.0, -0.75], [0.0, -0.75, 0.0]]
|
||||
KERNEL_SHARPEN_2 = [[-1.0, -1.0, -1.0], [-1.0, 9.0, -1.0], [-1.0, -1.0, -1.0]]
|
||||
KERNEL_LIGHT = [[0.1, 0.1, 0.1], [0.1, 1.0, 0.1],[0.1, 0.1, 0.1]]
|
||||
KERNEL_DARK = [[0.01, 0.01, 0.01],[0.01, 0.5, 0.01],[0.01, 0.01, 0.01]]
|
||||
KERNEL_EDGE_DETECT = [[0.0, -0.75, 0.0], [-0.75, 3.0, -0.75], [0.0, -0.75, 0.0]]
|
||||
KERNEL_EDGE_DETECT_2 = [[-0.5, -0.5, -0.5], [-0.5, 4.0, -0.5], [-0.5, -0.5, -0.5]]
|
||||
KERNEL_DIFFERENTIAL_EDGE_DETECT = [[-1.0, 0.0, 1.0], [0.0, 0.0, 0.0], [1.0, 0.0, -1.0]]
|
||||
KERNEL_PREWITT = [[-2.0, -1.0, 0.0], [-1.0, 0.0, 1.0 ], [0.0, 1.0, 2.0]]
|
||||
KERNEL_SOBEL = [[2.0, 2.0, 0.0], [2.0, 0.0, -2.0 ], [0.0, -2.0, -2.0]]
|
||||
|
||||
|
||||
def convolve(ip, kernel, in_place=True):
|
||||
"""
|
||||
kernel: list of lists
|
||||
"""
|
||||
ip = ip if in_place else ip.duplicate()
|
||||
kernel_width = len(kernel)
|
||||
kernel_height= len(kernel[0])
|
||||
kernel = [item for row in kernel for item in row]
|
||||
#Convolver().convolve(ip.getProcessor(), kernel, kernel_width, kernel_height)
|
||||
ip.getProcessor().convolve(kernel, kernel_width, kernel_height)
|
||||
return ip
|
||||
|
||||
|
||||
###################################################################################################
|
||||
#Shortcut to ImageProcessor methods
|
||||
###################################################################################################
|
||||
def invert(ip, in_place=True):
|
||||
ip = ip if in_place else ip.duplicate()
|
||||
ip.getProcessor().invert()
|
||||
return ip
|
||||
|
||||
def smooth(ip, in_place=True):
|
||||
ip = ip if in_place else ip.duplicate()
|
||||
ip.getProcessor().smooth()
|
||||
return ip
|
||||
|
||||
def sharpen(ip, in_place=True):
|
||||
ip = ip if in_place else ip.duplicate()
|
||||
ip.getProcessor().sharpen()
|
||||
return ip
|
||||
|
||||
def edges(ip, in_place=True): #Sobel
|
||||
ip = ip if in_place else ip.duplicate()
|
||||
ip.getProcessor().findEdges()
|
||||
return ip
|
||||
|
||||
def noise(ip, sigma = 25.0, in_place=True):
|
||||
ip = ip if in_place else ip.duplicate()
|
||||
ip.getProcessor().noise(sigma)
|
||||
return ip
|
||||
|
||||
def remap(ip, min=None, max=None, in_place=True):
|
||||
ip = ip if in_place else ip.duplicate()
|
||||
if min is None or max is None:
|
||||
stats = get_statistics(ip, Measurements.MIN_MAX)
|
||||
if min is None: min = stats.min
|
||||
if max is None: max = stats.max
|
||||
ip.getProcessor().setMinAndMax(min, max)
|
||||
return ip
|
||||
|
||||
def set_lut(ip, r, g, b):
|
||||
"""
|
||||
r,g and b are lists of 256 integers
|
||||
"""
|
||||
r = [x if x<128 else x-256 for x in r]
|
||||
g = [x if x<128 else x-256 for x in g]
|
||||
b = [x if x<128 else x-256 for x in b]
|
||||
ip.setLut(LUT(jarray.array(r,'b'),jarray.array(g,'b'),jarray.array(b,'b')))
|
||||
|
||||
def resize(ip, width, height):
|
||||
"""
|
||||
Returns new ImagePlus
|
||||
"""
|
||||
p = ip.getProcessor().resize(width, height)
|
||||
return ImagePlus(ip.getTitle() + " resized", p)
|
||||
|
||||
def binning(ip, factor):
|
||||
p=ip.getProcessor().bin(factor)
|
||||
return ImagePlus(ip.getTitle() + " resized", p)
|
||||
|
||||
def get_histogram(ip, hist_min = 0, hist_max = 0, hist_bins = 256, roi=None):
|
||||
"""
|
||||
hist_min, hist_max, hist_bins used only for float images (otherwise fixed to 0,255,256)
|
||||
roi is list [x,y,w,h]
|
||||
"""
|
||||
if roi == None: ip.deleteRoi()
|
||||
else: ip.setRoi(roi[0],roi[1],roi[2],roi[3])
|
||||
image_statistics = ip.getStatistics(0, hist_bins, hist_min, hist_max)
|
||||
return image_statistics.getHistogram()
|
||||
|
||||
|
||||
def get_array(ip):
|
||||
return ip.getProcessor().getIntArray()
|
||||
|
||||
def get_line(ip, x1, y1, x2, y2):
|
||||
return ip.getProcessor().getLine(x1, y1, x2, y2)
|
||||
|
||||
def get_pixel_range(ip):
|
||||
return (ip.getProcessor().getMin(), ip.getProcessor().getMax())
|
||||
|
||||
def get_num_channels(ip):
|
||||
return ip.getProcessor().getNChannels()
|
||||
|
||||
def is_binary(ip):
|
||||
return ip.getProcessor().isBinary()
|
||||
|
||||
def get_pixel(ip, x, y):
|
||||
return ip.getProcessor().getPixel(x,y)
|
||||
|
||||
def get_pixel_array(ip, x, y):
|
||||
a = [0]*get_num_channels(ip)
|
||||
return ip.getProcessor().getPixel(x,y,a)
|
||||
|
||||
def get_pixels(ip):
|
||||
return ip.getProcessor().getPixels()
|
||||
|
||||
def get_width(ip):
|
||||
return ip.getProcessor().getWidth()
|
||||
|
||||
def get_height(ip):
|
||||
return ip.getProcessor().getHeight()
|
||||
|
||||
def get_row(ip, y):
|
||||
a = [0]*get_width(ip)
|
||||
array = jarray.array(a,'i')
|
||||
ip.getProcessor().getRow(0, y, array, get_width(ip))
|
||||
return array
|
||||
|
||||
def get_col(ip, x):
|
||||
a = [0]*get_height(ip)
|
||||
array = jarray.array(a,'i')
|
||||
ip.getProcessor().getColumn(x, 0, array, get_height(ip))
|
||||
return array
|
||||
|
||||
def get_statistics(ip, measurements = None):
|
||||
"""
|
||||
Measurements is a mask of flags: https://imagej.nih.gov/ij/developer/api/ij/measure/Measurements.html.
|
||||
Statistics object hold public fields: https://imagej.nih.gov/ij/developer/api/ij/process/ImageStatistics.html
|
||||
"""
|
||||
if measurements is None:
|
||||
return ip.getStatistics()
|
||||
else:
|
||||
return ip.getStatistics(measurements)
|
||||
|
||||
###################################################################################################
|
||||
#Image stack functions
|
||||
###################################################################################################
|
||||
def create_stack(ip_list, keep=True, title = None):
|
||||
stack = Concatenator().concatenate(ip_list, keep)
|
||||
if title is not None:
|
||||
stack.setTitle(title)
|
||||
return stack
|
||||
|
||||
def reslice(stack, start_at = "Top", vertically = True, flip = True, output_pixel_spacing=1.0, avoid_interpolation = True, title = None):
    """Reslice an image stack with ImageJ's Slicer, optionally renaming the result."""
    slicer = Slicer()
    slicer.startAt = start_at
    slicer.rotate = vertically
    slicer.flip = flip
    slicer.nointerpolate = avoid_interpolation
    slicer.outputZSpacing = output_pixel_spacing
    result = slicer.reslice(stack)
    if title is not None:
        result.setTitle(title)
    return result
|
||||
|
||||
|
||||
|
||||
###############################################################################
|
||||
# ImagePlus list operations
|
||||
###############################################################################
|
||||
|
||||
def integrate_ips(ips, as_float=True):
    """
    Integrate (sum) a list of ImagePlus of the same size into a single image.
    """
    total = None
    for index, img in enumerate(ips):
        if index == 0:
            #Accumulator image sized like the first input
            img_type = "float" if as_float else "short"
            total = new_image(img.width, img.height, image_type=img_type, title="sum", fill_color=None)
        op_image(total, img, "add", float_result=as_float, in_place=True)
    return total
|
||||
|
||||
def average_ips (ips, roi=None, as_float=True):
    """
    Average a list of ImagePlus of the same size.

    NOTE(review): the roi parameter is currently unused.
    """
    result = integrate_ips(ips, as_float)
    op_const(result, "divide", float(len(ips)), in_place=True)
    return result
|
||||
144
script/___Lib/jeputils.py
Normal file
144
script/___Lib/jeputils.py
Normal file
@@ -0,0 +1,144 @@
|
||||
###################################################################################################
|
||||
# Facade to JEP: Embedded Python
|
||||
###################################################################################################
|
||||
|
||||
#Matplotlib won't work out of the box because its default backend (Qt) uses signals, which only work in
|
||||
#the main thread. Ideally should find a fix, in order to mark the running thread as the main.
|
||||
#As a workaround, one can use the Tk backend:
|
||||
#
|
||||
#import matplotlib
|
||||
#matplotlib.use('TkAgg')
|
||||
|
||||
|
||||
#In principle just add JEP jar and library to the extensions folder.
|
||||
#
|
||||
#Alternatively on Linux:
|
||||
# Python 2:
|
||||
# - Add <python home>/lib/python3.X/site-packages/jep to LD_LIBRARY_PATH
|
||||
# - Add <python home>/lib/python3.X/site-packages/jep/jep-X.X.X.jar to the class path
|
||||
#
|
||||
#Python3:
|
||||
# - Add JEP library folder to LD_LIBRARY_PATH
|
||||
# - If using OpenJDK, add also python <python home>/lib folder to LD_LIBRARY_PATH
|
||||
# - Set LD_PRELOAD=<python home>/lib/libpython3.5m.so
|
||||
|
||||
|
||||
import sys
|
||||
import os
|
||||
import jep.Jep
|
||||
import jep.NDArray
|
||||
import java.lang.Thread
|
||||
from startup import to_array, get_context
|
||||
|
||||
__jep = {}
|
||||
|
||||
def __get_jep():
    """Return the JEP interpreter bound to the current thread, creating one on demand."""
    thread = java.lang.Thread.currentThread()
    if thread not in __jep:
        init_jep()
    return __jep[thread]
|
||||
|
||||
def __close_jep():
    """Close the JEP interpreter of the current thread, if one was created."""
    thread = java.lang.Thread.currentThread()
    if thread in __jep:
        __jep[thread].close()
|
||||
|
||||
def init_jep():
    """Create a JEP interpreter for the current thread, register it in __jep,
    and redirect its stdout/stderr into string buffers read by __print_stdout.
    """
    #TODO: Should do it but generates errors
    #__close_jep()
    j = jep.Jep(False)
    #Faster, but statements must be complete
    j.setInteractive(False)
    #One interpreter per Java thread
    __jep[java.lang.Thread.currentThread()] = j
    j.eval("import sys")
    #sys.argv is not present in JEP and may be needed for certain modules (as Tkinter)
    j.eval("sys.argv = ['PShell']");
    #Add standard script path to python path
    j.eval("sys.path.append('" + get_context().setup.getScriptPath() + "')")

    #Redirect stdout: define a buffering writer class inside the embedded interpreter.
    #NOTE(review): the leading spaces inside these embedded code strings must form a
    #valid Python class body when concatenated - confirm the indentation is intact.
    j.eval("class JepStdout:\n" +
        " def write(self, str):\n" +
        " self.str += str\n" +
        " def clear(self):\n" +
        " self.str = ''\n" +
        " def flush(self):\n" +
        " pass\n")
    j.eval("sys.stdout=JepStdout()");
    j.eval("sys.stderr=JepStdout()");
    j.eval("sys.stdout.clear()")
    j.eval("sys.stderr.clear()")
|
||||
|
||||
def __print_stdout():
    """Flush the embedded interpreter's captured stdout/stderr buffers to the host console."""
    j=__get_jep()
    #Read the buffers accumulated by the JepStdout redirection set up in init_jep
    output = j.getValue("sys.stdout.str")
    err = j.getValue("sys.stderr.str")
    j.eval("sys.stdout.clear()")
    j.eval("sys.stderr.clear()")
    #Python 2 print statements: this file runs under Jython 2
    if (output is not None) and len(output)>0:
        print output
    if (err is not None) and len(err)>0:
        print >> sys.stderr, err
|
||||
|
||||
def run_jep(script_name, vars = None):
    """Run a Python script in the embedded (JEP) interpreter of the current thread.

    Args:
        script_name(str): name resolved against the script library, or a plain file path.
        vars(dict, optional): variables to set in the interpreter before running.
    """
    #Mutable default argument replaced by None sentinel (same behavior for callers)
    if vars is None:
        vars = {}
    script = get_context().scriptManager.library.resolveFile(script_name)
    if script is None:
        #Not in the library: treat as a plain file path
        script = os.path.abspath(script_name)
    j = __get_jep()
    for name in vars:
        j.set(name, vars[name])
    try:
        j.runScript(script)
    finally:
        #Always echo whatever the embedded interpreter printed
        __print_stdout()
|
||||
|
||||
def eval_jep(line):
    """Evaluate one statement in the embedded interpreter, echoing its output."""
    interpreter = __get_jep()
    try:
        interpreter.eval(line)
    finally:
        __print_stdout()
|
||||
|
||||
def set_jep(var, value):
    """Set a variable in the embedded interpreter of the current thread."""
    interpreter = __get_jep()
    interpreter.set(var, value)
|
||||
|
||||
def get_jep(var):
    """Read a variable from the embedded interpreter of the current thread."""
    interpreter = __get_jep()
    return interpreter.getValue(var)
|
||||
|
||||
def call_jep(module, function, args = None, reload=False):
    """Call a function of a Python module in the embedded (JEP) interpreter.

    Args:
        module(str): module name, or a path (containing "/") resolved against
            the script library.
        function(str): name of the function to invoke.
        args(list, optional): positional arguments passed to the function.
        reload(bool, optional): if True, re-import the module before calling.
    Returns:
        The function's return value.
    """
    #Mutable default argument replaced by None sentinel (same behavior for callers)
    if args is None:
        args = []
    j = __get_jep()
    if "/" in module:
        #Module given as a path: locate it and add its folder to the interpreter path
        script = get_context().scriptManager.library.resolveFile(module)
        if script is None:
            #Consistent with run_jep: fall back to a plain file path
            script = os.path.abspath(module)
        #Split off the folder using whichever separator the resolved path uses
        separator = "\\" if "\\" in script else "/"
        module_path = script[0:script.rfind(separator)]
        module = script[script.rfind(separator)+1:]
        eval_jep("import sys")
        eval_jep("sys.path.append('" + module_path + "')")
    if module.endswith(".py"):
        module = module[0:-3]

    #Alias unique per interpreter instance, to avoid clashing with other imports
    f = module + "_" + function + "_" + str(j.hashCode())
    try:
        if reload:
            eval_jep("import " + module)
            eval_jep("reload(" + module + ")")
        eval_jep("from " + module + " import " + function + " as " + f)
        ret = j.invoke(f, args)
    finally:
        __print_stdout()
    return ret
|
||||
|
||||
#Converts pythonlist or Java array to numpy array
|
||||
def to_npa(data, dimensions = None, type = None):
|
||||
|
||||
data = to_array(data,'d' if type is None else type)
|
||||
return jep.NDArray(data, dimensions)
|
||||
655
script/___Lib/mathutils.py
Normal file
655
script/___Lib/mathutils.py
Normal file
@@ -0,0 +1,655 @@
|
||||
###################################################################################################
|
||||
# Facade to Apache Commons Math
|
||||
###################################################################################################
|
||||
|
||||
import sys
|
||||
import math
|
||||
import operator
|
||||
|
||||
import java.util.List
|
||||
import java.lang.reflect.Array
|
||||
import java.lang.Class as Class
|
||||
import jarray
|
||||
import org.python.core.PyArray as PyArray
|
||||
import ch.psi.utils.Convert as Convert
|
||||
|
||||
import org.apache.commons.math3.util.FastMath as FastMath
|
||||
import org.apache.commons.math3.util.Pair as Pair
|
||||
import org.apache.commons.math3.complex.Complex as Complex
|
||||
|
||||
import org.apache.commons.math3.analysis.DifferentiableUnivariateFunction as DifferentiableUnivariateFunction
|
||||
import org.apache.commons.math3.analysis.function.Gaussian as Gaussian
|
||||
import org.apache.commons.math3.analysis.function.HarmonicOscillator as HarmonicOscillator
|
||||
import org.apache.commons.math3.analysis.differentiation.DerivativeStructure as DerivativeStructure
|
||||
import org.apache.commons.math3.analysis.differentiation.FiniteDifferencesDifferentiator as FiniteDifferencesDifferentiator
|
||||
import org.apache.commons.math3.analysis.integration.SimpsonIntegrator as SimpsonIntegrator
|
||||
import org.apache.commons.math3.analysis.integration.TrapezoidIntegrator as TrapezoidIntegrator
|
||||
import org.apache.commons.math3.analysis.integration.RombergIntegrator as RombergIntegrator
|
||||
import org.apache.commons.math3.analysis.integration.MidPointIntegrator as MidPointIntegrator
|
||||
import org.apache.commons.math3.analysis.polynomials.PolynomialFunction as PolynomialFunction
|
||||
import org.apache.commons.math3.analysis.polynomials.PolynomialFunctionLagrangeForm as PolynomialFunctionLagrangeForm
|
||||
import org.apache.commons.math3.analysis.solvers.LaguerreSolver as LaguerreSolver
|
||||
import org.apache.commons.math3.analysis.UnivariateFunction as UnivariateFunction
|
||||
import org.apache.commons.math3.analysis.interpolation.SplineInterpolator as SplineInterpolator
|
||||
import org.apache.commons.math3.analysis.interpolation.LinearInterpolator as LinearInterpolator
|
||||
import org.apache.commons.math3.analysis.interpolation.NevilleInterpolator as NevilleInterpolator
|
||||
import org.apache.commons.math3.analysis.interpolation.LoessInterpolator as LoessInterpolator
|
||||
import org.apache.commons.math3.analysis.interpolation.DividedDifferenceInterpolator as DividedDifferenceInterpolator
|
||||
import org.apache.commons.math3.analysis.interpolation.AkimaSplineInterpolator as AkimaSplineInterpolator
|
||||
|
||||
import org.apache.commons.math3.fitting.GaussianCurveFitter as GaussianCurveFitter
|
||||
import org.apache.commons.math3.fitting.PolynomialCurveFitter as PolynomialCurveFitter
|
||||
import org.apache.commons.math3.fitting.HarmonicCurveFitter as HarmonicCurveFitter
|
||||
import org.apache.commons.math3.fitting.WeightedObservedPoint as WeightedObservedPoint
|
||||
import org.apache.commons.math3.fitting.leastsquares.MultivariateJacobianFunction as MultivariateJacobianFunction
|
||||
import org.apache.commons.math3.fitting.leastsquares.LeastSquaresBuilder as LeastSquaresBuilder
|
||||
import org.apache.commons.math3.fitting.leastsquares.LevenbergMarquardtOptimizer as LevenbergMarquardtOptimizer
|
||||
import org.apache.commons.math3.fitting.leastsquares.GaussNewtonOptimizer as GaussNewtonOptimizer
|
||||
|
||||
import org.apache.commons.math3.stat.regression.SimpleRegression as SimpleRegression
|
||||
|
||||
import org.apache.commons.math3.transform.FastFourierTransformer as FastFourierTransformer
|
||||
import org.apache.commons.math3.transform.DftNormalization as DftNormalization
|
||||
import org.apache.commons.math3.transform.TransformType as TransformType
|
||||
|
||||
import org.apache.commons.math3.linear.ArrayRealVector as ArrayRealVector
|
||||
import org.apache.commons.math3.linear.Array2DRowRealMatrix as Array2DRowRealMatrix
|
||||
import org.apache.commons.math3.linear.MatrixUtils as MatrixUtils
|
||||
|
||||
|
||||
|
||||
###################################################################################################
|
||||
#Derivative and interpolation
|
||||
###################################################################################################
|
||||
|
||||
def get_values(f, xdata):
    """Return the list of values of a function over a domain.

    Args:
        f(UnivariateFunction): function
        xdata(float array or list): Domain values
    Returns:
        List of doubles
    """
    return [f.value(x) for x in xdata]
|
||||
|
||||
def interpolate(data, xdata = None, interpolation_type = "linear"):
    """Interpolate data array or list to a UnivariateFunction.

    Args:
        data(float array or list): The values to interpolate
        xdata(float array or list, optional): Domain values
        interpolation_type(str , optional): "linear", "cubic", "akima", "neville", "loess", "newton"
    Returns:
        UnivariateDifferentiableFunction object
    """
    if xdata is None:
        from startup import frange
        xdata = frange(0, len(data), 1.0)
    else:
        #Interpolators require the abscissa to be ordered
        pairs = sorted(zip(xdata, data), key=operator.itemgetter(0))
        xdata, data = zip(*pairs)
    if len(data) != len(xdata) or len(data) < 2:
        raise Exception("Dimension mismatch")

    #Dispatch table instead of an if/elif chain
    interpolators = {
        "cubic": SplineInterpolator,
        "linear": LinearInterpolator,
        "akima": AkimaSplineInterpolator,
        "neville": NevilleInterpolator,
        "loess": LoessInterpolator,
        "newton": DividedDifferenceInterpolator,
    }
    if interpolation_type not in interpolators:
        raise Exception("Invalid interpolation type")
    interp = interpolators[interpolation_type]()
    from startup import to_array
    return interp.interpolate(to_array(xdata, 'd'), to_array(data, 'd'))
|
||||
|
||||
def deriv(f, xdata = None, interpolation_type = "linear"):
    """Calculate derivative of UnivariateFunction, array or list.

    Args:
        f(UnivariateFunction or array): The function object. If array it is interpolated.
        xdata(float array or list, optional): Domain values to process.
        interpolation_type(str , optional): "linear", "cubic", "akima", "neville", "loess", "newton"
    Returns:
        List with the derivative values for xdata (or a derivative function
        object when f is differentiable and xdata is None)
    """
    #Plain data: interpolate it first so it can be differentiated
    if not isinstance(f,UnivariateFunction):
        if xdata is None:
            from startup import frange
            xdata = frange(0, len(f), 1.0)
        f = interpolate(f, xdata, interpolation_type)
    if xdata is None:
        #No sampling points: can only return the symbolic derivative function
        if isinstance(f,DifferentiableUnivariateFunction):
            return f.derivative()
        raise Exception("Domain range not defined")
    d = []
    for x in xdata:
        #DerivativeStructure arguments: presumably (parameters, order, index, value)
        #- confirm against the Commons Math API documentation
        xds = DerivativeStructure(1, 2, 0, x)
        yds = f.value(xds)
        #First partial derivative evaluated at x
        d.append( yds.getPartialDerivative(1))
    return d
|
||||
|
||||
def integrate(f, range = None, xdata = None, interpolation_type = "linear", integrator_type = "simpson"):
    """Integrate UnivariateFunction, array or list in an interval.

    Args:
        f(UnivariateFunction or array): The function object. If array it is interpolated.
        range(list, optional): integration range ([min, max]).
        xdata(float array or list, optional): disregarded if f is UnivariateFunction.
        interpolation_type(str , optional): "linear", "cubic", "akima", "neville", "loess", "newton"
        integrator_type(str , optional): "simpson", "trapezoid", "romberg" or "midpoint"
    Returns:
        Integrated value (Float)
    """
    if not isinstance(f, UnivariateFunction):
        from startup import frange
        if xdata is None:
            xdata = frange(0, len(f), 1.0)
        if range is None:
            range = xdata
        f = interpolate(f, xdata, interpolation_type)
    if range is None:
        raise Exception("Domain range not defined")
    if integrator_type == "simpson":
        integrator = SimpsonIntegrator()
    elif integrator_type == "trapezoid":
        integrator = TrapezoidIntegrator()
    elif integrator_type == "romberg":
        integrator = RombergIntegrator()
    elif integrator_type == "midpoint":
        integrator = MidPointIntegrator()
    else:
        #BUG FIX: this raise was not inside an else branch, so the function
        #raised "Invalid integrator type" even for valid integrator types
        raise Exception("Invalid integrator type")
    #Integrate over the extremes of the given range
    lower = min(range)
    upper = max(range)
    return integrator.integrate(MAX_EVALUATIONS, f, lower, upper)
|
||||
|
||||
def trapz(y, xdata=None):
    """Integrate an array or list using the composite trapezoidal rule.

    Args:
        y(array or list)
        xdata(float array or list, optional)
    """
    return integrate(y, xdata=xdata, integrator_type="trapezoid")
|
||||
|
||||
###################################################################################################
|
||||
#Fitting and peak search
|
||||
###################################################################################################
|
||||
|
||||
#Largest representable float, used as an open-ended default range limit
try:
    MAX_FLOAT = sys.float_info.max
except: # Python 2.5
    MAX_FLOAT = 1.7976931348623157e+308

#Iteration/evaluation caps passed to the Commons Math fitters and integrators
MAX_ITERATIONS = 1000
MAX_EVALUATIONS = 1000000
|
||||
|
||||
def calculate_peaks(function, start_value, end_value = MAX_FLOAT, positive=True):
    """Calculate peaks of a DifferentiableUnivariateFunction in a given range by finding the roots of the derivative

    Args:
        function(DifferentiableUnivariateFunction): The function object.
        start_value(float): start of range
        end_value(float, optional): end of range
        positive (boolean, optional): True for searching positive peaks, False for negative.
    Returns:
        List of peaks in the interval
    """
    derivative = function.derivative()
    derivative2 = derivative.derivative()
    ret = []
    solver = LaguerreSolver()
    #FIX: the loop variable was named 'complex', shadowing the builtin type
    for root in solver.solveAllComplex(derivative.coefficients, start_value):
        r = root.real
        if start_value < r < end_value:
            #Second-derivative sign test: a maximum where negative, a minimum where positive
            if (positive and (derivative2.value(r) < 0)) or ( (not positive) and (derivative2.value(r) > 0)):
                ret.append(r)
    return ret
|
||||
|
||||
|
||||
def estimate_peak_indexes(data, xdata = None, threshold = None, min_peak_distance = None, positive = True):
    """Estimation of peaks in an array by ordering local maxima according to given criteria.

    Args:
        data(float array or list)
        xdata(float array or list, optional): if not None must have the same length as data.
        threshold(float, optional): if specified filter peaks below this value
        min_peak_distance(float, optional): if specified defines minimum distance between two peaks.
            if xdata == None, it represents index counts, otherwise in xdata units.
        positive (boolean, optional): True for searching positive peaks, False for negative.
    Returns:
        List of peaks indexes.
    """
    peaks = []
    #Candidates ordered by value: best candidates first
    order = sorted(range(len(data)), key=lambda i: data[i])
    if positive:
        order = reversed(order)
    last_index = len(data) - 1
    for index in order:
        value = data[index]
        at_start = (index == 0)
        at_end = (index == last_index)
        before = float('NaN') if at_start else data[index - 1]
        after = float('NaN') if at_end else data[index + 1]
        #Once a value crosses the threshold all remaining candidates are worse: stop
        if threshold is not None:
            if (positive and value < threshold) or ((not positive) and value > threshold):
                break
        #Local extremum test (strict on the left, loose on the right)
        is_peak = ((positive and (at_start or value > before) and (at_end or value >= after)) or
                   ((not positive) and (at_start or value < before) and (at_end or value <= after)))
        if not is_peak:
            continue
        if min_peak_distance is not None:
            #Reject candidates too close to an already accepted (better) peak
            too_close = False
            for peak in peaks:
                if xdata is None:
                    distance = abs(peak - index)
                else:
                    distance = abs(xdata[peak] - xdata[index])
                if distance < min_peak_distance:
                    too_close = True
                    break
            if too_close:
                continue
        peaks.append(index)
    return peaks
|
||||
|
||||
def _assert_valid_for_fit(y,x):
|
||||
if len(y)<2 or ((x is not None) and (len(x)>len(y))):
|
||||
raise Exception("Invalid data for fit")
|
||||
|
||||
def fit_gaussians(y, x, peak_indexes):
    """Fits data on multiple gaussians on the given peak indexes.

    Args:
        x(float array or list)
        y(float array or list)
        peak_indexes(list of int)
    Returns:
        List of tuples of gaussian parameters: (normalization, mean, sigma),
        or None entries where a fit failed.
    """
    _assert_valid_for_fit(y,x)
    ret = []

    minimum = min(y)
    for peak in peak_indexes:
        #Copy data
        data = y[:]
        #Remove data from other peaks: flatten everything beyond the midpoint
        #between this peak and each neighbor down to the global minimum
        for p in peak_indexes:
            limit = int(round((p+peak)/2))
            if (p > peak):
                data[limit : len(y)] =[minimum] * (len(y)-limit)
            elif (p < peak):
                data[0:limit] = [minimum] *limit
        #Build fit point list
        values = create_fit_point_list(data, x)
        maximum = max(data)
        #Start point: half the peak height, the peak position, unit sigma
        gaussian_fitter = GaussianCurveFitter.create().withStartPoint([(maximum-minimum)/2,x[peak],1.0]).withMaxIterations(MAX_ITERATIONS)
        #Fit return parameters: (normalization, mean, sigma)
        try:
            ret.append(gaussian_fitter.fit(values).tolist())
        except:
            ret.append(None) #Fitting error
    return ret
|
||||
|
||||
|
||||
def create_fit_point_list(y, x, weights = None):
    """Build a list of WeightedObservedPoint ordered by x (the gauss fitter requires ordered abscissa)."""
    points = []
    for i in sorted(range(len(x)), key=lambda v: x[v]):
        w = 1.0 if weights is None else weights[i]
        points.append(WeightedObservedPoint(w, x[i], y[i]))
    return points
|
||||
|
||||
def fit_polynomial(y, x, order, start_point = None, weights = None):
    """Fits data into a polynomial.

    Args:
        x(float array or list): observed points x
        y(float array or list): observed points y
        order(int): if start_point is provided order parameter is disregarded - set to len(start_point)-1.
        start_point(optional tuple of float): initial parameters (a0, a1, a2, ...)
        weights(optional float array or list): weight for each observed point
    Returns:
        Tuples of polynomial parameters: (a0, a1, a2, ...)
    """
    _assert_valid_for_fit(y, x)
    points = create_fit_point_list(y, x, weights)
    if start_point is None:
        fitter = PolynomialCurveFitter.create(order).withMaxIterations(MAX_ITERATIONS)
    else:
        #The degree is taken from the start point; the order argument is disregarded
        fitter = PolynomialCurveFitter.create(0).withStartPoint(start_point).withMaxIterations(MAX_ITERATIONS)
    try:
        return fitter.fit(points).tolist()
    except:
        raise Exception("Fitting failure")
|
||||
|
||||
def fit_gaussian(y, x, start_point = None, weights = None):
    """Fits data into a gaussian.

    Args:
        x(float array or list): observed points x
        y(float array or list): observed points y
        start_point(optional tuple of float): initial parameters (normalization, mean, sigma)
            If None, use a custom initial estimation.
            Set to "simple" for a rough peak-based estimate with unit sigma.
            Set to "default" to force the Commons Math default (GaussianCurveFitter.ParameterGuesser).
        weights(optional float array or list): weight for each observed point
    Returns:
        Tuples of gaussian parameters: (normalization, mean, sigma)
    """
    _assert_valid_for_fit(y,x)
    fit_point_list = create_fit_point_list(y, x, weights)

    #If start point not provided, start on peak
    if start_point is None:
        maximum, minimum = max(y), min(y)
        norm = maximum - minimum
        mean = x[y.index(maximum)]
        #Sigma estimate from the area: for a normalised gaussian area = norm*sigma*sqrt(2*pi)
        sigma = trapz([v-minimum for v in y], x) / (norm*math.sqrt(2*math.pi))
        start_point = (norm, mean, sigma)
    elif start_point == "simple":
        #Rough estimate: half the peak height, the peak position, unit sigma
        start_point = [(max(y)-min(y))/2, x[y.index(max(y))], 1.0]
    elif start_point == "default":
        start_point = GaussianCurveFitter.ParameterGuesser(fit_point_list).guess().tolist()
    gaussian_fitter = GaussianCurveFitter.create().withStartPoint(start_point).withMaxIterations(MAX_ITERATIONS)
    try:
        return gaussian_fitter.fit(fit_point_list).tolist() # (normalization, mean, sigma)
    except:
        raise Exception("Fitting failure")
|
||||
|
||||
def fit_harmonic(y, x, start_point = None, weights = None):
    """Fits data into an harmonic.

    Args:
        x(float array or list): observed points x
        y(float array or list): observed points y
        start_point(optional tuple of float): initial parameters (amplitude, angular_frequency, phase)
        weights(optional float array or list): weight for each observed point
    Returns:
        Tuples of harmonic parameters: (amplitude, angular_frequency, phase)
    """
    _assert_valid_for_fit(y, x)
    points = create_fit_point_list(y, x, weights)
    fitter = HarmonicCurveFitter.create()
    if start_point is not None:
        fitter = fitter.withStartPoint(start_point)
    fitter = fitter.withMaxIterations(MAX_ITERATIONS)
    try:
        return fitter.fit(points).tolist() # (amplitude, angular_frequency, phase)
    except:
        raise Exception("Fitting failure")
|
||||
|
||||
|
||||
def fit_gaussian_offset(y, x, start_point = None, weights = None):
    """Fits data into a gaussian with offset (constant background).
    f(x) = a + b * exp(-(pow((x - c), 2) / (2 * pow(d, 2))))

    Args:
        x(float array or list): observed points x
        y(float array or list): observed points y
        start_point(optional tuple of float): initial parameters (offset, normalization, mean, sigma)
        weights(optional float array or list): weight for each observed point
    Returns:
        Tuples of gaussian parameters: (offset, normalization, mean, sigma)
    """

    # For normalised gauss curve sigma=1/(amp*sqrt(2*pi))
    if start_point is None:
        off = min(y) # good enough starting point for offset
        com = x[y.index(max(y))]
        amp = max(y) - off
        sigma = trapz([v-off for v in y], x) / (amp*math.sqrt(2*math.pi))
        start_point = [off, amp, com , sigma]

    #Model value and analytic Jacobian for the least-squares optimizer
    class Model(MultivariateJacobianFunction):
        def value(self, variables):
            value = ArrayRealVector(len(x))
            jacobian = Array2DRowRealMatrix(len(x), 4)
            for i in range(len(x)):
                (a,b,c,d) = (variables.getEntry(0), variables.getEntry(1), variables.getEntry(2), variables.getEntry(3))
                #Gaussian factor at x[i]
                v = math.exp(-(math.pow((x[i] - c), 2) / (2 * math.pow(d, 2))))
                model = a + b * v
                value.setEntry(i, model)
                jacobian.setEntry(i, 0, 1) # derivative with respect to p0 = a
                jacobian.setEntry(i, 1, v) # derivative with respect to p1 = b
                v2 = b*v*((x[i] - c)/math.pow(d, 2))
                jacobian.setEntry(i, 2, v2) # derivative with respect to p2 = c
                jacobian.setEntry(i, 3, v2*(x[i] - c)/d ) # derivative with respect to p3 = d
            return Pair(value, jacobian)

    model = Model()
    target = [v for v in y] #the target is to have all points at the positions
    (parameters, residuals, rms, evals, iters) = optimize_least_squares(model, target, start_point, weights)
    return parameters
|
||||
|
||||
|
||||
def fit_gaussian_linear(y, x, start_point = None, weights = None):
    """Fits data into a gaussian with linear background.
    f(x) = a * x + b + c * exp(-(pow((x - d), 2) / (2 * pow(e, 2))))

    Args:
        x(float array or list): observed points x
        y(float array or list): observed points y
        start_point(optional tuple of float): initial parameters (a, b, normalization, mean, sigma)
        weights(optional float array or list): weight for each observed point
    Returns:
        Tuples of gaussian parameters: (a, b, normalization, mean, sigma)
    """

    # For normalised gauss curve sigma=1/(amp*sqrt(2*pi))
    if start_point is None:
        off = min(y) # good enough starting point for offset
        com = x[y.index(max(y))]
        amp = max(y) - off
        sigma = trapz([v-off for v in y], x) / (amp*math.sqrt(2*math.pi))
        #Initial slope 0: start from the pure gaussian-with-offset estimate
        start_point = [0, off, amp, com, sigma]

    #Model value and analytic Jacobian for the least-squares optimizer
    class Model(MultivariateJacobianFunction):
        def value(self, variables):
            value = ArrayRealVector(len(x))
            jacobian = Array2DRowRealMatrix(len(x), 5)
            for i in range(len(x)):
                (a,b,c,d,e) = (variables.getEntry(0), variables.getEntry(1), variables.getEntry(2), variables.getEntry(3), variables.getEntry(4))
                #Gaussian factor at x[i]
                v = math.exp(-(math.pow((x[i] - d), 2) / (2 * math.pow(e, 2))))
                model = a*x[i] + b + c * v
                value.setEntry(i, model)
                jacobian.setEntry(i, 0, x[i]) # derivative with respect to p0 = a
                jacobian.setEntry(i, 1, 1) # derivative with respect to p1 = b
                jacobian.setEntry(i, 2, v) # derivative with respect to p2 = c
                v2 = c*v*((x[i] - d)/math.pow(e, 2))
                jacobian.setEntry(i, 3, v2) # derivative with respect to p3 = d
                jacobian.setEntry(i, 4, v2*(x[i] - d)/e ) # derivative with respect to p4 = e
            return Pair(value, jacobian)

    model = Model()
    target = [v for v in y] #the target is to have all points at the positions
    (parameters, residuals, rms, evals, iters) = optimize_least_squares(model, target, start_point, weights)
    return parameters
|
||||
|
||||
def fit_gaussian_exp_bkg(y, x, start_point = None, weights = None):
    """Fits data into a gaussian with exponential background.
    f(x) = a * math.exp(-(x/b)) + c * exp(-(pow((x - d), 2) / (2 * pow(e, 2))))

    Args:
        x(float array or list): observed points x
        y(float array or list): observed points y
        start_point(optional tuple of float): initial parameters (a, b, normalization, mean, sigma)
        weights(optional float array or list): weight for each observed point
    Returns:
        Tuples of gaussian parameters: (a, b, normalization, mean, sigma)
    """

    # For normalised gauss curve sigma=1/(amp*sqrt(2*pi))
    if start_point is None:
        off = min(y) # good enough starting point for offset
        #NOTE(review): the mean is seeded with the middle sample, unlike the
        #other fitters which use the peak position - confirm this is intended
        com = x[len(x)/2]
        #com = 11.9
        amp = max(y) - off
        sigma = trapz([v-off for v in y], x) / (amp*math.sqrt(2*math.pi))
        start_point = [1, 1, amp, com, sigma]

    #Model value and analytic Jacobian for the least-squares optimizer
    class Model(MultivariateJacobianFunction):
        def value(self, variables):
            value = ArrayRealVector(len(x))
            jacobian = Array2DRowRealMatrix(len(x), 5)
            for i in range(len(x)):
                (a,b,c,d,e) = (variables.getEntry(0), variables.getEntry(1), variables.getEntry(2), variables.getEntry(3), variables.getEntry(4))
                #Gaussian factor and exponential background at x[i]
                v = math.exp(-(math.pow((x[i] - d), 2) / (2 * math.pow(e, 2))))
                bkg=math.exp(-(x[i]/b))
                model = a*bkg + c * v
                value.setEntry(i, model)
                jacobian.setEntry(i, 0, bkg) # derivative with respect to p0 = a
                jacobian.setEntry(i, 1, a*x[i]*bkg/math.pow(b, 2)) # derivative with respect to p1 = b
                jacobian.setEntry(i, 2, v) # derivative with respect to p2 = c
                v2 = c*v*((x[i] - d)/math.pow(e, 2))
                jacobian.setEntry(i, 3, v2) # derivative with respect to p3 = d
                jacobian.setEntry(i, 4, v2*(x[i] - d)/e ) # derivative with respect to p4 = e
            return Pair(value, jacobian)

    model = Model()
    target = [v for v in y] #the target is to have all points at the positions
    (parameters, residuals, rms, evals, iters) = optimize_least_squares(model, target, start_point, weights)
    return parameters
|
||||
|
||||
|
||||
###################################################################################################
|
||||
#Least squares problem
|
||||
###################################################################################################
|
||||
|
||||
def optimize_least_squares(model, target, initial, weights):
    """Fits a parametric model to a set of observed values by minimizing a cost function.

    Args:
        model(MultivariateJacobianFunction): model providing values and Jacobian
        target(float array or list): observed data
        initial(optional tuple of float): initial guess
        weights(optional float array or list): weight for each observed point
    Returns:
        Tuple (parameters, residuals, rms, evaluations, iterations)
    """
    if isinstance(weights, (tuple, list)):
        #A plain sequence becomes a diagonal weight matrix
        weights = MatrixUtils.createRealDiagonalMatrix(weights)
    builder = LeastSquaresBuilder()
    builder.start(initial).model(model).target(target)
    builder.lazyEvaluation(False)
    builder.maxEvaluations(MAX_EVALUATIONS).maxIterations(MAX_ITERATIONS)
    builder.weight(weights)
    optimum = LevenbergMarquardtOptimizer().optimize(builder.build())

    parameters = optimum.getPoint().toArray().tolist()
    residuals = optimum.getResiduals().toArray().tolist()
    return (parameters, residuals, optimum.getRMS(), optimum.getEvaluations(), optimum.getIterations())
|
||||
|
||||
|
||||
###################################################################################################
|
||||
#FFT
|
||||
###################################################################################################
|
||||
|
||||
def is_power(num, base):
    """Checks whether num is an exact integer power of base.

    Args:
        num(int): value to test
        base(int): base of the power
    Returns:
        bool: True if base ** k == num for some non-negative integer k
    """
    if base <= 1:
        # For base 1 (or lower) only 1 is reachable as a power
        return num == 1
    if num < 1:
        # BUG FIX: math.log is undefined for num <= 0 (previously raised
        # ValueError); no power of a base > 1 is smaller than 1.
        return False
    # Round the real-valued logarithm to the nearest integer exponent,
    # then verify exactly with integer arithmetic
    power = int(math.log(num, base) + 0.5)
    return base ** power == num
|
||||
|
||||
def pad_to_power_of_two(data):
    """Pads a list with zero elements up to the next power-of-two length.

    Args:
        data(list of float or complex): input vector
    Returns:
        list: data itself if its length is already a power of two (or empty),
        otherwise a new list extended with zeros (complex zeros for complex input)
    """
    n = len(data)
    # n & (n - 1) == 0  <=>  n is a power of two (for n > 0);
    # empty input is returned unchanged instead of raising IndexError
    if n == 0 or n & (n - 1) == 0:
        return data
    # Number of elements missing to reach the next power of two
    pad = (1 << n.bit_length()) - n
    # BUG FIX: the padding element must be a one-element list so that list
    # repetition (elem * pad) produces 'pad' zeros; the previous code used a
    # bare complex scalar, so padding complex vectors raised TypeError.
    elem = [complex(0, 0)] if type(data[0]) is complex else [0.0]
    return data + elem * pad
|
||||
|
||||
def get_real(values):
    """Extracts the real component of each element of a complex vector.

    Args:
        values: List of complex.
    Returns:
        List of float
    """
    return [z.real for z in values]
|
||||
|
||||
def get_imag(values):
    """Extracts the imaginary component of each element of a complex vector.

    Args:
        values: List of complex.
    Returns:
        List of float
    """
    return [z.imag for z in values]
|
||||
|
||||
def get_modulus(values):
    """Computes the modulus (absolute value) of each element of a complex vector.

    Args:
        values: List of complex.
    Returns:
        List of float
    """
    return [math.hypot(z.imag, z.real) for z in values]
|
||||
|
||||
def get_phase(values):
    """Returns the phase (argument) of each element of a complex numbers vector.

    Args:
        values: List of complex.
    Returns:
        List of float: phase in radians, in the range (-pi, pi]
    """
    # BUG FIX: the previous atan(imag/real) raised ZeroDivisionError for
    # purely imaginary values and returned the wrong quadrant for negative
    # real parts; atan2 covers the full circle.
    return [math.atan2(c.imag, c.real) for c in values]
|
||||
|
||||
def fft(f):
    """Calculates the Fast Fourier Transform of a vector, padding to the next power of 2 elements.

    Args:
        f: List of float or complex
    Returns:
        List of complex
    """
    padded = pad_to_power_of_two(f)
    # Python complex values must be wrapped into Apache Commons Math Complex objects
    if type(padded[0]) is complex:
        padded = [Complex(z.real, z.imag) for z in padded]
    transformer = FastFourierTransformer(DftNormalization.STANDARD)
    spectrum = transformer.transform(padded, TransformType.FORWARD)
    return [complex(z.getReal(), z.getImaginary()) for z in spectrum]
|
||||
|
||||
def ffti(f):
    """Calculates the Inverse Fast Fourier Transform of a vector, padding to the next power of 2 elements.

    Args:
        f: List of float or complex
    Returns:
        List of complex
    """
    padded = pad_to_power_of_two(f)
    # Python complex values must be wrapped into Apache Commons Math Complex objects
    if type(padded[0]) is complex:
        padded = [Complex(z.real, z.imag) for z in padded]
    transformer = FastFourierTransformer(DftNormalization.STANDARD)
    signal = transformer.transform(padded, TransformType.INVERSE)
    return [complex(z.getReal(), z.getImaginary()) for z in signal]
|
||||
119
script/___Lib/plotutils.py
Normal file
119
script/___Lib/plotutils.py
Normal file
@@ -0,0 +1,119 @@
|
||||
###################################################################################################
|
||||
# Plot utilities
|
||||
###################################################################################################
|
||||
|
||||
import ch.psi.pshell.plot.LinePlotSeries as LinePlotSeries
|
||||
import ch.psi.pshell.plot.LinePlotErrorSeries as LinePlotErrorSeries
|
||||
import math
|
||||
from startup import frange, to_array
|
||||
|
||||
def plot_function(plot, function, name, range, show_points = True, show_lines = True, color = None):
    """Plots a univariate function on a line plot.

    Args:
        plot(LinePlot)
        function(UnivariateFunction): Gaussian, PolynomialFunction, HarmonicOscillator...
        name(str): name of the series
        range(list or array of floats): x values to plot
    Returns:
        The created plot series
    """
    # Error-style plots require the error series type
    series = LinePlotErrorSeries(name, color) if plot.style.isError() else LinePlotSeries(name, color)
    plot.addSeries(series)
    series.setPointsVisible(show_points)
    series.setLinesVisible(show_lines)
    for x in range:
        series.appendData(x, function.value(x))
    return series
|
||||
|
||||
def plot_data(plot, data, name, xdata = None, error = None, show_points = True, show_lines = True, color = None):
    """Plots a subscriptable object on a line plot.

    Args:
        plot(LinePlot)
        data(subscriptable): Y data
        name(str): name of the series
        xdata(subscriptable): X data (defaults to the indexes 0..len(data)-1)
        error(subscriptable): Error data (only for error plots)
    Returns:
        The created plot series
    """
    is_error_plot = plot.style.isError()
    series = LinePlotErrorSeries(name, color) if is_error_plot else LinePlotSeries(name, color)
    plot.addSeries(series)
    series.setPointsVisible(show_points)
    series.setLinesVisible(show_lines)
    # Convert to double arrays as expected by the Java series
    xs = to_array(range(len(data)) if xdata is None else xdata, 'd')
    ys = to_array(data, 'd')
    if is_error_plot:
        series.setData(xs, ys, to_array(error, 'd'))
    else:
        series.setData(xs, ys)
    return series
|
||||
|
||||
def plot_point(plot, x, y, size = 3, color = None, name = "Point"):
    """Draws a single point as a one-element series and returns the series."""
    series = LinePlotSeries(name, color)
    plot.addSeries(series)
    series.setPointSize(size)
    series.appendData(x, y)
    return series
|
||||
|
||||
def plot_line(plot, x1, y1, x2, y2, width = 1, color = None, name = "Line"):
    """Draws a straight line segment from (x1, y1) to (x2, y2) and returns the series."""
    series = LinePlotSeries(name, color)
    plot.addSeries(series)
    series.setLineWidth(width)
    series.setPointsVisible(False)
    for px, py in ((x1, y1), (x2, y2)):
        series.appendData(px, py)
    return series
|
||||
|
||||
def plot_cross(plot, x, y, size = 1.0, width = 1, color = None, name = "Cross"):
    """Draws a cross (two perpendicular segments) centered on (x, y) and returns the series."""
    half = float(size) / 2
    series = LinePlotSeries(name, color)
    plot.addSeries(series)
    series.setLineWidth(width)
    series.setPointsVisible(False)
    nan = float('nan')
    # NaN separators break the polyline between the two arms of the cross
    series.appendData(nan, nan)
    series.appendData(x - half, y)
    series.appendData(x + half, y)
    series.appendData(nan, nan)
    series.appendData(x, y - half)
    series.appendData(x, y + half)
    return series
|
||||
|
||||
def plot_rectangle(plot, x1, y1, x2, y2, width = 1, color = None, name = "Rectangle"):
    """Draws the outline of the axis-aligned rectangle with corners (x1, y1) and (x2, y2)."""
    series = LinePlotSeries(name, color)
    plot.addSeries(series)
    series.setLineWidth(width)
    series.setPointsVisible(False)
    # Walk the four corners and close the loop back at the starting corner
    for px, py in ((x1, y1), (x1, y2), (x2, y2), (x2, y1), (x1, y1)):
        series.appendData(px, py)
    return series
|
||||
|
||||
def plot_circle(plot, cx, cy, radius, width = 1, color = None, name = "Circle"):
    """Approximates a circle of the given radius centered on (cx, cy) with ~200 segments."""
    series = LinePlotSeries(name, color)
    plot.addSeries(series)
    series.setLineWidth(width)
    series.setPointsVisible(False)
    step = float(radius) / 100.0
    epsilon = 1e-12  # keeps the sqrt argument non-negative at the extremes
    # Upper half: sweep x from right to left
    for px in frange(cx + radius - epsilon, cx - radius + epsilon, -step):
        series.appendData(px, math.sqrt(math.pow(radius, 2) - math.pow(px - cx, 2)) + cy)
    # Lower half: sweep x back from left to right
    for px in frange(cx - radius + epsilon, cx + radius - epsilon, step):
        series.appendData(px, -math.sqrt(math.pow(radius, 2) - math.pow(px - cx, 2)) + cy)
    # Close the outline by repeating the first sample
    if series.getCount() > 0:
        series.appendData(series.getX()[0], series.getY()[0])
    return series
|
||||
2526
script/___Lib/startup.py
Normal file
2526
script/___Lib/startup.py
Normal file
File diff suppressed because it is too large
Load Diff
191
script/___Lib/statsutils.py
Normal file
191
script/___Lib/statsutils.py
Normal file
@@ -0,0 +1,191 @@
|
||||
###################################################################################################
|
||||
# Utilities for generating reports from command statistics files
|
||||
###################################################################################################
|
||||
|
||||
#CsvJdbc JAR file must be downloaded to extensions folder:
|
||||
#http://central.maven.org/maven2/net/sourceforge/csvjdbc/csvjdbc/1.0.34/csvjdbc-1.0.34.jar
|
||||
|
||||
|
||||
import java.sql.DriverManager as DriverManager
|
||||
import java.sql.ResultSet as ResultSet
|
||||
import java.util.Properties as Properties
|
||||
import java.lang.Class as Class
|
||||
import os
|
||||
from startup import get_context
|
||||
import ch.psi.pshell.core.CommandManager.CommandStatisticsFileRange as CommandStatisticsFileRange
|
||||
|
||||
# Module-level JDBC statement, initialized by get_stats_connection() and used
# by all the query helpers below.
stmt = None
# Column order of the command statistics CSV files
STAT_COLUMN_NAMES = ["Command","Args","Source","Start","End","Background","Result","Return"]
|
||||
def get_stats_connection():
    """Opens a CsvJdbc connection to the command statistics folder.

    Also initializes the module-level 'stmt' statement used by the query helpers.

    Returns:
        java.sql.Connection (must be closed by the caller)
    """
    global stmt
    Class.forName("org.relique.jdbc.csv.CsvDriver")
    db = os.path.abspath(get_context().setup.expandPath("{home}/statistics"))
    props = Properties()
    for key, value in [("fileExtension", ".csv"),
                       ("separator", ";"),
                       ("timestampFormat", "dd/MM/yy HH:mm:ss.SSS"),
                       ("indexedFiles", "true"),
                       ("columnTypes", "String,String,String,Timestamp,Timestamp,Boolean,String,String")]:
        props.put(key, value)

    # The statistics file name suffix depends on the configured rotation range
    fileRange = get_context().commandManager.commandStatisticsConfig.fileRange
    if fileRange == CommandStatisticsFileRange.Daily:
        props.put("fileTailPattern", "(\\d+)_(\\d+)_(\\d+)")
        props.put("fileTailParts", "Year,Month,Day")
    elif fileRange == CommandStatisticsFileRange.Monthly:
        props.put("fileTailPattern", "(\\d+)_(\\d+)")
        props.put("fileTailParts", "Year,Month")
    elif fileRange == CommandStatisticsFileRange.Yearly:
        props.put("fileTailPattern", "(\\d+)")
        props.put("fileTailParts", "Year")

    conn = DriverManager.getConnection("jdbc:relique:csv:" + db, props)
    stmt = conn.createStatement(ResultSet.TYPE_SCROLL_SENSITIVE, ResultSet.CONCUR_READ_ONLY)
    return conn
|
||||
|
||||
def _get_count(sql):
    """Runs a COUNT(*) query with the given WHERE clause and returns the count (0 if no rows)."""
    results = stmt.executeQuery("SELECT COUNT(*) AS count FROM . WHERE " + sql)
    return results.getInt("count") if results.first() else 0
|
||||
|
||||
def _add_sql_time(sql, start, end):
|
||||
if start:
|
||||
if len(start)==8:
|
||||
start = start + " 00:00:00.000"
|
||||
sql = sql + " AND Start>='" + start + "'"
|
||||
if end:
|
||||
if len(end)==8:
|
||||
end = end + " 00:00:00.000"
|
||||
sql = sql + " AND (\"End\"<'" + end + "')"
|
||||
return sql
|
||||
|
||||
def get_count(command= "%%", start = None, end = None, result= "%%"):
    """Counts statistics records matching the command/result LIKE patterns and time range."""
    where = "Command LIKE '" + command + "' AND Result LIKE '" + result + "'"
    return _get_count(_add_sql_time(where, start, end))
|
||||
|
||||
def get_return_count(command= "%%", start = None, end = None, ret= "%%"):
    """Counts statistics records whose Return value matches a pattern.

    Args:
        command(str): SQL LIKE pattern for the command
        start(str): optional lower time bound (dd/MM/yy or full timestamp)
        end(str): optional upper time bound
        ret(str): SQL LIKE pattern for the Return column
    Returns:
        int: number of matching records
    """
    # BUG FIX: Return was compared with '=', so the default wildcard pattern
    # '%%' (and any other pattern) never matched anything; use LIKE for
    # consistency with get_count().
    sql = "Command LIKE '"+ command +"' AND Return LIKE '"+ ret +"'"
    sql = _add_sql_time(sql, start, end)
    return _get_count(sql)
|
||||
|
||||
def get_cmd_stats(command = "%", start = None, end = None):
    """Returns the (success, abort, error) record counts for a command pattern."""
    return tuple(get_count(command, start, end, outcome)
                 for outcome in ("success", "abort", "error"))
|
||||
|
||||
def get_errors(command = "%", start = None, end = None):
    """Lists error return values for a command pattern, most frequent first.

    Returns:
        List of (count, error message) tuples
    """
    query = "SELECT Return, Count(Return) as count FROM . WHERE Command LIKE '" + command + "' AND Result='error'"
    query = _add_sql_time(query, start, end)
    results = stmt.executeQuery(query + " GROUP BY Return ORDER BY count DESC")
    errors = []
    while results.next():
        errors.append((results.getInt("count"), results.getString("Return")))
    return errors
|
||||
|
||||
|
||||
def get_cmd_records(command = "%", start = None, end = None, result= "%%"):
    """Fetches full statistics records matching a command/result pattern and time range.

    Returns:
        List of dicts keyed by STAT_COLUMN_NAMES
    """
    query = "SELECT * FROM . WHERE Command LIKE '" + command + "' AND Result LIKE '" + result + "'"
    results = stmt.executeQuery(_add_sql_time(query, start, end))
    records = []
    while results.next():
        records.append(dict((col, results.getString(col)) for col in STAT_COLUMN_NAMES))
    return records
|
||||
|
||||
def get_commands(commands =None, start = None, end = None):
    """Returns the list of commands present in the statistics.

    If 'commands' patterns are given, returns those with at least one record in
    the time range; otherwise scans the files for distinct command names.
    """
    if not commands:
        found = []
        query = _add_sql_time("SELECT * FROM . WHERE Command != ''", start, end)
        results = stmt.executeQuery(query + " GROUP BY Command")
        while results.next():
            name = results.getString("Command")
            # Skip empty names and composite command lines containing spaces
            if name and " " not in name:
                found.append(name)
        return found
    return [c for c in commands if get_count(c, start, end) > 0]
|
||||
|
||||
def print_cmd_stats(command = "%", start = None, end = None):
    """Prints success/abort/error statistics and the error breakdown for a command pattern.

    Args:
        command(str): SQL LIKE pattern for the command
        start(str): optional lower time bound (dd/MM/yy or full timestamp)
        end(str): optional upper time bound
    """
    print "-----------------------------------------------------------"
    print "Statistics from ", start , " to ", end
    (s,a,e) = get_cmd_stats(command, start, end)
    # Total record count is the sum of the three outcomes
    t=s+a+e #get_count(command, start, end, "%")
    print "Command: " , command , " Records: ", t
    if t>0:
        print "%-10s %7.2f%% - %d" % ("Success", (float(s)/t) * 100, s)
        print "%-10s %7.2f%% - %d" % ("Abort", (float(a)/t) * 100, a)
        print "%-10s %7.2f%% - %d" % ("Error", (float(e)/t) * 100, e)

        # Detail of the individual error messages, most frequent first
        print "\nErrors:"
        print "%5s %s" % ("Count", "Error")
        errors = get_errors(command, start, end)
        for error in errors:
            print "%5d %s" % (error[0], error[1])
    print "-----------------------------------------------------------"
|
||||
|
||||
def print_cmd_records(command = "%", start = None, end = None, result= "%%"):
    """Prints the full statistics records matching a command/result pattern.

    Args:
        command(str): SQL LIKE pattern for the command
        start(str): optional lower time bound (dd/MM/yy or full timestamp)
        end(str): optional upper time bound
        result(str): SQL LIKE pattern for the Result column
    """
    print "-----------------------------------------------------------"
    print "Records from ", start , " to ", end
    info = get_cmd_records(command, start, end, result)
    print "Command: " , command , " Result: ", result, " Records: ", len(info)

    # Header row with the column names
    for col in STAT_COLUMN_NAMES:
        print col+ "; " ,
    print

    # One semicolon-separated line per record
    for cmd in info:
        s = ""
        for col in STAT_COLUMN_NAMES:
            s = s + cmd[col]+ "; "
        print s
    print "-----------------------------------------------------------"
|
||||
|
||||
def print_stats(commands = None, start = None, end = None):
|
||||
print "-----------------------------------------------------------"
|
||||
print "Statistics from ", start , " to ", end
|
||||
print "%-20s %-5s %8s %8s %8s" % ("Command", "Total", "Success", "Abort", "Error")
|
||||
cmds = get_commands(commands)
|
||||
for cmd in cmds:
|
||||
(s,a,e) = get_cmd_stats(cmd, start, end)
|
||||
t=s+a+e
|
||||
if t>0:
|
||||
print "%-20s %-5d %7.2f%% %7.2f%% %7.2f%%" % (cmd, t, (float(s)/t) * 100, (float(a)/t) * 100, (float(e)/t) * 100)
|
||||
else:
|
||||
print "%-20s %-5d" % (cmd, t)
|
||||
print "-----------------------------------------------------------"
|
||||
|
||||
|
||||
|
||||
|
||||
if __name__=='__main__':
    # Demonstration / self-test: requires a populated {home}/statistics folder
    conn = get_stats_connection()

    #Print stats of all commands, with no time range
    print_stats()

    # LIKE patterns matching commands containing 'scan1' / 'scan2'
    cmds = ["%scan1%", "%scan2%"]
    start= "01/03/19"
    end= "01/04/19"

    #Print stats all commands containing 'scan1' and 'scan2' in the month 03.2019
    print_stats(cmds, start, end)

    #Print individual statistics, including error count, for commands containing 'scan1' and 'scan2'
    for cmd in cmds:
        print_cmd_stats (cmd, start, end)

    #Print all records for commands containing 'scan1'
    print_cmd_records("%scan1%%", start, end, "error")
    conn.close()
|
||||
|
||||
Reference in New Issue
Block a user