diff --git a/script/___Lib/diffutils.py b/script/___Lib/diffutils.py
new file mode 100644
index 0000000..926e3b5
--- /dev/null
+++ b/script/___Lib/diffutils.py
@@ -0,0 +1,1032 @@
+###################################################################################################
+# Diffcalc utilities
+###################################################################################################
+
+###################################################################################################
+# Installing
+###################################################################################################
+
+#1- Download from: https://github.com/DiamondLightSource/diffcalc/archive/v2.1.zip
+#2- Extract the contents to {script}/Lib/diffcalc
+#3- Download http://central.maven.org/maven2/gov/nist/math/jama/1.0.3/jama-1.0.3.jar
+#   to the extensions folder.
+#4- On {script}/Lib/diffcalc/diffcalc/gdasupport/you.py, the line "wl.asynchronousMoveTo(1)"
+#   must be commented out so that the energy does not move when the library is loaded.
+
+###################################################################################################
+# Library loading and Hardware setup
+###################################################################################################
+
+#1- Create a MotorGroup with the diffractometer motors
+#   e.g. 'sixc', containing mu, delta, gam, eta, chi, phi motors (gam = nu)
+#   or 'fivec', containing delta, gam, eta, chi, phi motors
+#   or 'fourc', containing delta, eta, chi, phi motors
+#2- Create a positioner to read/set the energy in keV, e.g. named 'en'
+#3- Execute: run("diffutils")
+#4- Execute: setup_diff(sixc, en)
+
+
+###################################################################################################
+# API
+###################################################################################################
+
+# Orientation commands defined in https://github.com/DiamondLightSource/diffcalc#id19 are
+# defined here with identical signatures, as are the constraint commands.
+# Motion command names were changed because the original names can collide with other globals:
+# hklci, hklca, hklwh, hklget, hklmv and hklsim(hkl).
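+# A minimal quick-start sketch (names are illustrative: assumes a 'sixc' MotorGroup
+# and an 'en' energy positioner already exist in the global context):
+#
+#   run("diffutils")
+#   setup_diff(sixc, en)                    # map motors and energy into diffcalc
+#   newub('test')                           # start a new UB calculation
+#   setlat('cubic', 1, 1, 1, 90, 90, 90)    # lattice parameters in Angstroms/deg
+#   addref([1, 0, 0])                       # reflection at current position/energy
+#   con('qaz', 90); con('a_eq_b'); con('mu', 0)
+#   hklmv([0.0, 1.0, 1.0])                  # move the diffractometer to an HKL position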
+
+
+from __future__ import absolute_import
+
+import os
+import sys
+import math
+import traceback
+
+import Jama.Matrix
+
+diffcalc_path = os.path.abspath(get_context().setup.expandPath("{script}/Lib/diffcalc"))
+if not diffcalc_path in sys.path:
+    sys.path.append(diffcalc_path)
+
+import diffcalc
+from diffcalc import settings
+from diffcalc.hkl.you.geometry import YouGeometry, SixCircle, FiveCircle, FourCircle, YouPosition
+from diffcalc.hardware import HardwareAdapter
+from diffcalc.ub.persistence import UBCalculationJSONPersister, UbCalculationNonPersister
+from diffcalc.gdasupport.minigda.scannable import ScannableBase, ScannableGroup
+#from diffcalc.gdasupport.minigda import command
+
+import ch.psi.pshell.device.PositionerConfig as PositionerConfig
+import ch.psi.pshell.device.RegisterConfig as RegisterConfig
+import ch.psi.pshell.device.Register as Register
+
+_difcalc_names = {}
+
+# Disable error handling designed for interactive use
+#diffcalc.util.DEBUG = True
+# Disable console bold characters
+diffcalc.util.COLOURISE_TERMINAL_OUTPUT = False
+
+###################################################################################################
+# Device mapping to diffcalc
+###################################################################################################
+class PositionerScannable(ScannableBase):
+    def __init__(self, positioner, name = None):
+        self.positioner = positioner
+        self.name = positioner.name if name is None else name
+        self.inputNames = [self.name]
+        self.outputFormat = ['% 6.4f']
+        self.level = 3
+
+    def isBusy(self):
+        return self.positioner.state == State.Busy
+
+    def waitWhileBusy(self):
+        self.positioner.waitReady(-1)
+
+    def asynchronousMoveTo(self, new_position):
+        #print "Moving " , self.name, " to: ", new_position
+        self.positioner.moveAsync(float(new_position), -1)
+
+    def getPosition(self):
+        return self.positioner.getPosition()
+
+def _get_diffcalc_axis_names():
+    nu_name = diffcalc.hkl.you.constraints.NUNAME
+    return ("mu", "delta", nu_name, "eta", "chi", "phi")
+
+class PositionerScannableGroup(ScannableGroup):
+    def __init__(self, name, motors, diffcalc_axis_names=None):
+        self.name = name
+        global _difcalc_names
+        _difcalc_names = {}
+        positioners = []
+        if diffcalc_axis_names is None:
+            if   len(motors) == 6: diffcalc_axis_names = _get_diffcalc_axis_names()
+            elif len(motors) == 5: diffcalc_axis_names = ("delta", "gam", "eta", "chi", "phi")
+            elif len(motors) == 4: diffcalc_axis_names = ("delta", "eta", "chi", "phi")
+        self.diffcalc_axis_names = diffcalc_axis_names
+        for i in range(len(motors)):
+            _difcalc_names[motors[i]] = diffcalc_axis_names[i]
+            #setattr/getattr instead of exec: does not require the motor to be a global
+            scannable = PositionerScannable(motors[i], diffcalc_axis_names[i])
+            setattr(self, diffcalc_axis_names[i], scannable)
+            positioners.append(scannable)
+        ScannableGroup.__init__(self, self.name, positioners)
+
+class MotorGroupScannable(PositionerScannableGroup):
+    def __init__(self, motor_group, diffcalc_axis_names=None, simultaneous_move=False):
+        self.simultaneous_move = simultaneous_move
+        self.motor_group = motor_group
+        PositionerScannableGroup.__init__(self, motor_group.name, motor_group.motors, diffcalc_axis_names)
+        self.motor_group.restoreSpeedAfterMove = self.simultaneous_move
+
+    #Make synchronous moves (the default implementation triggers each motor individually)
+    def asynchronousMoveTo(self, position):
+        if self.simultaneous_move:
+            position = [(float('nan') if v is None else v) for v in position]
+            self.motor_group.write(position)
+        else:
+            PositionerScannableGroup.asynchronousMoveTo(self, position)
+
+
+class ScannableAdapter(HardwareAdapter):
+    def __init__(self, diffractometer, energy, energy_multiplier_to_kev=1):
+        self.diffractometer = diffractometer
+        self.energy = energy
+        self.energy_multiplier_to_kev = energy_multiplier_to_kev
+        input_names = diffractometer.getInputNames()
+        HardwareAdapter.__init__(self, input_names)
+
+    #Returns the current physical positions
+    def get_position(self):
+        """
+        pos = getDiffractometerPosition() -- returns the current physical
+        diffractometer position as a list in degrees
+        """
+        return self.diffractometer.getPosition()
+
+    #Returns energy in keV
+    def get_energy(self):
+        """energy = get_energy() -- returns energy in keV (NOT eV!)"""
+        #Check for None before multiplying, otherwise the check can never be reached
+        energy = self.energy.getPosition()
+        if energy is None:
+            raise DiffcalcException("Energy has not been set")
+        return energy * self.energy_multiplier_to_kev
+
+    def get_motor(self, name):
+        global _motor_group
+        global _difcalc_names
+        for m in _difcalc_names.keys():
+            if _difcalc_names[m] == name:
+                return m
+        for m in _motor_group.motors:
+            if m.name == name:
+                return m
+        raise Exception("Invalid axis name: " + str(name))
+
+    def get_lower_limit(self, name):
+        '''returns lower limit by axis name. Limit may be None if not set
+        '''
+        m = self.get_motor(name)
+        ret = m.getMinValue()
+        #NaN never compares equal to itself: use math.isnan instead of == float("NaN")
+        if ret is not None and math.isnan(ret):
+            ret = None
+        return ret
+    def get_upper_limit(self, name):
+        '''returns upper limit by axis name. Limit may be None if not set
+        '''
+        m = self.get_motor(name)
+        ret = m.getMaxValue()
+        if ret is not None and math.isnan(ret):
+            ret = None
+        return ret
+
+    def set_lower_limit(self, name, value):
+        """value may be None to remove limit"""
+        if value is None: value = float("NaN")
+        m = self.get_motor(name)
+        m.config.minValue = value
+
+    def set_upper_limit(self, name, value):
+        """value may be None to remove limit"""
+        if value is None: value = float("NaN")
+        m = self.get_motor(name)
+        m.config.maxValue = value
+
+    def is_axis_value_within_limits(self, axis_name, value):
+        m = self.get_motor(axis_name)
+        upper = self.get_upper_limit(axis_name)
+        lower = self.get_lower_limit(axis_name)
+        if (upper is None) or (math.isnan(upper)): upper = sys.float_info.max
+        if (lower is None) or (math.isnan(lower)): lower = -sys.float_info.max
+        return lower <= value <= upper
+
+    @property
+    def name(self):
+        return self.diffractometer.getName()
+
+class MotorGroupAdapter(ScannableAdapter):
+    def __init__(self, diffractometer, energy, energy_multiplier_to_kev=1, diffcalc_axis_names=None, simultaneous_move=False):
+        self.diffractometer = MotorGroupScannable(diffractometer, diffcalc_axis_names, simultaneous_move)
+        self.energy = PositionerScannable(energy)
+        self.energy.level = 3
+        ScannableAdapter.__init__(self, self.diffractometer, self.energy, energy_multiplier_to_kev)
+
+class Wavelength(RegisterBase):
+    def doRead(self):
+        try:
+            return get_wavelength().getPosition()
+        except:
+            return None
+
+    def doWrite(self, val):
+        get_wavelength().asynchronousMoveTo(val)
+
+
+###################################################################################################
+# HKL Pseudo-devices
+###################################################################################################
+class HklPositioner(PositionerBase):
+    def __init__(self, name, index, hkl_group):
+        PositionerBase.__init__(self, name, PositionerConfig())
+        self.setParent(hkl_group)
+        self.index = index
+
+    def isReady(self):
+        return PositionerBase.isReady(self) and self.getParent().isReady()
+
+    def doRead(self):
+        return self.getParent()._setpoint[self.index]
+
+    def doWrite(self, value):
+        #print "Setting " , self.getName(), "to: ", value
+        pos = [None, None, None]
+        pos[self.index] = value
+        self.getParent().write(pos)
+
+    def doReadReadback(self):
+        if java.lang.Thread.currentThread() != self.getParent()._updating_thread:
+            self.getParent().update()
+        return self.getParent()._readback[self.index]
+
+class HklGroup(RegisterBase, Register.RegisterArray):
+    def __init__(self, name):
+        RegisterBase.__init__(self, name, RegisterConfig())
+        self.hkl = get_hkl()
+        self.h, self.k, self.l = HklPositioner("h", 0, self), HklPositioner("k", 1, self), HklPositioner("l", 2, self)
+        add_device(self.h, True)
+        add_device(self.k, True)
+        add_device(self.l, True)
+        self._setpoint = self.doRead()
+        self._updating = False
+
+    def getSize(self):
+        return 3
+
+    def doRead(self):
+        try:
+            self._readback = self.hkl.getPosition()
+            self._updating_thread = java.lang.Thread.currentThread()
+            self.h.update()
+            self.k.update()
+            self.l.update()
+        except:
+            #traceback.print_exc()
+            self._readback = (None, None, None)
+        finally:
+            self._updating_thread = None
+        return self._readback
+
+    def doWrite(self, pos):
+        self._setpoint = None if (pos is None) else [(None if v is None else float(v)) for v in pos]
+        #print "Moving to: " + str(pos)
+        self.hkl.asynchronousMoveTo(pos)
+
+    def sim(self, pos):
+        return self.hkl.simulateMoveTo(pos)
+
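+# Usage sketch for the pseudo-devices created by setup_diff() below (a 'hkl_group'
+# register plus individual 'h', 'k', 'l' positioners; hedged, names as created here):
+#
+#   print hkl_group.read()        # current (H, K, L) tuple
+#   h.write(1.0)                  # drive a single reciprocal-space coordinate
+#   hkl_group.write([1, 0, 1])    # drive all three coordinates at once
+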
+###################################################################################################
+# System setup
+###################################################################################################
+you = None
+dc, ub, hardware, hkl = None, None, None, None
+_motor_group = None
+
+def setup_diff(diffractometer=None, energy=None, diffcalc_axis_names=None, geometry=None, persist_ub=True, simultaneous_move=False):
+    """
+    configure the diffractometer. Displays the configuration if no parameter is given
+    diffractometer: diffraction motor group
+    energy: positioner providing the energy in keV
+    geometry: YouGeometry extension. If None, uses the default
+    diffcalc_axis_names: if None, use the defaults:
+        - mu, delta, gam, eta, chi, phi (six circle)
+        - delta, gam, eta, chi, phi (five circle)
+        - delta, eta, chi, phi (four circle)
+    """
+    global you, dc, ub, hardware, hkl, _motor_group
+    if diffractometer is not None:
+        _motor_group = diffractometer
+        you = None
+        if geometry is not None:
+            settings.geometry = geometry
+        elif diffcalc_axis_names is not None:
+            class CustomGeometry(YouGeometry):
+                def __init__(self):
+                    self.all_axis_names = _get_diffcalc_axis_names()
+                    self.my_axis_names = diffcalc_axis_names
+                    fixed_constraints = {}
+                    for axis in self.all_axis_names:
+                        if not axis in self.my_axis_names:
+                            fixed_constraints[axis] = 0
+                    YouGeometry.__init__(self, diffractometer.name, fixed_constraints)
+                def physical_angles_to_internal_position(self, physical_angle_tuple):
+                    #The physical tuple only holds the present axes: advance the index
+                    #only when an axis is present, otherwise the wrong angle is picked
+                    pos = []
+                    index = 0
+                    for axis in self.all_axis_names:
+                        if axis in self.my_axis_names:
+                            pos.append(physical_angle_tuple[index])
+                            index = index + 1
+                        else:
+                            pos.append(0)
+                    pos.append("DEG")  #units
+                    return YouPosition(*pos)
+                def internal_position_to_physical_angles(self, internal_position):
+                    pos = internal_position.clone()
+                    pos.changeToDegrees()
+                    pos = pos.totuple()
+                    ret = []
+                    for i in range(len(self.all_axis_names)):
+                        if self.all_axis_names[i] in self.my_axis_names:
+                            ret.append(pos[i])
+                    return tuple(ret)
+            settings.geometry = CustomGeometry()
+        elif len(diffractometer.motors) == 6:
+            settings.geometry = SixCircle()
+        elif len(diffractometer.motors) == 5:
+            settings.geometry = FiveCircle()
+        elif len(diffractometer.motors) == 4:
+            settings.geometry = FourCircle()
+        else:
+            raise Exception("Invalid motor group")
+        settings.hardware = MotorGroupAdapter(diffractometer, energy, 1, diffcalc_axis_names, simultaneous_move)
+
+        if persist_ub:
+            settings.persistence_path = os.path.abspath(get_context().setup.expandPath("{config}/diffcalc"))
+            if not os.path.exists(settings.persistence_path):
+                os.makedirs(settings.persistence_path)
+            print "UB calculations persistence path: " + settings.persistence_path
+            settings.ubcalc_persister = UBCalculationJSONPersister(settings.persistence_path)
+        else:
+            print "UB calculations are not persisted"
+            settings.ubcalc_persister = UbCalculationNonPersister()
+        settings.axes_scannable_group = settings.hardware.diffractometer
+        settings.energy_scannable = settings.hardware.energy
+        settings.ubcalc_strategy = diffcalc.hkl.you.calc.YouUbCalcStrategy()
+        settings.angles_to_hkl_function = diffcalc.hkl.you.calc.youAnglesToHkl
+        from diffcalc.gdasupport import you
+        reload(you)
+
+        # These must be imported AFTER the settings have been configured
+        from diffcalc.dc import dcyou as dc
+        from diffcalc.ub import ub
+        from diffcalc import hardware
+        from diffcalc.hkl.you import hkl
+
+        add_device(HklGroup("hkl_group"), True)
+        add_device(Wavelength("wavelength", 6), True)
+        hkl_group.polling = 250
+        wavelength.polling = 250
+
+    if settings.hardware is not None:
+        print "Diffractometer defined with:"
+        print "\t" + "Motor group: " + str(settings.hardware.diffractometer.name)
+        print "\t" + "Energy: " + str(settings.hardware.energy.name)
+        print "\nDiffcalc axis names:"
+        for m in _difcalc_names.keys():
+            print "\t Motor " + m.name + " = Axis " + _difcalc_names[m]
+    else:
+        print "Diffractometer is not defined\n"
+    print
+
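+# Hedged setup examples (motor group and positioner names are illustrative):
+#
+#   setup_diff(sixc, en)                           # standard six-circle geometry
+#   setup_diff(fourc, en,
+#              diffcalc_axis_names=("delta", "eta", "chi", "phi"))  # subset of You axes
+#   setup_diff()                                   # just display the configuration
+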
+def setup_axis(motor=None, min=None, max=None, cut=None):
+    """
+    configure axis range and cut.
+    Displays the ranges if motor is None
+    """
+    if motor is not None:
+        name = get_axis_name(motor)
+        if min is not None: hardware.setmin(name, min)
+        if max is not None: hardware.setmax(name, max)
+        if cut is not None: hardware.setcut(name, cut)
+    else:
+        print "Axis range configuration:"
+        hardware.hardware()
+        print
+
+###################################################################################################
+# Access functions
+###################################################################################################
+def get_diff():
+    return settings.hardware.diffractometer
+
+def get_energy():
+    return settings.hardware.energy
+
+def get_adapter():
+    return settings.hardware
+
+def get_motor_group():
+    return _motor_group
+
+def get_wavelength():
+    return you.wl
+
+def get_hkl():
+    return you.hkl
+
+def get_axis_name(motor):
+    if is_string(motor):
+        motor = get_adapter().get_motor(motor)
+    return _difcalc_names[motor]
+
+###################################################################################################
+# Orientation Commands
+###################################################################################################
+
+
+# State
+
+def newub(name):
+    """
+    start a new ub calculation name
+    """
+    try:
+        rmub(name)
+    except:
+        pass
+    try:
+        return ub.newub(name)
+    finally:
+        save_exp_context()
+
+def loadub(name_or_num):
+    """
+    load an existing ub calculation
+    """
+    try:
+        return ub.loadub(name_or_num)
+    finally:
+        save_exp_context()
+
+def lastub():
+    """
+    load the last used ub calculation
+    """
+    try:
+        return ub.lastub()
+    finally:
+        save_exp_context()
+
+def listub():
+    """
+    list the ub calculations available to load
+    """
+    return ub.listub()
+
+def rmub(name_or_num):
+    """
+    remove an existing ub calculation
+    """
+    return ub.rmub(name_or_num)
+
+def saveubas(name):
+    """
+    save the ub calculation with a new name
+    """
+    try:
+        return ub.saveubas(name)
+    finally:
+        save_exp_context()
+
+# Lattice
+
+def setlat(name=None, *args):
+    """
+    set lattice parameters (Angstroms and deg)
+    setlat -- interactively enter lattice parameters (Angstroms and deg)
+    setlat name a -- assumes cubic
+    setlat name a b -- assumes tetragonal
+    setlat name a b c -- assumes ortho
+    setlat name a b c gamma -- assumes mon/hex with gamma not equal to 90
+    setlat name a b c alpha beta gamma -- arbitrary
+    """
+    return ub.setlat(name, *args)
+
+def c2th(hkl, en=None):
+    """
+    calculate two-theta angle for a reflection
+    """
+    return ub.c2th(hkl, en)
+
+def hklangle(hkl1, hkl2):
+    """
+    calculate angle between [h1 k1 l1] and [h2 k2 l2] crystal planes
+    """
+    return ub.hklangle(hkl1, hkl2)
+
+
+# Reference (surface)
+
+def setnphi(xyz=None):
+    """
+    sets or displays (xyz=None) the n_phi reference
+    """
+    return ub.setnphi(xyz)
+
+
+def setnhkl(hkl=None):
+    """
+    sets or displays (hkl=None) the n_hkl reference
+    """
+    return ub.setnhkl(hkl)
+
+# Reflections
+
+def showref():
+    """
+    shows the full reflection list
+    """
+    return ub.showref()
+
+def addref(*args):
+    """
+    add reflection
+    addref -- add reflection interactively
+    addref [h k l] {'tag'} -- add reflection with current position and energy
+    addref [h k l] (p1, .., pN) energy {'tag'} -- add arbitrary reflection
+    """
+    return ub.addref(*args)
+
+def editref(idx):
+    """
+    interactively edit a reflection (idx is tag or index numbered from 1)
+    """
+    return ub.editref(idx)
+
+def delref(idx):
+    """
+    deletes a reflection (idx is tag or index numbered from 1)
+    """
+    return ub.delref(idx)
+
+
+def clearref():
+    """
+    deletes all the reflections
+    """
+    return ub.clearref()
+
+def swapref(idx1=None, idx2=None):
+    """
+    swaps two reflections
+    swapref -- swaps the first two reflections used for calculating the U matrix
+    swapref {num1 | 'tag1'} {num2 | 'tag2'} -- swaps two reflections
+    """
+    return ub.swapref(idx1, idx2)
+
+
+# Crystal Orientations
+
+def showorient():
+    """
+    shows the full list of crystal orientations
+    """
+    #TODO: Workaround for a bug in Diffcalc (str_lines needs a parameter)
+    if ub.ubcalc._state.orientlist:
+        print '\n'.join(ub.ubcalc._state.orientlist.str_lines(None))
+        return
+    return ub.showorient()
+
+def addorient(*args):
+    """
+    addorient -- add crystal orientation interactively
+    addorient [h k l] [x y z] {'tag'} -- add crystal orientation in laboratory frame
+    """
+    return ub.addorient(*args)
+
+def editorient(idx):
+    """
+    interactively edit a crystal orientation (idx is tag or index numbered from 1)
+    """
+    return ub.editorient(idx)
+
+def delorient(idx):
+    """
+    deletes a crystal orientation (idx is tag or index numbered from 1)
+    """
+    return ub.delorient(idx)
+
+def clearorient():
+    """
+    deletes all the crystal orientations
+    """
+    return ub.clearorient()
+
+def swaporient(idx1=None, idx2=None):
+    """
+    swaps two crystal orientations
+    swaporient -- swaps the first two crystal orientations used for calculating the U matrix
+    swaporient {num1 | 'tag1'} {num2 | 'tag2'} -- swaps two crystal orientations
+    """
+    return ub.swaporient(idx1, idx2)
+
+
+# UB Matrix
+
+def showub():
+    """
+    show the complete state of the ub calculation
+    NOT A DIFFCALC COMMAND
+    """
+    return ub.ub()
+
+def checkub():
+    """
+    show calculated and entered hkl values for reflections
+    """
+    return ub.checkub()
+
+def setu(U=None):
+    """
+    manually set U matrix
+    setu -- set U matrix interactively
+    setu [[..][..][..]] -- manually set U matrix
+    """
+    return ub.setu(U)
+
+def setub(UB=None):
+    """
+    manually set UB matrix
+    setub -- set UB matrix interactively
+    setub [[..][..][..]] -- manually set UB matrix
+    """
+    return ub.setub(UB)
+
+def getub():
+    """
+    returns the current UB matrix
+    NOT A DIFFCALC COMMAND
+    """
+    return None if ub.ubcalc._UB is None else ub.ubcalc._UB.tolist()
+
+def calcub(idx1=None, idx2=None):
+    """
+    (re)calculate U matrix
+    calcub -- (re)calculate U matrix from the first two reflections and/or orientations.
+    calcub idx1 idx2 -- (re)calculate U matrix from reflections and/or orientations referred by indices and/or tags idx1 and idx2.
+    """
+    return ub.calcub(idx1, idx2)
+
+def trialub(idx=1):
+    """
+    (re)calculate U matrix using one reflection only
+    Use index or tag idx. Default: use the first reflection.
+    """
+    return ub.trialub(idx)
+
+def refineub(*args):
+    """
+    refine unit cell dimensions and U matrix to match diffractometer angles for a given hkl value
+    refineub -- interactively
+    refineub [h k l] {pos}
+    """
+    return ub.refineub(*args)
+
+def fitub(*args):
+    """
+    fitub ref1, ref2, ref3... -- fit UB matrix to match list of provided reference reflections.
+    """
+    return ub.fitub(*args)
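+
+# A hedged UB-calculation sketch (values are illustrative, not from a real experiment):
+#
+#   newub('sample1')
+#   setlat('cubic', 1, 1, 1, 90, 90, 90)
+#   addref([1, 0, 0])                                     # current position/energy
+#   addref([0, 1, 0], [0, 60, 0, 30, 0, 90], 12.39842)    # explicit position/energy
+#   calcub()       # U matrix from the first two reflections
+#   checkub()      # compare calculated vs. entered hkl values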
+
+def addmiscut(angle, xyz=None):
+    """
+    apply miscut to U matrix using a specified miscut angle in degrees and a rotation axis (default: [0 1 0])
+    """
+    return ub.addmiscut(angle, xyz)
+
+def setmiscut(angle, xyz=None):
+    """
+    manually set U matrix using a specified miscut angle in degrees and a rotation axis (default: [0 1 0])
+    """
+    return ub.setmiscut(angle, xyz)
+
+
+###################################################################################################
+# Motion Commands
+###################################################################################################
+
+# Constraints
+
+def con(*args):
+    """
+    list or set available constraints and values
+    con -- list available constraints and values
+    con {val} -- constrains and optionally sets one constraint
+    con {val} {val} {val} -- clears and then fully constrains
+    """
+    try:
+        ret = hkl.con(*args)
+    finally:
+        save_exp_context()
+    return ret
+
+def uncon(name):
+    """
+    remove constraint
+    """
+    try:
+        ret = hkl.uncon(name)
+    finally:
+        save_exp_context()
+    return ret
+
+
+# HKL
+
+def allhkl(_hkl, wavelength=None):
+    """
+    print all hkl solutions ignoring limits
+    """
+    return hkl.allhkl(_hkl, wavelength)
+
+
+# Hardware
+
+def setmin(axis, val=None):
+    """
+    set lower limit used by auto sector code (nan to clear)
+    """
+    name = get_axis_name(axis)
+    try:
+        hardware.setmin(name, val)
+    finally:
+        save_exp_context()
+
+def setmax(axis, val=None):
+    """
+    set upper limit used by auto sector code (nan to clear)
+    """
+    name = get_axis_name(axis)
+    try:
+        return hardware.setmax(name, val)
+    finally:
+        save_exp_context()
+
+def setcut(axis, val):
+    """
+    sets the cut angle
+    """
+    name = get_axis_name(axis)
+    try:
+        return hardware.setcut(name, val)
+    finally:
+        save_exp_context()
+
+###################################################################################################
+# Motion commands: not standard Diffcalc names
+###################################################################################################
+
+
+def hklci(positions, energy=None):
+    """
+    converts positions of motors to reciprocal space coordinates (H K L)
+    """
+    return dc.angles_to_hkl(positions, energy)
+
+def hklca(hkl, energy=None):
+    """
+    converts reciprocal space coordinates (H K L) to positions of motors
+    """
+    return dc.hkl_to_angles(hkl[0], hkl[1], hkl[2], energy)
+
+def hklwh():
+    """
+    prints the current reciprocal space coordinates (H K L) and positions of motors
+    """
+    hkl = hklget()
+    print "HKL: " + str(hkl)
+    for m in _difcalc_names.keys():
+        print _difcalc_names[m] + " [" + m.name + "] :" + str(m.take())
+
+def hklget():
+    """
+    get current hkl position
+    """
+    return hkl_group.read()
+
+def hklmv(hkl):
+    """
+    move to hkl position
+    """
+    hkl_group.write(hkl)
+
+def hklsim(hkl):
+    """
+    simulates moving the diffractometer
+    """
+    return hkl_group.sim(hkl)
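+
+# Hedged conversion examples (angle and energy values are illustrative):
+#
+#   hklci((0., 60., 0., 30., 0., 0.))   # motor angles -> (H, K, L)
+#   hklca((1, 0, 0))                    # (H, K, L) -> motor angles
+#   hklsim([0.0, 1.0, 1.0])             # dry-run a move, nothing drives
+#   hklwh()                             # current position, in both spaces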
+
+###################################################################################################
+# HKL Combined Scan
+###################################################################################################
+def hklscan(vector, readables, latency=0.0, passes=1, **pars):
+    """
+    HKL Scan:
+
+    Args:
+        vector(list of lists): HKL values to be scanned
+        readables(list of Readable): sensors to be sampled on each step
+        latency(float, optional): settling time for each step before readout, defaults to 0.0
+        passes(int, optional): number of passes
+        pars(keyworded variable length arguments, optional): scan optional named arguments:
+            - title(str, optional): plotting window name.
+            - hidden(bool, optional): if true generates no effects on user interface.
+            - before_read (function, optional): callback on each step, before sampling. Arguments: positions, scan
+            - after_read (function, optional): callback on each step, after sampling. Arguments: record, scan.
+            - before_pass (function, optional): callback before each scan pass execution. Arguments: pass_num, scan.
+            - after_pass (function, optional): callback after each scan pass execution. Arguments: pass_num, scan.
+            - Additional arguments defined by set_exec_pars.
+    Returns:
+        ScanResult object.
+    """
+    readables = to_list(string_to_obj(readables))
+    pars["initial_move"] = False
+    scan = ManualScan([h, k, l], readables, vector[0], vector[-1], [len(vector)-1] * 3, dimensions=1)
+    if not "domain_axis" in pars.keys():
+        pars["domain_axis"] = "Index"
+    processScanPars(scan, pars)
+    scan.start()
+    try:
+        for pos in vector:
+            #print "Writing ", pos
+            hkl_group.write(pos)
+            time.sleep(0.1)  #Make sure it is busy
+            get_motor_group().update()
+            get_motor_group().waitReady(-1)
+            time.sleep(latency)
+            hkl_group.update()
+            if scan.before_read: scan.before_read(pos, scan)
+            scan.append([h.take(), k.take(), l.take()], [h.getPosition(), k.getPosition(), l.getPosition()], [readable.read() for readable in readables])
+            if scan.after_read: scan.after_read(scan.currentRecord, scan)
+    finally:
+        scan.end()
+    return scan.result
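+
+# Hedged usage sketch for hklscan (the 'sin' sensor name is illustrative):
+#
+#   vector = [[1.0, 1.0, 1.0], [1.0, 1.0, 1.1], [1.0, 1.0, 1.2]]
+#   result = hklscan(vector, [sin], latency=0.9, title="HKL scan")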
+
+###################################################################################################
+# Experiment context
+###################################################################################################
+
+def get_constraints():
+    constraints = {}
+    from diffcalc.hkl.you.constraints import valueless_constraints
+    all_constraints = hkl.hklcalc.constraints.all
+    for name in all_constraints:
+        if not hkl.hklcalc.constraints.is_constraint_fixed(name):
+            value = hkl.hklcalc.constraints.get_constraint(name)
+            if name in valueless_constraints:
+                constraints[name] = None
+            elif value is not None:
+                constraints[name] = value
+    return constraints
+
+def set_constraints(constraints):
+    for name in constraints.keys():
+        try:
+            value = constraints[name]
+            if value is None:
+                con(name)
+            else:
+                con(name, value)
+        except:
+            print sys.exc_info()[1]
+
+def get_limits():
+    limits = {}
+    for name in settings.hardware.get_axes_names():
+        axis = {}
+        axis["lower_limit"] = settings.hardware.get_lower_limit(name)
+        axis["upper_limit"] = settings.hardware.get_upper_limit(name)
+        axis["cut"] = settings.hardware.get_cuts()[name]
+        limits[name] = axis
+    return limits
+
+def set_limits(limits):
+    for name in limits.keys():
+        try:
+            axis = limits[name]
+            if axis.get("lower_limit") is not None: setmin(name, axis["lower_limit"])
+            if axis.get("upper_limit") is not None: setmax(name, axis["upper_limit"])
+            if axis.get("cut") is not None: setcut(name, axis["cut"])
+        except:
+            print sys.exc_info()[1]
+
+def get_exp_context():
+    context = {}
+    try:
+        context["limits"] = get_limits()
+    except:
+        context["limits"] = None
+    try:
+        context["constraints"] = get_constraints()
+    except:
+        context["constraints"] = None
+    try:
+        context["ub"] = ub.ubcalc._state.name
+    except:
+        context["ub"] = None
+    return context
+
+def set_exp_context(context):
+    try:
+        if context.get("limits") is not None:
+            set_limits(context["limits"])
+    except:
+        print sys.exc_info()[1]
+    try:
+        if context.get("constraints") is not None:
+            set_constraints(context["constraints"])
+    except:
+        print sys.exc_info()[1]
+    try:
+        if context.get("ub") is not None:
+            loadub(str(context["ub"]))
+    except:
+        print sys.exc_info()[1]
+
+
+EXPERIMENT_CONTEXT_FILE = get_context().setup.expandPath("{context}/diff_exp_context.json")
+
+def save_exp_context():
+    """
+    Saves the experiment context (constraints, ub and hw limits)
+    """
+    try:
+        c = get_exp_context()
+        with open(EXPERIMENT_CONTEXT_FILE, 'w') as json_file:
+            json.dump(c, json_file)
+    except:
+        print "Cannot save experiment context: ", sys.exc_info()[1]
+
+def load_exp_context():
+    """
+    Loads the experiment context (constraints, ub and hw limits)
+    """
+    try:
+        with open(EXPERIMENT_CONTEXT_FILE) as json_file:
+            c = json.load(json_file)
+        set_exp_context(c)
+    except:
+        print "Cannot load experiment context: ", sys.exc_info()[1]
+
+
+###################################################################################################
+# Test
+###################################################################################################
+
+def test_diffcalc():
+    print "Start test"
+    energy.move(20.0)
+    delta.config.maxSpeed = 50.0
+    delta.speed = 50.0
+    delta.move(1.0)
+
+    #Setup
+    setup_diff(sixc, energy)
+    setup_axis('gam', 0, 179)
+    setup_axis('delta', 0, 179)
+    setup_axis('delta', min=0)
+    setup_axis('phi', cut=-180.0)
+    setup_axis()
+
+    #Orientation
+    listub()
+    # Create a new ub calculation and set lattice parameters
+    newub('test')
+    setlat('cubic', 1, 1, 1, 90, 90, 90)
+    # Add 1st reflection (demonstrating the hardware adapter)
+    settings.hardware.wavelength = 1
+    c2th([1, 0, 0])                # energy from hardware
+    settings.hardware.position = 0, 60, 0, 30, 0, 0
+    addref([1, 0, 0])              # energy and position from hardware
+    # Add 2nd reflection (this time without the hardware adapter)
+    c2th([0, 1, 0], 12.39842)
+    addref([0, 1, 0], [0, 60, 0, 30, 0, 90], 12.39842)
+    # Check the state
+    showub()
+    checkub()
+
+    #Constraints
+    con('qaz', 90)
+    con('a_eq_b')
+    con('mu', 0)
+    con()
+
+    #Motion
+    print hklci((0., 60., 0., 30., 0., 0.))
+    print hklca((1, 0, 0))
+    sixc.write([0, 60, 0, 30, 90, 0])
+    print "sixc=", sixc.position
+    wavelength.write(1.0)
+    print "wavelength = ", wavelength.read()
+    lastub()
+    setu([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
+    showref()
+    swapref(1, 2)
+    hklwh()
+    hklsim([0.0, 1.0, 1.0])
+    hklmv([0.0, 1.0, 1.0])
+
+    #Scans
+    lscan(l, [sin], 1.0, 1.5, 0.1)
+    ascan([k, l], [sin], [1.0, 1.0], [1.2, 1.3], [0.1, 0.1], zigzag=True, parallel_positioning=False)
+    vector = [[1.0, 1.0, 1.0], [1.0, 1.0, 1.1], [1.0, 1.0, 1.2], [1.0, 1.0, 1.4]]
+    hklscan(vector, [sin, arr], 0.9)
\ No newline at end of file
diff --git a/script/___Lib/ijutils.py b/script/___Lib/ijutils.py
new file mode 100644
index 0000000..91ebc4e
--- /dev/null
+++ b/script/___Lib/ijutils.py
@@ -0,0 +1,750 @@
+####################################################################################################
+# Facade to ImageJ functionality
+####################################################################################################
+
+#More information on:
+#  Image:   https://imagej.nih.gov/ij/docs/guide/146-28.html#toc-Section-28
+#  Process: https://imagej.nih.gov/ij/docs/guide/146-29.html#toc-Section-29
+#  Analyze: https://imagej.nih.gov/ij/docs/guide/146-30.html#toc-Section-30
+
+import ch.psi.utils.Convert as Convert
+import ch.psi.pshell.imaging.Utils as Utils
+from startup import get_context
+import java.awt.image.BufferedImage as BufferedImage
+import jarray
+
+import ij.IJ as IJ
+import ij.ImageJ as ImageJ
+import ij.WindowManager as WindowManager
+import ij.ImagePlus as ImagePlus
+import ij.Prefs as Prefs
+import ij.io.FileSaver as FileSaver
+import ij.io.Opener as Opener
+
+import ij.process.ImageProcessor as ImageProcessor
+import ij.process.ByteProcessor as ByteProcessor
+import ij.process.ShortProcessor as ShortProcessor
+import ij.process.ColorProcessor as ColorProcessor
+import ij.process.FloatProcessor as FloatProcessor
+import ij.process.ImageConverter as ImageConverter
+import ij.process.AutoThresholder as AutoThresholder
+import ij.process.LUT as LUT
+import ij.measure.Measurements as Measurements
+import ij.measure.ResultsTable as ResultsTable
+import ij.plugin.filter.Analyzer as Analyzer
+import ij.plugin.filter.GaussianBlur as GaussianBlur
+import ij.plugin.filter.Filters as Filters
+import ij.plugin.filter.BackgroundSubtracter as BackgroundSubtracter
+import ij.plugin.filter.EDM as EDM
+import ij.plugin.filter.Shadows as Shadows
+import ij.plugin.filter.UnsharpMask as UnsharpMask
+import ij.plugin.filter.MaximumFinder as MaximumFinder
+import ij.plugin.filter.RankFilters as RankFilters
+import ij.plugin.filter.Convolver as Convolver
+import ij.plugin.filter.ParticleAnalyzer as ParticleAnalyzer
+
+import ij.plugin.ContrastEnhancer as ContrastEnhancer
+import ij.plugin.Thresholder as Thresholder
+import ij.plugin.ImageCalculator as ImageCalculator
+import ij.plugin.FFT as FFT
+import ij.plugin.Concatenator as Concatenator
+
+#ImageJ customizations (these replace the stock ij.plugin.filter versions)
+import ch.psi.pshell.imaging.ij.FFTMath as FFTMath
+import ch.psi.pshell.imaging.ij.FFTFilter as FFTFilter
+import ch.psi.pshell.imaging.ij.Binary as Binary
+import ch.psi.pshell.imaging.ij.Slicer as Slicer
+
+
+#This eliminates the error messages due to the bug on ij.gui.ImageWindow row 555 (ij is null)
+if not "_image_j" in globals().keys():
+    _image_j = ImageJ(None, ImageJ.NO_SHOW)
+
+###################################################################################################
+#Image creation, copying & saving
+###################################################################################################
+def load_image(image, title="img"):
+    """
+    image: file name or BufferedImage
+    """
+    if isinstance(image, str):
+        file = image
+        try:
+            file = get_context().setup.expandPath(image)
+        except:
+            pass
+        try:
+            image = Utils.newImage(file)
+        except:
+            #try loading from assembly
+            image = get_context().setup.getAssemblyImage(image)
+    return ImagePlus(title, image)
+
+def load_array(array, width=None, height=None, title="img"):
+    """
+    array: 1D array if width and height are given, otherwise a 2D array to be flattened.
+    """
+    #2D
+    if (width is None) and (height is None):
+        if   array.typecode == '[B': proc = ByteProcessor(len(array[0]), len(array), Convert.flatten(array))
+        elif array.typecode == '[S': proc = ShortProcessor(len(array[0]), len(array), Convert.flatten(array), None)
+        elif array.typecode in ['[I', '[F', '[D']: proc = FloatProcessor(len(array[0]), len(array), Convert.flatten(array))
+        else: raise Exception("Invalid array type")
+    #1D
+    else:
+        if (len(array) > width * height):
+            array = array[:(width * height)]
+        if   array.typecode == 'b': proc = ByteProcessor(width, height, array)
+        elif array.typecode == 'h': proc = ShortProcessor(width, height, array, None)
+        elif array.typecode in ['i', 'f', 'd']: proc = FloatProcessor(width, height, array)
+        else: raise Exception("Invalid array type")
+    return ImagePlus(title, proc)
+
+def save_image(ip, path=None, format=None):
+    """
+    Saves an image or stack.
+    If the parameters are omitted, saves the image again in the same location, with the same format.
+    """
+    fs = FileSaver(ip)
+    if path is None:
+        fs.save()
+    else:
+        try:
+            path = get_context().setup.expandPath(path)
+        except:
+            pass
+        if   format == "bmp":  fs.saveAsBmp(path)
+        elif format == "fits": fs.saveAsFits(path)
+        elif format == "gif":  fs.saveAsGif(path)
+        elif format == "jpeg": fs.saveAsJpeg(path)
+        elif format == "lut":  fs.saveAsLut(path)
+        elif format == "pgm":  fs.saveAsPgm(path)
+        elif format == "png":  fs.saveAsPng(path)
+        elif format == "raw" and ip.getImageStackSize() > 1: fs.saveAsRawStack(path)
+        elif format == "raw":  fs.saveAsRaw(path)
+        elif format == "txt":  fs.saveAsText(path)
+        elif format == "tiff" and ip.getImageStackSize() > 1: fs.saveAsTiffStack(path)
+        elif format == "tiff": fs.saveAsTiff(path)
+        elif format == "zip":  fs.saveAsZip(path)
+
+
+def open_image(path):
+    """
+    Open a file using ij.io.Opener
+    """
+    try:
+        path = get_context().setup.expandPath(path)
+    except:
+        pass
+    opener = Opener()
+    return opener.openImage(path)
+
+def new_image(width, height, image_type="byte", title="img", fill_color=None):
+    """
+    image_type = "byte", "short", "color" or "float"
+    """
+    if   image_type == "byte":  p = ByteProcessor(width, height)
+    elif image_type == "short": p = ShortProcessor(width, height)
+    elif image_type == "color": p = ColorProcessor(width, height)
+    elif image_type == "float": p = FloatProcessor(width, height)
+    else: raise Exception("Invalid image type " + str(image_type))
+    ret = ImagePlus(title, p)
+    if fill_color is not None:
+        p.setColor(fill_color)
+        p.resetRoi()
+        p.fill()
+    return ret
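+
+# Hedged I/O sketch (file paths are illustrative):
+#
+#   img = load_image("images/sample.png")         # or pass a BufferedImage
+#   blank = new_image(640, 480, "short", "dark")
+#   save_image(img, "images/sample_copy", "tiff")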
+
+def get_ip_array(ip):
+    """
+    Returns the data array of an ImagePlus
+    """
+    if type(ip.getProcessor()) == FloatProcessor:
+        return ip.getProcessor().getFloatArray()
+    else:
+        return ip.getProcessor().getIntArray()
+
+
+def sub_image(ip, x, y, width, height):
+    """
+    Returns a new ImagePlus
+    """
+    ip.setRoi(x, y, width, height)
+    p = ip.getProcessor().crop()
+    return ImagePlus(ip.getTitle() + " subimage", p)
+
+def copy_image(ip):
+    return ip.duplicate()
+
+def copy_image_to(ip_source, ip_dest, x, y):
+    ip_source.deleteRoi()
+    ip_source.copy()
+    ip_dest.setRoi(x, y, ip_source.getWidth(), ip_source.getHeight())
+    ip_dest.paste()
+    ip_dest.changes = False
+    ip_dest.deleteRoi()
+
+def pad_image(ip, left=0, right=0, top=0, bottom=0, fill_color=None):
+    p = ip.getProcessor()
+    width = p.getWidth() + left + right
+    height = p.getHeight() + top + bottom
+    image_type = get_image_type(ip)
+    ret = new_image(width, height, image_type, ip.getTitle() + " padded", fill_color)
+    ip.deleteRoi()
+    ip.copy()
+    ret.setRoi(left, top, p.getWidth(), p.getHeight())
+    ret.paste()
+    ret.changes = False
+    ret.deleteRoi()
+    return ret
+
+def get_image_type(ip):
+    """
+    Returns: "byte", "short", "color" or "float"
+    """
+    p = ip.getProcessor()
+    if   type(p) == ShortProcessor: return "short"
+    elif type(p) == ColorProcessor: return "color"
+    elif type(p) == FloatProcessor: return "float"
+    return "byte"
+
+###################################################################################################
+#Image type conversion
+###################################################################################################
+def grayscale(ip, do_scaling=None, in_place=True):
+    ip = ip if in_place else ip.duplicate()
+    ic = ImageConverter(ip)
+    if do_scaling is not None:
+        ic.setDoScaling(do_scaling)
+    ic.convertToGray8()
+    return ip
+
+def get_channel(ip, channel):
+    """
+    Return a channel from a color image as a new ImagePlus.
+    channel: "red", "green", "blue", "alpha" or "brightness"
+    """
+    proc = ip.getProcessor()
+    if   channel == "red":        ret = proc.getChannel(1, None)
+    elif channel == "green":      ret = proc.getChannel(2, None)
+    elif channel == "blue":       ret = proc.getChannel(3, None)
+    elif channel == "alpha":      ret = proc.getChannel(4, None)
+    elif channel == "brightness": ret = proc.getBrightness()
+    else: raise Exception("Invalid channel " + str(channel))
+    return ImagePlus(ip.getTitle() + " channel: " + channel, ret)
+
+###################################################################################################
+#Thresholder
+###################################################################################################
+def threshold(ip, min_threshold, max_threshold, in_place=True):
+    ip = ip if in_place else ip.duplicate()
+    ip.getProcessor().setThreshold(min_threshold, max_threshold, ImageProcessor.NO_LUT_UPDATE)
+    WindowManager.setTempCurrentImage(ip)
+    Thresholder().run("mask")
+    return ip
+
+def auto_threshold(ip, dark_background=False, method=AutoThresholder.getMethods()[0], in_place=True):
+    ip = ip if in_place else ip.duplicate()
+    ip.getProcessor().setAutoThreshold(method, dark_background, ImageProcessor.NO_LUT_UPDATE)
+    WindowManager.setTempCurrentImage(ip)
+    Thresholder().run("mask")
+    return ip
+
+###################################################################################################
+#Binary functions
+###################################################################################################
+def binary_op(ip, op, dark_background=False, iterations=1, count=1, in_place=True):
+    """
+    op = "erode", "dilate", "open", "close", "outline", "fill holes" or "skeletonize"
+    """
+    ip = ip if in_place else ip.duplicate()
+    binary = Binary(count, iterations, dark_background)
+    binary.setup(op, ip)
+    binary.run(ip.getProcessor())
+    return ip
+
+def binary_erode(ip, dark_background=False, iterations=1, count=1, in_place=True):
+    return binary_op(ip, "erode", dark_background, iterations, count, in_place)
+
+def binary_dilate(ip, dark_background=False, iterations=1, count=1, in_place=True):
+    return binary_op(ip, "dilate", dark_background, iterations, count, in_place)
+
+def binary_open(ip, dark_background=False, iterations=1, count=1, in_place=True):
+    return binary_op(ip, "open", dark_background, iterations, count, in_place)
+
+def binary_close(ip, dark_background=False, iterations=1, count=1, in_place=True):
+    return binary_op(ip, "close", dark_background, iterations, count, in_place)
+
+def binary_outline(ip, dark_background=False, in_place=True):
+    return binary_op(ip, "outline", dark_background, in_place=in_place)
+
+def binary_fill_holes(ip, dark_background=False, in_place=True):
+    return binary_op(ip, "fill holes", dark_background, in_place=in_place)
+
+def binary_skeletonize(ip, dark_background=False, in_place=True):
+    return binary_op(ip, "skeletonize", dark_background, in_place=in_place)
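+
+# Hedged thresholding/morphology sketch (parameter values are illustrative):
+#
+#   mask = auto_threshold(img, dark_background=True, in_place=False)
+#   binary_open(mask)          # clean up the mask in place
+#   binary_fill_holes(mask)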
+
+def analyse_particles(ip, min_size, max_size, fill_holes=True, exclude_edges=True, extra_measurements=0, \
+                      print_table=False, output_image="outlines", minCirc=0.0, maxCirc=1.0):
+    """
+    Returns: tuple (ResultsTable results_table, ImagePlus output_image)
+    output_image = "outlines", "overlay_outlines", "masks", "overlay_masks", "roi_masks" or None
+    extra_measurements = mask with Measurements.CENTROID, PERIMETER, RECT, MIN_MAX, ELLIPSE, CIRCULARITY, AREA_FRACTION, INTEGRATED_DENSITY, INVERT_Y, FERET, KURTOSIS, MEDIAN, MODE, SKEWNESS, STD_DEV
+    Measurements is a mask of flags: https://imagej.nih.gov/ij/developer/api/ij/measure/Measurements.html.
+    The returned ResultsTable holds public fields: https://imagej.nih.gov/ij/developer/api/ij/measure/ResultsTable.html
+    """
+    rt = ResultsTable()
+    show_summary = False
+    options = ParticleAnalyzer.SHOW_RESULTS | ParticleAnalyzer.CLEAR_WORKSHEET
+    #Other available flags:
+    #  ParticleAnalyzer.RECORD_STARTS, ParticleAnalyzer.ADD_TO_MANAGER,
+    #  ParticleAnalyzer.FOUR_CONNECTED, ParticleAnalyzer.IN_SITU_SHOW,
+    #  ParticleAnalyzer.SHOW_NONE
+    if show_summary: options = options | ParticleAnalyzer.DISPLAY_SUMMARY
+    if   output_image == "outlines":         options = options | ParticleAnalyzer.SHOW_OUTLINES
+    elif output_image == "overlay_outlines": options = options | ParticleAnalyzer.SHOW_OVERLAY_OUTLINES
+    elif output_image == "masks":            options = options | ParticleAnalyzer.SHOW_MASKS
+    elif output_image == "overlay_masks":    options = options | ParticleAnalyzer.SHOW_OVERLAY_MASKS
+    elif output_image == "roi_masks":        options = options | ParticleAnalyzer.SHOW_ROI_MASKS
+    if exclude_edges: options = options | ParticleAnalyzer.EXCLUDE_EDGE_PARTICLES
+    if fill_holes:    options = options | ParticleAnalyzer.INCLUDE_HOLES
+    #Include the caller's extra_measurements flags (the parameter was unused before)
+    measurements = Measurements.AREA | Measurements.MEAN | Measurements.CENTER_OF_MASS | Measurements.RECT | extra_measurements
+    pa = ParticleAnalyzer(options, measurements, rt, min_size, max_size, minCirc, maxCirc)
+    pa.setHideOutputImage(True)
+    pa.setResultsTable(rt)
+    if pa.analyze(ip):
+        if print_table:
+            print rt.getColumnHeadings()
+            for row in range(rt.counter):
+                print rt.getRowAsString(row)
+    return (rt, pa.getOutputImage())
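+
+# Hedged particle-analysis sketch (sizes are in pixels, values illustrative):
+#
+#   mask = auto_threshold(img, dark_background=True, in_place=False)
+#   table, outlines = analyse_particles(mask, 10, 10000, print_table=True)
+#   print table.counter, "particles found"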
+
+###################################################################################################
+#Image operators
+###################################################################################################
+def op_image(ip1, ip2, op, float_result=False, in_place=True):
+    """
+    op = "add", "subtract", "multiply", "divide", "and", "or", "xor", "min", "max", "average", "difference" or "copy"
+    """
+    ip1 = ip1 if in_place else ip1.duplicate()
+    ic = ImageCalculator()
+    pars = op
+    if float_result:
+        #The " float" suffix must go into the string actually passed to the calculator
+        pars = pars + " float"
+    ic.run(pars, ip1, ip2)
+    return ip1
+
+def op_const(ip, op, val, in_place=True):
+    """
+    op = "add", "subtract", "multiply", "divide", "and", "or", "xor", "min", "max", "gamma", "set", "log", "exp", "sqr", "sqrt" or "abs"
+    """
+    ip = ip if in_place else ip.duplicate()
+    pr = ip.getProcessor()
+    if   op == 'add':      pr.add(val)
+    elif op == 'subtract': pr.subtract(val)
+    elif op == 'multiply': pr.multiply(val)
+    elif op == 'divide':
+        if val == 0: raise Exception("Division by zero")
+        pr.multiply(1.0 / val)
+    #'and' and 'or' are Python keywords: the Java methods must be fetched with getattr
+    elif op == 'and': getattr(pr, 'and')(val)
+    elif op == 'or':  getattr(pr, 'or')(val)
+    elif op == 'xor': pr.xor(val)
+    elif op == 'min': pr.min(val); pr.resetMinAndMax()
+    elif op == 'max': pr.max(val); pr.resetMinAndMax()
+    elif op == 'gamma' and 0.05 < val < 5.0: pr.gamma(val)
+    elif op == 'set': pr.set(val)
+    elif op == 'log': pr.log()
+    elif op == 'exp': pr.exp()
+    elif op == 'sqr': pr.sqr()
+    elif op == 'sqrt': pr.sqrt()
+    elif op == 'abs': pr.abs(); pr.resetMinAndMax()
+    else: raise Exception("Invalid operation " + str(op))
+    return ip
+
+def op_fft(ip1, ip2, op, do_inverse=True):
+    """
+    Images must have the same size, with power-of-2 height and width.
+    op = "correlate" (complex conjugate multiply), "convolve" (Fourier domain multiply), "deconvolve" (Fourier domain divide)
+    """
+    if   op == "correlate":  op_index = 0
+    elif op == "convolve":   op_index = 1
+    elif op == "deconvolve": op_index = 2
+    else: raise Exception("Invalid operation " + str(op))
+    return FFTMath().doMath(ip1, ip2, op_index, do_inverse)
+
+def op_rank(ip, op, kernel_radius=1, dark_outliers=False, threshold=50, in_place=True):
+    """
+    op = "mean", "min", "max", "variance", "median", "close_maxima", "open_maxima", "remove_outliers", "remove_nan" or "despeckle"
+    """
+    if   op == "mean":     filter_type = RankFilters.MEAN
+    elif op == "min":      filter_type = RankFilters.MIN
+    elif op == "max":      filter_type = RankFilters.MAX
+    elif op == "variance": filter_type = RankFilters.VARIANCE
+    elif op == "median":   filter_type = RankFilters.MEDIAN
+    elif op == "close_maxima":    filter_type = RankFilters.CLOSE
+    elif op == "open_maxima":     filter_type = RankFilters.OPEN
+    elif op == "remove_outliers": filter_type = RankFilters.OUTLIERS
+    elif op == "remove_nan":      filter_type = RankFilters.REMOVE_NAN
+    elif op == "despeckle":       filter_type, kernel_radius = RankFilters.MEDIAN, 1
+    else: raise Exception("Invalid operation " + str(op))
+    ip = ip if in_place else ip.duplicate()
+    RankFilters().rank(ip.getProcessor(), kernel_radius, filter_type, RankFilters.DARK_OUTLIERS if dark_outliers else RankFilters.BRIGHT_OUTLIERS, threshold)
+    return ip
+
+def op_edm(ip, op="edm", dark_background=False, in_place=True):
+    """
+    Euclidean distance map & derived operations
+    op = "edm", "watershed", "points" or "voronoi"
+    """
+    ip = ip if in_place else ip.duplicate()
+    pr = ip.getProcessor()
+    edm = EDM()
+    Prefs.blackBackground = dark_background
+    if op == "edm":
+        #pr.setPixels(0, edm.makeFloatEDM(pr, 0, False))
+        #pr.resetMinAndMax()
+        if dark_background:
+            pr.invert()
+        edm.toEDM(pr)
+    else:
+        edm.setup(op, ip)
+        edm.run(pr)
+    return ip
+
+def watershed(ip, dark_background=False, in_place=True):
+    return op_edm(ip, "watershed", dark_background, in_place)
+
+def ultimate_points(ip, dark_background=False, in_place=True):
+    return op_edm(ip, "points", dark_background, in_place)
+
+def voronoi(ip, dark_background=False, in_place=True):
+    return op_edm(ip, "voronoi", dark_background, in_place)
+
+def edm(ip, dark_background=False, in_place=True):
+    return op_edm(ip, "edm", dark_background, in_place)
+
+def op_filter(ip, op, in_place=True):
+    """
+    This is redundant, as it just calls the processor methods.
+    op = "invert", "smooth", "sharpen", "edge" or "add"
+    """
+    ip = ip if in_place else ip.duplicate()
+    f = Filters()
+    f.setup(op, ip)
+    f.run(ip.getProcessor())
+    return ip
+
+###################################################################################################
+#Other operations
+###################################################################################################
+def gaussian_blur(ip, sigma_x=3.0, sigma_y=3.0, accuracy=0.01, in_place=True):
+    ip = ip if in_place else ip.duplicate()
+    GaussianBlur().blurGaussian(ip.getProcessor(), sigma_x, sigma_y, accuracy)
+    return ip
+
+def find_maxima(ip, tolerance=25, threshold=ImageProcessor.NO_THRESHOLD, output_type=MaximumFinder.IN_TOLERANCE, exclude_on_edges=False, is_edm=False):
+    """
+    Returns a new ImagePlus.
+    tolerance: maxima are accepted only if protruding more than this value from the ridge to a higher maximum
+    threshold: minimum height of a maximum (uncalibrated)
+    output_type = SINGLE_POINTS, IN_TOLERANCE or SEGMENTED. No output image is created for output types POINT_SELECTION, LIST and COUNT.
+    """
+    byte_processor = MaximumFinder().findMaxima(ip.getProcessor(), tolerance, threshold, output_type, exclude_on_edges, is_edm)
+    return ImagePlus(ip.getTitle() + " maxima", byte_processor)
+
+
+def get_maxima_points(ip, tolerance=25, exclude_on_edges=False):
+    polygon = MaximumFinder().getMaxima(ip.getProcessor(), tolerance, exclude_on_edges)
+    return (polygon.xpoints, polygon.ypoints)
+
+def enhance_contrast(ip, equalize_histo=True, saturated_pixels=0.5, normalize=False, stack_histo=False, in_place=True):
+    ip = ip if in_place else ip.duplicate()
+    ce = ContrastEnhancer()
+    if equalize_histo:
+        ce.equalize(ip.getProcessor())
+    else:
+        ce.stretchHistogram(ip.getProcessor(), saturated_pixels)
+    if normalize:
+        ip.getProcessor().setMinAndMax(0, 1.0 if (ip.getProcessor().getBitDepth() == 32) else ip.getProcessor().maxValue())
+    return ip
+
+def shadows(ip, op, in_place=True):
+    """
+    op = "north", "northeast", "east", "southeast", "south", "southwest", "west" or "northwest"
+    """
+    ip = ip if in_place else ip.duplicate()
+    shadows = Shadows()
+    shadows.setup(op, ip)
+    shadows.run(ip.getProcessor())
+    return ip
+
+def unsharp_mask(ip, sigma, weight, in_place=True):
+    """
+    Float processor only
+    """
+    ip = ip if in_place else ip.duplicate()
+    ip.getProcessor().snapshot()
+    unsharp = UnsharpMask()
+    unsharp.setup(" ", ip)
+    unsharp.sharpenFloat(ip.getProcessor(), sigma, weight)
+    return ip
+
+def subtract_background(ip, radius=50, create_background=False, dark_background=False, use_paraboloid=True, do_presmooth=True, correctCorners=True, rgb_brightness=False, in_place=True):
+    ip = ip if in_place else ip.duplicate()
+    if rgb_brightness:
+        BackgroundSubtracter().rollingBallBrightnessBackground(ip.getProcessor(), radius, create_background, not dark_background, use_paraboloid, do_presmooth, correctCorners)
+    else:
+        BackgroundSubtracter().rollingBallBackground(ip.getProcessor(), radius, create_background, not dark_background, use_paraboloid, do_presmooth, correctCorners)
+    return ip
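+
+# Hedged maxima/background sketch (tolerance and radius values are illustrative):
+#
+#   subtract_background(img, radius=30)
+#   peaks = find_maxima(img, tolerance=40, output_type=MaximumFinder.SINGLE_POINTS)
+#   xs, ys = get_maxima_points(img, tolerance=40)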
+
+###################################################################################################
+#FFT
+###################################################################################################
+def image_fft(ip, show=True):
+    WindowManager.setTempCurrentImage(ip)
+    fft = FFT()
+    fft.run("fft")
+    #TODO: how to avoid the window being created?
+    ret = WindowManager.getCurrentImage()
+    if not show:
+        ret.hide()
+    return ret
+
+
+def image_ffti(ip, show=True):
+    WindowManager.setTempCurrentImage(ip)
+    fft = FFT()
+    fft.run("inverse")
+    #TODO: how to avoid the window being created?
+    ret = WindowManager.getCurrentImage()
+    if not show:
+        ret.hide()
+    return ret
+
+def bandpass_filter(ip, small_dia_px, large_dia_px, suppress_stripes=0, stripes_tolerance_direction=5.0, autoscale_after_filtering=False, saturate_if_autoscale=False, display_filter=False, in_place=True):
+    """
+    suppress_stripes = 0 for none, 1 for horizontal, 2 for vertical
+    """
+    ip = ip if in_place else ip.duplicate()
+    filter = FFTFilter()
+    FFTFilter.filterLargeDia = large_dia_px
+    FFTFilter.filterSmallDia = small_dia_px
+    FFTFilter.choiceIndex = suppress_stripes
+    FFTFilter.toleranceDia = stripes_tolerance_direction
+    FFTFilter.doScalingDia = autoscale_after_filtering
+    FFTFilter.saturateDia = saturate_if_autoscale
+    FFTFilter.displayFilter = display_filter
+    filter.setup(None, ip)
+    filter.run(ip.getProcessor())
+    return ip
+
+###################################################################################################
+#Convolution
+###################################################################################################
+
+KERNEL_BLUR = [[0.1111, 0.1111, 0.1111], [0.1111, 0.1111, 0.1111], [0.1111, 0.1111, 0.1111]]
+KERNEL_SHARPEN = [[0.0, -0.75, 0.0], [-0.75, 4.0, -0.75], [0.0, -0.75, 0.0]]
+KERNEL_SHARPEN_2 = [[-1.0, -1.0, -1.0], [-1.0, 9.0, -1.0], [-1.0, -1.0, -1.0]]
+KERNEL_LIGHT = [[0.1, 0.1, 0.1], [0.1, 1.0, 0.1], [0.1, 0.1, 0.1]]
+KERNEL_DARK = [[0.01, 0.01, 0.01], [0.01, 0.5, 0.01], [0.01, 0.01, 0.01]]
+KERNEL_EDGE_DETECT = [[0.0, -0.75, 0.0], [-0.75, 3.0, -0.75], [0.0, -0.75, 0.0]]
+KERNEL_EDGE_DETECT_2 = [[-0.5, -0.5, -0.5], [-0.5, 4.0, -0.5], [-0.5, -0.5, -0.5]]
+KERNEL_DIFFERENTIAL_EDGE_DETECT = [[-1.0, 0.0, 1.0], [0.0, 0.0, 0.0], [1.0, 0.0, -1.0]]
+KERNEL_PREWITT = [[-2.0, -1.0, 0.0], [-1.0, 0.0, 1.0], [0.0, 1.0, 2.0]]
+KERNEL_SOBEL = [[2.0, 2.0, 0.0], [2.0, 0.0, -2.0], [0.0, -2.0, -2.0]]
+
+
+def convolve(ip, kernel, in_place=True):
+    """
+    kernel: list of lists (rows)
+    """
+    ip = ip if in_place else ip.duplicate()
+    #The outer list holds the rows: its length is the kernel height, not the width
+    kernel_height = len(kernel)
+    kernel_width = len(kernel[0])
+    kernel = [item for row in kernel for item in row]
+    #Convolver().convolve(ip.getProcessor(), kernel, kernel_width, kernel_height)
+    ip.getProcessor().convolve(kernel, kernel_width, kernel_height)
+    return ip
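+
+# Hedged convolution sketch, using the kernels defined above:
+#
+#   edges_img = convolve(img, KERNEL_SOBEL, in_place=False)
+#   smooth_img = convolve(img, KERNEL_BLUR, in_place=False)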
+
+###################################################################################################
+#Shortcuts to ImageProcessor methods
+###################################################################################################
+def invert(ip, in_place=True):
+    ip = ip if in_place else ip.duplicate()
+    ip.getProcessor().invert()
+    return ip
+
+def smooth(ip, in_place=True):
+    ip = ip if in_place else ip.duplicate()
+    ip.getProcessor().smooth()
+    return ip
+
+def sharpen(ip, in_place=True):
+    ip = ip if in_place else ip.duplicate()
+    ip.getProcessor().sharpen()
+    return ip
+
+def edges(ip, in_place=True):  #Sobel
+    ip = ip if in_place else ip.duplicate()
+    ip.getProcessor().findEdges()
+    return ip
+
+def noise(ip, sigma=25.0, in_place=True):
+    ip = ip if in_place else ip.duplicate()
+    ip.getProcessor().noise(sigma)
+    return ip
+
+def remap(ip, min=None, max=None, in_place=True):
+    ip = ip if in_place else ip.duplicate()
+    if min is None or max is None:
+        stats = get_statistics(ip, Measurements.MIN_MAX)
+        if min is None: min = stats.min
+        if max is None: max = stats.max
+    ip.getProcessor().setMinAndMax(min, max)
+    return ip
+
+def set_lut(ip, r, g, b):
+    """
+    r, g and b are lists of 256 integers
+    """
+    #Convert 0..255 values to the signed bytes expected by jarray
+    r = [x if x < 128 else x - 256 for x in r]
+    g = [x if x < 128 else x - 256 for x in g]
+    b = [x if x < 128 else x - 256 for x in b]
+    ip.setLut(LUT(jarray.array(r, 'b'), jarray.array(g, 'b'), jarray.array(b, 'b')))
+
+def resize(ip, width, height):
+    """
+    Returns a new ImagePlus
+    """
+    p = ip.getProcessor().resize(width, height)
+    return ImagePlus(ip.getTitle() + " resized", p)
+
+def binning(ip, factor):
+    p = ip.getProcessor().bin(factor)
+    return ImagePlus(ip.getTitle() + " binned", p)
+
+def get_histogram(ip, hist_min=0, hist_max=0, hist_bins=256, roi=None):
+    """
+    hist_min, hist_max and hist_bins are used only for float images (otherwise fixed to 0, 255, 256)
+    roi is a list [x, y, w, h]
+    """
+    if roi is None: ip.deleteRoi()
+    else: ip.setRoi(roi[0], roi[1], roi[2], roi[3])
+    image_statistics = ip.getStatistics(0, hist_bins, hist_min, hist_max)
+    return image_statistics.getHistogram()
+
+
+def get_array(ip):
+    return ip.getProcessor().getIntArray()
+
+def get_line(ip, x1, y1, x2, y2):
+    return ip.getProcessor().getLine(x1, y1, x2, y2)
+
+def get_pixel_range(ip):
+    return (ip.getProcessor().getMin(), ip.getProcessor().getMax())
+
+def get_num_channels(ip):
+    return ip.getProcessor().getNChannels()
+
+def is_binary(ip):
+    return ip.getProcessor().isBinary()
+
+def get_pixel(ip, x, y):
+    return ip.getProcessor().getPixel(x, y)
+
+def get_pixel_array(ip, x, y):
+    a = [0] * get_num_channels(ip)
+    return ip.getProcessor().getPixel(x, y, a)
+
+def get_pixels(ip):
+    return ip.getProcessor().getPixels()
+
+def get_width(ip):
+    return ip.getProcessor().getWidth()
+
+def get_height(ip):
+    return ip.getProcessor().getHeight()
+
+def get_row(ip, y):
+    a = [0] * get_width(ip)
+    array = jarray.array(a, 'i')
+    ip.getProcessor().getRow(0, y, array, get_width(ip))
+    return array
+
+def get_col(ip, x):
+    a = [0] * get_height(ip)
+    array = jarray.array(a, 'i')
+    ip.getProcessor().getColumn(x, 0, array, get_height(ip))
+    return array
+    Statistics object holds public fields: https://imagej.nih.gov/ij/developer/api/ij/process/ImageStatistics.html
+    """
+    if measurements is None:
+        return ip.getStatistics()
+    else:
+        return ip.getStatistics(measurements)
+
+###################################################################################################
+#Image stack functions
+###################################################################################################
+def create_stack(ip_list, keep=True, title = None):
+    stack = Concatenator().concatenate(ip_list, keep)
+    if title is not None:
+        stack.setTitle(title)
+    return stack
+
+def reslice(stack, start_at = "Top", vertically = True, flip = True, output_pixel_spacing=1.0, avoid_interpolation = True, title = None):
+    ss = Slicer()
+    ss.rotate = vertically
+    ss.startAt = start_at
+    ss.flip = flip
+    ss.nointerpolate = avoid_interpolation
+    ss.outputZSpacing = output_pixel_spacing
+    stack = ss.reslice(stack)
+    if title is not None:
+        stack.setTitle(title)
+    return stack
+
+
+
+###############################################################################
+# ImagePlus list operations
+###############################################################################
+
+def integrate_ips(ips, as_float=True):
+    """
+    Integrate a list of ImagePlus of the same size.
+    """
+    aux = None
+    for i in range(len(ips)):
+        if i == 0:
+            img_type = "float" if as_float else "short"
+            aux = new_image(ips[i].width, ips[i].height, image_type=img_type, title = "sum", fill_color = None)
+        op_image(aux, ips[i], "add", float_result=as_float, in_place=True)
+    return aux
+
+def average_ips(ips, roi=None, as_float=True):
+    """
+    Average a list of ImagePlus of the same size.
+    """
+    aux = integrate_ips(ips, as_float)
+    op_const(aux, "divide", float(len(ips)), in_place=True)
+    return aux
\ No newline at end of file
diff --git a/script/___Lib/jeputils.py b/script/___Lib/jeputils.py
new file mode 100644
index 0000000..eee9d31
--- /dev/null
+++ b/script/___Lib/jeputils.py
@@ -0,0 +1,144 @@
+###################################################################################################
+# Facade to JEP: Embedded Python
+###################################################################################################
+
+#Matplotlib won't work out of the box because its default backend (Qt) uses signals, which only work in
+#the main thread. Ideally a fix would mark the running thread as the main one.
+#As a workaround, one can use the Tk backend:
+#
+#import matplotlib
+#matplotlib.use('TkAgg')
+
+
+#In principle it is enough to add the JEP jar and native library to the extensions folder.
+# +#Alternatively on Linux: +# Python 2: +# - Add /lib/python3.X/site-packages/jep to LD_LIBRARY_PATH +# - Add /lib/python3.X/site-packages/jep/jep-X.X.X.jar to the class path +# +#Python3: +# - Add JEP library folder to LD_LIBRARY_PATH +# - If using OpenJDK, add also python /lib folder to LD_LIBRARY_PATH +# - Set LD_PRELOAD=/lib/libpython3.5m.so + + +import sys +import os +import jep.Jep +import jep.NDArray +import java.lang.Thread +from startup import to_array, get_context + +__jep = {} + +def __get_jep(): + t = java.lang.Thread.currentThread() + if not t in __jep: + init_jep() + return __jep[t] + +def __close_jep(): + t = java.lang.Thread.currentThread() + if t in __jep: + __jep[t].close() + +def init_jep(): + #TODO: Should do it but generates errors + #__close_jep() + j = jep.Jep(False) + #Faster, but statements must be complete + j.setInteractive(False) + __jep[java.lang.Thread.currentThread()] = j + j.eval("import sys") + #sys.argv is not present in JEP and may be needed for certain modules (as Tkinter) + j.eval("sys.argv = ['PShell']"); + #Add standard script path to python path + j.eval("sys.path.append('" + get_context().setup.getScriptPath() + "')") + + #Redirect stdout + j.eval("class JepStdout:\n" + + " def write(self, str):\n" + + " self.str += str\n" + + " def clear(self):\n" + + " self.str = ''\n" + + " def flush(self):\n" + + " pass\n") + j.eval("sys.stdout=JepStdout()"); + j.eval("sys.stderr=JepStdout()"); + j.eval("sys.stdout.clear()") + j.eval("sys.stderr.clear()") + +def __print_stdout(): + j=__get_jep() + output = j.getValue("sys.stdout.str") + err = j.getValue("sys.stderr.str") + j.eval("sys.stdout.clear()") + j.eval("sys.stderr.clear()") + if (output is not None) and len(output)>0: + print output + if (err is not None) and len(err)>0: + print >> sys.stderr, err + +def run_jep(script_name, vars = {}): + global __jep + script = get_context().scriptManager.library.resolveFile(script_name) + if script is None : + script= os.path.abspath(script_name) + j=__get_jep() + + for v in vars: + j.set(v, vars[v]) + try: + j.runScript(script) + finally: + __print_stdout() + +def eval_jep(line): + j=__get_jep() + try: + j.eval(line) + finally: + __print_stdout() + +def set_jep(var, value): + j=__get_jep() + j.set(var, value) + +def get_jep(var): + j=__get_jep() + return j.getValue(var) + +def call_jep(module, function, args = [], reload=False): + j=__get_jep() + if "/" in module: + script = get_context().scriptManager.library.resolveFile(module) + if "\\" in script: + #Windows paths + module_path = script[0:script.rfind("\\")] + module = script[script.rfind("\\")+1:] + else: + #Linux paths + module_path = script[0:script.rfind("/")] + module = script[script.rfind("/")+1:] + eval_jep("import sys") + eval_jep("sys.path.append('" + module_path + "')") + if module.endswith(".py"): + module = module[0:-3] + + f = module+"_" + function+"_"+str(j.hashCode()) + try: + if reload: + eval_jep("import " + module) + eval_jep("reload(" + module+")") + eval_jep("from " + module + " import " + function + " as " + f) + ret = j.invoke(f, args) + finally: + __print_stdout() + return ret + +#Converts pythonlist or Java array to numpy array +def to_npa(data, dimensions = None, type = None): + + data = to_array(data,'d' if type is None else type) + return jep.NDArray(data, dimensions) \ No newline at end of file diff --git a/script/___Lib/mathutils.py b/script/___Lib/mathutils.py new file mode 100644 index 0000000..4bb570a --- /dev/null +++ b/script/___Lib/mathutils.py @@ -0,0 +1,655 @@ 
+################################################################################################### +# Facade to Apache Commons Math +################################################################################################### + +import sys +import math +import operator + +import java.util.List +import java.lang.reflect.Array +import java.lang.Class as Class +import jarray +import org.python.core.PyArray as PyArray +import ch.psi.utils.Convert as Convert + +import org.apache.commons.math3.util.FastMath as FastMath +import org.apache.commons.math3.util.Pair as Pair +import org.apache.commons.math3.complex.Complex as Complex + +import org.apache.commons.math3.analysis.DifferentiableUnivariateFunction as DifferentiableUnivariateFunction +import org.apache.commons.math3.analysis.function.Gaussian as Gaussian +import org.apache.commons.math3.analysis.function.HarmonicOscillator as HarmonicOscillator +import org.apache.commons.math3.analysis.differentiation.DerivativeStructure as DerivativeStructure +import org.apache.commons.math3.analysis.differentiation.FiniteDifferencesDifferentiator as FiniteDifferencesDifferentiator +import org.apache.commons.math3.analysis.integration.SimpsonIntegrator as SimpsonIntegrator +import org.apache.commons.math3.analysis.integration.TrapezoidIntegrator as TrapezoidIntegrator +import org.apache.commons.math3.analysis.integration.RombergIntegrator as RombergIntegrator +import org.apache.commons.math3.analysis.integration.MidPointIntegrator as MidPointIntegrator +import org.apache.commons.math3.analysis.polynomials.PolynomialFunction as PolynomialFunction +import org.apache.commons.math3.analysis.polynomials.PolynomialFunctionLagrangeForm as PolynomialFunctionLagrangeForm +import org.apache.commons.math3.analysis.solvers.LaguerreSolver as LaguerreSolver +import org.apache.commons.math3.analysis.UnivariateFunction as UnivariateFunction +import org.apache.commons.math3.analysis.interpolation.SplineInterpolator as SplineInterpolator +import org.apache.commons.math3.analysis.interpolation.LinearInterpolator as LinearInterpolator +import org.apache.commons.math3.analysis.interpolation.NevilleInterpolator as NevilleInterpolator +import org.apache.commons.math3.analysis.interpolation.LoessInterpolator as LoessInterpolator +import org.apache.commons.math3.analysis.interpolation.DividedDifferenceInterpolator as DividedDifferenceInterpolator +import org.apache.commons.math3.analysis.interpolation.AkimaSplineInterpolator as AkimaSplineInterpolator + +import org.apache.commons.math3.fitting.GaussianCurveFitter as GaussianCurveFitter +import org.apache.commons.math3.fitting.PolynomialCurveFitter as PolynomialCurveFitter +import org.apache.commons.math3.fitting.HarmonicCurveFitter as HarmonicCurveFitter +import org.apache.commons.math3.fitting.WeightedObservedPoint as WeightedObservedPoint +import org.apache.commons.math3.fitting.leastsquares.MultivariateJacobianFunction as MultivariateJacobianFunction +import org.apache.commons.math3.fitting.leastsquares.LeastSquaresBuilder as LeastSquaresBuilder +import org.apache.commons.math3.fitting.leastsquares.LevenbergMarquardtOptimizer as LevenbergMarquardtOptimizer +import org.apache.commons.math3.fitting.leastsquares.GaussNewtonOptimizer as GaussNewtonOptimizer + +import org.apache.commons.math3.stat.regression.SimpleRegression as SimpleRegression + +import org.apache.commons.math3.transform.FastFourierTransformer as FastFourierTransformer +import org.apache.commons.math3.transform.DftNormalization as DftNormalization +import 
org.apache.commons.math3.transform.TransformType as TransformType + +import org.apache.commons.math3.linear.ArrayRealVector as ArrayRealVector +import org.apache.commons.math3.linear.Array2DRowRealMatrix as Array2DRowRealMatrix +import org.apache.commons.math3.linear.MatrixUtils as MatrixUtils + + + +################################################################################################### +#Derivative and interpolation +################################################################################################### + +def get_values(f, xdata): + """Return list of values of a function + + Args: + f(UnivariateFunction): function + xdata(float array or list): Domain values + Returns: + List of doubles + + """ + v = [] + for x in xdata: + v.append(f.value(x)) + return v + +def interpolate(data, xdata = None, interpolation_type = "linear"): + """Interpolate data array or list to a UnivariateFunction + + Args: + data(float array or list): The values to interpolate + xdata(float array or list, optional): Domain values + interpolation_type(str , optional): "linear", "cubic", "akima", "neville", "loess", "newton" + Returns: + UnivariateDifferentiableFunction object + + """ + if xdata is None: + from startup import frange + xdata = frange(0, len(data), 1.0) + else: + #X must be ordered + xy = sorted(zip(xdata,data), key=operator.itemgetter(0)) + xdata, data = zip(*xy) + if len(data) != len(xdata) or len(data)<2: + raise Exception("Dimension mismatch") + + if interpolation_type == "cubic": + i = SplineInterpolator() + elif interpolation_type == "linear": + i = LinearInterpolator() + elif interpolation_type == "akima": + i = AkimaSplineInterpolator() + elif interpolation_type == "neville": + i = NevilleInterpolator() + elif interpolation_type == "loess": + i = LoessInterpolator() + elif interpolation_type == "newton": + i = DividedDifferenceInterpolator() + else: + raise Exception("Invalid interpolation type") + from startup import to_array + return i.interpolate(to_array(xdata,'d'), to_array(data,'d')) + +def deriv(f, xdata = None, interpolation_type = "linear"): + """Calculate derivative of UnivariateFunction, array or list. + + Args: + f(UnivariateFunction or array): The function object. If array it is interpolated. + xdata(float array or list, optional): Domain values to process. + interpolation_type(str , optional): "linear", "cubic", "akima", "neville", "loess", "newton" + Returns: + List with the derivative values for xdata + + """ + if not isinstance(f,UnivariateFunction): + if xdata is None: + from startup import frange + xdata = frange(0, len(f), 1.0) + f = interpolate(f, xdata, interpolation_type) + if xdata is None: + if isinstance(f,DifferentiableUnivariateFunction): + return f.derivative() + raise Exception("Domain range not defined") + d = [] + for x in xdata: + xds = DerivativeStructure(1, 2, 0, x) + yds = f.value(xds) + d.append( yds.getPartialDerivative(1)) + return d + +def integrate(f, range = None, xdata = None, interpolation_type = "linear", integrator_type = "simpson"): + """Integrate UnivariateFunction, array or list in an interval. + + Args: + f(UnivariateFunction or array): The function object. If array it is interpolated. + range(list, optional): integration range ([min, max]). + xdata(float array or list, optional): disregarded if f is UnivariateFunction. 
+        interpolation_type(str, optional): "linear", "cubic", "akima", "neville", "loess", "newton"
+        integrator_type(str, optional): "simpson", "trapezoid", "romberg" or "midpoint"
+    Returns:
+        Integrated value (float)
+
+    """
+    if not isinstance(f, UnivariateFunction):
+        from startup import frange
+        if xdata is None:
+            xdata = frange(0, len(f), 1.0)
+        if range is None:
+            range = xdata
+        f = interpolate(f, xdata, interpolation_type)
+    if range is None:
+        raise Exception("Domain range not defined")
+    if integrator_type == "simpson":
+        integrator = SimpsonIntegrator()
+    elif integrator_type == "trapezoid":
+        integrator = TrapezoidIntegrator()
+    elif integrator_type == "romberg":
+        integrator = RombergIntegrator()
+    elif integrator_type == "midpoint":
+        integrator = MidPointIntegrator()
+    else:
+        raise Exception("Invalid integrator type")
+    lower = min(range)
+    upper = max(range)
+    return integrator.integrate(MAX_EVALUATIONS, f, lower, upper)
+
+def trapz(y, xdata=None):
+    """Integrate an array or list using the composite trapezoidal rule.
+
+    Args:
+        y(array or list)
+        xdata(float array or list, optional)
+    """
+    return integrate(y, range = None, xdata = xdata, interpolation_type = "linear", integrator_type = "trapezoid")
+
+###################################################################################################
+#Fitting and peak search
+###################################################################################################
+
+try:
+    MAX_FLOAT = sys.float_info.max
+except:  # Python 2.5
+    MAX_FLOAT = 1.7976931348623157e+308
+
+MAX_ITERATIONS = 1000
+MAX_EVALUATIONS = 1000000
+
+def calculate_peaks(function, start_value, end_value = MAX_FLOAT, positive=True):
+    """Calculate peaks of a polynomial DifferentiableUnivariateFunction in a given range by finding the roots of its derivative.
+
+    Args:
+        function(DifferentiableUnivariateFunction): The function object (its derivative must expose polynomial coefficients).
+        start_value(float): start of range
+        end_value(float, optional): end of range
+        positive(boolean, optional): True for searching positive peaks, False for negative.
+    Returns:
+        List of peaks in the interval
+
+    """
+    derivative = function.derivative()
+    derivative2 = derivative.derivative()
+    ret = []
+    solver = LaguerreSolver()
+    for root in solver.solveAllComplex(derivative.coefficients, start_value):
+        r = root.real
+        if start_value < r < end_value:
+            #A maximum has a negative second derivative, a minimum a positive one
+            if (positive and (derivative2.value(r) < 0)) or ((not positive) and (derivative2.value(r) > 0)):
+                ret.append(r)
+    return ret
+
+
+def estimate_peak_indexes(data, xdata = None, threshold = None, min_peak_distance = None, positive = True):
+    """Estimation of peaks in an array by ordering local maxima according to given criteria.
+
+    Args:
+        data(float array or list)
+        xdata(float array or list, optional): if not None, must have the same length as data.
+        threshold(float, optional): if specified, peaks below this value are filtered out.
+        min_peak_distance(float, optional): if specified, defines the minimum distance between two peaks.
+            If xdata == None it represents index counts, otherwise it is in xdata units.
+        positive(boolean, optional): True for searching positive peaks, False for negative.
+    Returns:
+        List of peak indexes.
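+
+    Example (illustrative):
+        peaks = estimate_peak_indexes(y, xdata=x, threshold=5.0, min_peak_distance=1.0)
+        peak_positions = [x[i] for i in peaks]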
+ """ + peaks = [] + indexes = sorted(range(len(data)),key=lambda x:data[x]) + if positive: + indexes = reversed(indexes) + for index in indexes: + first = (index == 0) + last = (index == (len(data)-1)) + val=data[index] + prev = float('NaN') if first else data[index-1] + next = float('NaN') if last else data[index+1] + + if threshold is not None: + if (positive and (valthreshold)): + break + if ( positive and (first or val>prev ) and (last or val>=next ) ) or ( + (not positive) and (first or vallen(y))): + raise Exception("Invalid data for fit") + +def fit_gaussians(y, x, peak_indexes): + """Fits data on multiple gaussians on the given peak indexes. + + Args: + x(float array or list) + y(float array or list) + peak_indexes(list of int) + Returns: + List of tuples of gaussian parameters: (normalization, mean, sigma) + """ + _assert_valid_for_fit(y,x) + ret = [] + + minimum = min(y) + for peak in peak_indexes: + #Copy data + data = y[:] + #Remover data from other peaks + for p in peak_indexes: + limit = int(round((p+peak)/2)) + if (p > peak): + data[limit : len(y)] =[minimum] * (len(y)-limit) + elif (p < peak): + data[0:limit] = [minimum] *limit + #Build fit point list + values = create_fit_point_list(data, x) + maximum = max(data) + gaussian_fitter = GaussianCurveFitter.create().withStartPoint([(maximum-minimum)/2,x[peak],1.0]).withMaxIterations(MAX_ITERATIONS) + #Fit return parameters: (normalization, mean, sigma) + try: + ret.append(gaussian_fitter.fit(values).tolist()) + except: + ret.append(None) #Fitting error + return ret + + +def create_fit_point_list(y, x, weights = None): + values = [] + for i in sorted(range(len(x)),key=lambda v:x[v]): #Creating list ordered by x, needed for gauss fit + if weights is None: + values.append(WeightedObservedPoint(1.0, x[i], y[i])) + else: + values.append(WeightedObservedPoint(weights[i], x[i], y[i])) + return values + +def fit_polynomial(y, x, order, start_point = None, weights = None): + """Fits data into a polynomial. + + Args: + x(float array or list): observed points x + y(float array or list): observed points y + order(int): if start_point is provided order parameter is disregarded - set to len(start_point)-1. + start_point(optional tuple of float): initial parameters (a0, a1, a2, ...) + weights(optional float array or list): weight for each observed point + Returns: + Tuples of polynomial parameters: (a0, a1, a2, ...) + """ + _assert_valid_for_fit(y,x) + fit_point_list = create_fit_point_list(y, x, weights) + if start_point is None: + polynomial_fitter = PolynomialCurveFitter.create(order).withMaxIterations(MAX_ITERATIONS) + else: + polynomial_fitter = PolynomialCurveFitter.create(0).withStartPoint(start_point).withMaxIterations(MAX_ITERATIONS) + try: + return polynomial_fitter.fit(fit_point_list).tolist() + except: + raise Exception("Fitting failure") + +def fit_gaussian(y, x, start_point = None, weights = None): + """Fits data into a gaussian. + + Args: + x(float array or list): observed points x + y(float array or list): observed points y + start_point(optional tuple of float): initial parameters (normalization, mean, sigma) + If None, use a custom initial estimation. + Set to "default" to force Commons.Math the default (GaussianCurveFitter.ParameterGuesser). 
+ weights(optional float array or list): weight for each observed point + Returns: + Tuples of gaussian parameters: (normalization, mean, sigma) + """ + _assert_valid_for_fit(y,x) + fit_point_list = create_fit_point_list(y, x, weights) + + #If start point not provided, start on peak + if start_point is None: + maximum, minimum = max(y), min(y) + norm = maximum - minimum + mean = x[y.index(maximum)] + sigma = trapz([v-minimum for v in y], x) / (norm*math.sqrt(2*math.pi)) + start_point = (norm, mean, sigma) + elif start_point == "simple": + start_point = [(max(y)-min(y))/2, x[y.index(max(y))], 1.0] + elif start_point == "default": + start_point = GaussianCurveFitter.ParameterGuesser(fit_point_list).guess().tolist() + gaussian_fitter = GaussianCurveFitter.create().withStartPoint(start_point).withMaxIterations(MAX_ITERATIONS) + try: + return gaussian_fitter.fit(fit_point_list).tolist() # (normalization, mean, sigma) + except: + raise Exception("Fitting failure") + +def fit_harmonic(y, x, start_point = None, weights = None): + """Fits data into an harmonic. + + Args: + x(float array or list): observed points x + y(float array or list): observed points y + start_point(optional tuple of float): initial parameters (amplitude, angular_frequency, phase) + weights(optional float array or list): weight for each observed point + Returns: + Tuples of harmonic parameters: (amplitude, angular_frequency, phase) + """ + _assert_valid_for_fit(y,x) + fit_point_list = create_fit_point_list(y, x, weights) + if start_point is None: + harmonic_fitter = HarmonicCurveFitter.create().withMaxIterations(MAX_ITERATIONS) + else: + harmonic_fitter = HarmonicCurveFitter.create().withStartPoint(start_point).withMaxIterations(MAX_ITERATIONS) + try: + return harmonic_fitter.fit(fit_point_list).tolist() # (amplitude, angular_frequency, phase) + except: + raise Exception("Fitting failure") + + +def fit_gaussian_offset(y, x, start_point = None, weights = None): + """Fits data into a gaussian with offset (constant background). 
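+    e.g. off, norm, mean, sigma = fit_gaussian_offset(y, x)  (illustrative)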
+ f(x) = a + b * exp(-(pow((x - c), 2) / (2 * pow(d, 2)))) + + Args: + x(float array or list): observed points x + y(float array or list): observed points y + start_point(optional tuple of float): initial parameters (normalization, mean, sigma) + weights(optional float array or list): weight for each observed point + Returns: + Tuples of gaussian parameters: (offset, normalization, mean, sigma) + """ + + # For normalised gauss curve sigma=1/(amp*sqrt(2*pi)) + if start_point is None: + off = min(y) # good enough starting point for offset + com = x[y.index(max(y))] + amp = max(y) - off + sigma = trapz([v-off for v in y], x) / (amp*math.sqrt(2*math.pi)) + start_point = [off, amp, com , sigma] + + class Model(MultivariateJacobianFunction): + def value(self, variables): + value = ArrayRealVector(len(x)) + jacobian = Array2DRowRealMatrix(len(x), 4) + for i in range(len(x)): + (a,b,c,d) = (variables.getEntry(0), variables.getEntry(1), variables.getEntry(2), variables.getEntry(3)) + v = math.exp(-(math.pow((x[i] - c), 2) / (2 * math.pow(d, 2)))) + model = a + b * v + value.setEntry(i, model) + jacobian.setEntry(i, 0, 1) # derivative with respect to p0 = a + jacobian.setEntry(i, 1, v) # derivative with respect to p1 = b + v2 = b*v*((x[i] - c)/math.pow(d, 2)) + jacobian.setEntry(i, 2, v2) # derivative with respect to p2 = c + jacobian.setEntry(i, 3, v2*(x[i] - c)/d ) # derivative with respect to p3 = d + return Pair(value, jacobian) + + model = Model() + target = [v for v in y] #the target is to have all points at the positios + (parameters, residuals, rms, evals, iters) = optimize_least_squares(model, target, start_point, weights) + return parameters + + +def fit_gaussian_linear(y, x, start_point = None, weights = None): + """Fits data into a gaussian with linear background. 
+ f(x) = a * x + b + c * exp(-(pow((x - d), 2) / (2 * pow(e, 2)))) + + Args: + x(float array or list): observed points x + y(float array or list): observed points y + start_point(optional tuple of float): initial parameters (normalization, mean, sigma) + weights(optional float array or list): weight for each observed point + Returns: + Tuples of gaussian parameters: (a, b, normalization, mean, sigma) + """ + + # For normalised gauss curve sigma=1/(amp*sqrt(2*pi)) + if start_point is None: + off = min(y) # good enough starting point for offset + com = x[y.index(max(y))] + amp = max(y) - off + sigma = trapz([v-off for v in y], x) / (amp*math.sqrt(2*math.pi)) + start_point = [0, off, amp, com, sigma] + + class Model(MultivariateJacobianFunction): + def value(self, variables): + value = ArrayRealVector(len(x)) + jacobian = Array2DRowRealMatrix(len(x), 5) + for i in range(len(x)): + (a,b,c,d,e) = (variables.getEntry(0), variables.getEntry(1), variables.getEntry(2), variables.getEntry(3), variables.getEntry(4)) + v = math.exp(-(math.pow((x[i] - d), 2) / (2 * math.pow(e, 2)))) + model = a*x[i] + b + c * v + value.setEntry(i, model) + jacobian.setEntry(i, 0, x[i]) # derivative with respect to p0 = a + jacobian.setEntry(i, 1, 1) # derivative with respect to p1 = b + jacobian.setEntry(i, 2, v) # derivative with respect to p2 = c + v2 = c*v*((x[i] - d)/math.pow(e, 2)) + jacobian.setEntry(i, 3, v2) # derivative with respect to p3 = d + jacobian.setEntry(i, 4, v2*(x[i] - d)/e ) # derivative with respect to p4 = e + return Pair(value, jacobian) + + model = Model() + target = [v for v in y] #the target is to have all points at the positios + (parameters, residuals, rms, evals, iters) = optimize_least_squares(model, target, start_point, weights) + return parameters + +def fit_gaussian_exp_bkg(y, x, start_point = None, weights = None): + """Fits data into a gaussian with exponential background. 
+ f(x) = a * math.exp(-(x/b)) + c * exp(-(pow((x - d), 2) / (2 * pow(e, 2)))) + + Args: + x(float array or list): observed points x + y(float array or list): observed points y + start_point(optional tuple of float): initial parameters (normalization, mean, sigma) + weights(optional float array or list): weight for each observed point + Returns: + Tuples of gaussian parameters: (a,b , normalization, mean, sigma) + """ + + # For normalised gauss curve sigma=1/(amp*sqrt(2*pi)) + if start_point is None: + off = min(y) # good enough starting point for offset + com = x[len(x)/2] + #com = 11.9 + amp = max(y) - off + sigma = trapz([v-off for v in y], x) / (amp*math.sqrt(2*math.pi)) + start_point = [1, 1, amp, com, sigma] + + class Model(MultivariateJacobianFunction): + def value(self, variables): + value = ArrayRealVector(len(x)) + jacobian = Array2DRowRealMatrix(len(x), 5) + for i in range(len(x)): + (a,b,c,d,e) = (variables.getEntry(0), variables.getEntry(1), variables.getEntry(2), variables.getEntry(3), variables.getEntry(4)) + v = math.exp(-(math.pow((x[i] - d), 2) / (2 * math.pow(e, 2)))) + bkg=math.exp(-(x[i]/b)) + model = a*bkg + c * v + value.setEntry(i, model) + jacobian.setEntry(i, 0, bkg) # derivative with respect to p0 = a + jacobian.setEntry(i, 1, a*x[i]*bkg/math.pow(b, 2)) # derivative with respect to p1 = b + jacobian.setEntry(i, 2, v) # derivative with respect to p2 = c + v2 = c*v*((x[i] - d)/math.pow(e, 2)) + jacobian.setEntry(i, 3, v2) # derivative with respect to p3 = d + jacobian.setEntry(i, 4, v2*(x[i] - d)/e ) # derivative with respect to p4 = e + return Pair(value, jacobian) + + model = Model() + target = [v for v in y] #the target is to have all points at the positios + (parameters, residuals, rms, evals, iters) = optimize_least_squares(model, target, start_point, weights) + return parameters + + +################################################################################################### +#Least squares problem +################################################################################################### + +def optimize_least_squares(model, target, initial, weights): + """Fits a parametric model to a set of observed values by minimizing a cost function. 
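+    Used by the fit_gaussian_offset/_linear/_exp_bkg helpers above, which supply the model and its Jacobian.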
+
+    Args:
+        model(MultivariateJacobianFunction): the parametric model, returning values and Jacobian.
+        target(float array or list): observed data
+        initial(tuple of float): initial guess of the parameters
+        weights(float array or list, or RealMatrix): weight for each observed point
+    Returns:
+        Tuple: (parameters, residuals, rms, evals, iters)
+    """
+    if isinstance(weights, tuple) or isinstance(weights, list):
+        weights = MatrixUtils.createRealDiagonalMatrix(weights)
+    problem = LeastSquaresBuilder().start(initial).model(model).target(target).lazyEvaluation(False).maxEvaluations(MAX_EVALUATIONS).maxIterations(MAX_ITERATIONS).weight(weights).build()
+    optimizer = LevenbergMarquardtOptimizer()
+    optimum = optimizer.optimize(problem)
+
+    parameters = optimum.getPoint().toArray().tolist()
+    residuals = optimum.getResiduals().toArray().tolist()
+    rms = optimum.getRMS()
+    evals = optimum.getEvaluations()
+    iters = optimum.getIterations()
+    return (parameters, residuals, rms, evals, iters)
+
+
+###################################################################################################
+#FFT
+###################################################################################################
+
+def is_power(num, base):
+    if base <= 1: return num == 1
+    power = int(math.log(num, base) + 0.5)
+    return base ** power == num
+
+def pad_to_power_of_two(data):
+    if is_power(len(data), 2):
+        return data
+    pad = (1 << len(data).bit_length()) - len(data)
+    #Pad with a one-element list so that list repetition (not scalar arithmetic) is performed
+    elem = [complex(0, 0)] if type(data[0]) is complex else [0.0]
+    return data + elem * pad
+
+def get_real(values):
+    """Returns real part of a complex numbers vector.
+    Args:
+        values: List of complex.
+    Returns:
+        List of float
+    """
+    ret = []
+    for c in values:
+        ret.append(c.real)
+    return ret
+
+def get_imag(values):
+    """Returns imaginary part of a complex numbers vector.
+    Args:
+        values: List of complex.
+    Returns:
+        List of float
+    """
+    ret = []
+    for c in values:
+        ret.append(c.imag)
+    return ret
+
+def get_modulus(values):
+    """Returns the modulus of a complex numbers vector.
+    Args:
+        values: List of complex.
+    Returns:
+        List of float
+    """
+    ret = []
+    for c in values:
+        ret.append(math.hypot(c.imag, c.real))
+    return ret
+
+def get_phase(values):
+    """Returns the phase of a complex numbers vector.
+    Args:
+        values: List of complex.
+    Returns:
+        List of float
+    """
+    ret = []
+    for c in values:
+        #atan2 handles all quadrants and c.real == 0
+        ret.append(math.atan2(c.imag, c.real))
+    return ret
+
+def fft(f):
+    """Calculates the Fast Fourier Transform of a vector, padding it to the next power of 2 elements.
+    Args:
+        f: List of float or complex
+    Returns:
+        List of complex
+    """
+    f = pad_to_power_of_two(f)
+    if type(f[0]) is complex:
+        aux = []
+        for c in f:
+            aux.append(Complex(c.real, c.imag))
+        f = aux
+    fftt = FastFourierTransformer(DftNormalization.STANDARD)
+    ret = []
+    for c in fftt.transform(f, TransformType.FORWARD):
+        ret.append(complex(c.getReal(), c.getImaginary()))
+    return ret
+
+def ffti(f):
+    """Calculates the Inverse Fast Fourier Transform of a vector, padding it to the next power of 2 elements.
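+    With STANDARD normalization, ffti(fft(data)) recovers the zero-padded input (illustrative sanity check).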
+ Args: + values(): List of float or complex + Returns: + List of complex + """ + f = pad_to_power_of_two(f) + if type(f[0]) is complex: + aux = [] + for c in f: + aux.append(Complex(c.real, c.imag)) + f = aux + fftt = FastFourierTransformer(DftNormalization.STANDARD) + ret = [] + for c in fftt.transform(f,TransformType.INVERSE ): + ret.append(complex(c.getReal(),c.getImaginary())) + return ret \ No newline at end of file diff --git a/script/___Lib/plotutils.py b/script/___Lib/plotutils.py new file mode 100644 index 0000000..c67acb6 --- /dev/null +++ b/script/___Lib/plotutils.py @@ -0,0 +1,119 @@ +################################################################################################### +# Plot utilities +################################################################################################### + +import ch.psi.pshell.plot.LinePlotSeries as LinePlotSeries +import ch.psi.pshell.plot.LinePlotErrorSeries as LinePlotErrorSeries +import math +from startup import frange, to_array + +def plot_function(plot, function, name, range, show_points = True, show_lines = True, color = None): + """Plots a function to a plot. + + Args: + plot(LinePlot) + function(UnivariateFunction): Gaussian, PolynomialFunction, HarmonicOscillator... + name(str): name of the series + range(list or array of floats): x values to plot + Returns: + Tuples of harmonic parameters: (amplitude, angular_frequency, phase) + """ + if plot.style.isError(): + s = LinePlotErrorSeries(name, color) + else: + s = LinePlotSeries(name, color) + plot.addSeries(s) + s.setPointsVisible(show_points) + s.setLinesVisible(show_lines) + for x in range: + s.appendData(x, function.value(x)) + return s + +def plot_data(plot, data, name, xdata = None, error = None, show_points = True, show_lines = True, color = None): + """Plots a subscriptable object to a plot. 
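+    e.g. plot_data(plot, [1.0, 2.0, 1.5], "measured", xdata=[0, 1, 2])  (illustrative)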
+ + Args: + plot(LinePlot) + data(subscriptable): Y data + name(str): name of the series + xdata(subscriptable): X data + error(subscriptable): Error data (only for error plots) + Returns: + Tuples of harmonic parameters: (amplitude, angular_frequency, phase) + """ + if plot.style.isError(): + s = LinePlotErrorSeries(name, color) + else: + s = LinePlotSeries(name, color) + plot.addSeries(s) + s.setPointsVisible(show_points) + s.setLinesVisible(show_lines) + if xdata is None: + xdata = range(len(data)) + xdata = to_array(xdata, 'd') + data = to_array(data, 'd') + if plot.style.isError(): + error = to_array(error, 'd') + s.setData(xdata, data, error) + else: + s.setData(xdata, data) + return s + +def plot_point(plot, x, y, size = 3, color = None, name = "Point"): + s = LinePlotSeries(name, color) + plot.addSeries(s) + s.setPointSize(size) + s.appendData(x, y) + return s + +def plot_line(plot, x1, y1, x2, y2, width = 1, color = None, name = "Line"): + s = LinePlotSeries(name, color) + plot.addSeries(s) + s.setLineWidth(width) + s.setPointsVisible(False) + s.appendData(x1, y1) + s.appendData(x2, y2) + return s + +def plot_cross(plot, x, y, size = 1.0, width = 1, color = None, name = "Cross"): + size = float(size) + s = LinePlotSeries(name, color) + plot.addSeries(s) + s.setLineWidth(width) + s.setPointsVisible(False) + s.appendData(float('nan'), float('nan')) + s.appendData(x-size/2, y) + s.appendData(x+size/2, y) + s.appendData(float('nan'), float('nan')) + s.appendData(x, y-size/2) + s.appendData(x, y+size/2) + return s + +def plot_rectangle(plot, x1, y1, x2, y2, width = 1, color = None, name = "Rectangle"): + s = LinePlotSeries(name, color) + plot.addSeries(s) + s.setLineWidth(width) + s.setPointsVisible(False) + s.appendData(x1, y1) + s.appendData(x1, y2) + s.appendData(x2, y2) + s.appendData(x2, y1) + s.appendData(x1, y1) + return s + +def plot_circle(plot, cx, cy, radius, width = 1, color = None, name = "Circle"): + s = LinePlotSeries(name, color) + plot.addSeries(s) + s.setLineWidth(width) + s.setPointsVisible(False) + res=float(radius) / 100.0 + epson = 1e-12 + for xp in frange (cx+radius-epson , cx-radius+epson , -res): + yp = math.sqrt(math.pow(radius, 2) - math.pow(xp - cx, 2)) + cy + s.appendData(xp, yp) + for xp in frange (cx-radius+epson , cx+radius-epson, res): + yp = -math.sqrt(math.pow(radius, 2) - math.pow(xp - cx, 2)) + cy + s.appendData(xp, yp) + if s.getCount()>0: + s.appendData(s.getX()[0], s.getY()[0]) + return s \ No newline at end of file diff --git a/script/___Lib/startup.py b/script/___Lib/startup.py new file mode 100644 index 0000000..e9579a1 --- /dev/null +++ b/script/___Lib/startup.py @@ -0,0 +1,2526 @@ +################################################################################################### +# Global definitions and built-in functions +################################################################################################### + +import sys +import time +import math +import os.path +from operator import add, mul, sub, truediv +from time import sleep +from array import array +import jarray + +import java.lang.Class as Class +import java.lang.Object as Object +import java.beans.PropertyChangeListener +import java.util.concurrent.Callable +import java.util.List +import java.lang.reflect.Array +import java.lang.Thread +import java.awt.image.BufferedImage as BufferedImage +import java.awt.Color as Color +import java.awt.Dimension as Dimension +import java.awt.Font as Font +import org.python.core.PyArray as PyArray +import org.python.core.PyFunction 
as PyFunction +import org.python.core.PyGenerator as PyGenerator + +import ch.psi.utils.Threading as Threading +import ch.psi.utils.State as State +import ch.psi.utils.Convert as Convert +import ch.psi.utils.Arr as Arr +import ch.psi.utils.Chrono as Chrono +import ch.psi.pshell.core.CommandSource as CommandSource +import ch.psi.pshell.core.ContextAdapter as ContextListener +import ch.psi.pshell.core.Context +import ch.psi.pshell.core.InlineDevice as InlineDevice +import ch.psi.pshell.data.PlotDescriptor as PlotDescriptor +import ch.psi.pshell.data.Table as Table +import ch.psi.pshell.device.Device as Device +import ch.psi.pshell.device.DeviceBase as DeviceBase +import ch.psi.pshell.device.DeviceConfig as DeviceConfig +import ch.psi.pshell.device.RegisterBase as RegisterBase +import ch.psi.pshell.device.ProcessVariableBase as ProcessVariableBase +import ch.psi.pshell.device.ControlledVariableBase as ControlledVariableBase +import ch.psi.pshell.device.PositionerBase as PositionerBase +import ch.psi.pshell.device.MotorBase as MotorBase +import ch.psi.pshell.device.DiscretePositionerBase as DiscretePositionerBase +import ch.psi.pshell.device.MotorGroupBase as MotorGroupBase +import ch.psi.pshell.device.MotorGroupDiscretePositioner as MotorGroupDiscretePositioner +import ch.psi.pshell.device.ReadonlyRegisterBase as ReadonlyRegisterBase +import ch.psi.pshell.device.ReadonlyAsyncRegisterBase as ReadonlyAsyncRegisterBase +import ch.psi.pshell.device.Register as Register +import ch.psi.pshell.device.RegisterCache as RegisterCache +import ch.psi.pshell.device.ReadonlyRegister.ReadonlyRegisterArray as ReadonlyRegisterArray +import ch.psi.pshell.device.ReadonlyRegister.ReadonlyRegisterMatrix as ReadonlyRegisterMatrix +import ch.psi.pshell.device.DummyPositioner as DummyPositioner +import ch.psi.pshell.device.DummyMotor as DummyMotor +import ch.psi.pshell.device.DummyRegister as DummyRegister +import ch.psi.pshell.device.Timestamp as Timestamp +import ch.psi.pshell.device.Interlock as Interlock +import ch.psi.pshell.device.Readable as Readable +import ch.psi.pshell.device.Readable.ReadableArray as ReadableArray +import ch.psi.pshell.device.Readable.ReadableMatrix as ReadableMatrix +import ch.psi.pshell.device.Readable.ReadableCalibratedArray as ReadableCalibratedArray +import ch.psi.pshell.device.Readable.ReadableCalibratedMatrix as ReadableCalibratedMatrix +import ch.psi.pshell.device.ArrayCalibration as ArrayCalibration +import ch.psi.pshell.device.MatrixCalibration as MatrixCalibration +import ch.psi.pshell.device.Writable as Writable +import ch.psi.pshell.device.Writable.WritableArray as WritableArray +import ch.psi.pshell.device.Stoppable as Stoppable +import ch.psi.pshell.device.Averager as Averager +import ch.psi.pshell.device.ArrayAverager as ArrayAverager +import ch.psi.pshell.device.Delta as Delta +import ch.psi.pshell.device.DeviceAdapter as DeviceListener +import ch.psi.pshell.device.ReadbackDeviceAdapter as ReadbackDeviceListener +import ch.psi.pshell.device.MotorAdapter as MotorListener +import ch.psi.pshell.device.MoveMode as MoveMode +import ch.psi.pshell.device.SettlingCondition as SettlingCondition +import ch.psi.pshell.epics.Epics as Epics +import ch.psi.pshell.epics.EpicsScan as EpicsScan +import ch.psi.pshell.epics.ChannelSettlingCondition as ChannelSettlingCondition +import ch.psi.pshell.imaging.Source as Source +import ch.psi.pshell.imaging.SourceBase as SourceBase +import ch.psi.pshell.imaging.DirectSource as DirectSource +import ch.psi.pshell.imaging.RegisterMatrixSource as 
RegisterMatrixSource +import ch.psi.pshell.imaging.ImageListener as ImageListener +import ch.psi.pshell.plot.LinePlotSeries as LinePlotSeries +import ch.psi.pshell.plot.LinePlotErrorSeries as LinePlotErrorSeries +import ch.psi.pshell.plot.MatrixPlotSeries as MatrixPlotSeries +import ch.psi.pshell.scan.ScanBase as ScanBase +import ch.psi.pshell.scan.LineScan +import ch.psi.pshell.scan.ContinuousScan +import ch.psi.pshell.scan.AreaScan +import ch.psi.pshell.scan.VectorScan +import ch.psi.pshell.scan.ManualScan +import ch.psi.pshell.scan.HardwareScan +import ch.psi.pshell.scan.RegionScan +import ch.psi.pshell.scan.TimeScan +import ch.psi.pshell.scan.MonitorScan +import ch.psi.pshell.scan.BinarySearch +import ch.psi.pshell.scan.HillClimbingSearch +import ch.psi.pshell.scan.ScanResult +import ch.psi.pshell.bs.BsScan +import ch.psi.pshell.bs.Stream as Stream +import ch.psi.pshell.scripting.ViewPreference as Preference +import ch.psi.pshell.scripting.ScriptUtils as ScriptUtils +from javax.swing.SwingUtilities import invokeLater, invokeAndWait + +def get_context(): + return ch.psi.pshell.core.Context.getInstance() + +def on_command_started(info): + pass + +def on_command_finished(info): + pass + +################################################################################################### +#Type conversion and checking +################################################################################################### + +def to_array(obj, type = 'o'): + """Convert Python list to Java array. + + Args: + obj(list): Original data. + type(str): array type 'b' = byte, 'h' = short, 'i' = int, 'l' = long, 'f' = float, 'd' = double, + 'c' = char, 'z' = boolean, 's' = String, 'o' = Object + Returns: + Java array. + """ + if type[0] == '[': + type = type[1:] + arrayType = ScriptUtils.getType("["+type) + + if obj is None: + return None + if isinstance(obj,java.util.List): + obj = obj.toArray() + if type != 'o': + obj = Convert.toPrimitiveArray(obj, ScriptUtils.getType(type)) + if isinstance(obj,PyArray): + if type != 'o': + if (Arr.getRank(obj)== 1) and (obj.typecode != type): + ret = java.lang.reflect.Array.newInstance(ScriptUtils.getType(type), len(obj)) + if type == 's': + for i in range(len(obj)): ret[i] = str(obj[i]) + elif type == 'c': + for i in range(len(obj)): ret[i] = chr(obj[i]) + else: + for i in range(len(obj)): ret[i] = obj[i] + obj = ret + if type not in ['o', 's']: + obj = Convert.toPrimitiveArray(obj) + return obj + if is_list(obj): + if type=='o' or type== 's': + ret = java.lang.reflect.Array.newInstance(ScriptUtils.getType(type), len(obj)) + for i in range (len(obj)): + if is_list(obj[i]): + ret[i] = to_array(obj[i],type) + elif type == 's': + ret[i] = str(obj[i]) + else: + ret[i] = obj[i] + return ret + + if len(obj)>0 and is_list(obj[0]): + if len(obj[0])>0 and is_list(obj[0][0]): + ret = java.lang.reflect.Array.newInstance(arrayType,len(obj),len(obj[0])) + for i in range(len(obj)): + ret[i]=to_array(obj[i], type) + return ret + else: + ret = java.lang.reflect.Array.newInstance(arrayType,len(obj)) + for i in range(len(obj)): + ret[i]=to_array(obj[i], type) + return ret + return jarray.array(obj,type) + return obj + +def to_list(obj): + """Convert an object into a Python List. + + Args: + obj(tuple or array or ArrayList): Original data. + + Returns: + List. 
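+
+    Example (illustrative):
+        to_list((1, 2, 3))   # -> [1, 2, 3]
+        to_list(5.0)         # -> [5.0]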
+ """ + if obj is None: + return None + if isinstance(obj,tuple) or isinstance(obj,java.util.ArrayList) : + return list(obj) + #if isinstance(obj,PyArray): + # return obj.tolist() + if not isinstance(obj,list): + return [obj,] + return obj + +def is_list(obj): + return isinstance(obj,tuple) or isinstance(obj,list) or isinstance (obj, java.util.ArrayList) + +def is_string(obj): + return (type(obj) is str) or (type(obj) is unicode) + + +################################################################################################### +#Standard scan commands +################################################################################################### + +def on_before_scan_readout(scan, pos): + try: + if scan.before_read != None: + arguments = scan.before_read.func_code.co_argcount + if arguments == 0: + scan.before_read() + elif arguments==1: + scan.before_read(pos.tolist()) + elif arguments==2: + scan.before_read(pos.tolist(), scan) + except AttributeError: + pass + +def on_after_scan_readout(scan, record): + try: + if scan.after_read != None: + arguments = scan.after_read.func_code.co_argcount + if arguments == 0: + scan.after_read() + elif arguments==1: + scan.after_read(record) + elif arguments==2: + scan.after_read(record, scan) + except AttributeError: + pass + +def on_before_scan_pass(scan, num_pass): + try: + if scan.before_pass != None: + arguments = scan.before_pass.func_code.co_argcount + if arguments == 0: + scan.before_pass() + elif arguments==1: + scan.before_pass(num_pass) + elif arguments==2: + scan.before_pass(num_pass, scan) + except AttributeError: + pass + +def on_after_scan_pass(scan, num_pass): + try: + if scan.after_pass != None: + arguments = scan.after_pass.func_code.co_argcount + if arguments == 0: + scan.after_pass() + elif arguments==1: + scan.after_pass(num_pass) + elif arguments==2: + scan.after_pass(num_pass, scan) + except AttributeError: + pass + +class LineScan(ch.psi.pshell.scan.LineScan): + def onBeforeReadout(self, pos): + on_before_scan_readout(self, pos) + + def onAfterReadout(self, record): + on_after_scan_readout(self, record) + + def onBeforePass(self, num_pass): + on_before_scan_pass(self, num_pass) + + def onAfterPass(self, num_pass): + on_after_scan_pass(self, num_pass) + +class ContinuousScan(ch.psi.pshell.scan.ContinuousScan): + def onBeforeReadout(self, pos): + on_before_scan_readout(self, pos) + + def onAfterReadout(self, record): + on_after_scan_readout(self, record) + + def onBeforePass(self, num_pass): + on_before_scan_pass(self, num_pass) + + def onAfterPass(self, num_pass): + on_after_scan_pass(self, num_pass) + +class AreaScan(ch.psi.pshell.scan.AreaScan): + def onBeforeReadout(self, pos): + on_before_scan_readout(self, pos) + + def onAfterReadout(self, record): + on_after_scan_readout(self, record) + + def onBeforePass(self, num_pass): + on_before_scan_pass(self, num_pass) + + def onAfterPass(self, num_pass): + on_after_scan_pass(self, num_pass) + +class RegionScan(ch.psi.pshell.scan.RegionScan): + def onBeforeReadout(self, pos): + on_before_scan_readout(self, pos) + + def onAfterReadout(self, record): + on_after_scan_readout(self, record) + + def onBeforePass(self, num_pass): + on_before_scan_pass(self, num_pass) + + def onAfterPass(self, num_pass): + on_after_scan_pass(self, num_pass) + +class VectorScan(ch.psi.pshell.scan.VectorScan): + def onBeforeReadout(self, pos): + on_before_scan_readout(self, pos) + + def onAfterReadout(self, record): + on_after_scan_readout(self, record) + + def onBeforePass(self, num_pass): + 
on_before_scan_pass(self, num_pass) + + def onAfterPass(self, num_pass): + on_after_scan_pass(self, num_pass) + +class ContinuousScan(ch.psi.pshell.scan.ContinuousScan): + def onBeforeReadout(self, pos): + on_before_scan_readout(self, pos) + + def onAfterReadout(self, record): + on_after_scan_readout(self, record) + + def onBeforePass(self, num_pass): + on_before_scan_pass(self, num_pass) + + def onAfterPass(self, num_pass): + on_after_scan_pass(self, num_pass) + +class TimeScan(ch.psi.pshell.scan.TimeScan): + def onBeforeReadout(self, pos): + on_before_scan_readout(self, pos) + + def onAfterReadout(self, record): + on_after_scan_readout(self, record) + + def onBeforePass(self, num_pass): + on_before_scan_pass(self, num_pass) + + def onAfterPass(self, num_pass): + on_after_scan_pass(self, num_pass) + +class MonitorScan(ch.psi.pshell.scan.MonitorScan): + def onBeforeReadout(self, pos): + on_before_scan_readout(self, pos) + + def onAfterReadout(self, record): + on_after_scan_readout(self, record) + + def onBeforePass(self, num_pass): + on_before_scan_pass(self, num_pass) + + def onAfterPass(self, num_pass): + on_after_scan_pass(self, num_pass) + +class BsScan(ch.psi.pshell.bs.BsScan): + def onBeforeReadout(self, pos): + on_before_scan_readout(self, pos) + + def onAfterReadout(self, record): + on_after_scan_readout(self, record) + + def onBeforePass(self, num_pass): + on_before_scan_pass(self, num_pass) + + def onAfterPass(self, num_pass): + on_after_scan_pass(self, num_pass) + +class ManualScan (ch.psi.pshell.scan.ManualScan): + def __init__(self, writables, readables, start = None, end = None, steps = None, relative = False, dimensions = None): + ch.psi.pshell.scan.ManualScan.__init__(self, writables, readables, start, end, steps, relative) + self._dimensions = dimensions + + def append(self,setpoints, positions, values, timestamps=None): + ch.psi.pshell.scan.ManualScan.append(self, to_array(setpoints), to_array(positions), to_array(values), None if (timestamps is None) else to_array(timestamps)) + + def getDimensions(self): + if self._dimensions == None: + return ch.psi.pshell.scan.ManualScan.getDimensions(self) + else: + return self._dimensions + +class BinarySearch(ch.psi.pshell.scan.BinarySearch): + def onBeforeReadout(self, pos): + on_before_scan_readout(self, pos) + + def onAfterReadout(self, record): + on_after_scan_readout(self, record) + +class HillClimbingSearch(ch.psi.pshell.scan.HillClimbingSearch): + def onBeforeReadout(self, pos): + on_before_scan_readout(self, pos) + + def onAfterReadout(self, record): + on_after_scan_readout(self, record) + +def processScanPars(scan, pars): + scan.before_read = pars.pop("before_read",None) + scan.after_read = pars.pop("after_read",None) + scan.before_pass = pars.pop("before_pass",None) + scan.after_pass = pars.pop("after_pass",None) + scan.setPlotTitle(pars.pop("title",None)) + scan.setHidden(pars.pop("hidden",False)) + scan.setSettleTimeout (pars.pop("settle_timeout",ScanBase.getScansSettleTimeout())) + scan.setUseWritableReadback (pars.pop("use_readback",ScanBase.getScansUseWritableReadback())) + scan.setInitialMove(pars.pop("initial_move",ScanBase.getScansTriggerInitialMove())) + scan.setParallelPositioning(pars.pop("parallel_positioning",ScanBase.getScansParallelPositioning())) + scan.setAbortOnReadableError(pars.pop("abort_on_error",ScanBase.getAbortScansOnReadableError())) + scan.setRestorePosition (pars.pop("restore_position",ScanBase.getRestorePositionOnRelativeScans())) + 
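+    #Keys not consumed by the pops in this function are forwarded to the context below (set_exec_pars arguments)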
scan.setCheckPositions(pars.pop("check_positions",ScanBase.getScansCheckPositions())) + + + get_context().setCommandPars(scan, pars) + +def lscan(writables, readables, start, end, steps, latency=0.0, relative=False, passes=1, zigzag=False, **pars): + """Line Scan: positioners change together, linearly from start to end positions. + + Args: + writables(list of Writable): Positioners set on each step. + readables(list of Readable): Sensors to be sampled on each step. + start(list of float): start positions of writables. + end(list of float): final positions of writables. + steps(int or float or list of float): number of scan steps (int) or step size (float). + relative (bool, optional): if true, start and end positions are relative to current. + latency(float, optional): settling time for each step before readout, defaults to 0.0. + passes(int, optional): number of passes + zigzag(bool, optional): if true writables invert direction on each pass. + pars(keyworded variable length arguments, optional): scan optional named arguments: + - title(str, optional): plotting window name. + - hidden(bool, optional): if true generates no effects on user interface. + - before_read (function(positions, scan), optional): callback on each step, before sampling. + - after_read (function(record, scan), optional): callback on each step, after sampling. + - before_pass (function(pass_num, scan), optional): callback before each scan pass execution. + - after_pass (function(pass_num, scan), optional): callback after each scan pass execution. + - settle_timeout(int, optional): timeout for each positioner get to position. Default (-1) waits forever. + - initial_move (bool, optional): if true (default) perform move to initial position prior to scan start. + - parallel_positioning (bool, optional): if true (default) all positioners are set in parallel. + - abort_on_error (bool, optional): if true then aborts scan in sensor failures. Default is false. + - restore_position (bool, optional): if true (default) then restore initial position after relative scans. + - check_positions (bool, optional): if true (default) verifies if in correct positions after move finishes. + - Aditional arguments defined by set_exec_pars. + + Returns: + ScanResult object. + """ + latency_ms=int(latency*1000) + writables=to_list(string_to_obj(writables)) + readables=to_list(string_to_obj(readables)) + start=to_list(start) + end=to_list(end) + if type(steps) is float or is_list(steps): + steps = to_list(steps) + scan = LineScan(writables,readables, start, end , steps, relative, latency_ms, int(passes), zigzag) + processScanPars(scan, pars) + scan.start() + return scan.getResult() + +def vscan(writables, readables, vector, line = False, latency=0.0, relative=False, passes=1, zigzag=False, **pars): + """Vector Scan: positioners change following values provided in a vector. + + Args: + writables(list of Writable): Positioners set on each step. + readables(list of Readable): Sensors to be sampled on each step. + vector (generator (floats or lists of float) or list of list of float): positioner values. + line (bool, optional): if true, processs as line scan (1d) + relative (bool, optional): if true, start and end positions are relative to current. + latency(float, optional): settling time for each step before readout, defaults to 0.0. + passes(int, optional): number of passes (disregarded if vector is a generator). + zigzag(bool, optional): if true writables invert direction on each pass (disregarded if vector is a generator). 
+ pars(keyworded variable length arguments, optional): scan optional named arguments: + - title(str, optional): plotting window name. + - before_read (function(positions, scan), optional): callback on each step, before sampling. + - after_read (function(record, scan), optional): callback on each step, after sampling. + - before_pass (function(pass_num, scan), optional): callback before each scan pass execution. + - after_pass (function(pass_num, scan), optional): callback after each scan pass execution. + - settle_timeout(int, optional): timeout for each positioner get to position. Default (-1) waits forever. + - initial_move (bool, optional): if true (default) perform move to initial position prior to scan start. + - parallel_positioning (bool, optional): if true (default) all positioners are set in parallel. + - abort_on_error (bool, optional): if true then aborts scan in sensor failures. Default is false. + - restore_position (bool, optional): if true (default) then restore initial position after relative scans. + - check_positions (bool, optional): if true (default) verifies if in correct positions after move finishes. + - Aditional arguments defined by set_exec_pars. + + Returns: + ScanResult object. + """ + latency_ms=int(latency*1000) + writables=to_list(string_to_obj(writables)) + readables=to_list(string_to_obj(readables)) + if type (vector) == PyGenerator: + scan = VectorScan(writables,readables, vector, line, relative, latency_ms) + else: + if len(vector) == 0: + vector.append([]) + elif (not is_list(vector[0])) and (not isinstance(vector[0],PyArray)): + vector = [[x,] for x in vector] + vector = to_array(vector, 'd') + scan = VectorScan(writables,readables, vector, line, relative, latency_ms, int(passes), zigzag) + processScanPars(scan, pars) + scan.start() + return scan.getResult() + +def ascan(writables, readables, start, end, steps, latency=0.0, relative=False, passes=1, zigzag=False, **pars): + """Area Scan: multi-dimentional scan, each positioner is a dimention. + + Args: + writables(list of Writable): Positioners set on each step. + readables(list of Readable): Sensors to be sampled on each step. + start(list of float): start positions of writables. + end(list of float): final positions of writables. + steps(list of int or list of float): number of scan steps (int) or step size (float). + latency(float, optional): settling time for each step before readout, defaults to 0.0. + relative (bool, optional): if true, start and end positions are relative to current. + passes(int, optional): number of passes + zigzag (bool, optional): if true writables invert direction on each row. + pars(keyworded variable length arguments, optional): scan optional named arguments: + - title(str, optional): plotting window name. + - before_read (function(positions, scan), optional): callback on each step, before sampling. + - after_read (function(record, scan), optional): callback on each step, after sampling. + - before_pass (function(pass_num, scan), optional): callback before each scan pass execution. + - after_pass (function(pass_num, scan), optional): callback after each scan pass execution. + - settle_timeout(int, optional): timeout for each positioner get to position. Default (-1) waits forever. + - initial_move (bool, optional): if true (default) perform move to initial position prior to scan start. + - parallel_positioning (bool, optional): if true (default) all positioners are set in parallel. + - abort_on_error (bool, optional): if true then aborts scan in sensor failures. 
+
+def ascan(writables, readables, start, end, steps, latency=0.0, relative=False, passes=1, zigzag=False, **pars):
+    """Area Scan: multi-dimensional scan, where each positioner is one dimension.
+
+    Args:
+        writables(list of Writable): Positioners set on each step.
+        readables(list of Readable): Sensors to be sampled on each step.
+        start(list of float): start positions of writables.
+        end(list of float): final positions of writables.
+        steps(list of int or list of float): number of scan steps (int) or step size (float).
+        latency(float, optional): settling time for each step before readout, defaults to 0.0.
+        relative(bool, optional): if true, start and end positions are relative to current.
+        passes(int, optional): number of passes.
+        zigzag(bool, optional): if true, writables invert direction on each row.
+        pars(keyword variable-length arguments, optional): scan optional named arguments:
+            - title(str, optional): plotting window name.
+            - before_read(function(positions, scan), optional): callback on each step, before sampling.
+            - after_read(function(record, scan), optional): callback on each step, after sampling.
+            - before_pass(function(pass_num, scan), optional): callback before each scan pass execution.
+            - after_pass(function(pass_num, scan), optional): callback after each scan pass execution.
+            - settle_timeout(int, optional): timeout for each positioner to get into position. Default (-1) waits forever.
+            - initial_move(bool, optional): if true (default), moves to the initial positions prior to the scan start.
+            - parallel_positioning(bool, optional): if true (default), all positioners are set in parallel.
+            - abort_on_error(bool, optional): if true, aborts the scan on sensor failures. Default is false.
+            - restore_position(bool, optional): if true (default), restores the initial positions after relative scans.
+            - check_positions(bool, optional): if true (default), verifies that positioners are in place after each move.
+            - Additional arguments defined by set_exec_pars.
+
+    Returns:
+        ScanResult object.
+    """
+    latency_ms = int(latency*1000)
+    writables = to_list(string_to_obj(writables))
+    readables = to_list(string_to_obj(readables))
+    start = to_list(start)
+    end = to_list(end)
+    if is_list(steps):
+        steps = to_list(steps)
+    scan = AreaScan(writables, readables, start, end, steps, relative, latency_ms, int(passes), zigzag)
+    processScanPars(scan, pars)
+    scan.start()
+    return scan.getResult()
+
+def rscan(writable, readables, regions, latency=0.0, relative=False, passes=1, zigzag=False, **pars):
+    """Region Scan: positioner scanned linearly, from start to end positions, in multiple regions.
+
+    Args:
+        writable(Writable): Positioner set on each step, for each region.
+        readables(list of Readable): Sensors to be sampled on each step.
+        regions(list of tuples (float, float, int) or (float, float, float)): each tuple defines a scan region
+            as (start, stop, steps) or (start, stop, step_size).
+        relative(bool, optional): if true, start and end positions are relative to current.
+        latency(float, optional): settling time for each step before readout, defaults to 0.0.
+        passes(int, optional): number of passes.
+        zigzag(bool, optional): if true, the writable inverts direction on each pass.
+        pars(keyword variable-length arguments, optional): scan optional named arguments:
+            - title(str, optional): plotting window name.
+            - before_read(function(positions, scan), optional): callback on each step, before sampling.
+            - after_read(function(record, scan), optional): callback on each step, after sampling.
+            - before_pass(function(pass_num, scan), optional): callback before each scan pass execution.
+            - after_pass(function(pass_num, scan), optional): callback after each scan pass execution.
+            - settle_timeout(int, optional): timeout for each positioner to get into position. Default (-1) waits forever.
+            - initial_move(bool, optional): if true (default), moves to the initial positions prior to the scan start.
+            - parallel_positioning(bool, optional): if true (default), all positioners are set in parallel.
+            - abort_on_error(bool, optional): if true, aborts the scan on sensor failures. Default is false.
+            - restore_position(bool, optional): if true (default), restores the initial positions after relative scans.
+            - check_positions(bool, optional): if true (default), verifies that positioners are in place after each move.
+            - Additional arguments defined by set_exec_pars.
+
+    Returns:
+        ScanResult object.
+    """
+    start = []
+    end = []
+    steps = []
+    for region in regions:
+        start.append(region[0])
+        end.append(region[1])
+        steps.append(region[2])
+    latency_ms = int(latency*1000)
+    writable = string_to_obj(writable)
+    readables = to_list(string_to_obj(readables))
+    start = to_list(start)
+    end = to_list(end)
+    steps = to_list(steps)
+    scan = RegionScan(writable, readables, start, end, steps, relative, latency_ms, int(passes), zigzag)
+    processScanPars(scan, pars)
+    scan.start()
+    return scan.getResult()
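+# Illustrative sketches (hypothetical device names): a 10x5 area scan over two
+# positioners, and a region scan with two resolutions (0.1 then 0.5 step size).
+#
+#     result = ascan([m1, m2], [s1], [0.0, 0.0], [2.0, 1.0], [10, 5])
+#     result = rscan(m1, [s1], [(0.0, 1.0, 0.1), (1.0, 5.0, 0.5)])
+#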
+
+def cscan(writables, readables, start, end, steps, latency=0.0, time=None, relative=False, passes=1, zigzag=False, **pars):
+    """Continuous Scan: positioners change continuously from start to end positions and readables are sampled on the fly.
+
+    Args:
+        writables(Speedable or list of Motor): a positioner with a getSpeed method, or a list of motors.
+        readables(list of Readable): Sensors to be sampled on each step.
+        start(float or list of float): start positions of writables.
+        end(float or list of float): final positions of writables.
+        steps(int or float or list of float): number of scan steps (int) or step size (float).
+        latency(float, optional): sleep time in each step before readout, defaults to 0.0.
+        time(float, optional): total scan time in seconds; if not None, the motor speeds are set according to it.
+        relative(bool, optional): if true, start and end positions are relative to current.
+        passes(int, optional): number of passes.
+        zigzag(bool, optional): if true, writables invert direction on each pass.
+        pars(keyword variable-length arguments, optional): scan optional named arguments:
+            - title(str, optional): plotting window name.
+            - before_read(function(positions, scan), optional): callback on each step, before sampling.
+            - after_read(function(record, scan), optional): callback on each step, after sampling.
+            - before_pass(function(pass_num, scan), optional): callback before each scan pass execution.
+            - after_pass(function(pass_num, scan), optional): callback after each scan pass execution.
+            - abort_on_error(bool, optional): if true, aborts the scan on sensor failures. Default is false.
+            - restore_position(bool, optional): if true (default), restores the initial positions after relative scans.
+            - Additional arguments defined by set_exec_pars.
+
+    Returns:
+        ScanResult object.
+    """
+    latency_ms = int(latency*1000)
+    readables = to_list(string_to_obj(readables))
+    writables = to_list(string_to_obj(writables))
+    start = to_list(start)
+    end = to_list(end)
+    # A single Writable with fixed speed
+    if time is None:
+        if is_list(steps): steps = steps[0]
+        scan = ContinuousScan(writables[0], readables, start[0], end[0], steps, relative, latency_ms, int(passes), zigzag)
+    # A set of Writables with configurable speed
+    else:
+        if type(steps) is float or is_list(steps):
+            steps = to_list(steps)
+        scan = ContinuousScan(writables, readables, start, end, steps, time, relative, latency_ms, int(passes), zigzag)
+
+    processScanPars(scan, pars)
+    scan.start()
+    return scan.getResult()
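+# Illustrative sketch (hypothetical device names): a continuous scan over motor
+# 'm1' taking 100 samples, with the motor speed derived from a 5 s total time.
+#
+#     result = cscan(m1, [s1], 0.0, 10.0, 100, time=5.0)
+#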
+ """ + cls = Class.forName(config["class"]) + class HardwareScan(cls): + def __init__(self, config, writable, readables, start, end, stepSize, passes, zigzag): + cls.__init__(self, config, writable, readables, start, end, stepSize, passes, zigzag) + def onAfterReadout(self, record): + on_after_scan_readout(self, record) + def onBeforePass(self, num_pass): + on_before_scan_pass(self, num_pass) + def onAfterPass(self, num_pass): + on_after_scan_pass(self, num_pass) + + readables=to_list(string_to_obj(readables)) + scan = HardwareScan(config, writable,readables, start, end , steps, int(passes), zigzag) + processScanPars(scan, pars) + scan.start() + return scan.getResult() + +def bscan(stream, records, timeout = None, passes=1, **pars): + """BS Scan: records all values in a beam synchronous stream. + + Args: + stream(Stream): stream object or list of chanel names to build stream from + records(int): number of records to store + timeout(float, optional): maximum scan time in seconds. + passes(int, optional): number of passes + pars(keyworded variable length arguments, optional): scan optional named arguments: + - title(str, optional): plotting window name. + - before_read (function(positions, scan), optional): callback on each step, before sampling. + - after_read (function(record, scan), optional): callback on each step, after sampling. + - before_pass (function(pass_num, scan), optional): callback before each scan pass execution. + - after_pass (function(pass_num, scan), optional): callback after each scan pass execution. + - Aditional arguments defined by set_exec_pars. + + Returns: + ScanResult object. + """ + timeout_ms=int(timeout*1000) if ((timeout is not None) and (timeout>=0)) else -1 + if not is_list(stream): + stream=string_to_obj(stream) + scan = BsScan(stream,int(records), timeout_ms, int(passes)) + processScanPars(scan, pars) + scan.start() + return scan.getResult() + +def tscan(readables, points, interval, passes=1, **pars): + """Time Scan: sensors are sampled in fixed time intervals. + + Args: + readables(list of Readable): Sensors to be sampled on each step. + points(int): number of samples. + interval(float): time interval between readouts. Minimum temporization is 0.001s + passes(int, optional): number of passes + pars(keyworded variable length arguments, optional): scan optional named arguments: + - title(str, optional): plotting window name. + - before_read (function(positions, scan), optional): callback on each step, before sampling. + - after_read (function(record, scan), optional): callback on each step, after sampling. + - before_pass (function(pass_num, scan), optional): callback before each scan pass execution. + - after_pass (function(pass_num, scan), optional): callback after each scan pass execution. + - abort_on_error (bool, optional): if true then aborts scan in sensor failures. Default is false. + - Aditional arguments defined by set_exec_pars. + + Returns: + ScanResult object. + """ + interval= max(interval, 0.001) #Minimum temporization is 1ms + interval_ms=int(interval*1000) + readables=to_list(string_to_obj(readables)) + scan = TimeScan(readables, points, interval_ms, int(passes)) + processScanPars(scan, pars) + scan.start() + return scan.getResult() + +def mscan(trigger, readables, points, timeout = None, async=True, take_initial=False, passes=1, **pars): + """Monitor Scan: sensors are sampled when received change event of the trigger device. + + Args: + trigger(Device or list of Device): Source of the sampling triggering. 
+
+def mscan(trigger, readables, points, timeout=None, async=True, take_initial=False, passes=1, **pars):
+    """Monitor Scan: sensors are sampled when a change event of the trigger device is received.
+
+    Args:
+        trigger(Device or list of Device): source of the sampling triggering.
+        readables(list of Readable): Sensors to be sampled on each step.
+            If the trigger has a cache and is included in readables, it is not read
+            on each step: the change event value is used instead.
+        points(int): number of samples.
+        timeout(float, optional): maximum scan time in seconds.
+        async(bool, optional): if True, records are sampled and stored in the change event callback,
+            enforcing cache-only reads of the sensors.
+            If False, the scan execution loop waits for trigger cache updates: sensor access is
+            not restricted to the cache, but change events may be lost.
+        take_initial(bool, optional): if True, includes the current values as the first record (before the first trigger).
+        passes(int, optional): number of passes.
+        pars(keyword variable-length arguments, optional): scan optional named arguments:
+            - title(str, optional): plotting window name.
+            - before_read(function(positions, scan), optional): callback on each step, before sampling.
+            - after_read(function(record, scan), optional): callback on each step, after sampling.
+            - before_pass(function(pass_num, scan), optional): callback before each scan pass execution.
+            - after_pass(function(pass_num, scan), optional): callback after each scan pass execution.
+            - abort_on_error(bool, optional): if true, aborts the scan on sensor failures. Default is false.
+            - Additional arguments defined by set_exec_pars.
+
+    Returns:
+        ScanResult object.
+    """
+    timeout_ms = int(timeout*1000) if ((timeout is not None) and (timeout >= 0)) else -1
+    trigger = string_to_obj(trigger)
+    readables = to_list(string_to_obj(readables))
+    scan = MonitorScan(trigger, readables, points, timeout_ms, async, take_initial, int(passes))
+    processScanPars(scan, pars)
+    scan.start()
+    return scan.getResult()
+
+def escan(name, **pars):
+    """Epics Scan: execute an Epics Scan Record.
+
+    Args:
+        name(str): name of the scan record.
+        pars(keyword variable-length arguments, optional): scan optional named arguments:
+            - title(str, optional): plotting window name.
+            - Additional arguments defined by set_exec_pars.
+
+    Returns:
+        ScanResult object.
+    """
+    scan = EpicsScan(name)
+    processScanPars(scan, pars)
+    scan.start()
+    return scan.getResult()
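+# Illustrative sketch (hypothetical device names): store 50 records, sampling
+# sensor 's1' each time the trigger device 'trig' publishes a change event.
+#
+#     result = mscan(trig, [trig, s1], 50, timeout=60.0)
+#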
+
+def bsearch(writables, readable, start, end, steps, maximum=True, strategy="Normal", latency=0.0, relative=False, **pars):
+    """Binary search: moves writables in a binary search fashion to find a local maximum of the readable.
+
+    Args:
+        writables(list of Writable): Positioners set on each step.
+        readable(Readable): Sensor to be sampled.
+        start(list of float): start positions of writables.
+        end(list of float): final positions of writables.
+        steps(float or list of float): resolution of the search for each writable.
+        maximum(bool, optional): if True (default), searches for a maximum, otherwise for a minimum.
+        strategy(str, optional): search strategy:
+            - "Normal": starts the search midway in the scan range and advances in the best direction,
+              using the orthogonal neighborhood (4-neighborhood for 2d).
+            - "Boundary": starts the search on the scan range boundary.
+            - "FullNeighborhood": uses the complete neighborhood (8-neighborhood for 2d).
+        latency(float, optional): settling time for each step before readout, defaults to 0.0.
+        relative(bool, optional): if true, start and end positions are relative to current.
+        pars(keyword variable-length arguments, optional): scan optional named arguments:
+            - title(str, optional): plotting window name.
+            - before_read(function(positions, scan), optional): callback on each step, before sampling.
+            - after_read(function(record, scan), optional): callback on each step, after sampling.
+            - settle_timeout(int, optional): timeout for each positioner to get into position. Default (-1) waits forever.
+            - parallel_positioning(bool, optional): if true (default), all positioners are set in parallel.
+            - abort_on_error(bool, optional): if true, aborts the scan on sensor failures. Default is false.
+            - restore_position(bool, optional): if true (default), restores the initial positions after relative scans.
+            - check_positions(bool, optional): if true (default), verifies that positioners are in place after each move.
+            - Additional arguments defined by set_exec_pars.
+
+    Returns:
+        SearchResult object.
+    """
+    latency_ms = int(latency*1000)
+    writables = to_list(string_to_obj(writables))
+    readable = string_to_obj(readable)
+    start = to_list(start)
+    end = to_list(end)
+    steps = to_list(steps)
+    strategy = BinarySearch.Strategy.valueOf(strategy)
+    scan = BinarySearch(writables, readable, start, end, steps, maximum, strategy, relative, latency_ms)
+    processScanPars(scan, pars)
+    scan.start()
+    return scan.getResult()
+
+def hsearch(writables, readable, range_min, range_max, initial_step, resolution, filter=1, maximum=True, latency=0.0, relative=False, **pars):
+    """Hill climbing search: moves writables in decreasing steps to find a local maximum of the readable.
+
+    Args:
+        writables(list of Writable): Positioners set on each step.
+        readable(Readable): Sensor to be sampled.
+        range_min(list of float): minimum positions of writables.
+        range_max(list of float): maximum positions of writables.
+        initial_step(float or list of float): initial step size for each writable.
+        resolution(float or list of float): resolution of the search for each writable (minimum step size).
+        filter(int): number of additional steps used to filter noise.
+        maximum(bool, optional): if True (default), searches for a maximum, otherwise for a minimum.
+        latency(float, optional): settling time for each step before readout, defaults to 0.0.
+        relative(bool, optional): if true, positions are relative to current.
+        pars(keyword variable-length arguments, optional): scan optional named arguments:
+            - title(str, optional): plotting window name.
+            - before_read(function(positions, scan), optional): callback on each step, before sampling.
+            - after_read(function(record, scan), optional): callback on each step, after sampling.
+            - settle_timeout(int, optional): timeout for each positioner to get into position. Default (-1) waits forever.
+            - parallel_positioning(bool, optional): if true (default), all positioners are set in parallel.
+            - abort_on_error(bool, optional): if true, aborts the scan on sensor failures. Default is false.
+            - restore_position(bool, optional): if true (default), restores the initial positions after relative scans.
+            - check_positions(bool, optional): if true (default), verifies that positioners are in place after each move.
+            - Additional arguments defined by set_exec_pars.
+
+    Returns:
+        SearchResult object.
+    """
+    latency_ms = int(latency*1000)
+    writables = to_list(string_to_obj(writables))
+    readable = string_to_obj(readable)
+    range_min = to_list(range_min)
+    range_max = to_list(range_max)
+    initial_step = to_list(initial_step)
+    resolution = to_list(resolution)
+    scan = HillClimbingSearch(writables, readable, range_min, range_max, initial_step, resolution, filter, maximum, relative, latency_ms)
+    processScanPars(scan, pars)
+    scan.start()
+    return scan.getResult()
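+# Illustrative sketch (hypothetical device names): hill-climbing search for the
+# position of 'm1' that maximizes sensor 's1', between 0 and 10, starting with
+# 1.0 steps and refining down to 0.01.
+#
+#     result = hsearch([m1], s1, [0.0], [10.0], [1.0], [0.01])
+#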
+ """ + latency_ms=int(latency*1000) + writables=to_list(string_to_obj(writables)) + readable=string_to_obj(readable) + range_min=to_list(range_min) + range_max=to_list(range_max) + initial_step = to_list(initial_step) + resolution = to_list(resolution) + scan = HillClimbingSearch(writables,readable, range_min, range_max , initial_step, resolution, filter, maximum, relative, latency_ms) + processScanPars(scan, pars) + scan.start() + return scan.getResult() + + +################################################################################################### +#Data plotting +################################################################################################### + +def plot(data, name = None, xdata = None, ydata=None, title=None): + """Request one or multiple plots of user data (1d, 2d or 3d). + + Args: + data: array or list of values. For multiple plots, list of arrays. + name(str or list of str, optional): plot name. For multiple plots, list of names. + xdata: array or list of values. For multiple plots, list of arrays. + ydata: array or list of values. For multiple plots, list of arrays. + title(str, optional): plotting window name. + + Returns: + ArrayList of Plot objects. + """ + if isinstance(data, ch.psi.pshell.data.Table): + if is_list(xdata): + xdata = to_array(xdata, 'd') + return get_context().plot(data,xdata,name,title) + + if isinstance(data, ch.psi.pshell.scan.ScanResult): + return get_context().plot(data,title) + + if (name is not None) and is_list(name): + if len(name)==0: + name=None; + else: + if (data==None): + data = [] + for n in name: + data.append([]) + plots = java.lang.reflect.Array.newInstance(Class.forName("ch.psi.pshell.data.PlotDescriptor"), len(data)) + for i in range (len(data)): + plotName = None if (name is None) else name[i] + x = xdata + if is_list(x) and len(x)>0 and (is_list(x[i]) or isinstance(x[i] , java.util.List) or isinstance(x[i],PyArray)): + x = x[i] + y = ydata + if is_list(y) and len(y)>0 and (is_list(y[i]) or isinstance(y[i] , java.util.List) or isinstance(y[i],PyArray)): + y = y[i] + plots[i] = PlotDescriptor(plotName , to_array(data[i], 'd'), to_array(x, 'd'), to_array(y, 'd')) + return get_context().plot(plots,title) + else: + plot = PlotDescriptor(name, to_array(data, 'd'), to_array(xdata, 'd'), to_array(ydata, 'd')) + return get_context().plot(plot,title) + +def get_plots(title=None): + """Return all current plots in the plotting window given by 'title'. + + Args: + title(str, optional): plotting window name. + + Returns: + ArrayList of Plot objects. + """ + return get_context().getPlots(title) + +def get_plot_snapshots(title = None, file_type = "png", size = None, temp_path = get_context().setup.getContextPath()): + """Returns list with file names of plots snapshots from a plotting context. + + Args: + title(str, optional): plotting window name. + file_type(str, optional): "png", "jpg", "bmp" or "gif" + size(array, optional): [width, height] + temp_path(str, optional): path where the files will be generated. + + Returns: + list of strings + """ + time.sleep(0.1) #Give some time to plot to be finished - it is not sync with acquisition + ret = [] + if size != None: + size = Dimension(size[0], size[1]) + plots = get_plots(title) + for i in range(len(plots)): + p = plots[i] + name = p.getTitle() + if name is None or name == "": + name = str(i) + file_name = os.path.abspath(temp_path + "/" + name + "." 
+
+###################################################################################################
+#Data access functions
+###################################################################################################
+
+def load_data(path, index=0, shape=None):
+    """Read data from the current persistence context or from data files.
+
+    Args:
+        path(str): path to the group or dataset, relative to the persistence context root.
+            If in the format 'root|path', reads from the path given by 'root'.
+        index(int or list, optional):
+            If an integer, the data depth (used for 3D datasets, returning a 2d matrix).
+            If a list, specifies the full coordinate for multidimensional datasets.
+        shape(list, optional): only valid if index is a list; provides the shape of the data array.
+            In this case a flattened one-dimensional array is returned.
+
+    Returns:
+        Data array
+    """
+    if index is not None and is_list(index):
+        slice = get_context().dataManager.getData(path, index, shape)
+    else:
+        slice = get_context().dataManager.getData(path, index)
+    return slice.sliceData
+
+def get_attributes(path):
+    """Get the attributes from the current persistence context or from data files.
+
+    Args:
+        path(str): path to the group or dataset, relative to the current persistence context root.
+            If in the format 'root|path', reads from the path given by 'root'.
+    Returns:
+        Dictionary
+    """
+    return get_context().dataManager.getAttributes(path)
+
+def save_dataset(path, data, type='d', unsigned=False, features=None):
+    """Save data into a dataset within the current persistence context.
+
+    Args:
+        path(str): path to the dataset, relative to the current persistence context root.
+        data(array or list): data to be saved.
+        type(str, optional): array type - 'd'=double (default), 'b'=byte, 'h'=short, 'i'=int,
+            'l'=long, 'f'=float, 'c'=char, 's'=String, 'o'=Object.
+        unsigned(boolean, optional): create a dataset of unsigned type.
+        features(dictionary, optional): see create_dataset.
+
+    Returns:
+        None
+    """
+    data = to_array(data, type)
+    get_context().dataManager.setDataset(path, data, unsigned, features)
+
+def create_group(path):
+    """Create an empty group within the current persistence context.
+
+    Args:
+        path(str): path to the group, relative to the current persistence context root.
+    Returns:
+        None
+    """
+    get_context().dataManager.createGroup(path)
+
+def create_dataset(path, type, unsigned=False, dimensions=None, features=None):
+    """Create an empty dataset within the current persistence context.
+
+    Args:
+        path(str): path to the dataset, relative to the current persistence context root.
+        type(str): array type 'b' = byte, 'h' = short, 'i' = int, 'l' = long, 'f' = float,
+            'd' = double, 'c' = char, 's' = String, 'o' = Object.
+        unsigned(boolean, optional): create a dataset of unsigned type.
+        dimensions(tuple of int, optional): a 0 value means variable length in that dimension.
+        features(dictionary, optional): storage features for the dataset, format specific.
+            Keys for HDF5: "layout": "compact", "contiguous" or "chunked"
+                           "compression": True, "max" or deflation level from 1 to 9
+                           "shuffle": byte shuffle before compressing
+                           "chunk": tuple, setting the chunk size
+            Default: no compression; contiguous for fixed size arrays, chunked for variable size, compact for scalars.
+    Returns:
+        None
+    """
+    get_context().dataManager.createDataset(path, ScriptUtils.getType(type), unsigned, dimensions, features)
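+# Illustrative sketch: save an array to the current data context and read it
+# back (the dataset path "detectors/profile" is arbitrary).
+#
+#     save_dataset("detectors/profile", [1.0, 2.0, 4.0, 8.0])
+#     data = load_data("detectors/profile")
+#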
+
+def create_table(path, names, types=None, lengths=None, features=None):
+    """Create an empty table (dataset of compound type) within the current persistence context.
+
+    Args:
+        path(str): path to the dataset, relative to the current persistence context root.
+        names(list of str): name of each column.
+        types(list of str): 'b' = byte, 'h' = short, 'i' = int, 'l' = long, 'f' = float,
+            'd' = double, 'c' = char, 's' = String, 'o' = Object.
+            Note: a '[' prefix on a type name indicates an array type.
+        lengths(list of int): the array length for each column (0 for scalar types).
+        features(dictionary, optional): see create_dataset.
+    Returns:
+        None
+    """
+    type_classes = []
+    if types is not None:
+        for i in range(len(types)):
+            type_classes.append(ScriptUtils.getType(types[i]))
+    get_context().dataManager.createDataset(path, names, type_classes, lengths, features)
+
+def append_dataset(path, data, index=None, type='d', shape=None):
+    """Append data to a dataset.
+
+    Args:
+        path(str): path to the dataset, relative to the current persistence context root.
+        data(number or array or list): data to be appended.
+        index(int or list, optional): if set, adds the data at a specific position in the dataset.
+            If an integer, it is the index in the array (data must be one order lower than the dataset).
+            If a list, specifies the full coordinate for multidimensional datasets.
+        type(str, optional): array type 'b' = byte, 'h' = short, 'i' = int, 'l' = long, 'f' = float,
+            'd' = double, 'c' = char, 's' = String, 'o' = Object.
+            Default: 'd' (converts data to an array of doubles).
+        shape(list, optional): only valid if index is a list; provides the shape of the data array.
+            In this case data must be a flattened one-dimensional array.
+    Returns:
+        None
+    """
+    data = to_array(data, type)
+    if index is None:
+        get_context().dataManager.appendItem(path, data)
+    else:
+        if is_list(index):
+            if shape is None:
+                shape = [len(index)]
+            get_context().dataManager.setItem(path, data, index, shape)
+        else:
+            get_context().dataManager.setItem(path, data, index)
+
+def append_table(path, data):
+    """Append data to a table (dataset of compound type).
+
+    Args:
+        path(str): path to the dataset, relative to the current persistence context root.
+        data(list): list of values for each column of the table.
+    Returns:
+        None
+    """
+    if is_list(data):
+        arr = java.lang.reflect.Array.newInstance(Class.forName("java.lang.Object"), len(data))
+        for i in range(len(data)):
+            if is_list(data[i]):
+                arr[i] = to_array(data[i], 'd')
+            else:
+                arr[i] = data[i]
+        data = arr
+    get_context().dataManager.appendItem(path, data)
+
+def flush_data():
+    """Flush all data files immediately.
+
+    Args:
+        None
+    Returns:
+        None
+    """
+    get_context().dataManager.flush()
+
+def set_attribute(path, name, value, unsigned=False):
+    """Set an attribute of a group or dataset.
+
+    Args:
+        path(str): path to the group or dataset, relative to the current persistence context root.
+        name(str): name of the attribute.
+        value(Object): the attribute value.
+        unsigned(bool, optional): if it applies, indicates whether the value is unsigned.
+    Returns:
+        None
+    """
+    if is_list(value):
+        value = Convert.toStringArray(to_array(value))
+    get_context().dataManager.setAttribute(path, name, value, unsigned)
+
+def log(log, data_file=None):
+    """Writes a log to the system log and to the data context - if there is an ongoing scan or script execution.
+
+    Args:
+        log(str): log string.
+        data_file(bool, optional): if true, logs to the data file in addition to the system logger.
+            If None (default), appends to the data file only if it exists.
+
+    Returns:
+        None
+    """
+    get_context().scriptingLog(str(log))
+    if data_file is None:
+        data_file = get_exec_pars().open
+    if data_file:
+        try:
+            get_context().dataManager.appendLog(str(log))
+        except:
+            # Do not generate an exception if the data file cannot be written
+            pass
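+# Illustrative sketch: create a table with a scalar and an array column, then
+# append one row (path, names and the 'spectrum_data' array are hypothetical).
+#
+#     create_table("scan1/result", ["energy", "spectrum"], ["d", "[d"], [0, 1024])
+#     append_table("scan1/result", [8.95, spectrum_data])
+#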
+
+def set_exec_pars(**args):
+    """Configures the script execution parameters, overriding the system configuration.
+
+    Args:
+        args(optional arguments):
+            name(str): value of the {name} tag. Default is the running script name.
+            type(str): value of the {type} tag. Default is empty.
+                This field can be used to store data in sub-folders of the standard location.
+            path(str): if defined, provides the full path name for the data output root (overriding the configuration).
+                The tag {data} can be used to enter a path relative to the standard data folder.
+            layout(str): overrides the default data layout.
+            format(str): overrides the default data format.
+            depth_dim(int): dimension of 2d matrices in 3d datasets.
+            save(bool): overrides the config option to auto save scan data.
+            flush(bool): overrides the config option to flush the file on each record.
+            keep(bool): overrides the config option to keep scan records in memory. If false, records are not added to the scan result.
+            preserve(bool): overrides the config option to preserve device types. If false, all values are converted to double.
+            compression(obj): True to enable default compression, int to specify the deflation level.
+                Device or list of devices to specify the devices to be compressed.
+            shuffle(obj): True to enable shuffling before compression.
+                Device or list of devices to specify the devices to be shuffled.
+            contiguous(obj): True to set contiguous datasets for all devices.
+                Device or list of devices to specify the device datasets to be contiguous.
+            seq(int): sets the next data file sequence number.
+            open(bool): if true, creates the data output path immediately. If false, closes the output root, if open.
+            reset(bool): if true, resets the scan counter - the {count} tag - and sets the timestamp to now.
+            group(str): overrides the default layout group name for scans.
+            tag(str): overrides the default tag for scan names (affecting the group or dataset name, according to the layout).
+            then, then_success, then_exception(str): sets a statement to be executed on completion of the current one.
+            defaults(bool): if true, restores the original execution parameters.
+
+            Graphical preferences can also be set. Keys are equal to the lowercase of the Preference enum:
+            "plot_disabled", "plot_layout", "table_disabled", "enabled_plots", "plot_types", "print_scan",
+            "auto_range", "manual_range", "manual_range_y", "domain_axis", "status". See set_preference for more information.
+
+            Shortcut entries: "line_plots": list of devices with enforced line plots.
+                "range": "none", "auto", [min_x, max_x] or [min_x, max_x, min_y, max_y].
+                "display": if false, disables scan data plotting and printing.
+    """
+    get_context().setExecutionPars(args)
+
+def get_exec_pars():
+    """Returns the script execution parameters.
+
+    Returns:
+        ExecutionParameters object. Fields:
+            name(str): execution name - {name} tag.
+            type(str): execution type - {type} tag.
+            path(str): output data root.
+            seq(int): data file sequence number.
+            open(bool): true if the output data root has been opened.
+            layout(str): data output layout. If None, the configuration is used.
+            save(bool): auto save scan data option.
+            flush(bool): flush file on each record.
+            index(int): current scan index.
+            group(str): data group currently used for scan data storage.
+                If there is no ongoing scan, returns "/" within a script, or None for a console command.
+            scanPath(str): dataset or group corresponding to the current scan.
+            scan(Scan): reference to the current scan, if any.
+            source(CommandSource): the source of the script or command.
+            background(bool): False if executing in the main interpreter thread.
+            aborted(bool): True if the execution has been aborted.
+    """
+    return get_context().getExecutionPars()
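+# Illustrative sketch: route the data of the following scans to a sub-folder of
+# the standard location and disable scan displaying (the tag value is arbitrary).
+#
+#     set_exec_pars(type="alignment", display=False)
+#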
+
+###################################################################################################
+#EPICS channel access
+###################################################################################################
+
+def _adjust_channel_value(value, var_type=None):
+    if value is None:
+        return value
+    if var_type is not None:
+        if is_list(value):
+            var_type = var_type.replace(',', '').replace('[', '')
+            ret = []
+            for item in value:
+                ret.append(_adjust_channel_value(item, var_type))
+            value = ret
+        else:
+            var_type = var_type.lower()
+            if var_type == 'b':
+                value = byte(value)
+            elif var_type == 'i':
+                value = short(value)
+            elif var_type == 'l':
+                value = int(value)
+            elif var_type == 'f':
+                value = float(value)
+            elif var_type == 'd':
+                value = float(value)
+            elif var_type == 's':
+                value = str(value)
+
+    if isinstance(value, tuple):
+        value = list(value)
+    if isinstance(value, list):
+        list_type = type(value[0])
+        array_types = {
+            int: "i",
+            long: "l",
+            float: "d",
+            str: Class.forName("java.lang.String"),
+        }
+        array_type = array_types.get(list_type, 'd')
+        array = PyArray(array_type)
+        array.fromlist(value)
+        value = array
+    return value
+
+def caget(name, type=None, size=None, meta=False):
+    """Reads an Epics PV.
+
+    Args:
+        name(str): PV name.
+        type(str, optional): type of the PV. By default uses the PV's standard field type.
+            Scalar values: 'b', 'i', 'l', 'd', 's'.
+            Array values: '[b', '[i', '[l', '[d', '[s'.
+        size(int, optional): for arrays, the number of elements to be read. By default reads all.
+        meta(bool, optional): if true, gets the channel value and metadata (timestamp, severity).
+
+    Returns:
+        PV value if meta is false, otherwise a dictionary containing the PV value and metadata.
+    """
+    if meta:
+        return Epics.getMeta(name, Epics.getChannelType(type), size)
+    return Epics.get(name, Epics.getChannelType(type), size)
+
+def cawait(name, value, timeout=None, comparator=None, type=None, size=None):
+    """Wait for a PV to have a given value.
+
+    Args:
+        name(str): PV name.
+        value(obj): value to compare to.
+        timeout(float, optional): time in seconds to wait. If None, waits forever.
+        comparator(java.util.Comparator or float, optional): if None, waits for equality.
+            If a numeric value is provided, waits for the channel to be in that range.
+        type(str, optional): type of the PV. By default uses the PV's standard field type.
+            Scalar values: 'b', 'i', 'l', 'd', 's'.
+            Array values: '[b', '[i', '[l', '[d', '[s'.
+        size(int, optional): for arrays, the number of elements to be read. By default reads all.
+
+    Returns:
+        None
+    """
+    if timeout is not None:
+        timeout = int(timeout*1000)
+    value = _adjust_channel_value(value)
+    Epics.waitValue(name, value, comparator, timeout, Epics.getChannelType(type), size)
+
+def caput(name, value, timeout=None):
+    """Writes to an Epics PV.
+
+    Args:
+        name(str): PV name.
+        value(scalar, string or array): new PV value.
+        timeout(int, optional): timeout in seconds for the write.
+            If None, waits forever for completion.
+
+    Returns:
+        None
+    """
+    value = _adjust_channel_value(value)
+    if timeout is not None:
+        timeout = int(timeout*1000)
+    return Epics.put(name, value, timeout)
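+# Illustrative sketch (hypothetical PV names): write a setpoint, wait for the
+# readback to reach it within a tolerance, then read an array channel.
+#
+#     caput("BL:MOTOR1:SETPOINT", 2.5)
+#     cawait("BL:MOTOR1:READBACK", 2.5, timeout=10.0, comparator=0.01)
+#     wf = caget("BL:DAQ1:WAVEFORM", type='[d', size=1024)
+#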
+
+def caputq(name, value):
+    """Writes to an Epics PV and does not wait.
+
+    Args:
+        name(str): PV name.
+        value(scalar, string or array): new PV value.
+
+    Returns:
+        None
+    """
+    value = _adjust_channel_value(value)
+    return Epics.putq(name, value)
+
+def camon(name, type=None, size=None, wait=sys.maxint):
+    """Install a monitor on an Epics PV and print value changes.
+
+    Args:
+        name(str): PV name.
+        type(str, optional): type of the PV. By default uses the PV's standard field type.
+            Scalar values: 'b', 'i', 'l', 'd', 's'.
+            Array values: '[b', '[i', '[l', '[d', '[s'.
+        size(int, optional): for arrays, the number of elements to be read. By default reads all.
+        wait(int, optional): blocking time for this function. By default blocks forever.
+    Returns:
+        None
+    """
+    val = lambda x: x.tolist() if isinstance(x, PyArray) else x
+
+    class MonitorListener(java.beans.PropertyChangeListener):
+        def propertyChange(self, pce):
+            print val(pce.getNewValue())
+
+    channel = create_channel(name, type, size)
+    print val(channel.getValue())
+    channel.setMonitored(True)
+    channel.addPropertyChangeListener(MonitorListener())
+
+    try:
+        time.sleep(wait)
+    finally:
+        Epics.closeChannel(channel)
+
+def create_channel_device(channel_name, type=None, size=None, device_name=None):
+    """Create a device from an EPICS PV.
+
+    Args:
+        channel_name(str): PV name.
+        type(str, optional): type of the PV. By default uses the PV's standard field type.
+            Scalar values: 'b', 'i', 'l', 'd', 's'.
+            Array values: '[b', '[i', '[l', '[d', '[s'.
+        size(int, optional): for arrays, the number of elements to be read. By default reads all.
+        device_name(str, optional): device name (if different from channel_name).
+    Returns:
+        The created device.
+    """
+    dev = Epics.newChannelDevice(channel_name if (device_name is None) else device_name, channel_name, Epics.getChannelType(type))
+    if get_context().isSimulation():
+        dev.setSimulated()
+    dev.initialize()
+    if size is not None:
+        dev.setSize(size)
+    return dev
+
+def create_channel(name, type=None, size=None):
+    """Create an Epics channel object (see Channel for an encapsulated alternative)."""
+    return Epics.newChannel(name, Epics.getChannelType(type), size)
+
+class Channel(java.beans.PropertyChangeListener, Writable, Readable):
+    def __init__(self, channel_name, type=None, size=None, callback=None, alias=None):
+        """Create an object that encapsulates an Epics PV connection.
+
+        Args:
+            channel_name(str): name of the channel.
+            type(str, optional): type of the PV. By default uses the PV's standard field type.
+                Scalar values: 'b', 'i', 'l', 'd', 's'.
+                Array values: '[b', '[i', '[l', '[d', '[s'.
+            size(int, optional): the size of the channel.
+            callback(function, optional): the monitor callback.
+            alias(str): name to be used on scans.
+        """
+        self.channel = create_channel(channel_name, type, size)
+        self.callback = callback
+        if alias is not None:
+            set_device_alias(self, alias)
+        else:
+            set_device_alias(self, channel_name)
+
+    def get_name(self):
+        """Return the name of the channel.
+        """
+        return self.channel.name
+
+    def get_size(self):
+        """Return the size of the channel.
+        """
+        return self.channel.size
+
+    def set_size(self, size):
+        """Set the size of the channel.
+        """
+        self.channel.size = size
+
+    def is_connected(self):
+        """Return True if the channel is connected.
+        """
+        return self.channel.connected
+
+    def is_monitored(self):
+        """Return True if the channel is monitored.
+        """
+        return self.channel.monitored
+
+    def set_monitored(self, value):
+        """Set a channel monitor to trigger the callback function defined in the constructor.
+        """
+        self.channel.monitored = value
+        if value:
+            self.channel.addPropertyChangeListener(self)
+        else:
+            self.channel.removePropertyChangeListener(self)
+
+    def propertyChange(self, pce):
+        if pce.getPropertyName() == "value":
+            if self.callback is not None:
+                self.callback(pce.getNewValue())
+
+    def put(self, value, timeout=None):
+        """Write to the channel and wait for the value change. In the case of a timeout, throws a TimeoutException.
+
+        Args:
+            value(obj): value to be written.
+            timeout(float, optional): timeout in seconds. If None, waits forever.
+        """
+        if timeout is None:
+            self.channel.setValue(value)
+        else:
+            self.channel.setValueAsync(value).get(int(timeout*1000), java.util.concurrent.TimeUnit.MILLISECONDS)
+
+    def putq(self, value):
+        """Write to the channel and don't wait.
+        """
+        self.channel.setValueNoWait(value)
+
+    def get(self, force=False):
+        """Get the channel value.
+        """
+        return self.channel.getValue(force)
+
+    def wait_for_value(self, value, timeout=None, comparator=None):
+        """Wait for the channel to reach a value, using a given comparator. In the case of a timeout, throws a TimeoutException.
+
+        Args:
+            value(obj): value to be verified.
+            timeout(float, optional): timeout in seconds. If None, waits forever.
+            comparator(java.util.Comparator, optional): if None, uses Object.equals.
+        """
+        if comparator is None:
+            if timeout is None:
+                self.channel.waitForValue(value)
+            else:
+                self.channel.waitForValue(value, int(timeout*1000))
+        else:
+            if timeout is None:
+                self.channel.waitForValue(value, comparator)
+            else:
+                self.channel.waitForValue(value, comparator, int(timeout*1000))
+
+    def close(self):
+        """Close the channel.
+        """
+        self.channel.destroy()
+
+    def write(self, value):
+        self.put(value)
+
+    def read(self):
+        return self.get()
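+# Illustrative sketch (hypothetical PV name): encapsulate a PV as a Channel and
+# monitor it with a callback.
+#
+#     def on_change(value):
+#         print "New value:", value
+#
+#     c = Channel("BL:SENSOR1:VALUE", type='d', callback=on_change)
+#     c.set_monitored(True)
+#     ...
+#     c.close()
+#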
+ """ + return self.channel.connected + + def is_monitored(self): + """Return True if channel is monitored + """ + return self.channel.monitored + + def set_monitored(self, value): + """Set a channel monitor to trigger the callback function defined in the constructor. + """ + self.channel.monitored = value + if (value): + self.channel.addPropertyChangeListener(self) + else: + self.channel.removePropertyChangeListener(self) + + def propertyChange(self, pce): + if pce.getPropertyName() == "value": + if self.callback is not None: + self.callback(pce.getNewValue()) + + def put(self, value, timeout=None): + """Write to channel and wait value change. In the case of a timeout throws a TimeoutException. + Args: + value(obj): value to be written + timeout(float, optional): timeout in seconds. If none waits forever. + """ + if (timeout==None): + self.channel.setValue(value) + else: + self.channel.setValueAsync(value).get(int(timeout*1000), java.util.concurrent.TimeUnit.MILLISECONDS); + + def putq(self, value): + """Write to channel and don't wait. + """ + self.channel.setValueNoWait(value) + + def get(self, force = False): + """Get channel value. + """ + return self.channel.getValue(force) + + def wait_for_value(self, value, timeout=None, comparator=None): + """Wait channel to reach a value, using a given comparator. In the case of a timeout throws a TimeoutException. + Args: + value(obj): value to be verified. + timeout(float, optional): timeout in seconds. If None waits forever. + comparator (java.util.Comparator, optional). If None, uses Object.equals. + """ + if comparator is None: + if timeout is None: + self.channel.waitForValue(value) + else: + self.channel.waitForValue(value, int(timeout*1000)) + else: + if timeout is None: + self.channel.waitForValue(value, comparator) + else: + self.channel.waitForValue(value, comparator, int(timeout*1000)) + + def close(self): + """Close the channel. + """ + self.channel.destroy() + + def write(self, value): + self.put(value) + + def read(self): + return self.get() + + +################################################################################################### +#Concurrent execution +################################################################################################### + +class Callable(java.util.concurrent.Callable): + def __init__(self, method, *args): + self.method = method + self.args = args + self.thread = java.lang.Thread.currentThread() + def call(self): + try: + get_context().startedChildThread(self.thread) + return self.method(*self.args) + finally: + get_context().finishedChildThread(self.thread) + +def fork(*functions): + """Start execution of functions in parallel. + + Args: + *functions(function references) + + Returns: + List of callable objects + """ + callables = [] + for m in functions: + if is_list(m): + callables.append(Callable(m[0],*m[1])) + else: + callables.append(Callable(m)) + return Threading.fork(callables) + +def join(futures): + """Wait parallel execution of functions. + + Args: + futures(list of Future) : as returned from fork + + Returns: + None +""" + try: + return Threading.join(futures) + except java.util.concurrent.ExecutionException, ex: + raise ex.getCause() + +def parallelize(*functions): + """Equivalent to fork + join + + Args: + *functions(function references) + + Returns: + None + """ + futures = fork(*functions) + return join(futures) + +def invoke(f, wait = False): + """ Execute in event thread. 
+
+###################################################################################################
+#Script evaluation and background task control.
+###################################################################################################
+
+def run(script_name, args=None, locals=None):
+    """Run a script: the name can be an absolute path, relative, or a short name to be searched in the path.
+
+    Args:
+        script_name(str): name of the script.
+        args(dict or list): sets sys.argv (if a list) or global variables (if a dict) for the script.
+        locals(dict): if not None, sets locals() for the running script.
+            If locals is used, script definitions will not go to the global namespace.
+
+    Returns:
+        The script return value (if set with set_return).
+    """
+    script = get_context().scriptManager.library.resolveFile(script_name)
+    if script is not None and os.path.isfile(script):
+        info = get_context().startScriptExecution(script_name, args)
+        try:
+            set_return(None)
+            if args is not None:
+                if isinstance(args, list) or isinstance(args, tuple):
+                    sys.argv = list(args)
+                    globals()["args"] = sys.argv
+                else:
+                    for arg in args.keys():
+                        globals()[arg] = args[arg]
+            if locals is None:
+                execfile(script, globals())
+            else:
+                execfile(script, globals(), locals)
+            ret = get_return()
+            get_context().finishScriptExecution(info, ret)
+            return ret
+        except Exception, ex:
+            get_context().finishScriptExecution(info, ex)
+            raise ex
+    raise IOError("Invalid script: " + str(script_name))
+
+def abort():
+    """Abort the execution of the ongoing task. It can be called from the script to quit.
+
+    Args:
+        None
+
+    Returns:
+        None
+    """
+    # Cannot run on the script execution thread
+    fork(get_context().abort)
+
+def start_task(script, delay=0.0, interval=-1):
+    """Start a background task.
+
+    Args:
+        script(str): name of the script implementing the task.
+        delay(float, optional): time in seconds for the first execution.
+            Default starts immediately.
+        interval(float, optional): time in seconds between executions.
+            If negative (default), single execution.
+
+    Returns:
+        None
+    """
+    delay_ms = int(delay*1000)
+    interval_ms = int(interval*1000) if (interval >= 0) else int(interval)
+    get_context().taskManager.create(script, delay_ms, interval_ms)
+    get_context().taskManager.start(script)
+
+def stop_task(script, force=False):
+    """Stop a background task.
+
+    Args:
+        script(str): name of the script implementing the task.
+        force(boolean, optional): interrupt the current execution, if running.
+
+    Returns:
+        None
+    """
+    get_context().taskManager.remove(script, force)
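+# Illustrative sketch (hypothetical script names): run another script passing
+# named globals, then schedule a background task every 5 seconds.
+#
+#     ret = run("process_data", {"input_file": "/tmp/data.h5"})
+#     start_task("monitor_beam", delay=1.0, interval=5.0)
+#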
+
+def set_return(value):
+    """Sets the script return value. This value is returned by the "run" function.
+
+    Args:
+        value(Object): script return value.
+
+    Returns:
+        None
+    """
+    # In Jython, the output of the last statement is not returned when running a file
+    if __name__ == "__main__":
+        global __THREAD_EXEC_RESULT__
+        if is_interpreter_thread():
+            global _
+            _ = value
+        __THREAD_EXEC_RESULT__[java.lang.Thread.currentThread()] = value  # Used when running a file
+    else:
+        # If startup is imported, globals cannot be set directly: use the caller's frame
+        caller = _get_caller()
+        if is_interpreter_thread():
+            caller.f_globals["_"] = value
+        if not "__THREAD_EXEC_RESULT__" in caller.f_globals.keys():
+            caller.f_globals["__THREAD_EXEC_RESULT__"] = {}
+        caller.f_globals["__THREAD_EXEC_RESULT__"][java.lang.Thread.currentThread()] = value
+    return value  # Used when parsing a file
+
+def get_return():
+    if __name__ == "__main__":
+        global __THREAD_EXEC_RESULT__
+        return __THREAD_EXEC_RESULT__[java.lang.Thread.currentThread()]
+    else:
+        return _get_caller().f_globals["__THREAD_EXEC_RESULT__"][java.lang.Thread.currentThread()]
+
+def is_interpreter_thread():
+    return java.lang.Thread.currentThread().name == "Interpreter Thread"
+
+
+###################################################################################################
+#Versioning tools
+###################################################################################################
+
+def commit(message, force=False):
+    """Commit the changes to the repository. If manual commit is not configured,
+    there is no need to call this function: commits are made as needed.
+
+    Args:
+        message(str): commit message.
+        force(bool, optional): if False, raises an exception if no change is detected in the repo.
+
+    Returns:
+        None
+    """
+    get_context().commit(message, force)
+
+def diff():
+    """Return the list of changes in the repository.
+
+    Args:
+        None
+
+    Returns:
+        List of changes.
+    """
+    return get_context().diff()
+
+def checkout_tag(tag):
+    """Checkout a tag name.
+
+    Args:
+        tag(str): tag name.
+
+    Returns:
+        None
+    """
+    get_context().checkoutTag(tag)
+
+def checkout_branch(tag):
+    """Checkout a local branch name.
+
+    Args:
+        tag(str): branch name.
+
+    Returns:
+        None
+    """
+    get_context().checkoutLocalBranch(tag)
+
+def pull_repository():
+    """Pull from the remote repository.
+
+    """
+    get_context().pullFromUpstream()
+
+def push_repository(all_branches=True, force=False):
+    """Push to the remote repository.
+
+    Args:
+        all_branches(boolean, optional): all branches, or just the current one.
+        force(boolean, optional): force flag.
+
+    Returns:
+        None
+    """
+    get_context().pushToUpstream(all_branches, force)
+
+def cleanup_repository():
+    """Performs a repository cleanup.
+
+    Args:
+        None
+
+    Returns:
+        None
+    """
+    get_context().cleanupRepository()
+
+
+###################################################################################################
+#Device Pool functions
+###################################################################################################
+
+def get_device(device_name):
+    """Returns a configured device (or imaging source) by its name.
+
+    Args:
+        device_name(str): name of the device.
+
+    Returns:
+        device
+    """
+    return get_context().devicePool.getByName(device_name)
+
+def add_device(device, force=False):
+    """Add a device (or imaging source) to the device pool.
+
+    Args:
+        device(Device or Source): device object.
+        force(boolean, optional): if true, disposes an existing device with the same name.
+            Otherwise fails in case of a name clash.
+
+    Returns:
+        True if the device was added, False if it was already in the pool, or an exception in case of a name clash.
+    """
+    if get_context().devicePool.contains(device):
+        return False
+    if force:
+        dev = get_context().devicePool.getByName(device.getName())
+        if dev is not None:
+            remove_device(dev)
+    return get_context().devicePool.addDevice(device)
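+# Illustrative sketch (hypothetical names): fetch a configured device by name and
+# register a new channel device in the pool.
+#
+#     motor = get_device("m1")
+#     dev = create_channel_device("BL:SENSOR2:VALUE", type='d', device_name="s2")
+#     add_device(dev, force=True)
+#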
+ """ + if get_context().devicePool.contains(device): + return False + if force: + dev = get_context().devicePool.getByName(device.getName()) + if dev is not None: + remove_device(dev) + return get_context().devicePool.addDevice(device) + +def remove_device(device): + """Remove a device (or imaging source) from the device pool. + + Args: + device(Device or Source): device object. + + Returns: + bool: true if device was removed. + """ + return get_context().devicePool.removeDevice(device) + +def set_device_alias(device, alias): + """Set a device alias to be used in scans (datasets and plots). + + Args: + device(Device): device object. + alias(str): replace device name in scans. + + Returns: + None + """ + get_context().dataManager.setAlias(device, alias) + +def stop(): + """Stop all devices implementing the Stoppable interface. + + Args: + None + + Returns: + None + """ + get_context().stopAll() + +def update(): + """Update all devices. + + Args: + None + + Returns: + None + """ + get_context().updateAll() + +def reinit(dev = None): + """Re-initialize devices. + + Args: + dev(Device, optional): Device to be re-initialized (if None, all devices not yet initialized) + + Returns: + List with devices not initialized. + """ + return to_list(get_context().reinit()) + +def create_device(url, parent=None): + """Create a device form a definition string(see InlineDevice) + + Args: + url(str or list of string): the device definition string (or list of strings) + parent(bool, optional): parent device + + Returns: + The created device (or list of devices) + """ + return InlineDevice.create(url, parent) + + +def create_averager(dev, count, interval=0.0, name = None, monitored = False): + """Creates and initializes and averager for dev. + + Args: + dev(Device): the source device + count(int): number of samples + interval(float, optional): sampling interval in seconds. + If less than zero, sampling is made on data change event. + name(str, optional): sets the name of the device (default is: averager) + monitored (bool, optional): if true then averager processes asynchronously. + + Returns: + Averager device + """ + dev = string_to_obj(dev) + if isinstance(dev, ReadableArray): + av = ArrayAverager(dev, count, int(interval*1000)) if (name is None) else ArrayAverager(name, dev, count, int(interval*1000)) + else: + av = Averager(dev, count, int(interval*1000)) if (name is None) else Averager(name, dev, count, int(interval*1000)) + av.initialize() + if (monitored): + av.monitored = True + return av + +def tweak(dev, step, is2d=False): + """Move one or more positioners in steps using the arrow keys. + Steps are increased/decreased using the shift and control keys. + + Args: + dev(Positioner or List): the device or list of devices to move. + step(float or List): step size or list of step sizes + is2d(bool, optional): if true moves second motor with up/down arrows. 
+ """ + if (get_exec_pars().isBackground()): return + dev,step = to_list(string_to_obj(dev)),to_list(step) + while (True): + key=get_context().waitKey(0) + for i in range(len(dev)): + if not is2d or i==0: + if key == 0x25: dev[i].moveRel(-step[i]) #Left + elif key == 0x27: dev[i].moveRel(step[i]) #Right + if key in (0x10, 0x11): + step[i] = step[i]*2 if key == 0x10 else step[i]/2 + print "Tweak step for " + dev[i].name + " set to: "+str(step[i]) + if is2d and len(dev)>1: + if key == 0x26: dev[1].moveRel(step[1]) #Top + elif key == 0x28: dev[1].moveRel(-step[1]) #Bottom + + +################################################################################################### +#Mathematical functions +################################################################################################### + +def arrmul(a, b): + """Multiply 2 series of the same size. + + Args: + + a(subscriptable) + b(subscriptable) + + Returns: + List + """ + return map(mul, a, b) + +def arrdiv(a, b): + """Divide 2 series of the same size. + + Args: + + a(subscriptable) + b(subscriptable) + + Returns: + List + """ + return map(truediv, a, b) + +def arradd(a, b): + """Add 2 series of the same size. + + Args: + + a(subscriptable) + b(subscriptable) + + Returns: + List + """ + return map(add, a, b) + +def arrsub(a, b): + """Subtract 2 series of the same size. + + Args: + + a(subscriptable) + b(subscriptable) + + Returns: + List + """ + return map(sub, a, b) + +def arrabs(a): + """Returns the absolute of all elements in series. + + Args: + + a(subscriptable) + + Returns: + List + """ + return map(abs, a) + +def arroff(a, value = "mean"): + """Subtract offset to all elemets in series. + + Args: + + a(subscriptable) + type(int or str, optional): value to subtract from the array, or "mean" or "min". + + Returns: + List + """ + if value=="mean": + value = mean(a) + elif value=="min": + value = min(a) + return [x-value for x in a] + +def mean(data): + """Calculate the mean of a sequence. + + Args: + data(subscriptable) + + Returns: + Mean of the elements in the object. + """ + return reduce(lambda x, y: x + y, data) / len(data) + +def variance(data): + """Calculate the variance of a sequence. + + Args: + data(subscriptable) + + Returns: + Variance of the elements in the object. + """ + c = mean(data) + ss = sum((x-c)**2 for x in data) + return ss/len(data) + +def stdev(data): + """Calculate the standard deviation of a sequence. + + Args: + data(subscriptable) + + Returns: + Standard deviation of the elements in the object. + """ + return variance(data)**0.5 + + +def center_of_mass(data, x = None): + """Calculate the center of mass of a series, and its rms. + + Args: + + data(subscriptable) + x(list, tuple, array ..., optional): x coordinates + + Returns: + Tuple (com, rms) + """ + if x is None: + x = Arr.indexesDouble(len(data)) + data_sum = sum(data) + if (data_sum==0): + return float('nan') + xmd = arrmul( x, data) + com = sum(xmd) / data_sum + xmd2 = arrmul( x, xmd) + com2 = sum(xmd2) / data_sum + rms = math.sqrt(abs(com2 - com * com)) + return (com, rms) + +def poly(val, coefs): + """Evaluates a polinomial: (coefs[0] + coefs[1]*val + coefs[2]*val^2... + + Args: + val(float): value + coefs (list of loats): polinomial coefficients + Returns: + Evaluated function for val + """ + r = 0 + p = 0 + for c in coefs: + r = r + c * math.pow(val, p) + p = p + 1 + return r + +def histogram(data, range_min = None, range_max = None, bin = 1.0): + """Creates histogram on data. 
+
+def histogram(data, range_min=None, range_max=None, bin=1.0):
+    """Creates a histogram of the data.
+
+    Args:
+        data(tuple, array, ArrayList or Array): input data, can be multi-dimensional or nested.
+        range_min(int, optional): minimum histogram value. Default is floor(min(data)).
+        range_max(int, optional): maximum histogram value. Default is ceil(max(data)).
+        bin(int or float, optional): if int, the number of bins; if float, the bin size. Default = 1.0.
+    Returns:
+        tuple: (ydata, xdata)
+    """
+    if range_min is None: range_min = math.floor(min(flatten(data)))
+    if range_max is None: range_max = math.ceil(max(flatten(data)))
+    if type(bin) is float:
+        bin_size = bin
+        n_bin = int(math.ceil(float(range_max - range_min)/bin_size))
+    else:
+        n_bin = bin
+        bin_size = float(range_max - range_min)/bin
+
+    result = [0] * n_bin
+    for d in flatten(data):
+        b = int(float(d - range_min) / bin_size)
+        if (b >= 0) and (b < n_bin):
+            result[b] = result[b] + 1
+    return (result, frange(range_min, range_max, bin_size))
+
+def _turn(p, q, r):
+    return cmp((q[0] - p[0])*(r[1] - p[1]) - (r[0] - p[0])*(q[1] - p[1]), 0)
+
+def _keep(hull, r):
+    while len(hull) > 1 and _turn(hull[-2], hull[-1], r) != 1:
+        hull.pop()
+    return (not len(hull) or hull[-1] != r) and hull.append(r) or hull
+
+def convex_hull(point_list=None, x=None, y=None):
+    """Returns the convex hull of a list of points. Either point_list or x,y is provided.
+    (Algorithm adapted from http://tomswitzer.net/2010/03/graham-scan/)
+
+    Args:
+        point_list(array of tuples, optional): array of the points.
+        x(array of float, optional): array with the x coords of the points.
+        y(array of float, optional): array with the y coords of the points.
+    Returns:
+        Array of points or (x, y)
+    """
+    is_point_list = point_list is not None
+    if not point_list:
+        point_list = []
+        for i in range(len(x)):
+            if (x[i] is not None) and (y[i] is not None):
+                point_list.append((x[i], y[i]))
+    point_list.sort()
+    lh, uh = reduce(_keep, point_list, []), reduce(_keep, reversed(point_list), [])
+    ret = lh.extend(uh[i] for i in xrange(1, len(uh) - 1)) or lh
+    if not is_point_list:
+        x, y = [], []
+        for i in range(len(ret)):
+            x.append(ret[i][0])
+            y.append(ret[i][1])
+        return (x, y)
+    return ret
+
+###################################################################################################
+#Utilities
+###################################################################################################
+
+def get_setting(name=None):
+    """Get a persisted script setting value.
+
+    Args:
+        name(str): name of the setting.
+    Returns:
+        String with the setting value, or None if the setting is undefined.
+        If name is None, returns a map with all settings.
+    """
+    return get_context().getSettings() if (name is None) else get_context().getSetting(name)
+
+def set_setting(name, value):
+    """Set a persisted script setting value.
+
+    Args:
+        name(str): name of the setting.
+        value(obj): value for the setting, converted to string (if None, removes the setting).
+    Returns:
+        None.
+    """
+    get_context().setSetting(name, value)
+
+def exec_cmd(cmd):
+    """Executes a shell command. If an error happens, raises an exception.
+
+    Args:
+        cmd(str): command process input.
+    Returns:
+        Output of the command process.
+    """
+    import subprocess
+    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
+    (ret, err) = proc.communicate()
+    if (err is not None) and err != "":
+        raise Exception(err)
+    return ret
+
+def exec_cpython(script_name, args=[], method_name=None, python_name="python"):
+    """Executes an external cpython process.
+
+    Args:
+        script_name(str): name of the script (can be absolute or relative to the script folder).
+        args(list, optional): arguments to the python process (or parameters to the method, if method_name is not None).
+        method_name(str, optional): if defined, indicates a method to be called.
+        python_name(str, optional): name of the executable.
+    Returns:
+        Return of the python process.
+    """
+    if method_name is None:
+        script = get_context().scriptManager.library.resolveFile(script_name)
+        if script is None:
+            script = os.path.abspath(script_name)
+        c = python_name + " " + script + " "
+        if args is not None and (len(args) > 0):
+            for arg in args:
+                c = c + str(arg) + " "
+        return exec_cmd(c)
+    else:
+        # Calling a method: generate a temporary wrapper script that imports the module,
+        # calls the method with JSON-decoded arguments and prints the JSON-encoded return value
+        import json
+        script = os.path.abspath(get_context().scriptManager.library.resolveFile(script_name))
+        with open(get_context().setup.getContextPath() + "/Temp" + str(java.lang.Thread.currentThread().getId()) + ".py", "wb") as f:
+            f.write(("script = '" + script + "'\n").replace('\\', '\\\\'))
+            f.write("function = '" + method_name + "'\n")
+            f.write("jsonargs = '" + json.dumps(args) + "'\n")
+            f.write("""import sys
+import json
+import os
+args = json.loads(jsonargs)
+i = script.rfind(os.sep)
+module = script[i+1:-3]
+sys.path.insert(1, script[:i+1])
+exec ('from ' + module + ' import ' + function + ' as function')
+print (json.dumps(function(*args)))
+""")
+        ret = exec_cpython(os.path.abspath(f.name), python_name=python_name)
+        os.remove(f.name)
+        ret = '\n' + ret[0:-len(os.linesep)]
+        jsonret = ret[ret.rfind('\n')+1:].strip()
+        return json.loads(jsonret)
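+# Illustrative sketch: histogram of a data series (the 'data' variable is
+# hypothetical) in 0.5-wide bins, plotted against the bin positions.
+#
+#     (counts, bins) = histogram(data, range_min=0, range_max=10, bin=0.5)
+#     plot(counts, "hist", xdata=bins)
+#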
+def exec_cpython(script_name, args = [], method_name = None, python_name = "python"):
+    """Execute an external cpython process.
+
+    Args:
+        script_name (str): name of the script (can be absolute or relative to the script folder).
+        args(list, optional): arguments to the python process (or parameters to the method, if method_name is given)
+        method_name (str, optional): if defined, indicates a method to be called.
+        python_name (str, optional): name of the python executable
+    Returns:
+        Output of the python process (or the method return value, decoded from JSON).
+    """
+    if method_name is None:
+        script = get_context().scriptManager.library.resolveFile(script_name)
+        if script is None:
+            script = os.path.abspath(script_name)
+        c = python_name + " " + script + " "
+        if args is not None and (len(args)>0):
+            for arg in args:
+                c = c + str(arg) + " "
+        return exec_cmd(c)
+    else:
+        #Calling a method: generate a wrapper script that imports the module,
+        #calls the method and prints the JSON-encoded return value.
+        import json
+        import tempfile
+        script = os.path.abspath(get_context().scriptManager.library.resolveFile(script_name))
+        with open(get_context().setup.getContextPath() + "/Temp" + str(java.lang.Thread.currentThread().getId()) + ".py", "wb") as f:
+            f.write(("script = '" + script + "'\n").replace('\\', '\\\\'))
+            f.write("function = '" + method_name + "'\n")
+            f.write("jsonargs = '" + json.dumps(args) + "'\n")
+            f.write("""import sys
+import json
+import os
+args =json.loads(jsonargs)
+i = script.rfind(os.sep)
+module = script[i+1:-3]
+sys.path.insert(1,script[:i+1])
+exec ('from ' + module + ' import ' + function + ' as function')
+print (json.dumps(function(*args)))
+""")
+            f.close()
+            ret = exec_cpython(os.path.abspath(f.name), python_name = python_name)
+            os.remove(f.name)
+            ret = '\n' + ret[0:-len(os.linesep)]
+            jsonret = ret[ret.rfind('\n')+1:].strip()
+            return json.loads(jsonret)
+
+def bsget(channel, modulo=1, offset=0, timeout = 5.0):
+    """Read values from a bsread stream, using the default provider.
+
+    Args:
+        channel(str or list of str): channel name(s)
+        modulo(int, optional): stream modulo
+        offset(int, optional): stream offset
+        timeout(float, optional): stream timeout in secs
+    Returns:
+        BS value or list of values
+    """
+    channels = to_list(channel)
+    ret = Stream.readChannels(channels, modulo, offset, int(timeout * 1000))
+    if is_string(channel):
+        return ret[0]
+    return ret
+
+def flatten(data):
+    """Flatten multi-dimensional or nested data.
+
+    Args:
+        data (tuple, array, ArrayList or Array): input data
+    Returns:
+        Iterator on the flattened data.
+    """
+    if isinstance(data, PyArray):
+        if not data.typecode.startswith('['):
+            return data
+    import itertools
+    return itertools.chain(*data)
+
+def frange_gen(start, finish, step):
+    while ((step >= 0.0) and (start <= finish)) or ((step < 0.0) and (start >= finish)):
+        yield start
+        start += step
+
+def frange(start, finish, step, enforce_finish = False, inclusive_finish = False):
+    """Create a list with a range of float values (a float equivalent of "range").
+
+    Args:
+        start(float): start of range.
+        finish(float): end of range.
+        step(float): step size.
+        enforce_finish(boolean, optional): appends finish even if the range does not reach it exactly.
+        inclusive_finish(boolean, optional): if False then finish is exclusive (like in "range").
+
+    Returns:
+        list
+    """
+    step = float(step)
+    ret = list(frange_gen(start, finish, step))
+    if len(ret) > 0:
+        if not inclusive_finish:
+            if ret[-1]==finish:
+                del ret[-1]
+        if enforce_finish and ret[-1]!=finish:
+            ret.append(finish)
+    return ret
+
+def _get_caller():
+    #Not doing inspect.currentframe().f_back because inspect is slow to load
+    return sys._getframe(1).f_back if hasattr(sys, "_getframe") else None
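+
+# Usage sketches (the script and method names in the exec_cpython call are
+# hypothetical):
+#
+#   frange(0.0, 1.0, 0.25)                       # [0.0, 0.25, 0.5, 0.75]
+#   frange(0.0, 1.1, 0.25, enforce_finish=True)  # last element is 1.1
+#   ret = exec_cpython("mymodule.py", args=[2], method_name="twice")
+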
+def inject():
+    """Restore initial globals: re-inject devices and startup variables into the interpreter.
+
+    Args:
+        None
+
+    Returns:
+        None
+    """
+    if __name__ == "__main__":
+        get_context().injectVars()
+    else:
+        _get_caller().f_globals.update(get_context().scriptManager.injections)
+
+def notify(subject, text, attachments = None, to=None):
+    """Send an email message.
+
+    Args:
+        subject(str): Message subject.
+        text(str): Message body.
+        attachments(list of str, optional): list of files to be attached (expansion tokens are allowed).
+        to(list of str, optional): recipients. If None, uses the recipients defined in mail.properties.
+    Returns:
+        None
+    """
+    get_context().notify(subject, text, to_list(attachments), to_list(to))
+
+def string_to_obj(o):
+    #Convert a string (device name or URL) into a device object - lists are converted element-wise
+    if is_string(o):
+        if "://" in o:
+            return InlineDevice(o)
+        return eval(o)
+    elif is_list(o):
+        ret = []
+        for i in o:
+            ret.append(string_to_obj(i))
+        return ret
+    return o
+
+def _getBuiltinFunctions(filter = None):
+    ret = []
+    for name in globals().keys():
+        val = globals()[name]
+        if type(val) is PyFunction:
+            if filter is None or filter in name:
+                #Only "public" documented functions
+                if not name.startswith('_') and (val.__doc__ is not None):
+                    ret.append(val)
+    return to_array(ret)
+
+
+def _getBuiltinFunctionNames(filter = None):
+    ret = []
+    for function in _getBuiltinFunctions(filter):
+        ret.append(function.func_name)
+    return to_array(ret)
+
+def _getFunctionDoc(function):
+    if is_string(function):
+        if function not in globals():
+            return
+        function = globals()[function]
+    if type(function) is PyFunction and '__doc__' in dir(function):
+        ac = function.func_code.co_argcount
+        var = function.func_code.co_varnames
+        args = list(var)[:ac]
+        defs = function.func_defaults
+        if defs is not None:
+            for i in range(len(defs)):
+                index = len(args) - len(defs) + i
+                args[index] = args[index] + " = " + str(defs[i])
+        flags = function.func_code.co_flags
+        if flags & 4 > 0:   #CO_VARARGS
+            args.append('*' + var[ac])
+            ac=ac+1
+        if flags & 8 > 0:   #CO_VARKEYWORDS
+            args.append('**' + var[ac])
+        d = function.func_doc
+        return function.func_name + "(" + ", ".join(args) + ")" + "\n\n" + (d if (d is not None) else "")
+
+def help(object = None):
+    """
+    Print the help message for a function or object (if available).
+
+    Args:
+        object (any, optional): function or object to get help on.
+            If None, prints a list of the builtin functions.
+
+    Returns:
+        None
+    """
+    if object is None:
+        print "Built-in functions:"
+        for f in _getBuiltinFunctionNames():
+            print "\t" + f
+    else:
+        if type(object) is PyFunction:
+            print _getFunctionDoc(object)
+        elif '__doc__' in dir(object):
+            #The default object doc is not shown
+            import org.python.core.BuiltinDocs.object_doc
+            if object.__doc__ != org.python.core.BuiltinDocs.object_doc:
+                print object.__doc__
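+
+# Usage sketches (the email address is a placeholder):
+#
+#   help(frange)    #print signature and docstring
+#   help()          #list all documented builtin functions
+#   notify("Scan finished", "All steps completed", to=["user@example.com"])
+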
+###################################################################################################
+#UI interaction
+###################################################################################################
+
+def set_status(status):
+    """Set the application status.
+
+    Args:
+        status(str): new status.
+
+    Returns:
+        None
+    """
+    set_preference(Preference.STATUS, status)
+
+#Convenience wrapper around set_preference for the most common plot options
+def setup_plotting(enable_plots=None, enable_table=None, plot_list=None, line_plots=None, range=None, domain=None, defaults=None):
+    if defaults == True: set_preference(Preference.DEFAULTS, True)
+    if enable_plots is not None: set_preference(Preference.PLOT_DISABLED, not enable_plots)
+    if enable_table is not None: set_preference(Preference.TABLE_DISABLED, not enable_table)
+    if plot_list is not None: set_preference(Preference.ENABLED_PLOTS, None if plot_list == "all" else plot_list)
+    if line_plots is not None:
+        plots = None
+        if line_plots != "none":
+            plots = {}
+            for p in line_plots: plots[p]=1
+        set_preference(Preference.PLOT_TYPES, plots)
+    if range is not None:
+        if range == "none": set_preference(Preference.AUTO_RANGE, None)
+        elif range == "auto": set_preference(Preference.AUTO_RANGE, True)
+        else: set_preference(Preference.MANUAL_RANGE, range)
+    if domain is not None: set_preference(Preference.DOMAIN_AXIS, domain)
+
+def set_preference(preference, value):
+    """Hints to the graphical layer.
+
+    Args:
+        preference(Preference): enum of preference types:
+            PLOT_DISABLED: enable/disable the scan plot (True/False)
+            PLOT_LAYOUT: "Horizontal", "Vertical" or "Grid"
+            TABLE_DISABLED: enable/disable the scan table (True/False)
+            ENABLED_PLOTS: select Readables to be plotted (list of Readable or String (names))
+            PLOT_TYPES: dictionary of (Readable or String):(String or int) pairs,
+                where the key is a plot name and the value is the desired plot type
+            PRINT_SCAN: print scan records to the console
+            AUTO_RANGE: automatic range for the scan plots x-axis
+            MANUAL_RANGE: manually set the scan plots x-axis
+            MANUAL_RANGE_Y: manually set the scan plots y-axis
+            DOMAIN_AXIS: set the domain axis source: "Time", "Index", or a readable name.
+                Default(None): first positioner
+            STATUS: set the application status
+        value(object): preference value
+
+    Returns:
+        None
+    """
+    value = to_array(value, 'o')    #If list then convert to Object array
+    get_context().setPreference(preference, value)
+
+def get_string(msg, default = None, alternatives = None, password = False):
+    """
+    Read a string from the UI.
+
+    Args:
+        msg(str): display message.
+        default(str, optional): value displayed when the window is shown.
+        alternatives(list of str, optional): if provided, presents a combo box instead of an editing field.
+        password(boolean, optional): if True, hides the entered characters.
+
+    Returns:
+        String entered, or None if canceled
+    """
+    if password:
+        return get_context().getPassword(msg, None)
+    return get_context().getString(msg, str(default) if (default is not None) else None, alternatives)
+
+def get_option(msg, type = "YesNoCancel"):
+    """
+    Get an option from the UI.
+
+    Args:
+        msg(str): display message.
+        type(str, optional): 'YesNo', 'YesNoCancel' or 'OkCancel'
+
+    Returns:
+        'Yes', 'No' or 'Cancel'
+    """
+    return get_context().getOption(msg, type)
+
+def show_message(msg, title=None, blocking = True):
+    """
+    Pop up a message to the UI.
+
+    Args:
+        msg(str): display message.
+        title(str, optional): dialog title
+        blocking(boolean, optional): if True, blocks until the dialog is closed.
+    """
+    get_context().showMessage(msg, title, blocking)
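+
+# A minimal sketch of UI and plotting hints for a scan script (the plot name
+# "sensor1" is illustrative):
+#
+#   setup_plotting(enable_table=False, domain="Time")
+#   set_preference(Preference.PLOT_TYPES, {"sensor1": 1})
+#   if get_option("Start the scan?", type="YesNo") == "Yes":
+#       set_status("Scanning")
+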
+ """ + if type(device) is BufferedImage: + device = DirectSource(title, device) + device.initialize() + if is_string(device): + device = get_device(device) + return get_context().showPanel(device) + + + + +if __name__ == "__main__": + ca_channel_path=os.path.join(get_context().setup.getStandardLibraryPath(), "epics") + sys.path.append(ca_channel_path) + #This is to destroy previous context of _ca (it is not shared with PShell) + if run_count > 0: + if sys.modules.has_key("_ca"): + print + import _ca + _ca.initialize() \ No newline at end of file diff --git a/script/___Lib/statsutils.py b/script/___Lib/statsutils.py new file mode 100644 index 0000000..ffecda6 --- /dev/null +++ b/script/___Lib/statsutils.py @@ -0,0 +1,191 @@ +################################################################################################### +# Utilities for generating reports from command statistics files +################################################################################################### + +#CsvJdbc JAR file must be downloaded to extensions folder: +#http://central.maven.org/maven2/net/sourceforge/csvjdbc/csvjdbc/1.0.34/csvjdbc-1.0.34.jar + + +import java.sql.DriverManager as DriverManager +import java.sql.ResultSet as ResultSet +import java.util.Properties as Properties +import java.lang.Class as Class +import os +from startup import get_context +import ch.psi.pshell.core.CommandManager.CommandStatisticsFileRange as CommandStatisticsFileRange + +stmt = None +STAT_COLUMN_NAMES = ["Command","Args","Source","Start","End","Background","Result","Return"] +def get_stats_connection(): + global stmt + Class.forName("org.relique.jdbc.csv.CsvDriver"); + db = os.path.abspath(get_context().setup.expandPath("{home}/statistics")) + props = Properties() + props.put("fileExtension", ".csv") + props.put("separator", ";") + props.put("timestampFormat", "dd/MM/yy HH:mm:ss.SSS") + props.put("indexedFiles", "true"); + props.put("columnTypes", "String,String,String,Timestamp,Timestamp,Boolean,String,String"); + + fileRange = get_context().commandManager.commandStatisticsConfig.fileRange + if fileRange==CommandStatisticsFileRange.Daily: + props.put("fileTailPattern", "(\\d+)_(\\d+)_(\\d+)"); + props.put("fileTailParts", "Year,Month,Day"); + elif fileRange==CommandStatisticsFileRange.Monthly: + props.put("fileTailPattern", "(\\d+)_(\\d+)"); #props.put("fileTailPattern", "-(\\d+)_(\\d+)"); + props.put("fileTailParts", "Year,Month"); + elif fileRange==CommandStatisticsFileRange.Yearly: + props.put("fileTailPattern", "(\\d+)"); + props.put("fileTailParts", "Year"); + + conn = DriverManager.getConnection("jdbc:relique:csv:" + db, props); + stmt = conn.createStatement(ResultSet.TYPE_SCROLL_SENSITIVE,ResultSet.CONCUR_READ_ONLY); + return conn + +def _get_count(sql): + ret = 0 + results = stmt.executeQuery("SELECT COUNT(*) AS count FROM . 
WHERE " + sql) + if results.first(): + ret = results.getInt("count") + return ret + +def _add_sql_time(sql, start, end): + if start: + if len(start)==8: + start = start + " 00:00:00.000" + sql = sql + " AND Start>='" + start + "'" + if end: + if len(end)==8: + end = end + " 00:00:00.000" + sql = sql + " AND (\"End\"<'" + end + "')" + return sql + +def get_count(command= "%%", start = None, end = None, result= "%%"): + sql = "Command LIKE '"+ command +"' AND Result LIKE '"+ result +"'" + sql = _add_sql_time(sql, start, end) + return _get_count(sql) + +def get_return_count(command= "%%", start = None, end = None, ret= "%%"): + sql = "Command LIKE '"+ command +"' AND Return = '"+ ret +"'" + sql = _add_sql_time(sql, start, end) + return _get_count(sql) + +def get_cmd_stats(command = "%", start = None, end = None): + s = get_count(command, start, end, "success") + a = get_count(command, start, end, "abort") + e = get_count(command, start, end, "error") + return (s,a,e) + +def get_errors(command = "%", start = None, end = None): + sql = "SELECT Return, Count(Return) as count FROM . WHERE Command LIKE '"+ command +"' AND Result='error'" + sql = _add_sql_time(sql, start, end) + sql = sql + " GROUP BY Return ORDER BY count DESC" + results = stmt.executeQuery(sql) + ret = [] + while results.next(): + ret.append((results.getInt("count"), results.getString("Return"))) + return ret + + +def get_cmd_records(command = "%", start = None, end = None, result= "%%"): + sql = "SELECT * FROM . WHERE Command LIKE '"+ command +"' AND Result LIKE '"+ result +"'" + sql = _add_sql_time(sql, start, end) + results = stmt.executeQuery(sql) + ret = [] + while results.next(): + rec={} + for col in STAT_COLUMN_NAMES: + rec[col]= results.getString(col) + ret.append(rec) + return ret + +def get_commands(commands =None, start = None, end = None): + ret = [] + if (commands is None) or (len(commands)==0): + sql = "SELECT * FROM . 
WHERE Command != ''" + sql = _add_sql_time(sql, start, end) + sql = sql + " GROUP BY Command" + results = stmt.executeQuery(sql) + while results.next(): + cmd = results.getString("Command") + if cmd and not " " in cmd: + ret.append(cmd) + else: + for cmd in commands: + if get_count(cmd, start, end) >0 : + ret.append(cmd) + return ret + +def print_cmd_stats(command = "%", start = None, end = None): + print "-----------------------------------------------------------" + print "Statistics from ", start , " to ", end + (s,a,e) = get_cmd_stats(command, start, end) + t=s+a+e #get_count(command, start, end, "%") + print "Command: " , command , " Records: ", t + if t>0: + print "%-10s %7.2f%% - %d" % ("Success", (float(s)/t) * 100, s) + print "%-10s %7.2f%% - %d" % ("Abort", (float(a)/t) * 100, a) + print "%-10s %7.2f%% - %d" % ("Error", (float(e)/t) * 100, e) + + print "\nErrors:" + print "%5s %s" % ("Count", "Error") + errors = get_errors(command, start, end) + for error in errors: + print "%5d %s" % (error[0], error[1]) + print "-----------------------------------------------------------" + +def print_cmd_records(command = "%", start = None, end = None, result= "%%"): + print "-----------------------------------------------------------" + print "Records from ", start , " to ", end + info = get_cmd_records(command, start, end, result) + print "Command: " , command , " Result: ", result, " Records: ", len(info) + + for col in STAT_COLUMN_NAMES: + print col+ "; " , + print + + for cmd in info: + s = "" + for col in STAT_COLUMN_NAMES: + s = s + cmd[col]+ "; " + print s + print "-----------------------------------------------------------" + +def print_stats(commands = None, start = None, end = None): + print "-----------------------------------------------------------" + print "Statistics from ", start , " to ", end + print "%-20s %-5s %8s %8s %8s" % ("Command", "Total", "Success", "Abort", "Error") + cmds = get_commands(commands) + for cmd in cmds: + (s,a,e) = get_cmd_stats(cmd, start, end) + t=s+a+e + if t>0: + print "%-20s %-5d %7.2f%% %7.2f%% %7.2f%%" % (cmd, t, (float(s)/t) * 100, (float(a)/t) * 100, (float(e)/t) * 100) + else: + print "%-20s %-5d" % (cmd, t) + print "-----------------------------------------------------------" + + + + +if __name__=='__main__': + conn = get_stats_connection() + + #Print stats of all commands, with no time range + print_stats() + + cmds = ["%scan1%", "%scan2%"] + start= "01/03/19" + end= "01/04/19" + + #Print stats all commands containing 'scan1' and 'scan2' in the month 03.2019 + print_stats(cmds, start, end) + + #Print individual statistics, including error count, for commands containing 'scan1' and 'scan2' + for cmd in cmds: + print_cmd_stats (cmd, start, end) + + #Print all records for commands containing 'scan1' + print_cmd_records("%scan1%%", start, end, "error") + conn.close() +