merged changes for lakeshore and ccu4

This commit is contained in:
l_samenv
2025-03-06 17:26:51 +01:00
committed by Markus Zolliker
parent 95dc8b186e
commit 8c2588a5ed
7 changed files with 1565 additions and 371 deletions


@@ -22,6 +22,7 @@
import os
import re
from pathlib import Path
from os.path import basename, dirname, exists, join
import numpy as np
@@ -31,13 +32,22 @@ from scipy.interpolate import PchipInterpolator, CubicSpline, PPoly # pylint: d
from frappy.errors import ProgrammingError, RangeError
from frappy.lib import clamp
def identity(x):
return x
def exp10(x):
return 10 ** np.array(x)
to_scale = {
'lin': lambda x: x,
'log': lambda x: np.log10(x),
'lin': identity,
'log': np.log10,
}
from_scale = {
'lin': lambda x: x,
'log': lambda x: 10 ** np.array(x),
'lin': identity,
'log': exp10,
}
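For illustration, a standalone round trip through the two lookup tables above (the helpers are redefined here so the snippet runs on its own):

import numpy as np

def identity(x):
    return x

def exp10(x):
    return 10 ** np.array(x)

to_scale = {'lin': identity, 'log': np.log10}
from_scale = {'lin': identity, 'log': exp10}

x = np.array([0.1, 1.0, 10.0])
assert np.allclose(from_scale['log'](to_scale['log'](x)), x)  # log10 then 10**x restores x
assert np.all(to_scale['lin'](x) == x)  # 'lin' leaves values untouched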
TYPES = [ # lakeshore type, inp-type, loglog
('DT', 'si', False), # Si diode
@@ -55,7 +65,7 @@ TYPES = [ # lakeshore type, inp-type, loglog
OPTION_TYPE = {
'loglog': 0, # boolean
'extrange': 2, # tuple(min T, max T for extrapolation
'extrange': 2, # tuple(min T, max T) for extrapolation
'calibrange': 2, # tuple(min T, max T)
}
@@ -222,14 +232,6 @@ PARSERS = {
}
def check(x, y, islog):
# check interpolation error
yi = y[:-2] + (x[1:-1] - x[:-2]) * (y[2:] - y[:-2]) / (x[2:] - x[:-2])
if islog:
return sum((yi - y[1:-1]) ** 2)
return sum((np.log10(yi) - np.log10(y[1:-1])) ** 2)
def get_curve(newscale, curves):
"""get curve from curve cache (converts not existing ones)
@@ -247,6 +249,7 @@ def get_curve(newscale, curves):
class CalCurve(HasOptions):
EXTRAPOLATION_AMOUNT = 0.1
MAX_EXTRAPOLATION_FACTOR = 2
filename = None # calibration file
def __init__(self, calibspec=None, *, x=None, y=None, cubic_spline=True, **options):
"""calibration curve
@@ -257,7 +260,7 @@ class CalCurve(HasOptions):
[<full path> | <name>][,<key>=<value> ...]
for <key>/<value> as in parser arguments
:param x, y: x and y arrays (given instead of calibspec)
:param cubic_split: set to False for always using Pchip interpolation
:param cubic_spline: set to False for always using Pchip interpolation
:param options: options for parsers
"""
self.options = options
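For illustration, how a calibspec string of the form described in the docstring splits into name, extension and options; the file path is hypothetical:

from os.path import basename

calibspec = '/some/path/X12345.340,loglog=1'  # hypothetical calibration spec
sensopt = calibspec.split(',')                # ['/some/path/X12345.340', 'loglog=1']
calibname = sensopt.pop(0)
name = basename(calibname)
head, dot, ext = name.rpartition('.')
if dot:
    name = head
print(name, ext, sensopt)                     # X12345 340 ['loglog=1']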
@@ -265,26 +268,31 @@
parser = StdParser()
parser.xdata = x
parser.ydata = y
self.calibname = 'custom'
else:
if x or y:
raise ProgrammingError('can not give both calibspec and x,y ')
sensopt = calibspec.split(',')
calibname = sensopt.pop(0)
_, dot, ext = basename(calibname).rpartition('.')
self.calibname = basename(calibname)
head, dot, ext = self.calibname.rpartition('.')
if dot:
self.calibname = head
kind = None
pathlist = os.environ.get('FRAPPY_CALIB_PATH', '').split(':')
pathlist.append(join(dirname(__file__), 'calcurves'))
pathlist = [Path(p.strip()) for p in os.environ.get('FRAPPY_CALIB_PATH', '').split(':')]
pathlist.append(Path(dirname(__file__)) / 'calcurves')
for path in pathlist:
# first try without adding kind
filename = join(path.strip(), calibname)
if exists(filename):
filename = path / calibname
if filename.exists():
kind = ext if dot else None
break
# then try adding all kinds as extension
for nam in calibname, calibname.upper(), calibname.lower():
for kind in PARSERS:
filename = join(path.strip(), '%s.%s' % (nam, kind))
filename = path / f'{nam}.{kind}'
if exists(filename):
self.filename = filename
break
else:
continue
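A standalone sketch of the pathlib-based lookup above: directories from FRAPPY_CALIB_PATH are searched first, then a bundled calcurves directory, trying the plain name and then each parser key as extension (the PARSERS keys below are placeholders, not the module's real ones):

import os
from pathlib import Path

PARSERS = {'340': None, 'inp': None, 'dat': None}  # placeholder parser keys

def find_calib_file(calibname, module_dir):
    pathlist = [Path(p.strip()) for p in os.environ.get('FRAPPY_CALIB_PATH', '').split(':')]
    pathlist.append(Path(module_dir) / 'calcurves')
    for path in pathlist:
        # first try the name as given
        candidate = path / calibname
        if candidate.exists():
            return candidate
        # then try all case variants with each parser key as extension
        for nam in (calibname, calibname.upper(), calibname.lower()):
            for kind in PARSERS:
                candidate = path / f'{nam}.{kind}'
                if candidate.exists():
                    return candidate
    return None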
@@ -328,6 +336,7 @@ class CalCurve(HasOptions):
not_incr_idx = np.argwhere(x[1:] <= x[:-1])
if len(not_incr_idx):
raise RangeError('x not monotonic at x=%.4g' % x[not_incr_idx[0]])
self.ptc = y[-1] > y[0]
self.x = {parser.xscale: x}
self.y = {parser.yscale: y}
@@ -344,8 +353,7 @@
self.convert_x = to_scale[newscale]
self.convert_y = from_scale[newscale]
self.calibrange = self.options.get('calibrange')
dirty = set()
self.extra_points = False
self.extra_points = (0, 0)
self.cutted = False
if self.calibrange:
self.calibrange = sorted(self.calibrange)
@@ -371,7 +379,6 @@
self.y = {newscale: y}
ibeg = 0
iend = len(x)
dirty.add('xy')
else:
self.extra_points = ibeg, len(x) - iend
else:
@@ -493,13 +500,48 @@
except IndexError:
return defaultx
def export(self, logformat=False, nmax=199, yrange=None, extrapolate=True, xlimits=None):
def interpolation_error(self, x0, x1, y0, y1, funx, funy, relerror, return_tuple=False):
"""calcualte interpoaltion error
:param x0: start of interval
:param x1: end of interval
:param y0: y at start of interval
:param y1: y at end of interval
:param funx: function to convert x from exported scale to internal scale
:param funy: function to convert y from internal scale to exported scale
:param relerror: True when the exported y scale is linear
:param return_tuple: True: return interpolation error as a tuple with two values
(without and with 3 additional points)
False: return one value without additional points
:return: relative deviation
"""
xspace = np.linspace(x0, x1, 9)
x = funx(xspace)
yr = self.spline(x)
yspline = funy(yr)
yinterp = y0 + np.linspace(0.0, y1 - y0, 9)
# difference between spline (at m points) and linear interpolation
diff = np.abs(yspline - yinterp)
# estimate of interpolation error with 4 sections:
# difference between spline (at m points) and linear interpolation between neighboring points
if relerror:
fact = 2 / (np.abs(y0) + np.abs(y1)) # division by zero can not happen, as y0 and y1 can not both be zero
else:
fact = 2.3 # difference is in log10 -> multiply by 1 / log10(e)
result = np.max(diff, axis=0) * fact
if return_tuple:
diff2 = np.abs(0.5 * (yspline[:-2:2] + yspline[2::2]) - funy(yr[1:-1:2]))
return result, np.max(diff2, axis=0) * fact
return result
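A worked standalone sketch of the estimate implemented above: sample 9 points across one interval, compare the spline with the straight line between the endpoints, and scale the maximum deviation to a relative error (toy spline, linear scales assumed):

import numpy as np
from scipy.interpolate import CubicSpline

spline = CubicSpline([0, 1, 2, 3], [0, 1, 8, 27])  # toy data, roughly y = x ** 3
x0, x1 = 1.0, 2.0
y0, y1 = spline([x0, x1])
xs = np.linspace(x0, x1, 9)
yspline = spline(xs)                         # spline values on the interval
yinterp = y0 + np.linspace(0.0, y1 - y0, 9)  # straight-line interpolation
fact = 2 / (abs(y0) + abs(y1))               # relative error for a linear y scale
print(np.max(np.abs(yspline - yinterp)) * fact)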
def export(self, logformat=False, nmax=199, yrange=None, extrapolate=True, xlimits=None, nmin=199):
"""export curve for downloading to hardware
:param nmax: max number of points. if the number of given points is bigger,
the points with the lowest interpolation error are omitted
:param logformat: a list with two elements of None, True or False
True: use log, False: use line, None: use log if self.loglog
:param logformat: a list with two elements of None, True or False for x and y
True: use log, False: use lin, None: use log if self.loglog
values None are replaced with the effectively used format
False / True are replaced by [False, False] / [True, True]
default is False
@@ -507,25 +549,26 @@
:param extrapolate: a flag indicating whether the curves should be extrapolated
to the preset extrapolation range
:param xlimits: max x range
:param nmin: minimum number of points
:return: numpy array with 2 dimensions returning the curve
"""
if logformat in (True, False):
logformat = [logformat, logformat]
logformat = (logformat, logformat)
self.logformat = list(logformat)
try:
scales = []
for idx, logfmt in enumerate(logformat):
if logfmt and self.lin_forced[idx]:
raise ValueError('%s must contain positive values only' % 'xy'[idx])
logformat[idx] = linlog = self.loglog if logfmt is None else logfmt
self.logformat[idx] = linlog = self.loglog if logfmt is None else logfmt
scales.append('log' if linlog else 'lin')
xscale, yscale = scales
except (TypeError, AssertionError):
raise ValueError('logformat must be a 2 element list or a boolean')
raise ValueError('logformat must be a 2 element sequence or a boolean')
x = self.spline.x[1:-1] # raw units, excluding extrapolated points
x1, x2 = xmin, xmax = x[0], x[-1]
y1, y2 = sorted(self.spline([x1, x2]))
xr = self.spline.x[1:-1] # raw units, excluding extrapolated points
x1, x2 = xmin, xmax = xr[0], xr[-1]
if extrapolate and not yrange:
yrange = self.exty
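A minimal standalone approximation of the logformat handling above: a single boolean applies to both axes, and None falls back to the curve's loglog default:

def normalize_logformat(logformat, loglog):
    if logformat in (True, False):
        logformat = (logformat, logformat)
    resolved = [loglog if fmt is None else fmt for fmt in logformat]
    scales = ['log' if fmt else 'lin' for fmt in resolved]
    return resolved, scales

assert normalize_logformat(False, True) == ([False, False], ['lin', 'lin'])
assert normalize_logformat([None, True], False) == ([False, True], ['lin', 'log'])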
@@ -535,42 +578,100 @@
lim = to_scale[self.scale](xlimits)
xmin = clamp(xmin, *lim)
xmax = clamp(xmax, *lim)
# start and end index of calibrated range
ibeg, iend = self.extra_points[0], len(xr) - self.extra_points[1]
if xmin != x1 or xmax != x2:
ibeg, iend = np.searchsorted(x, (xmin, xmax))
if abs(x[ibeg] - xmin) < 0.1 * (x[ibeg + 1] - x[ibeg]):
i, j = np.searchsorted(xr, (xmin, xmax))
if abs(xr[i] - xmin) < 0.1 * (xr[i + 1] - xr[i]):
# remove first point, if close
ibeg += 1
if abs(x[iend - 1] - xmax) < 0.1 * (x[iend - 1] - x[iend - 2]):
i += 1
if abs(xr[j - 1] - xmax) < 0.1 * (xr[j - 1] - xr[j - 2]):
# remove last point, if close
iend -= 1
x = np.concatenate(([xmin], x[ibeg:iend], [xmax]))
y = self.spline(x)
j -= 1
offset = i - 1
xr = np.concatenate(([xmin], xr[i:j], [xmax]))
ibeg = max(0, ibeg - offset)
iend = min(len(xr), iend - offset)
yr = self.spline(xr)
# convert to exported scale
if xscale != self.scale:
x = to_scale[xscale](from_scale[self.scale](x))
if yscale != self.scale:
y = to_scale[yscale](from_scale[self.scale](y))
# reduce number of points, if needed
n = len(x)
i, j = 1, n - 1 # index range for calculating interpolation deviation
deviation = np.zeros(n)
while True:
# calculate interpolation error when a single point is omitted
ym = y[i-1:j-1] + (x[i:j] - x[i-1:j-1]) * (y[i+1:j+1] - y[i-1:j-1]) / (x[i+1:j+1] - x[i-1:j-1])
if yscale == 'log':
deviation[i:j] = np.abs(ym - y[i:j])
if xscale == self.scale:
xbwd = identity
x = xr
else:
if self.scale == 'log':
xfwd, xbwd = from_scale[self.scale], to_scale[self.scale]
else:
deviation[i:j] = np.abs(ym - y[i:j]) / (np.abs(ym + y[i:j]) + 1e-10)
if n <= nmax:
break
idx = np.argmin(deviation[1:-1]) + 1 # find index of the smallest error
y = np.delete(y, idx)
x = np.delete(x, idx)
deviation = np.delete(deviation, idx)
n -= 1
# index range to recalculate
i, j = max(1, idx - 1), min(n - 1, idx + 1)
self.deviation = deviation # for debugging purposes
xfwd, xbwd = to_scale[xscale], from_scale[xscale]
x = xfwd(xr)
if yscale == self.scale:
yfwd = identity
y = yr
else:
if self.scale == 'log':
yfwd = from_scale[self.scale]
else:
yfwd = to_scale[yscale]
y = yfwd(yr)
self.deviation = None
nmin = min(nmin, nmax)
n = len(x)
relerror = yscale == 'lin'
if len(x) > nmax:
# reduce number of points, if needed
i, j = 1, n - 1 # index range for calculating interpolation deviation
deviation = np.zeros(n)
while True:
deviation[i:j] = self.interpolation_error(
x[i-1:j-1], x[i+1:j+1], y[i-1:j-1], y[i+1:j+1],
xbwd, yfwd, relerror)
# calculate interpolation error when a single point is omitted
if n <= nmax:
break
idx = np.argmin(deviation[1:-1]) + 1 # find index of the smallest error
y = np.delete(y, idx)
x = np.delete(x, idx)
deviation = np.delete(deviation, idx)
n = len(x)
# index range to recalculate
i, j = max(1, idx - 1), min(n - 1, idx + 1)
self.deviation = deviation # for debugging purposes
elif n < nmin:
if ibeg + 1 < iend:
diff1, diff4 = self.interpolation_error(
x[ibeg:iend - 1], x[ibeg + 1:iend], y[ibeg:iend - 1], y[ibeg + 1:iend],
xbwd, yfwd, relerror, return_tuple=True)
dif_target = 1e-4
sq4 = np.sqrt(diff4) * 4
sq1 = np.sqrt(diff1)
offset = 0.49
n_mid = nmax - len(x) + iend - ibeg - 1
# iteration to find a dif target resulting in no more than nmax points
while True:
scale = 1 / np.sqrt(dif_target)
# estimate number of intermediate points (float!) needed to reach dif_target
# number of points estimated from the result of the interpolation error with 4 sections
n4 = np.maximum(1, sq4 * scale)
# number of points estimated from the result of the interpolation error with 1 section
n1 = np.maximum(1, sq1 * scale)
# use n4 where n4 > 4, n1, where n1 < 1 and a weighted average in between
nn = np.select([n4 > 4, n1 > 1],
[n4, (n4 * (n1 - 1) + n1 * (4 - n4)) / (3 + n1 - n4)], n1)
n_tot = np.sum(np.rint(nn + offset))
extra = n_tot - n_mid
if extra <= 0:
break
dif_target *= (n_tot / n_mid) ** 2
xnew = [x[:ibeg]]
for x0, x1, ni in zip(x[ibeg:iend-1], x[ibeg+1:iend], np.rint(nn + offset)):
xnew.append(np.linspace(x0, x1, int(ni) + 1)[:-1])
xnew.append(x[iend-1:])
x = np.concatenate(xnew)
y = yfwd(self.spline(xbwd(x)))
# for debugging purposes:
self.deviation = self.interpolation_error(x[:-1], x[1:], y[:-1], y[1:], xbwd, yfwd, relerror)
return np.stack([x, y], axis=1)
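Finally, a standalone sketch of the greedy reduction performed when more than nmax points are present: the interior point whose removal causes the smallest interpolation error is dropped repeatedly (toy relative-error measure for a linear y scale; all errors are recomputed each pass for brevity, unlike the incremental update above):

import numpy as np

def reduce_points(x, y, nmax):
    x, y = np.asarray(x, float), np.asarray(y, float)
    while len(x) > nmax:
        # value each interior point would get from the straight line between its neighbours
        ym = y[:-2] + (x[1:-1] - x[:-2]) * (y[2:] - y[:-2]) / (x[2:] - x[:-2])
        deviation = np.abs(ym - y[1:-1]) / (np.abs(ym + y[1:-1]) + 1e-10)
        idx = np.argmin(deviation) + 1  # interior point with the smallest error
        x, y = np.delete(x, idx), np.delete(y, idx)
    return x, y

xfull = np.linspace(1, 10, 50)
xr, yr = reduce_points(xfull, np.sqrt(xfull), 20)
print(len(xr))  # 20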