first potentially useful version
commands.py (new file, 68 lines)
@@ -0,0 +1,68 @@
from datetime import datetime
import pandas as pd
import epics

from utils.df import drop_col, compare_dfs, count_true
from utils.epics import DataGetter
from utils.execute import parallel
#from utils.execute import serial as parallel
from utils.fileio import load_config, load_csv, store_csv
from utils.printing import print_good, print_bad


def run(clargs):
    commands = {
        "check": run_check,
        "compare": run_compare
    }
    commands[clargs.command](clargs)


def run_check(clargs):
    filename = clargs.filename
    chans = load_config(filename)
    pvs = (epics.PV(ch) for ch in chans) # putting PV constructors into ThreadPoolExecutor has weird effects

    get_data = DataGetter(clargs.timeout, clargs.silent)
    data = parallel(get_data, pvs, chans)

    df = pd.DataFrame(data).T
    df = df.infer_objects() #TODO: why is this needed?
    # print(df)
    # print(df.dtypes)

    connected = df["connected"]
    if connected.all():
        print_good("all connections OK")
    else:
        ntotal = len(connected)
        ngood = count_true(connected)
        print_bad(f"only {ngood}/{ntotal} connections OK")

    output = clargs.output
    if not output:
        return

    timestamp = datetime.now()
    meta = f"{filename} / {timestamp}"
    store_csv(df, output, meta)


def run_compare(clargs):
    fn1, fn2 = clargs.filenames
    df1 = load_csv(fn1)
    df2 = load_csv(fn2)

    if clargs.ignore_values:
        drop_col(df1, "value")
        drop_col(df2, "value")

    diff = compare_dfs(df1, df2)
    if diff.empty:
        print_good(f'"{fn1}" and "{fn2}" are identical')
    else:
        print_bad(f'"{fn1}" and "{fn2}" differ:')
        print(diff)
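Note: utils/execute is imported above but is not part of this commit. As a rough sketch only, assuming a thread-pool map that matches the call sites parallel(get_data, pvs, chans) and parallel(func, pvs), it could look like the following; the real module may differ.

# Hypothetical sketch of utils/execute -- not in this commit.
from concurrent.futures import ThreadPoolExecutor

def parallel(func, items, names=None):
    # run func over all items concurrently; PV construction stays outside (see comment in run_check)
    with ThreadPoolExecutor() as executor:
        results = list(executor.map(func, items))
    return results if names is None else dict(zip(names, results))

def serial(func, items, names=None):
    # thread-free drop-in, handy for debugging
    results = [func(item) for item in items]
    return results if names is None else dict(zip(names, results))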
config.py (deleted, 19 lines)
@@ -1,19 +0,0 @@
def load(fname):
    chans = set()
    for line in read_lines(fname):
        line = remove_comments(line).strip()
        if not line:
            continue
        chans.add(line)
    return sorted(chans)


def read_lines(fname):
    with open(fname, "r") as f:
        yield from f


def remove_comments(line, comment_char="#"):
    return line.split(comment_char)[0]
data/test_chans_good.txt (new file, 14 lines)
@@ -0,0 +1,14 @@
SIN-TIMAST-TMA:Bunch-2-Appl-Freq-RB # rep rate

# vacuum gauges
SATOP21-VMFR165-A010:PRESSURE
SATOP21-VMCC165-A010:PRESSURE


# slits
SATOP21-OAPU161:MOTOR_W.RBV



SAROP21-OKB:MODE
SATOP11-VVPG092-A010:PLC_OPEN
logger.py (deleted, 26 lines)
@@ -1,26 +0,0 @@
import logging as log


LEVELS = (
#    log.CRITICAL,
    log.ERROR,
    log.WARNING,
    log.INFO,
    log.DEBUG,
    log.NOTSET
)


def set_log_level(verbosity):
    ntotal = len(LEVELS) - 1
    index = min(verbosity, ntotal)
    level = LEVELS[index]
    log.basicConfig(level=level, format="%(asctime)s %(levelname)s %(message)s")




#from logger import log, set_log_level

#set_log_level(clargs.verbose)
(deleted file, 57 lines; file name not shown in this view)
@@ -1,57 +0,0 @@
import epics

from config import load
from execute import parallel
#from execute import serial as parallel


class PVCollection:

    def __init__(self, chans):
        self.chans = chans
        self.pvs = [PV(ch) for ch in chans]

    @classmethod
    def from_file(cls, fname):
        chans = load(fname)
        return cls(chans)

    def connected(self):
        return self._run_all(lambda pv: pv.wait_for_connection(0.01))

    def status(self):
        return self._run_all(lambda pv: pv.status)

    def severity(self):
        return self._run_all(lambda pv: pv.severity)

    def value(self):
        return self._run_all(lambda pv: pv.value)

    def _run_all(self, func):
        return parallel(func, self.pvs)


class PV(epics.PV):

    @property
    def status(self):
        if not self.connected:
            return -1
        return super().status

    @property
    def severity(self):
        if not self.connected:
            return -1
        return super().severity

    @property
    def value(self):
        if not self.connected:
            return None
        return super().value
sani.py (rewritten: 183 lines → 40 lines)
@@ -1,183 +1,40 @@
 #!/usr/bin/env python

-import argparse
-
-parser = argparse.ArgumentParser()
-subparsers = parser.add_subparsers(title="command", description="valid commands", dest="command", help="commands")
-
-parser_check = subparsers.add_parser("check", help="check!", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
-parser_check.add_argument("filename", help="name of input channel-list file")
-parser_check.add_argument("-o", "--output", help="output CSV file", default=None)
-parser_check.add_argument("-s", "--silent", help="do not show each channel's answer", action="store_true")
-parser_check.add_argument("-t", "--timeout", help="connection timeout in seconds", type=float, default=1)
-
-parser_compare = subparsers.add_parser("compare", help="compare!")
-parser_compare.add_argument("filenames", metavar="filename", nargs=2, help="name of input CSV file, two are needed")
-parser_compare.add_argument("-v", "--ignore-values", help="do not check values", action="store_true")
-
-clargs = parser.parse_args()
-
-if not clargs.command:
-    parser.print_help()
-    raise SystemExit
+def main():
+    clargs = handle_clargs()
+    from commands import run
+    run(clargs)


-from datetime import datetime
-import epics
-import numpy as np
-import pandas as pd
-from colorama import Fore, Style
+def handle_clargs():
+    import argparse
+
+    parser = argparse.ArgumentParser()
+    subparsers = parser.add_subparsers(dest="command", help="valid commands")

-from alarms import message
-from config import load
-from execute import parallel
-#from execute import serial as parallel
+    parser_check = subparsers.add_parser("check", help="check a list of channels", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+    parser_check.add_argument("filename", help="name of input channel-list file")
+    parser_check.add_argument("-o", "--output", help="output CSV file", default=None)
+    parser_check.add_argument("-s", "--silent", help="do not show each channel's answer", action="store_true")
+    parser_check.add_argument("-t", "--timeout", help="connection timeout in seconds", type=float, default=1)

+    parser_compare = subparsers.add_parser("compare", help="compare two check results")
+    parser_compare.add_argument("filenames", metavar="filename", nargs=2, help="name of input CSV file, two are needed")
+    parser_compare.add_argument("-v", "--ignore-values", help="do not check values", action="store_true")

-MSG_NOT_CONNECTED = "did not connect"
-MSG_SUCCESS = "OK"
+    clargs = parser.parse_args()

-SYM_GOOD = "👍"
-SYM_BAD = "💔"
+    if not clargs.command:
+        parser.print_help()
+        raise SystemExit

-COL_NOT_CONNECTED = Fore.RED
-COL_SUCCESS = Fore.GREEN
-COL_ALARM = Fore.YELLOW
+    return clargs

-#COL_COMP_LEFT = Fore.MAGENTA
-#COL_COMP_LEFT = Fore.CYAN
-
-COL_RESET = Fore.RESET
-
-
-def get_data(pv):
-    connected = pv.wait_for_connection(clargs.timeout)
-
-    if not connected:
-        value = np.nan
-        status = severity = -1
-        msg = MSG_NOT_CONNECTED
-        col = COL_NOT_CONNECTED
-    else:
-        value = pv.value
-        status = pv.status
-        severity = pv.severity
-        if status == 0 and severity == 0:
-            msg = MSG_SUCCESS
-            col = COL_SUCCESS
-        else:
-            msg = message(status, severity)
-            col = COL_ALARM
-
-    data = {
-        "connected": connected,
-        "value": value,
-        "status": status,
-        "severity": severity
-    }
-
-    if not clargs.silent:
-        msg = colored(col, msg)
-        print(pv.pvname, msg)
-    return data
-
-
-def colored(color, msg):
-    return color + str(msg) + COL_RESET
-
-
-def run_check():
-    filename = clargs.filename
-    chans = load(filename)
-    pvs = (epics.PV(ch) for ch in chans) # putting PV constructors into ThreadPoolExecutor has weird effects
-    data = parallel(get_data, pvs, chans)
-
-    df = pd.DataFrame(data).T
-    df = df.infer_objects() #TODO: why is this needed?
-    # print(df)
-    # print(df.dtypes)
-
-    connection_state = df["connected"]
-    if connection_state.all():
-        print(f"{SYM_GOOD} all connections OK")
-    else:
-        total = connection_state.index
-        good = total[connection_state]
-
-        ntotal = len(total)
-        ngood = len(good)
-
-        print(f"{SYM_BAD} only {ngood}/{ntotal} connections OK")
-
-    output = clargs.output
-    if not output:
-        return
-
-    timestamp = datetime.now()
-    meta = f"{filename} / {timestamp}"
-    store_csv(df, output, meta)
-
-
-def run_compare():
-    fn1, fn2 = clargs.filenames
-    df1 = load_csv(fn1)
-    df2 = load_csv(fn2)
-
-    if clargs.ignore_values:
-        df1.drop("value", axis="columns", inplace=True)
-        df2.drop("value", axis="columns", inplace=True)
-
-    def report_diff(x):
-        return "" if equal(*x) else " {} | {}".format(*x)
-
-    def equal(a, b):
-        return a == b or (np.isnan(a) and np.isnan(b))
-
-    df = pd.concat((df1, df2))
-    changes = df.groupby(level=0).agg(report_diff)
-
-    changes.replace("", np.nan, inplace=True)
-    changes.dropna(axis="columns", how="all", inplace=True)
-    changes.dropna(axis="index", how="all", inplace=True)
-    changes.replace(np.nan, "", inplace=True)
-
-    if changes.empty:
-        print(f'{SYM_GOOD} "{fn1}" and "{fn2}" are identical')
-    else:
-        print(f'{SYM_BAD} "{fn1}" and "{fn2}" differ:')
-        print(changes)
-
-
-def store_csv(df, fname, meta):
-    fname = fix_file_ext(fname, "csv")
-    with open(fname, "w") as f:
-        f.write(f"# {meta}\n")
-        df.to_csv(f)
-
-
-def load_csv(fname):
-    fname = fix_file_ext(fname, "csv")
-    return pd.read_csv(fname, index_col=0, comment="#", float_precision="high")
-
-
-def fix_file_ext(fn, ext):
-    if not ext.startswith("."):
-        ext = "." + ext
-    if not fn.endswith(ext):
-        fn += ext
-    return fn


 if __name__ == "__main__":
-    if clargs.command == "check":
-        run_check()
-    elif clargs.command == "compare":
-        run_compare()
+    main()
test.sh (new executable file, 11 lines)
@@ -0,0 +1,11 @@
#!/bin/bash

./sani.py check data/test_chans_good.txt -s
echo
./sani.py check data/test_chans_bad.txt
echo
./sani.py compare data/test1.csv data/test4
echo
./sani.py compare data/test1.csv data/test4 -v
utils/__init__.py (new empty file)
utils/colors.py (new file, 13 lines)
@@ -0,0 +1,13 @@
from colorama import Fore


RED = Fore.RED
GREEN = Fore.GREEN
YELLOW = Fore.YELLOW


def colored(color, msg):
    return color + str(msg) + Fore.RESET
utils/consts.py (new file, 16 lines)
@@ -0,0 +1,16 @@
from utils import colors


COL_NOT_CONNECTED = colors.RED
COL_SUCCESS = colors.GREEN
COL_ALARM = colors.YELLOW


MSG_NOT_CONNECTED = "did not connect"
MSG_SUCCESS = "OK"

SYM_GOOD = "👍"
SYM_BAD = "💔"
utils/df.py (new file, 47 lines)
@@ -0,0 +1,47 @@
import numpy as np
import pandas as pd


def compare_dfs(df1, df2):
    df = pd.concat((df1, df2))
    diff = df.groupby(level=0).agg(report_diff)
    drop_empty(diff)
    return diff


def report_diff(x):
    return "" if equal(*x) else " {} | {}".format(*x) #TODO: color left and right differently


def equal(a, b):
    return a == b or (np.isnan(a) and np.isnan(b))


def drop_empty(df):
    replace_empty_nan(df)
    drop_nan_cols(df)
    drop_nan_rows(df)
    replace_nan_empty(df)


def replace_empty_nan(df):
    df.replace("", np.nan, inplace=True)


def replace_nan_empty(df):
    df.replace(np.nan, "", inplace=True)


def drop_nan_cols(df):
    df.dropna(axis="columns", how="all", inplace=True)


def drop_nan_rows(df):
    df.dropna(axis="index", how="all", inplace=True)


def drop_col(df, name):
    df.drop(name, axis="columns", inplace=True)


def count_true(bdf):
    good = bdf[bdf]
    ngood = len(good)
    return ngood
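For illustration only (not part of this commit), here is how compare_dfs and count_true behave on two small index-aligned frames; the channel names and values below are made up:

# Illustration only -- not in this commit.
import pandas as pd
from utils.df import compare_dfs, count_true

df1 = pd.DataFrame({"connected": [True, True], "value": [1.0, 2.0]}, index=["CH:A", "CH:B"])
df2 = pd.DataFrame({"connected": [True, False], "value": [1.0, 3.0]}, index=["CH:A", "CH:B"])

diff = compare_dfs(df1, df2)  # identical cells become "", all-empty rows/columns are dropped
print(diff)                   # only CH:B remains, showing " True | False" and " 2.0 | 3.0"
print(count_true(df1["connected"]))  # 2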
utils/epics.py (new file, 47 lines)
@@ -0,0 +1,47 @@
import numpy as np

from .alarms import message
from .colors import colored
from .consts import COL_NOT_CONNECTED, COL_SUCCESS, COL_ALARM
from .consts import MSG_NOT_CONNECTED, MSG_SUCCESS


class DataGetter:

    def __init__(self, timeout, silent):
        self.timeout = timeout
        self.silent = silent

    def __call__(self, pv):
        connected = pv.wait_for_connection(self.timeout)

        if not connected:
            value = np.nan
            status = severity = -1
            msg = MSG_NOT_CONNECTED
            col = COL_NOT_CONNECTED
        else:
            value = pv.value
            status = pv.status
            severity = pv.severity
            if status == 0 and severity == 0:
                msg = MSG_SUCCESS
                col = COL_SUCCESS
            else:
                msg = message(status, severity)
                col = COL_ALARM

        data = {
            "connected": connected,
            "value": value,
            "status": status,
            "severity": severity
        }

        if not self.silent:
            msg = colored(col, msg)
            print(pv.pvname, msg)
        return data
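utils/alarms is imported here but is not included in this commit. A minimal sketch of what message(status, severity) might return, assuming it only turns the numeric EPICS alarm codes into something readable (severities 0-3 are NO_ALARM, MINOR, MAJOR, INVALID); the actual module may be richer:

# Hypothetical sketch of utils/alarms -- not in this commit.
SEVERITIES = ("NO_ALARM", "MINOR", "MAJOR", "INVALID")

def message(status, severity):
    # fall back to the raw number for codes outside the known range
    sev = SEVERITIES[severity] if 0 <= severity < len(SEVERITIES) else str(severity)
    return f"status={status} severity={sev}"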
utils/fileio.py (new file, 40 lines)
@@ -0,0 +1,40 @@
import pandas as pd


def load_csv(fname):
    fname = fix_file_ext(fname, "csv")
    return pd.read_csv(fname, index_col=0, comment="#", float_precision="high")


def store_csv(df, fname, meta=None):
    fname = fix_file_ext(fname, "csv")
    with open(fname, "w") as f:
        if meta is not None:
            f.write(f"# {meta}\n")
        df.to_csv(f)


def fix_file_ext(fn, ext):
    if not ext.startswith("."):
        ext = "." + ext
    if not fn.endswith(ext):
        fn += ext
    return fn


def load_config(fname):
    chans = set()
    for line in read_lines(fname):
        line = remove_comments(line).strip()
        if not line:
            continue
        chans.add(line)
    return sorted(chans)


def read_lines(fname):
    with open(fname, "r") as f:
        yield from f


def remove_comments(line, comment_char="#"):
    return line.split(comment_char)[0]
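The store/load pair is designed to round-trip the metadata comment: store_csv prepends a "# ..." line, and load_csv drops it again via comment="#". A quick illustration (file name and meta string are made up):

# Illustration only -- not in this commit.
import pandas as pd
from utils.fileio import store_csv, load_csv

df = pd.DataFrame({"value": [1.0]}, index=["CH:A"])
store_csv(df, "snapshot", meta="chans.txt / 2024-01-01")  # writes snapshot.csv, first line "# chans.txt / 2024-01-01"
restored = load_csv("snapshot")                           # comment line is skipped, index restored from column 0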
utils/printing.py (new file, 11 lines)
@@ -0,0 +1,11 @@
from .consts import SYM_GOOD, SYM_BAD


def print_good(*args, **kwargs):
    return print(SYM_GOOD, *args, **kwargs)


def print_bad(*args, **kwargs):
    return print(SYM_BAD, *args, **kwargs)