added serial mode
This commit is contained in:
@ -19,7 +19,9 @@ The `data` folder contains some channel lists and output files for testing.
|
||||
|
||||
Reads a list of PV names from plain text file (comments starting with `#` are allowed, even recommended) and tests each channel for connection, alarm status and severity.
|
||||
|
||||
For each channel, the result will be printed as soon as it arrives. Thus, the output is ordered by response time. There is a command-line switch to suppress the output (`-q`/`--quiet`) and to set the connection timeout in seconds (`-t`/`--timeout`).
|
||||
By default, the connection tests are performed in parallel. For each channel, the result will be printed as soon as it arrives. Thus, the output is ordered by response time. For debugging purposes, the parallelization of the tests can be turned off (`-s`/`--serial`), in which case the output is printed in the order of the channels in the input file (note that in serial mode each broken channel has to time out before the next channel is tested, so the total execution time can become very long).
|
||||
|
||||
There is a command-line switch to suppress the output (`-q`/`--quiet`) and to set the connection timeout in seconds (`-t`/`--timeout`).
|
||||
|
||||
The test result can be written to a comma-separated values (csv) file by giving a filename to the `-o`/`--output` switch (`.csv` is automatically appended to the filename if missing).
|
||||
|
||||
|
@ -4,8 +4,7 @@ import epics
|
||||
|
||||
from utils.df import drop_col, compare_dfs, count_true
|
||||
from utils.epics import DataGetter
|
||||
from utils.execute import parallel
|
||||
#from utils.execute import serial as parallel
|
||||
from utils.execute import parallel, serial
|
||||
from utils.fileio import load_config, load_csv, store_csv
|
||||
from utils.printing import print_good, print_bad
|
||||
|
||||
@ -24,7 +23,8 @@ def run_check(clargs):
|
||||
pvs = (epics.PV(ch) for ch in chans) # putting PV constructors into ThreadPoolExecutor has weird effects
|
||||
|
||||
get_data = DataGetter(clargs.timeout, clargs.quiet)
|
||||
data = parallel(get_data, pvs, chans)
|
||||
run = serial if clargs.serial else parallel
|
||||
data = run(get_data, pvs, chans)
|
||||
|
||||
df = pd.DataFrame(data).T
|
||||
df = df.infer_objects() #TODO: why is this needed?
|
||||
|
1
sani.py
1
sani.py
@ -17,6 +17,7 @@ def handle_clargs():
|
||||
parser_check.add_argument("filename", help="name of input channel-list file")
|
||||
parser_check.add_argument("-o", "--output", help="output CSV file", default=None)
|
||||
parser_check.add_argument("-q", "--quiet", help="do not show each channel's answer", action="store_true")
|
||||
parser_check.add_argument("-s", "--serial", help="do not run checks in parallel", action="store_true")
|
||||
parser_check.add_argument("-t", "--timeout", help="connection timeout in seconds", type=float, default=1)
|
||||
|
||||
parser_compare = subparsers.add_parser("compare", help="compare two check results")
|
||||
|
Reference in New Issue
Block a user