first version

2023-06-29 18:14:48 +02:00
parent d5c76ee26c
commit 33561355f0
6 changed files with 376 additions and 0 deletions

.gitignore vendored Normal file

@@ -0,0 +1,143 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# dumped data
*.h5

channels.txt Normal file

@@ -0,0 +1,3 @@
FELrepRate = SWISSFEL-STATUS:Bunch-2-Appl-Freq-RB
att64 = SATFE10-OATT064:MOT2TRANS.VALD
att65 = SATFE10-OATT065:MOT2TRANS.VALD
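For reference, read_channels_file in retro-stand.py below parses each line into a column-name/channel-name pair (a line without "=" would use the channel name itself as the column name), so this file yields:

channels = {
    "FELrepRate": "SWISSFEL-STATUS:Bunch-2-Appl-Freq-RB",
    "att64": "SATFE10-OATT064:MOT2TRANS.VALD",
    "att65": "SATFE10-OATT065:MOT2TRANS.VALD",
}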

retro-stand.py Executable file

@@ -0,0 +1,175 @@
#!/usr/bin/env python
from pathlib import Path

import pandas as pd
from tqdm import tqdm

from sfdata import SFScanInfo
from utils import cprint, json_load


def process(base, channels):
    base = Path(base)
    dirs = base.glob("run*-*")
    collected = []
    for d in tqdm(sorted(dirs)):
        run, name = parse_run_name(d.name)

        fn_scan = d / "meta" / "scan.json"
        scan = SFScanInfo(fn_scan)
        n = len(scan)

        info = scan.info
        adj_ids = info["scan_parameters"]["Id"]
        first_adj_id = adj_ids[0]

        # a single-step run counts as static only if the scanned adjustable is a dummy
        typ = "scan"
        if n == 1:
            if first_adj_id.lower() == "dummy":
                typ = "static"
            else:
                cprint(f'run {run} is a single step (i.e., static) but its adjustable is "{first_adj_id}" (and not some variant of "dummy"), will treat it as a scan', color="red")

        fn_acq = d / "meta" / "acq0001.json"
        acq = json_load(fn_acq)
        timestamp = acq["request_time"]

        n_pulses = parse_n_pulses(scan)

        entries = {
            "run": run,
            "filename": name,
            "timeStamp": timestamp,
            "n_pulses": n_pulses
        }

        if typ == "scan":
            entries["scanned_adjs"] = first_adj_id if len(adj_ids) == 1 else adj_ids
            res = parse_scan(run, scan)
            entries.update(res)

        # read one representative value per channel from the first step
        step = scan[0]
        res = read_data(step, channels)
        entries.update(res)

        tqdm.write(str(entries))
        collected.append(entries)
    return collected


def parse_run_name(name):
    # folder names follow the pattern "run<number>-<name>"
    run, name = name.split("-", 1)
    assert run.startswith("run")
    run = run[len("run"):]
    run = int(run)
    return run, name


def parse_n_pulses(scan):
    pids = scan.info["pulseIds"]
    first_pids = pids[0]
    pid_start, pid_stop = first_pids
    n_pulses = pid_stop - pid_start
    return n_pulses


def parse_scan(run, scan):
    start, stop, n_steps = parse_rbks_best_effort(run, scan)
    res = {
        "v_min": start,
        "v_max": stop,
        "n_steps": n_steps
    }
    return res


def parse_rbks_best_effort(run, scan):
    try:
        return parse_rbks(scan.readbacks)
    except Exception as e:
        cprint(run, "could not parse the readbacks, will use the set values instead, because of:", e, color="red")
        return parse_rbks(scan.values)


def parse_rbks(rbks):
    start = min(rbks)
    stop = max(rbks)
    n_steps = len(rbks)
    return start, stop, n_steps


def read_data(step, channels):
    # read one representative value per channel: the first entry of the first file in the step
    res = {}
    for col_name, ch_name in channels.items():
        val = step[ch_name][0][0]
        res[col_name] = val
    return res


def dump(d, fn, key="data"):
    df = pd.DataFrame.from_records(d)
    print(df)
    df.to_hdf(fn, key=key)


def read_channels_file(fn):
    # each line maps "column name = channel name"; a line without "=" uses the channel name as column name
    res = {}
    with open(fn) as f:
        for line in f:
            line = line.split("#")[0].strip()  # drop comments and surrounding whitespace
            if not line:
                continue
            if "=" in line:
                left, right = line.split("=", 1)
            else:
                left = right = line
            left = left.strip()
            right = right.strip()
            res[left] = right
    return res


if __name__ == "__main__":
    import argparse

    desc = "Retroactively produce a stand output hdf5 file from the written data files"
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=desc)
    parser.add_argument("base", help="base folder (e.g., /sf/instrument/data/p12345/raw/)")
    parser.add_argument("-o", "--output", help="output file name; if not specified, no output is written")
    parser.add_argument("-c", "--channels", help="channels file name, an ascii file where each line is: column name = channel name", default="channels.txt")
    clargs = parser.parse_args()

    chans = read_channels_file(clargs.channels)
    print("", "channels:", chans, "", sep="\n")

    coll = process(clargs.base, chans)
    df = pd.DataFrame.from_records(coll)
    cprint("", "result:", df, "", sep="\n", color="green")

    if clargs.output:
        output = clargs.output
        ext = ".h5"
        if not output.endswith(ext):
            output += ext
        df.to_hdf(output, key="data")
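For illustration, this is roughly what the script does end to end; the beamtime path is the placeholder from the help text and the output name is made up:

# equivalent to: ./retro-stand.py /sf/instrument/data/p12345/raw/ -o overview
chans = read_channels_file("channels.txt")
coll = process("/sf/instrument/data/p12345/raw/", chans)
df = pd.DataFrame.from_records(coll)
df.to_hdf("overview.h5", key="data")  # the CLI appends ".h5" automatically

The resulting table has one row per run, with columns run, filename, timeStamp and n_pulses, plus scanned_adjs, v_min, v_max and n_steps for scans, and one column per entry in channels.txt.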

utils/__init__.py Normal file

@@ -0,0 +1,5 @@
from .cprint import cprint
from .json_load import json_load

utils/cprint.py Normal file

@@ -0,0 +1,42 @@
from colorama import Fore

COLORS = {
    "black": Fore.BLACK,
    "blue": Fore.BLUE,
    "cyan": Fore.CYAN,
    "green": Fore.GREEN,
    "magenta": Fore.MAGENTA,
    "red": Fore.RED,
    "white": Fore.WHITE,
    "yellow": Fore.YELLOW,
    None: None
}


def ncprint(*objects, color=None, sep=" ", **kwargs):
    # drop-in replacement for cprint that accepts but ignores the color argument
    return cprint(*objects, color=None, sep=sep, **kwargs)


def cprint(*objects, color=None, sep=" ", **kwargs):
    color = get_color(color)
    text = flatten_strings(objects, sep)
    return _print(color, text, sep, kwargs)


def get_color(color):
    try:
        return COLORS[color]
    except KeyError as exc:
        color = repr(color)
        allowed = tuple(COLORS.keys())
        raise ValueError(f"{color} is not one of {allowed}") from exc


def flatten_strings(objects, sep):
    return sep.join(str(i) for i in objects)


def _print(color, text, sep, kwargs):
    # text is already a single joined string; wrap it in the color code and reset at the end
    if color is not None:
        text = color + text + Fore.RESET
    return print(text, sep=sep, **kwargs)
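A minimal usage sketch of the two entry points (an unknown color name raises ValueError via get_color):

from utils import cprint
from utils.cprint import ncprint

cprint("could not parse the readbacks", color="red")   # printed in red
ncprint("could not parse the readbacks", color="red")  # same call, color ignored
cprint("plain text")                                   # color=None prints uncolored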

utils/json_load.py Normal file

@@ -0,0 +1,8 @@
import json


def json_load(filename, *args, **kwargs):
    with open(filename, "r") as f:
        return json.load(f, *args, **kwargs)
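This is a thin wrapper around json.load on an opened file; retro-stand.py uses it to read the per-run acquisition metadata, along these lines (the run folder name is illustrative, matching the layout used in process()):

from utils import json_load

acq = json_load("run0001-example/meta/acq0001.json")
timestamp = acq["request_time"]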