Compare commits
44 Commits
smaract_im
...
fixes/lamn
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
54f1f42332 | ||
|
|
48df15f35c | ||
|
|
6f60bd4b2b | ||
| 5d97913956 | |||
| 93384b87e0 | |||
| 9a249363fd | |||
| f925a7c1db | |||
| 5811e445fe | |||
| 16ea7f410e | |||
| 9db56f5273 | |||
| 705df4b253 | |||
| 181b57494b | |||
| efd51462fc | |||
| 8a69c7aa36 | |||
| b19bfb7ca4 | |||
| b818181da2 | |||
| 303929f8e6 | |||
| 9f5799385c | |||
|
|
cb968abe73 | ||
|
|
5ab763ad38
|
||
|
fa35ddf1a9
|
|||
|
501bc52867
|
|||
|
f35c51efa7
|
|||
|
0c81c718d8
|
|||
|
|
ce88310125
|
||
|
|
e860571a64
|
||
|
|
24bd5a71bc
|
||
|
|
541eb97096
|
||
|
|
7911717142
|
||
|
|
bdf94533a5
|
||
|
5784331073
|
|||
|
|
7a2c6629f7 | ||
| 75cc672f08 | |||
| 67ef20cfc8 | |||
| f8d2af4c5b | |||
| 8195c12a35 | |||
| a45aa094ef | |||
|
|
2814add2de | ||
| 32b4c39659 | |||
| 8849b9ffea | |||
| c3aa882b1d | |||
| 6fad4f2034 | |||
|
|
ed8d012632 | ||
|
82d47c7511
|
@@ -6,6 +6,8 @@ from rich.console import Console
|
||||
from rich.table import Table
|
||||
|
||||
from csaxs_bec.bec_ipython_client.plugins.cSAXS import epics_put, fshclose
|
||||
from csaxs_bec.bec_ipython_client.plugins.omny.omny_general_tools import OMNYTools
|
||||
|
||||
|
||||
# import builtins to avoid linter errors
|
||||
dev = builtins.__dict__.get("dev")
|
||||
@@ -18,25 +20,32 @@ class LamNIInitError(Exception):
|
||||
|
||||
|
||||
class LaMNIInitStagesMixin:
|
||||
def __init__(self, client):
|
||||
super().__init__()
|
||||
self.client = client
|
||||
self.OMNYTools = OMNYTools(self.client)
|
||||
|
||||
def lamni_init_stages(self):
|
||||
user_input = input("Starting initialization of LamNI stages. OK? [y/n]")
|
||||
if user_input == "y":
|
||||
|
||||
if self.OMNYTools.yesno("Start initialization of LamNI stages. OK?"):
|
||||
print("staring...")
|
||||
dev.lsamrot.enabled = True
|
||||
else:
|
||||
return
|
||||
|
||||
|
||||
|
||||
|
||||
if self.check_all_axes_of_lamni_referenced():
|
||||
user_input = input("Continue anyways? [y/n]")
|
||||
if user_input == "y":
|
||||
if self.OMNYTools.yesno("All axes are referenced. Continue anyways?"):
|
||||
print("ok then...")
|
||||
else:
|
||||
return
|
||||
|
||||
axis_id_lsamrot = dev.lsamrot._config["deviceConfig"].get("axis_Id")
|
||||
if dev.lsamrot.controller.get_motor_limit_switch(axis_id_lsamrot)[1] == False:
|
||||
user_input = input("The rotation stage will be moved to one limit [y/n]")
|
||||
if user_input == "y":
|
||||
|
||||
if self.OMNYTools.yesno("The rotation stage will be moved to one limit"):
|
||||
print("starting...")
|
||||
else:
|
||||
return
|
||||
@@ -47,10 +56,7 @@ class LaMNIInitStagesMixin:
|
||||
print("The controller will be disabled in bec. To enable dev.lsamrot.enabled=True")
|
||||
return
|
||||
|
||||
user_input = input(
|
||||
"Init of loptz. Can the stage move to the upstream limit without collision?? [y/n]"
|
||||
)
|
||||
if user_input == "y":
|
||||
if self.OMNYTools.yesno("Init of loptz. Can the stage move to the upstream limit without collision?"):
|
||||
print("ok then...")
|
||||
else:
|
||||
return
|
||||
@@ -81,8 +87,7 @@ class LaMNIInitStagesMixin:
|
||||
time.sleep(0.1)
|
||||
self.find_reference_mark(dev.lsamrot)
|
||||
|
||||
user_input = input("Init of leye. Can the stage move to -x limit without collision? [y/n]")
|
||||
if user_input == "y":
|
||||
if self.OMNYTools.yesno("Init of leye. Can the stage move to -x limit without collision?"):
|
||||
print("starting...")
|
||||
else:
|
||||
return
|
||||
@@ -134,8 +139,7 @@ class LaMNIInitStagesMixin:
|
||||
return ord(axis_id.lower()) - 97
|
||||
|
||||
def _align_setup(self):
|
||||
user_input = input("Start moving stages to default initial positions? [y/n]")
|
||||
if user_input == "y":
|
||||
if self.OMNYTools.yesno("Start moving stages to default initial positions?"):
|
||||
print("Start moving stages...")
|
||||
else:
|
||||
print("Stopping.")
|
||||
|
||||
@@ -15,13 +15,19 @@ from bec_lib.pdf_writer import PDFWriter
|
||||
from typeguard import typechecked
|
||||
|
||||
from csaxs_bec.bec_ipython_client.plugins.cSAXS import epics_get, epics_put, fshopen
|
||||
from csaxs_bec.bec_ipython_client.plugins.omny.omny_general_tools import OMNYTools
|
||||
|
||||
from .lamni_optics_mixin import LaMNIInitStagesMixin, LamNIOpticsMixin
|
||||
|
||||
logger = bec_logger.logger
|
||||
bec = builtins.__dict__.get("bec")
|
||||
|
||||
|
||||
if builtins.__dict__.get("bec") is not None:
|
||||
bec = builtins.__dict__.get("bec")
|
||||
dev = builtins.__dict__.get("dev")
|
||||
umv = builtins.__dict__.get("umv")
|
||||
umvr = builtins.__dict__.get("umvr")
|
||||
|
||||
class XrayEyeAlign:
|
||||
# pixel calibration, multiply to get mm
|
||||
# PIXEL_CALIBRATION = 0.2/209 #.2 with binning
|
||||
@@ -510,8 +516,9 @@ class LamNI(LamNIOpticsMixin):
|
||||
def __init__(self, client):
|
||||
super().__init__()
|
||||
self.client = client
|
||||
self.device_manager = client.device_manager
|
||||
self.align = XrayEyeAlign(client, self)
|
||||
self.init = LaMNIInitStagesMixin()
|
||||
self.init = LaMNIInitStagesMixin(client)
|
||||
self.check_shutter = True
|
||||
self.check_light_available = True
|
||||
self.check_fofb = True
|
||||
@@ -524,6 +531,16 @@ class LamNI(LamNIOpticsMixin):
|
||||
self._beam_is_okay = True
|
||||
self._stop_beam_check_event = None
|
||||
self.beam_check_thread = None
|
||||
self.OMNYTools = OMNYTools(self.client)
|
||||
# Progress tracking
|
||||
self.progress = {}
|
||||
self.progress["tomo_type"] = "Equally spaced sub-tomograms"
|
||||
self.progress["subtomo"] = 0
|
||||
self.progress["subtomo_projection"] = 0
|
||||
self.progress["subtomo_total_projections"] = 1
|
||||
self.progress["projection"] = 0
|
||||
self.progress["total_projections"] = 1
|
||||
self.progress["angle"] = 0
|
||||
|
||||
def get_beamline_checks_enabled(self):
|
||||
print(
|
||||
@@ -598,12 +615,12 @@ class LamNI(LamNIOpticsMixin):
|
||||
if val == 1:
|
||||
# equally spaced tomography with 8 sub tomograms
|
||||
self.client.set_global_var("tomo_type", val)
|
||||
# elif val == 2:
|
||||
# # golden ratio tomography (sorted bunches)
|
||||
# self.client.set_global_var("tomo_type", val)
|
||||
# elif val == 3:
|
||||
# # equally spaced tomography with starting angles shifted by golden ratio
|
||||
# self.client.set_global_var("tomo_type", val)
|
||||
elif val == 2:
|
||||
# golden ratio tomography (sorted bunches)
|
||||
self.client.set_global_var("tomo_type", val)
|
||||
elif val == 3:
|
||||
# equally spaced tomography with starting angles shifted by golden ratio
|
||||
self.client.set_global_var("tomo_type", val)
|
||||
else:
|
||||
raise ValueError("Unknown tomo_type.")
|
||||
|
||||
@@ -740,6 +757,41 @@ class LamNI(LamNIOpticsMixin):
|
||||
def tomo_stitch_overlap(self, val: float):
|
||||
self.client.set_global_var("tomo_stitch_overlap", val)
|
||||
|
||||
@property
|
||||
def golden_max_number_of_projections(self):
|
||||
val = self.client.get_global_var("golden_max_number_of_projections")
|
||||
if val is None:
|
||||
return 1000.0
|
||||
return val
|
||||
|
||||
@golden_max_number_of_projections.setter
|
||||
def golden_max_number_of_projections(self, val: float):
|
||||
self.client.set_global_var("golden_max_number_of_projections", val)
|
||||
|
||||
@property
|
||||
def golden_ratio_bunch_size(self):
|
||||
val = self.client.get_global_var("golden_ratio_bunch_size")
|
||||
if val is None:
|
||||
return 20
|
||||
return val
|
||||
|
||||
@golden_ratio_bunch_size.setter
|
||||
def golden_ratio_bunch_size(self, val: float):
|
||||
if val < 20:
|
||||
raise ValueError("golden_ratio_bunch_size must be at least 20.")
|
||||
self.client.set_global_var("golden_ratio_bunch_size", val)
|
||||
|
||||
@property
|
||||
def golden_projections_at_0_deg_for_damage_estimation(self):
|
||||
val = self.client.get_global_var("golden_projections_at_0_deg_for_damage_estimation")
|
||||
if val is None:
|
||||
return 0
|
||||
return val
|
||||
|
||||
@golden_projections_at_0_deg_for_damage_estimation.setter
|
||||
def golden_projections_at_0_deg_for_damage_estimation(self, val: float):
|
||||
self.client.set_global_var("golden_projections_at_0_deg_for_damage_estimation", val)
|
||||
|
||||
@property
|
||||
def sample_name(self):
|
||||
val = self.client.get_global_var("sample_name")
|
||||
@@ -909,6 +961,49 @@ class LamNI(LamNIOpticsMixin):
|
||||
except Exception:
|
||||
logger.warning("Failed to send update to SciLog.")
|
||||
|
||||
def rt_off(self):
|
||||
dev.rtx.enabled = False
|
||||
dev.rty.enabled = False
|
||||
|
||||
def rt_on(self):
|
||||
dev.rtx.enabled = True
|
||||
dev.rty.enabled = True
|
||||
if dev.rtx.enabled == True:
|
||||
print("rt is enabled")
|
||||
else:
|
||||
print("failed to enable rt")
|
||||
|
||||
def feedback_enable_with_reset(self):
|
||||
self.device_manager.devices.rtx.controller.feedback_enable_with_reset()
|
||||
self.feedback_status()
|
||||
|
||||
def feedback_enable_without_reset(self):
|
||||
self.device_manager.devices.rtx.controller.feedback_enable_without_reset()
|
||||
self.feedback_status()
|
||||
|
||||
def feedback_disable(self):
|
||||
self.device_manager.devices.rtx.controller.feedback_disable()
|
||||
self.feedback_status()
|
||||
|
||||
def feedback_disable_and_reset_angle(self):
|
||||
self.device_manager.devices.rtx.controller.feedback_disable_and_even_reset_lamni_angle_interferometer()
|
||||
self.feedback_status()
|
||||
|
||||
def feedback_status(self):
|
||||
if self.device_manager.devices.rtx.controller.feedback_is_running():
|
||||
print("The rt feedback is \x1b[92mrunning\x1b[0m.")
|
||||
else:
|
||||
print("The rt feedback is \x1b[91mNOT\x1b[0m running.")
|
||||
|
||||
def show_interferometer_positions(self):
|
||||
self.device_manager.devices.rtx.controller.show_interferometer_positions()
|
||||
|
||||
def show_signal_strength(self):
|
||||
self.device_manager.devices.rtx.controller.show_signal_strength_interferometer()
|
||||
|
||||
def show_analog_signals(self):
|
||||
return self.device_manager.devices.rtx.controller.show_analog_signals()
|
||||
|
||||
def add_sample_database(
|
||||
self, samplename, date, eaccount, scan_number, setup, sample_additional_info, user
|
||||
):
|
||||
@@ -935,9 +1030,8 @@ class LamNI(LamNIOpticsMixin):
|
||||
# self.tomo_scan_projection(angle)
|
||||
# self.tomo_reconstruct()
|
||||
|
||||
def sub_tomo_scan(self, subtomo_number, start_angle=None):
|
||||
"""start a subtomo"""
|
||||
dev = builtins.__dict__.get("dev")
|
||||
def _write_subtomo_to_scilog(self, subtomo_number):
|
||||
"""Write subtomo start information to scilog."""
|
||||
bec = builtins.__dict__.get("bec")
|
||||
if self.tomo_id > 0:
|
||||
tags = ["BEC_subtomo", self.sample_name, f"tomo_id_{self.tomo_id}"]
|
||||
@@ -948,6 +1042,10 @@ class LamNI(LamNIOpticsMixin):
|
||||
tags,
|
||||
)
|
||||
|
||||
def sub_tomo_scan(self, subtomo_number, start_angle=None):
|
||||
"""start a subtomo"""
|
||||
self._write_subtomo_to_scilog(subtomo_number)
|
||||
|
||||
if start_angle is None:
|
||||
if subtomo_number == 1:
|
||||
start_angle = 0
|
||||
@@ -980,13 +1078,14 @@ class LamNI(LamNIOpticsMixin):
|
||||
if not (subtomo_number % 2):
|
||||
angles = np.flip(angles)
|
||||
for angle in angles:
|
||||
self.progress["tomo_type"] = "Equally spaced sub-tomograms"
|
||||
self.progress["subtomo"] = subtomo_number
|
||||
self.progress["subtomo_projection"] = angles.index(angle)
|
||||
self.progress["subtomo_total_projections"] = 180 / self.tomo_angle_stepsize
|
||||
self.progress["subtomo_projection"] = np.where(angles == angle)[0][0]
|
||||
self.progress["subtomo_total_projections"] = 360 / self.tomo_angle_stepsize
|
||||
self.progress["projection"] = (subtomo_number - 1) * self.progress[
|
||||
"subtomo_total_projections"
|
||||
] + self.progress["subtomo_projection"]
|
||||
self.progress["total_projections"] = 180 / self.tomo_angle_stepsize * 8
|
||||
self.progress["total_projections"] = 360 / self.tomo_angle_stepsize * 8
|
||||
self.progress["angle"] = angle
|
||||
self._tomo_scan_at_angle(angle, subtomo_number)
|
||||
|
||||
@@ -1049,6 +1148,11 @@ class LamNI(LamNIOpticsMixin):
|
||||
for scan_nr in range(start_scan_number, end_scan_number):
|
||||
self._write_tomo_scan_number(scan_nr, angle, subtomo_number)
|
||||
|
||||
if self._was_beam_okay() and not error_caught:
|
||||
successful = True
|
||||
else:
|
||||
self._wait_for_beamline_checks()
|
||||
|
||||
def _write_tomo_scan_number(self, scan_number: int, angle: float, subtomo_number: int) -> None:
|
||||
tomo_scan_numbers_file = os.path.expanduser(
|
||||
"~/Data10/specES1/dat-files/tomography_scannumbers.txt"
|
||||
@@ -1059,13 +1163,74 @@ class LamNI(LamNIOpticsMixin):
|
||||
f"{scan_number} {angle} {dev.lsamrot.read()['lsamrot']['value']:.3f} {self.tomo_id} {subtomo_number} {0} {'lamni'}\n"
|
||||
)
|
||||
|
||||
def tomo_scan(self, subtomo_start=1, start_angle=None):
|
||||
"""start a tomo scan"""
|
||||
def _golden(self, ii, howmany_sorted, maxangle=360, reverse=False):
|
||||
"""Return the ii-th golden ratio angle within sorted bunches of size howmany_sorted,
|
||||
and its subtomo number. Operates over maxangle degrees (360 for LamNI)."""
|
||||
golden = []
|
||||
for iji in range(
|
||||
(ii - (ii % howmany_sorted)), (ii - (ii % howmany_sorted)) + howmany_sorted, 1
|
||||
):
|
||||
golden.append(
|
||||
((iji * maxangle * (1 + pow(5, 0.5)) / 2) * 1000 % (maxangle * 1000)) / 1000
|
||||
)
|
||||
golden.sort()
|
||||
subtomo_number = int(ii / howmany_sorted) + 1
|
||||
if reverse and not subtomo_number % 2:
|
||||
golden.reverse()
|
||||
return (golden[ii % howmany_sorted], subtomo_number)
|
||||
|
||||
def _golden_equally_spaced(self, ii, number_of_projections_per_subtomo, maxangle=360, reverse=True, verbose=False):
|
||||
"""Return angles for equally spaced tomography with sub-tomogram starting angles
|
||||
shifted according to the golden ratio. Operates over maxangle degrees (360 for LamNI).
|
||||
ii is the projection number starting at 0."""
|
||||
angular_step = maxangle / number_of_projections_per_subtomo
|
||||
subtomo_number = int(((ii) * angular_step) / maxangle) + 1
|
||||
start_angle = self._golden(subtomo_number - 1, 1, angular_step)[0]
|
||||
projection_number_of_subtomo = (
|
||||
ii - (subtomo_number - 1) * number_of_projections_per_subtomo
|
||||
)
|
||||
|
||||
if reverse:
|
||||
if subtomo_number % 2:
|
||||
angle = start_angle + projection_number_of_subtomo * angular_step
|
||||
else:
|
||||
angle = (
|
||||
start_angle
|
||||
+ (number_of_projections_per_subtomo - 1) * angular_step
|
||||
- projection_number_of_subtomo * angular_step
|
||||
)
|
||||
else:
|
||||
angle = start_angle + projection_number_of_subtomo * angular_step
|
||||
|
||||
if verbose:
|
||||
print(
|
||||
f"Equally spaced golden ratio tomography.\n"
|
||||
f"Angular step: {angular_step}\n"
|
||||
f"Subtomo Number: {subtomo_number}\n"
|
||||
f"Angle: {angle}"
|
||||
)
|
||||
|
||||
return angle, subtomo_number
|
||||
|
||||
def tomo_scan(self, subtomo_start=1, start_angle=None, projection_number=None):
|
||||
"""Start a tomo scan.
|
||||
|
||||
Args:
|
||||
subtomo_start (int): For tomo_type 1, the sub-tomogram number to start from. Defaults to 1.
|
||||
start_angle (float, optional): Override the starting angle of the first sub-tomogram. Defaults to None.
|
||||
projection_number (int, optional): For tomo_types 2 and 3, resume from this projection index. Defaults to None.
|
||||
"""
|
||||
bec = builtins.__dict__.get("bec")
|
||||
scans = builtins.__dict__.get("scans")
|
||||
self._current_special_angles = self.special_angles.copy()
|
||||
|
||||
if self.tomo_type == 1 and subtomo_start == 1 and start_angle is None:
|
||||
# Register a new tomo scan in the database and write the PDF report
|
||||
# only when starting fresh (not resuming mid-scan)
|
||||
if (
|
||||
(self.tomo_type == 1 and subtomo_start == 1 and start_angle is None)
|
||||
or (self.tomo_type == 2 and projection_number is None)
|
||||
or (self.tomo_type == 3 and projection_number is None)
|
||||
):
|
||||
# pylint: disable=undefined-variable
|
||||
self.tomo_id = self.add_sample_database(
|
||||
self.sample_name,
|
||||
@@ -1077,10 +1242,108 @@ class LamNI(LamNIOpticsMixin):
|
||||
"BEC",
|
||||
)
|
||||
self.write_pdf_report()
|
||||
|
||||
with scans.dataset_id_on_hold:
|
||||
for ii in range(subtomo_start, 9):
|
||||
self.sub_tomo_scan(ii, start_angle=start_angle)
|
||||
start_angle = None
|
||||
if self.tomo_type == 1:
|
||||
# 8 equally spaced sub-tomograms over 360 degrees
|
||||
self.progress["tomo_type"] = "Equally spaced sub-tomograms"
|
||||
for ii in range(subtomo_start, 9):
|
||||
self.sub_tomo_scan(ii, start_angle=start_angle)
|
||||
start_angle = None
|
||||
|
||||
elif self.tomo_type == 2:
|
||||
# Golden ratio tomography (sorted bunches) over 360 degrees
|
||||
self.progress["tomo_type"] = "Golden ratio tomography"
|
||||
previous_subtomo_number = -1
|
||||
ii = 0 if projection_number is None else projection_number
|
||||
while True:
|
||||
angle, subtomo_number = self._golden(
|
||||
ii, self.golden_ratio_bunch_size, maxangle=360, reverse=True
|
||||
)
|
||||
if previous_subtomo_number != subtomo_number:
|
||||
self._write_subtomo_to_scilog(subtomo_number)
|
||||
if (
|
||||
subtomo_number % 2 == 1
|
||||
and ii > 10
|
||||
and self.golden_projections_at_0_deg_for_damage_estimation == 1
|
||||
):
|
||||
self._tomo_scan_at_angle(0, subtomo_number)
|
||||
previous_subtomo_number = subtomo_number
|
||||
|
||||
self.progress["subtomo"] = subtomo_number
|
||||
self.progress["projection"] = ii
|
||||
self.progress["angle"] = angle
|
||||
if self.golden_ratio_bunch_size > 0:
|
||||
self.progress["subtomo_total_projections"] = self.golden_ratio_bunch_size
|
||||
self.progress["subtomo_projection"] = (
|
||||
ii - (subtomo_number - 1) * self.golden_ratio_bunch_size
|
||||
)
|
||||
else:
|
||||
self.progress["subtomo_total_projections"] = 0
|
||||
self.progress["subtomo_projection"] = 0
|
||||
self.progress["total_projections"] = (
|
||||
self.golden_max_number_of_projections
|
||||
if self.golden_max_number_of_projections > 0
|
||||
else 0
|
||||
)
|
||||
|
||||
self._tomo_scan_at_angle(angle, subtomo_number)
|
||||
ii += 1
|
||||
if (
|
||||
self.golden_max_number_of_projections > 0
|
||||
and ii > self.golden_max_number_of_projections
|
||||
):
|
||||
print(
|
||||
f"Golden ratio tomography stopped automatically after the requested"
|
||||
f" {self.golden_max_number_of_projections} projections."
|
||||
)
|
||||
break
|
||||
|
||||
elif self.tomo_type == 3:
|
||||
# Equally spaced tomography with golden ratio starting angles over 360 degrees
|
||||
self.progress["tomo_type"] = "Equally spaced, golden ratio starting angles"
|
||||
previous_subtomo_number = -1
|
||||
ii = 0 if projection_number is None else projection_number
|
||||
while True:
|
||||
angle, subtomo_number = self._golden_equally_spaced(
|
||||
ii, int(360 / self.tomo_angle_stepsize), maxangle=360, reverse=True
|
||||
)
|
||||
if previous_subtomo_number != subtomo_number:
|
||||
self._write_subtomo_to_scilog(subtomo_number)
|
||||
if (
|
||||
subtomo_number % 2 == 1
|
||||
and ii > 10
|
||||
and self.golden_projections_at_0_deg_for_damage_estimation == 1
|
||||
):
|
||||
self._tomo_scan_at_angle(0, subtomo_number)
|
||||
previous_subtomo_number = subtomo_number
|
||||
|
||||
self.progress["subtomo"] = subtomo_number
|
||||
self.progress["projection"] = ii
|
||||
self.progress["angle"] = angle
|
||||
self.progress["subtomo_total_projections"] = 360 / self.tomo_angle_stepsize
|
||||
self.progress["subtomo_projection"] = (
|
||||
ii - (subtomo_number - 1) * self.progress["subtomo_total_projections"]
|
||||
)
|
||||
self.progress["total_projections"] = (
|
||||
self.golden_max_number_of_projections
|
||||
if self.golden_max_number_of_projections > 0
|
||||
else 0
|
||||
)
|
||||
|
||||
self._tomo_scan_at_angle(angle, subtomo_number)
|
||||
ii += 1
|
||||
if (
|
||||
self.golden_max_number_of_projections > 0
|
||||
and ii > self.golden_max_number_of_projections
|
||||
):
|
||||
print(
|
||||
f"Golden ratio tomography stopped automatically after the requested"
|
||||
f" {self.golden_max_number_of_projections} projections."
|
||||
)
|
||||
break
|
||||
else:
|
||||
raise ValueError(f"Unknown tomo_type: {self.tomo_type}.")
|
||||
|
||||
def tomo_parameters(self):
|
||||
"""print and update the tomo parameters"""
|
||||
@@ -1103,9 +1366,37 @@ class LamNI(LamNIOpticsMixin):
|
||||
print(f" _tomo_fovy_offset <mm> = {self.align.tomo_fovy_offset}")
|
||||
print(f" _manual_shift_x <mm> = {self.manual_shift_x}")
|
||||
print(f" _manual_shift_y <mm> = {self.manual_shift_y}")
|
||||
print(f"Angular step within sub-tomogram: {self.tomo_angle_stepsize} degrees")
|
||||
print(f"Resulting in number of projections: {360/self.tomo_angle_stepsize*8}")
|
||||
print(f"Sample name: {self.sample_name}\n")
|
||||
print("")
|
||||
if self.tomo_type == 1:
|
||||
print("\x1b[1mTomo type 1:\x1b[0m 8 equally spaced sub-tomograms")
|
||||
print(f"Angular step within sub-tomogram: {self.tomo_angle_stepsize} degrees")
|
||||
print(f"Resulting in number of projections: {360/self.tomo_angle_stepsize*8}")
|
||||
elif self.tomo_type == 2:
|
||||
print("\x1b[1mTomo type 2:\x1b[0m Golden ratio tomography")
|
||||
print(f"Sorted in bunches of: {self.golden_ratio_bunch_size}")
|
||||
if self.golden_max_number_of_projections > 0:
|
||||
print(f"Ending after {self.golden_max_number_of_projections} projections.")
|
||||
else:
|
||||
print("Ending by manual interruption.")
|
||||
if self.golden_projections_at_0_deg_for_damage_estimation == 1:
|
||||
print(
|
||||
"Repeating projections at 0 degrees at the beginning of every second subtomogram."
|
||||
)
|
||||
elif self.tomo_type == 3:
|
||||
print(
|
||||
"\x1b[1mTomo type 3:\x1b[0m Equally spaced tomography, golden ratio starting angle"
|
||||
)
|
||||
print(f"Angular step within sub-tomogram: {self.tomo_angle_stepsize} degrees")
|
||||
print(f"Number of projections per sub-tomogram: {360/self.tomo_angle_stepsize}")
|
||||
if self.golden_max_number_of_projections > 0:
|
||||
print(f"Ending after {self.golden_max_number_of_projections} projections.")
|
||||
else:
|
||||
print("Ending by manual interruption.")
|
||||
if self.golden_projections_at_0_deg_for_damage_estimation == 1:
|
||||
print(
|
||||
"Repeating projections at 0 degrees at the beginning of every second subtomogram."
|
||||
)
|
||||
print(f"\nSample name: {self.sample_name}\n")
|
||||
|
||||
user_input = input("Are these parameters correctly set for your scan? ")
|
||||
if user_input == "y":
|
||||
@@ -1125,13 +1416,61 @@ class LamNI(LamNIOpticsMixin):
|
||||
self.ptycho_reconstruct_foldername = self._get_val(
|
||||
"Reconstruction queue ", self.ptycho_reconstruct_foldername, str
|
||||
)
|
||||
tomo_numberofprojections = self._get_val(
|
||||
"Number of projections", 360 / self.tomo_angle_stepsize * 8, int
|
||||
)
|
||||
|
||||
print(f"The angular step will be {360/tomo_numberofprojections}")
|
||||
self.tomo_angle_stepsize = 360 / tomo_numberofprojections * 8
|
||||
print(f"The angular step in a subtomogram it will be {self.tomo_angle_stepsize}")
|
||||
print("Tomography type:")
|
||||
print(" 1: 8 equally spaced sub-tomograms (360 deg)")
|
||||
print(" 2: Golden ratio tomography")
|
||||
print(" 3: Equally spaced tomography, golden ratio starting angle")
|
||||
self.tomo_type = self._get_val("Tomography type", self.tomo_type, int)
|
||||
|
||||
if self.tomo_type == 1:
|
||||
tomo_numberofprojections = self._get_val(
|
||||
"Number of projections", 360 / self.tomo_angle_stepsize * 8, int
|
||||
)
|
||||
print(f"The angular step will be {360/tomo_numberofprojections}")
|
||||
self.tomo_angle_stepsize = 360 / tomo_numberofprojections * 8
|
||||
print(f"The angular step in a subtomogram it will be {self.tomo_angle_stepsize}")
|
||||
|
||||
elif self.tomo_type == 2:
|
||||
while True:
|
||||
bunch_size = self._get_val(
|
||||
"Number of projections sorted per bunch (minimum 20)",
|
||||
self.golden_ratio_bunch_size,
|
||||
int,
|
||||
)
|
||||
if bunch_size >= 20:
|
||||
self.golden_ratio_bunch_size = bunch_size
|
||||
break
|
||||
print("Bunch size must be at least 20. Please try again.")
|
||||
self.golden_max_number_of_projections = self._get_val(
|
||||
"Stop after number of projections (0 for endless)",
|
||||
self.golden_max_number_of_projections,
|
||||
int,
|
||||
)
|
||||
self.golden_projections_at_0_deg_for_damage_estimation = self._get_val(
|
||||
"Repeat projections at 0 deg every second subtomo 1/0?",
|
||||
self.golden_projections_at_0_deg_for_damage_estimation,
|
||||
int,
|
||||
)
|
||||
|
||||
elif self.tomo_type == 3:
|
||||
numprj = self._get_val(
|
||||
"Number of projections per sub-tomogram",
|
||||
int(360 / self.tomo_angle_stepsize),
|
||||
int,
|
||||
)
|
||||
self.tomo_angle_stepsize = 360 / numprj
|
||||
self.golden_max_number_of_projections = self._get_val(
|
||||
"Stop after number of projections (0 for endless)",
|
||||
self.golden_max_number_of_projections,
|
||||
int,
|
||||
)
|
||||
self.golden_projections_at_0_deg_for_damage_estimation = self._get_val(
|
||||
"Repeat projections at 0 deg every second subtomo 1/0?",
|
||||
self.golden_projections_at_0_deg_for_damage_estimation,
|
||||
int,
|
||||
)
|
||||
|
||||
self.sample_name = self._get_val("sample name", self.sample_name, str)
|
||||
|
||||
@staticmethod
|
||||
@@ -1197,6 +1536,7 @@ class LamNI(LamNIOpticsMixin):
|
||||
(
|
||||
f"{'Angular step within sub-tomogram:':<{padding}}{self.tomo_angle_stepsize:>{padding}.2f}\n"
|
||||
),
|
||||
f"{'Tomo type:':<{padding}}{self.tomo_type:>{padding}}\n",
|
||||
]
|
||||
content = "".join(content)
|
||||
user_target = os.path.expanduser(f"~/Data10/documentation/tomo_scan_ID_{self.tomo_id}.pdf")
|
||||
@@ -1389,3 +1729,4 @@ class DataDrivenLamNI(LamNI):
|
||||
shapes.append(data.shape)
|
||||
if len(set(shapes)) > 1:
|
||||
raise ValueError(f"Tomo data file has entries of inconsistent lengths: {shapes}.")
|
||||
|
||||
@@ -35,32 +35,32 @@ class FlomniInitError(Exception):
|
||||
class FlomniError(Exception):
|
||||
pass
|
||||
|
||||
class FlomniTools:
|
||||
def yesno(self, message: str, default="none", autoconfirm=0) -> bool:
|
||||
if autoconfirm and default == "y":
|
||||
self.printgreen(message + " Automatically confirming default: yes")
|
||||
return True
|
||||
elif autoconfirm and default == "n":
|
||||
self.printgreen(message + " Automatically confirming default: no")
|
||||
return False
|
||||
if default == "y":
|
||||
message_ending = " [Y]/n? "
|
||||
elif default == "n":
|
||||
message_ending = " y/[N]? "
|
||||
else:
|
||||
message_ending = " y/n? "
|
||||
while True:
|
||||
user_input = input(self.OKBLUE + message + message_ending + self.ENDC)
|
||||
if (
|
||||
user_input == "Y" or user_input == "y" or user_input == "yes" or user_input == "Yes"
|
||||
) or (default == "y" and user_input == ""):
|
||||
return True
|
||||
if (
|
||||
user_input == "N" or user_input == "n" or user_input == "no" or user_input == "No"
|
||||
) or (default == "n" and user_input == ""):
|
||||
return False
|
||||
else:
|
||||
print("Please expicitely confirm y or n.")
|
||||
# class FlomniTools:
|
||||
# def yesno(self, message: str, default="none", autoconfirm=0) -> bool:
|
||||
# if autoconfirm and default == "y":
|
||||
# self.printgreen(message + " Automatically confirming default: yes")
|
||||
# return True
|
||||
# elif autoconfirm and default == "n":
|
||||
# self.printgreen(message + " Automatically confirming default: no")
|
||||
# return False
|
||||
# if default == "y":
|
||||
# message_ending = " [Y]/n? "
|
||||
# elif default == "n":
|
||||
# message_ending = " y/[N]? "
|
||||
# else:
|
||||
# message_ending = " y/n? "
|
||||
# while True:
|
||||
# user_input = input(self.OKBLUE + message + message_ending + self.ENDC)
|
||||
# if (
|
||||
# user_input == "Y" or user_input == "y" or user_input == "yes" or user_input == "Yes"
|
||||
# ) or (default == "y" and user_input == ""):
|
||||
# return True
|
||||
# if (
|
||||
# user_input == "N" or user_input == "n" or user_input == "no" or user_input == "No"
|
||||
# ) or (default == "n" and user_input == ""):
|
||||
# return False
|
||||
# else:
|
||||
# print("Please expicitely confirm y or n.")
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -9,6 +9,17 @@ eiger_1_5:
|
||||
readoutPriority: async
|
||||
softwareTrigger: False
|
||||
|
||||
eiger_9:
|
||||
description: Eiger 9M detector
|
||||
deviceClass: csaxs_bec.devices.jungfraujoch.eiger_9m.Eiger9M
|
||||
deviceConfig:
|
||||
detector_distance: 100
|
||||
beam_center: [0, 0]
|
||||
onFailure: raise
|
||||
enabled: true
|
||||
readoutPriority: async
|
||||
softwareTrigger: False
|
||||
|
||||
ids_cam:
|
||||
description: IDS camera for live image acquisition
|
||||
deviceClass: csaxs_bec.devices.ids_cameras.IDSCamera
|
||||
|
||||
@@ -37,6 +37,21 @@ mcs:
|
||||
readoutPriority: monitored
|
||||
softwareTrigger: false
|
||||
|
||||
|
||||
|
||||
##########################################################################
|
||||
########################### FAST SHUTTER #################################
|
||||
##########################################################################
|
||||
|
||||
fsh:
|
||||
description: Fast shutter manual control and readback
|
||||
deviceClass: csaxs_bec.devices.epics.fast_shutter.cSAXSFastEpicsShutter
|
||||
deviceConfig:
|
||||
prefix: 'X12SA-ES1-TTL:'
|
||||
onFailure: raise
|
||||
enabled: true
|
||||
readoutPriority: monitored
|
||||
|
||||
##########################################################################
|
||||
######################## SMARACT STAGES ##################################
|
||||
##########################################################################
|
||||
@@ -60,6 +75,7 @@ xbpm3x:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
init_position: -22.5
|
||||
# bl_smar_stage to use csaxs reference method. assign number according to axis channel
|
||||
@@ -82,6 +98,7 @@ xbpm3y:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
init_position: -2
|
||||
# bl_smar_stage to use csaxs reference method. assign number according to axis channel
|
||||
@@ -104,6 +121,7 @@ sl3trxi:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
init_position: -5.5
|
||||
# bl_smar_stage to use csaxs reference method. assign number according to axis channel
|
||||
@@ -126,6 +144,7 @@ sl3trxo:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
init_position: 6
|
||||
# bl_smar_stage to use csaxs reference method. assign number according to axis channel
|
||||
@@ -148,6 +167,7 @@ sl3trxb:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
init_position: -5.8
|
||||
# bl_smar_stage to use csaxs reference method. assign number according to axis channel
|
||||
@@ -170,6 +190,7 @@ sl3trxt:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
init_position: 5.5
|
||||
# bl_smar_stage to use csaxs reference method. assign number according to axis channel
|
||||
@@ -192,6 +213,7 @@ fast_shutter_n1_x:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
init_position: -7
|
||||
in: 0
|
||||
@@ -215,6 +237,7 @@ fast_shutter_o1_x:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
init_position: -15.8
|
||||
# bl_smar_stage to use csaxs reference method. assign number according to axis channel
|
||||
@@ -237,6 +260,7 @@ fast_shutter_o2_x:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
init_position: -15.5
|
||||
# bl_smar_stage to use csaxs reference method. assign number according to axis channel
|
||||
@@ -259,6 +283,7 @@ filter_array_1_x:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
init_position: 25
|
||||
# bl_smar_stage to use csaxs reference method. assign number according to axis channel
|
||||
@@ -281,6 +306,7 @@ filter_array_2_x:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
init_position: 25.5
|
||||
# bl_smar_stage to use csaxs reference method. assign number according to axis channel
|
||||
@@ -303,6 +329,7 @@ filter_array_3_x:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
init_position: 25.8
|
||||
# bl_smar_stage to use csaxs reference method. assign number according to axis channel
|
||||
@@ -325,6 +352,7 @@ filter_array_4_x:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
init_position: 25
|
||||
# bl_smar_stage to use csaxs reference method. assign number according to axis channel
|
||||
@@ -347,6 +375,7 @@ sl4trxi:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
init_position: -5.5
|
||||
# bl_smar_stage to use csaxs reference method. assign number according to axis channel
|
||||
@@ -369,6 +398,7 @@ sl4trxo:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
init_position: 6
|
||||
# bl_smar_stage to use csaxs reference method. assign number according to axis channel
|
||||
@@ -391,6 +421,7 @@ sl4trxb:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
init_position: -5.8
|
||||
# bl_smar_stage to use csaxs reference method. assign number according to axis channel
|
||||
@@ -413,6 +444,7 @@ sl4trxt:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
init_position: 5.5
|
||||
# bl_smar_stage to use csaxs reference method. assign number according to axis channel
|
||||
@@ -425,7 +457,7 @@ sl5trxi:
|
||||
deviceClass: csaxs_bec.devices.smaract.smaract_ophyd.SmaractMotor
|
||||
deviceConfig:
|
||||
axis_Id: C
|
||||
host: x12sa-eb-smaract-mcs-02.psi.ch
|
||||
host: x12sa-eb-smaract-mcs-05.psi.ch
|
||||
limits:
|
||||
- -200
|
||||
- 200
|
||||
@@ -437,6 +469,7 @@ sl5trxi:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
init_position: -6
|
||||
# bl_smar_stage to use csaxs reference method. assign number according to axis channel
|
||||
@@ -447,7 +480,7 @@ sl5trxo:
|
||||
deviceClass: csaxs_bec.devices.smaract.smaract_ophyd.SmaractMotor
|
||||
deviceConfig:
|
||||
axis_Id: D
|
||||
host: x12sa-eb-smaract-mcs-02.psi.ch
|
||||
host: x12sa-eb-smaract-mcs-05.psi.ch
|
||||
limits:
|
||||
- -200
|
||||
- 200
|
||||
@@ -459,6 +492,7 @@ sl5trxo:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
init_position: 5.5
|
||||
# bl_smar_stage to use csaxs reference method. assign number according to axis channel
|
||||
@@ -469,7 +503,7 @@ sl5trxb:
|
||||
deviceClass: csaxs_bec.devices.smaract.smaract_ophyd.SmaractMotor
|
||||
deviceConfig:
|
||||
axis_Id: E
|
||||
host: x12sa-eb-smaract-mcs-02.psi.ch
|
||||
host: x12sa-eb-smaract-mcs-05.psi.ch
|
||||
limits:
|
||||
- -200
|
||||
- 200
|
||||
@@ -481,6 +515,7 @@ sl5trxb:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
init_position: -5.5
|
||||
# bl_smar_stage to use csaxs reference method. assign number according to axis channel
|
||||
@@ -491,7 +526,7 @@ sl5trxt:
|
||||
deviceClass: csaxs_bec.devices.smaract.smaract_ophyd.SmaractMotor
|
||||
deviceConfig:
|
||||
axis_Id: F
|
||||
host: x12sa-eb-smaract-mcs-02.psi.ch
|
||||
host: x12sa-eb-smaract-mcs-05.psi.ch
|
||||
limits:
|
||||
- -200
|
||||
- 200
|
||||
@@ -503,6 +538,7 @@ sl5trxt:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
init_position: 6
|
||||
# bl_smar_stage to use csaxs reference method. assign number according to axis channel
|
||||
@@ -513,7 +549,7 @@ xbimtrx:
|
||||
deviceClass: csaxs_bec.devices.smaract.smaract_ophyd.SmaractMotor
|
||||
deviceConfig:
|
||||
axis_Id: A
|
||||
host: x12sa-eb-smaract-mcs-02.psi.ch
|
||||
host: x12sa-eb-smaract-mcs-05.psi.ch
|
||||
limits:
|
||||
- -200
|
||||
- 200
|
||||
@@ -525,6 +561,7 @@ xbimtrx:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
init_position: -14.7
|
||||
# bl_smar_stage to use csaxs reference method. assign number according to axis channel
|
||||
@@ -535,7 +572,7 @@ xbimtry:
|
||||
deviceClass: csaxs_bec.devices.smaract.smaract_ophyd.SmaractMotor
|
||||
deviceConfig:
|
||||
axis_Id: B
|
||||
host: x12sa-eb-smaract-mcs-02.psi.ch
|
||||
host: x12sa-eb-smaract-mcs-05.psi.ch
|
||||
limits:
|
||||
- -200
|
||||
- 200
|
||||
@@ -547,6 +584,7 @@ xbimtry:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
init_position: 0
|
||||
# bl_smar_stage to use csaxs reference method. assign number according to axis channel
|
||||
|
||||
@@ -89,6 +89,7 @@ xbpm2x:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
# bl_smar_stage to use csaxs reference method. assign number according to axis channel
|
||||
bl_smar_stage: 0
|
||||
@@ -108,6 +109,7 @@ xbpm2y:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
# bl_smar_stage to use csaxs reference method. assign number according to axis channel
|
||||
bl_smar_stage: 1
|
||||
@@ -127,6 +129,7 @@ cu_foilx:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
# bl_smar_stage to use csaxs reference method. assign number according to axis channel
|
||||
bl_smar_stage: 2
|
||||
@@ -146,6 +149,7 @@ scinx:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
# bl_smar_stage to use csaxs reference method. assign number according to axis channel
|
||||
bl_smar_stage: 3
|
||||
|
||||
@@ -17,6 +17,7 @@ feyex:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
in: -16.267
|
||||
out: -1
|
||||
@@ -35,6 +36,7 @@ feyey:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
in: -10.467
|
||||
fheater:
|
||||
@@ -52,6 +54,7 @@ fheater:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
foptx:
|
||||
description: Optics X
|
||||
deviceClass: csaxs_bec.devices.omny.galil.fgalil_ophyd.FlomniGalilMotor
|
||||
@@ -67,6 +70,7 @@ foptx:
|
||||
onFailure: buffer
|
||||
readOnly: true
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
in: -13.761
|
||||
fopty:
|
||||
@@ -84,6 +88,7 @@ fopty:
|
||||
onFailure: buffer
|
||||
readOnly: true
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
in: 0.552
|
||||
out: 0.752
|
||||
@@ -102,6 +107,7 @@ foptz:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
in: 23
|
||||
fsamroy:
|
||||
@@ -119,6 +125,7 @@ fsamroy:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
fsamx:
|
||||
description: Sample coarse X
|
||||
deviceClass: csaxs_bec.devices.omny.galil.fgalil_ophyd.FlomniGalilMotor
|
||||
@@ -134,6 +141,7 @@ fsamx:
|
||||
onFailure: buffer
|
||||
readOnly: true
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
in: -1.1
|
||||
fsamy:
|
||||
@@ -151,6 +159,7 @@ fsamy:
|
||||
onFailure: buffer
|
||||
readOnly: true
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
in: 2.75
|
||||
ftracky:
|
||||
@@ -168,6 +177,7 @@ ftracky:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
ftrackz:
|
||||
description: Laser Tracker coarse Z
|
||||
deviceClass: csaxs_bec.devices.omny.galil.fgalil_ophyd.FlomniGalilMotor
|
||||
@@ -183,6 +193,7 @@ ftrackz:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
ftransx:
|
||||
description: Sample transer X
|
||||
deviceClass: csaxs_bec.devices.omny.galil.fgalil_ophyd.FlomniGalilMotor
|
||||
@@ -198,6 +209,7 @@ ftransx:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
ftransy:
|
||||
description: Sample transer Y
|
||||
deviceClass: csaxs_bec.devices.omny.galil.fgalil_ophyd.FlomniGalilMotor
|
||||
@@ -213,6 +225,7 @@ ftransy:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
sensor_voltage: -2.4
|
||||
ftransz:
|
||||
@@ -230,6 +243,7 @@ ftransz:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
ftray:
|
||||
description: Sample transfer tray
|
||||
deviceClass: csaxs_bec.devices.omny.galil.fgalil_ophyd.FlomniGalilMotor
|
||||
@@ -245,6 +259,7 @@ ftray:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
|
||||
|
||||
############################################################
|
||||
@@ -279,6 +294,7 @@ fosax:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
in: 9.124
|
||||
out: 5.3
|
||||
@@ -297,6 +313,7 @@ fosay:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
in: 0.367
|
||||
fosaz:
|
||||
@@ -314,6 +331,7 @@ fosaz:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
in: 8.5
|
||||
out: 6
|
||||
@@ -334,6 +352,7 @@ rtx:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: on_request
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
low_signal: 10000
|
||||
min_signal: 9000
|
||||
@@ -350,6 +369,7 @@ rty:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: on_request
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
tomo_additional_offsety: 0
|
||||
rtz:
|
||||
@@ -364,6 +384,7 @@ rtz:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: on_request
|
||||
connectionTimeout: 20
|
||||
|
||||
############################################################
|
||||
####################### Cameras ############################
|
||||
@@ -408,20 +429,20 @@ cam_xeye:
|
||||
readOnly: false
|
||||
readoutPriority: async
|
||||
|
||||
cam_ids_rgb:
|
||||
description: Camera flOMNI Xray eye ID203
|
||||
deviceClass: csaxs_bec.devices.ids_cameras.ids_camera.IDSCamera
|
||||
deviceConfig:
|
||||
camera_id: 203
|
||||
bits_per_pixel: 24
|
||||
num_rotation_90: 3
|
||||
transpose: false
|
||||
force_monochrome: true
|
||||
m_n_colormode: 1
|
||||
enabled: true
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: async
|
||||
# cam_ids_rgb:
|
||||
# description: Camera flOMNI Xray eye ID203
|
||||
# deviceClass: csaxs_bec.devices.ids_cameras.ids_camera.IDSCamera
|
||||
# deviceConfig:
|
||||
# camera_id: 203
|
||||
# bits_per_pixel: 24
|
||||
# num_rotation_90: 3
|
||||
# transpose: false
|
||||
# force_monochrome: true
|
||||
# m_n_colormode: 1
|
||||
# enabled: true
|
||||
# onFailure: buffer
|
||||
# readOnly: false
|
||||
# readoutPriority: async
|
||||
|
||||
|
||||
# ############################################################
|
||||
@@ -439,8 +460,8 @@ flomni_temphum:
|
||||
# ########## OMNY / flOMNI / LamNI fast shutter ##############
|
||||
# ############################################################
|
||||
omnyfsh:
|
||||
description: omnyfsh connects to read fast shutter at X12 if in that network
|
||||
deviceClass: csaxs_bec.devices.omny.shutter.OMNYFastEpicsShutter
|
||||
description: omnyfsh connects to fast shutter at X12 if device fsh exists
|
||||
deviceClass: csaxs_bec.devices.omny.shutter.OMNYFastShutter
|
||||
deviceConfig: {}
|
||||
enabled: true
|
||||
onFailure: buffer
|
||||
|
||||
@@ -19,6 +19,7 @@ leyex:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
in: 14.117
|
||||
leyey:
|
||||
@@ -38,6 +39,7 @@ leyey:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
in: 48.069
|
||||
out: 0.5
|
||||
@@ -58,6 +60,7 @@ loptx:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
in: -0.244
|
||||
out: -0.699
|
||||
@@ -78,6 +81,7 @@ lopty:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
in: 3.724
|
||||
out: 3.53
|
||||
@@ -98,6 +102,7 @@ loptz:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
lsamrot:
|
||||
description: Sample rotation
|
||||
deviceClass: csaxs_bec.devices.omny.galil.lgalil_ophyd.LamniGalilMotor
|
||||
@@ -115,6 +120,7 @@ lsamrot:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
lsamx:
|
||||
description: Sample coarse X
|
||||
deviceClass: csaxs_bec.devices.omny.galil.lgalil_ophyd.LamniGalilMotor
|
||||
@@ -132,6 +138,7 @@ lsamx:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
center: 8.768
|
||||
lsamy:
|
||||
@@ -151,6 +158,7 @@ lsamy:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
center: 10.041
|
||||
|
||||
@@ -176,6 +184,7 @@ losax:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
in: -1.442
|
||||
losay:
|
||||
@@ -195,6 +204,7 @@ losay:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
in: -0.171
|
||||
out: 3.8
|
||||
@@ -215,6 +225,7 @@ losaz:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
in: -1
|
||||
out: -3
|
||||
@@ -238,6 +249,7 @@ rtx:
|
||||
deviceTags:
|
||||
- lamni
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
enabled: true
|
||||
readOnly: False
|
||||
rty:
|
||||
@@ -255,6 +267,7 @@ rty:
|
||||
deviceTags:
|
||||
- lamni
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
enabled: true
|
||||
readOnly: False
|
||||
|
||||
|
||||
@@ -69,6 +69,7 @@ rtx:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: on_request
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
low_signal: 8500
|
||||
min_signal: 8000
|
||||
@@ -84,6 +85,7 @@ rty:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: on_request
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
tomo_additional_offsety: 0
|
||||
rtz:
|
||||
@@ -98,6 +100,7 @@ rtz:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: on_request
|
||||
connectionTimeout: 20
|
||||
|
||||
# ############################################################
|
||||
# ##################### OMNY samples #########################
|
||||
@@ -165,6 +168,7 @@ ofzpx:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
in: -0.4317
|
||||
ofzpy:
|
||||
@@ -184,6 +188,7 @@ ofzpy:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
in: 0.7944
|
||||
out: 0.6377
|
||||
@@ -204,6 +209,7 @@ ofzpz:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
in: 0
|
||||
otransx:
|
||||
@@ -223,6 +229,7 @@ otransx:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
in: 0
|
||||
otransy:
|
||||
@@ -242,6 +249,7 @@ otransy:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
up_position: -1.2
|
||||
gripper_sensorvoltagetarget: -2.30
|
||||
@@ -262,6 +270,7 @@ otransz:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
in: 0
|
||||
osamx:
|
||||
@@ -281,6 +290,7 @@ osamx:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
in: -0.1
|
||||
osamz:
|
||||
@@ -300,6 +310,7 @@ osamz:
|
||||
onFailure: buffer
|
||||
readOnly: true
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
in: 0
|
||||
oosay:
|
||||
@@ -319,6 +330,7 @@ oosay:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
near_field_in: 0.531
|
||||
far_field_in: 0.4122
|
||||
@@ -339,6 +351,7 @@ oosax:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
near_field_in: 3.2044
|
||||
far_field_in: 3.022
|
||||
@@ -359,6 +372,7 @@ oosaz:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
near_field_in: -0.4452
|
||||
far_field_in: 6.5
|
||||
@@ -379,6 +393,7 @@ oparkz:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
in: 0
|
||||
oshuttleopen:
|
||||
@@ -398,6 +413,7 @@ oshuttleopen:
|
||||
onFailure: buffer
|
||||
readOnly: true
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
in: 0
|
||||
oshuttlealign:
|
||||
@@ -417,6 +433,7 @@ oshuttlealign:
|
||||
onFailure: buffer
|
||||
readOnly: true
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
in: 0
|
||||
osamy:
|
||||
@@ -436,6 +453,7 @@ osamy:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
in: 0
|
||||
otracky:
|
||||
@@ -455,6 +473,7 @@ otracky:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
start_pos: -4.3431
|
||||
osamroy:
|
||||
@@ -474,6 +493,7 @@ osamroy:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
in: 0
|
||||
otrackz:
|
||||
@@ -493,6 +513,7 @@ otrackz:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
start_pos: -0.6948
|
||||
oeyex:
|
||||
@@ -512,6 +533,7 @@ oeyex:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
xray_in: -45.7394
|
||||
oeyez:
|
||||
@@ -531,6 +553,7 @@ oeyez:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
xray_in: -2
|
||||
oeyey:
|
||||
@@ -550,6 +573,7 @@ oeyey:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
xray_in: 0.0229
|
||||
|
||||
@@ -572,6 +596,7 @@ ocsx:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
nothing: 0
|
||||
ocsy:
|
||||
@@ -589,6 +614,7 @@ ocsy:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
nothing: 0
|
||||
oshield:
|
||||
@@ -606,5 +632,6 @@ oshield:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
userParameter:
|
||||
nothing: 0
|
||||
|
||||
@@ -1 +1,15 @@
|
||||
############################################################
|
||||
|
||||
|
||||
|
||||
############################################################
|
||||
##################### EPS ##################################
|
||||
############################################################
|
||||
x12saEPS:
|
||||
description: X12SA EPS info and control
|
||||
deviceClass: csaxs_bec.devices.epics.eps.EPS
|
||||
deviceConfig: {}
|
||||
enabled: true
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
@@ -64,6 +64,7 @@ npx:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
deviceTags:
|
||||
- npoint
|
||||
npy:
|
||||
@@ -81,5 +82,6 @@ npy:
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: baseline
|
||||
connectionTimeout: 20
|
||||
deviceTags:
|
||||
- npoint
|
||||
@@ -37,7 +37,7 @@ to interrupt and ongoing sequence if needed.
|
||||
- a = t0 + 2ms (2ms delay to allow the shutter to open)
|
||||
- b = a + 1us (short pulse)
|
||||
- c = t0
|
||||
- d = a + exp_time * burst_count + 1ms (to allow the shutter to close)
|
||||
- d = a + exp_time * burst_count
|
||||
- e = d
|
||||
- f = e + 1us (short pulse to OR gate for MCS triggering)
|
||||
|
||||
|
||||
@@ -24,7 +24,7 @@ DELAY CHANNELS:
|
||||
- a = t0 + 2ms (2ms delay to allow the shutter to open)
|
||||
- b = a + 1us (short pulse)
|
||||
- c = t0
|
||||
- d = a + exp_time * burst_count + 1ms (to allow the shutter to close)
|
||||
- d = a + exp_time * burst_count
|
||||
- e = d
|
||||
- f = e + 1us (short pulse to OR gate for MCS triggering)
|
||||
"""
|
||||
@@ -37,7 +37,9 @@ import traceback
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from bec_lib.logger import bec_logger
|
||||
from ophyd_devices import CompareStatus, DeviceStatus, TransitionStatus
|
||||
from ophyd import Component as Cpt
|
||||
from ophyd import EpicsSignalRO, Kind
|
||||
from ophyd_devices import CompareStatus, DeviceStatus, StatusBase, TransitionStatus
|
||||
from ophyd_devices.interfaces.base_classes.psi_device_base import PSIDeviceBase
|
||||
|
||||
from csaxs_bec.devices.epics.delay_generator_csaxs.delay_generator_csaxs import (
|
||||
@@ -69,7 +71,7 @@ logger = bec_logger.logger
|
||||
# This can be adapted as needed, or fine-tuned per channel. On every reload of the
|
||||
# device configuration in BEC, these values will be set into the DDG1 device.
|
||||
_DEFAULT_CHANNEL_CONFIG: ChannelConfig = {
|
||||
"amplitude": 5.0,
|
||||
"amplitude": 4.5,
|
||||
"offset": 0.0,
|
||||
"polarity": OUTPUTPOLARITY.POSITIVE,
|
||||
"mode": "ttl",
|
||||
@@ -131,6 +133,27 @@ class DDG1(PSIDeviceBase, DelayGeneratorCSAXS):
|
||||
device_manager (DeviceManagerBase | None, optional): Device manager. Defaults to None.
|
||||
"""
|
||||
|
||||
USER_ACCESS = ["keep_shutter_open_during_scan", "set_trigger"]
|
||||
|
||||
# TODO Consider using the 'fsh' device instead.
|
||||
fast_shutter_readback = Cpt(
|
||||
EpicsSignalRO,
|
||||
read_pv="X12SA-ES1-TTL:INP_01",
|
||||
add_prefix=("",), # Add this to prevent the prefix to be added to the signal
|
||||
kind=Kind.omitted,
|
||||
auto_monitor=True,
|
||||
)
|
||||
# The shutter control PV can indicate if the shutter is requested to be kept open. If that
|
||||
# is the case, we can not use the signal shutter_readback signal to check if the delay cycle
|
||||
# finishes but have to use the polling of the event status register to check if the burst finished.
|
||||
fast_shutter_control = Cpt(
|
||||
EpicsSignalRO,
|
||||
read_pv="X12SA-ES1-TTL:OUT_01",
|
||||
add_prefix=("",), # Add this to prevent the prefix to be added to the signal
|
||||
kind=Kind.omitted,
|
||||
auto_monitor=True,
|
||||
)
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
name: str,
|
||||
@@ -142,6 +165,7 @@ class DDG1(PSIDeviceBase, DelayGeneratorCSAXS):
|
||||
super().__init__(
|
||||
name=name, prefix=prefix, scan_info=scan_info, device_manager=device_manager, **kwargs
|
||||
)
|
||||
self._shutter_to_open_delay = 2e-3
|
||||
self.device_manager = device_manager
|
||||
self._poll_thread = threading.Thread(target=self._poll_event_status, daemon=True)
|
||||
self._poll_thread_run_event = threading.Event()
|
||||
@@ -192,6 +216,21 @@ class DDG1(PSIDeviceBase, DelayGeneratorCSAXS):
|
||||
self.burst_delay.put(0)
|
||||
self.burst_count.put(1)
|
||||
|
||||
def keep_shutter_open_during_scan(self, open: True) -> None:
|
||||
"""
|
||||
Method to configure the delay generator for keeping the shutter open during a scans.
|
||||
This means that the additional delay to open the shutter needs to be removed (2e-3)
|
||||
from the timing of the signals.
|
||||
|
||||
Args:
|
||||
open (bool): If True, the shutter will be kept open during the scan.
|
||||
If False, the shutter will be opened and closed for each trigger cycle.
|
||||
"""
|
||||
if open is True:
|
||||
self._shutter_to_open_delay = 0
|
||||
else:
|
||||
self._shutter_to_open_delay = 2e-3
|
||||
|
||||
def on_stage(self) -> None:
|
||||
"""
|
||||
|
||||
@@ -230,6 +269,18 @@ class DDG1(PSIDeviceBase, DelayGeneratorCSAXS):
|
||||
if self.burst_count.get() != 1:
|
||||
self.burst_count.put(1)
|
||||
|
||||
#####################################
|
||||
## Setup trigger source if needed ###
|
||||
#####################################
|
||||
|
||||
# NOTE Some scans may change the trigger source to an external trigger,
|
||||
# so we will make sure that the default trigger source is set for the DDG1
|
||||
# before each scan. If a scan requires a different trigger source, i.e.
|
||||
# external triggers then the scan should implement this change after the
|
||||
# on_stage method was called.
|
||||
if self.trigger_source.get() != DEFAULT_TRIGGER_SOURCE:
|
||||
self.set_trigger(DEFAULT_TRIGGER_SOURCE)
|
||||
|
||||
#########################################
|
||||
### Setup timing for burst and delays ###
|
||||
#########################################
|
||||
@@ -239,27 +290,34 @@ class DDG1(PSIDeviceBase, DelayGeneratorCSAXS):
|
||||
|
||||
# Burst Period DDG1
|
||||
# Set burst_period to shutter width
|
||||
# c/t0 + 2ms + exp_time * burst_count + 1ms
|
||||
shutter_width = 2e-3 + exp_time * frames_per_trigger + 1e-3
|
||||
# c/t0 + self._shutter_to_open_delay + exp_time * burst_count
|
||||
shutter_width = (
|
||||
self._shutter_to_open_delay + exp_time * frames_per_trigger
|
||||
) # Shutter starts closing at end of exposure
|
||||
if self.burst_period.get() != shutter_width:
|
||||
self.burst_period.put(shutter_width)
|
||||
|
||||
# Trigger DDG2
|
||||
# a = t0 + 2ms, b = a + 1us
|
||||
# a has reference to t0, b has reference to a
|
||||
# Add delay of 2ms to allow shutter to open
|
||||
self.set_delay_pairs(channel="ab", delay=2e-3, width=1e-6)
|
||||
# Add delay of self._shutter_to_open_delay to allow shutter to open
|
||||
self.set_delay_pairs(channel="ab", delay=self._shutter_to_open_delay, width=1e-6)
|
||||
|
||||
# Trigger shutter
|
||||
# d = c/t0 + 2ms + exp_time * burst_count + 1ms
|
||||
# d = c/t0 + self._shutter_to_open_delay + exp_time * burst_count + 1ms
|
||||
# c has reference to t0, d has reference to c
|
||||
# Shutter opens without delay at t0, closes after exp_time * burst_count + 3ms (2ms open, 1ms close)
|
||||
# Shutter opens without delay at t0, closes after exp_time * burst_count + 2ms (self._shutter_to_open_delay)
|
||||
self.set_delay_pairs(channel="cd", delay=0, width=shutter_width)
|
||||
|
||||
self.set_delay_pairs(channel="gh", delay=self._shutter_to_open_delay, width=(shutter_width-self._shutter_to_open_delay))
|
||||
|
||||
# Trigger extra pulse for MCS OR gate
|
||||
# f = e + 1us
|
||||
# e has refernce to d, f has reference to e
|
||||
self.set_delay_pairs(channel="ef", delay=0, width=1e-6)
|
||||
if self.scan_info.msg.scan_type == "fly":
|
||||
self.set_delay_pairs(channel="ef", delay=0, width=0)
|
||||
else:
|
||||
self.set_delay_pairs(channel="ef", delay=0, width=1e-6)
|
||||
|
||||
# NOTE Add additional sleep to make sure that the IOC and DDG HW process the values properly
|
||||
# This value has been choosen empirically after testing with the HW. It's
|
||||
@@ -431,7 +489,19 @@ class DDG1(PSIDeviceBase, DelayGeneratorCSAXS):
|
||||
|
||||
def on_trigger(self) -> DeviceStatus:
|
||||
"""
|
||||
This method is called from BEC as a software trigger.
|
||||
This method is called from BEC as a software trigger. Here the logic is as follows:
|
||||
|
||||
We first check if the trigger_source is set to SINGLE_SHOT. Only then will we received,
|
||||
otherwise we return a status object directly as the DDG is triggered by an external
|
||||
source which will have to implement its own logic to wait for trigger signals to
|
||||
be received.
|
||||
|
||||
I SINGLE_SHOT, the implementation here will send a software trigger. Now there are
|
||||
two options to wait for the trigger (burst) cycle to be done. One is to rely on the
|
||||
signal of the "mcs" card if it is present. However, this is only possible if the
|
||||
scan_type is not "fly" as in fly scans the ef channel is not triggered to send the last
|
||||
pulse to the card (but the card is finishing its acquisition in complete itself). Then
|
||||
we rely on the polling of the event status register to check if the burst cycle is done.
|
||||
|
||||
It follows a specific procedure to ensure that the DDG1 and MCS card are properly handled
|
||||
on a trigger event. The established logic is as follows:
|
||||
@@ -450,33 +520,46 @@ class DDG1(PSIDeviceBase, DelayGeneratorCSAXS):
|
||||
- Return the status object to BEC which will automatically resolve once the status register has
|
||||
the END_OF_BURST bit set. The callback of the status object will also stop the polling loop.
|
||||
"""
|
||||
overall_start = time.time()
|
||||
self._stop_polling()
|
||||
self._poll_thread_poll_loop_done.wait(timeout=1)
|
||||
# NOTE: This sleep is important to ensure that the HW is ready to process new commands.
|
||||
# It has been empirically determined after long testing that this improves stability.
|
||||
time.sleep(0.02)
|
||||
|
||||
# NOTE If the trigger source is not SINGLE_SHOT, the DDG is triggered by an external source
|
||||
# thus we can not expect that trigger signals are meant to be awaited for. In this case,
|
||||
# we can directly return.
|
||||
if self.trigger_source.get() != TRIGGERSOURCE.SINGLE_SHOT.value:
|
||||
status = StatusBase(obj=self)
|
||||
status.set_finished()
|
||||
return status
|
||||
|
||||
# NOTE If the MCS card is present in the current session of BEC,
|
||||
# we prepare the card for the next trigger. The procedure is implemented
|
||||
# in the '_prepare_mcs_on_trigger' method.
|
||||
# Prepare the MCS card for the next software trigger
|
||||
# in the '_prepare_mcs_on_trigger' method. We will also use the mcs card
|
||||
# to indicate when the burst cycle is done. If no mcs card is available
|
||||
# the fallback is to use the polling of the DDG
|
||||
mcs = self.device_manager.devices.get("mcs", None)
|
||||
if mcs is None or mcs.enabled is False:
|
||||
logger.info("Did not find mcs card with name 'mcs' in current session")
|
||||
if mcs is None or mcs.enabled is False or self.scan_info.msg.scan_type == "fly":
|
||||
self._poll_thread_poll_loop_done.wait(timeout=1)
|
||||
logger.warning("Did not find mcs card with name 'mcs' in current session")
|
||||
time.sleep(0.02)
|
||||
# Shutter is kept open, we can only rely on the event status register
|
||||
status = self._prepare_trigger_status_event()
|
||||
# Start polling thread again to monitor event status
|
||||
self._start_polling()
|
||||
else:
|
||||
start_time = time.time()
|
||||
logger.debug(f"Preparing mcs card ")
|
||||
status_mcs = self._prepare_mcs_on_trigger(mcs)
|
||||
# NOTE Timeout of 3s should be plenty, any longer wait should checked. If this happens to crash
|
||||
# an acquisition regularly with a WaitTimeoutError, the timeout can be increased but it should
|
||||
# be investigated why the EPICS interface is slow to respond.
|
||||
status_mcs.wait(timeout=3)
|
||||
status = TransitionStatus(mcs.acquiring, [ACQUIRING.ACQUIRING, ACQUIRING.DONE])
|
||||
logger.debug(f"Finished preparing mcs card {time.time()-start_time}")
|
||||
|
||||
# Prepare StatusBitsCompareStatus to resolve once the END_OF_BURST bit was set.
|
||||
status = self._prepare_trigger_status_event()
|
||||
|
||||
# Start polling thread again to monitor event status
|
||||
self._start_polling()
|
||||
# Trigger the DDG1
|
||||
# Send trigger
|
||||
self.trigger_shot.put(1, use_complete=True)
|
||||
self.cancel_on_stop(status)
|
||||
logger.info(f"Configured ddg in {time.time()-overall_start}")
|
||||
return status
|
||||
|
||||
def on_stop(self) -> None:
|
||||
|
||||
@@ -37,6 +37,7 @@ from csaxs_bec.devices.epics.delay_generator_csaxs.delay_generator_csaxs import
|
||||
ChannelConfig,
|
||||
DelayGeneratorCSAXS,
|
||||
LiteralChannels,
|
||||
BURSTCONFIG,
|
||||
)
|
||||
|
||||
logger = bec_logger.logger
|
||||
@@ -47,7 +48,7 @@ logger = bec_logger.logger
|
||||
|
||||
# NOTE Default channel configuration for the DDG2 delay generator channels
|
||||
_DEFAULT_CHANNEL_CONFIG: ChannelConfig = {
|
||||
"amplitude": 5.0,
|
||||
"amplitude": 4.5,
|
||||
"offset": 0.0,
|
||||
"polarity": OUTPUTPOLARITY.POSITIVE,
|
||||
"mode": "ttl",
|
||||
@@ -134,6 +135,9 @@ class DDG2(PSIDeviceBase, DelayGeneratorCSAXS):
|
||||
self.set_trigger(DEFAULT_TRIGGER_SOURCE)
|
||||
self.set_references_for_channels(DEFAULT_REFERENCES)
|
||||
|
||||
# Set burst config
|
||||
self.burst_config.put(BURSTCONFIG.FIRST_CYCLE.value)
|
||||
|
||||
def on_stage(self) -> DeviceStatus | StatusBase | None:
|
||||
"""
|
||||
|
||||
|
||||
@@ -488,6 +488,7 @@ class DelayGeneratorCSAXS(Device):
|
||||
name="trigger_source",
|
||||
kind=Kind.omitted,
|
||||
doc="Trigger Source for the DDG, options in TRIGGERSOURCE",
|
||||
auto_monitor=True,
|
||||
)
|
||||
trigger_level = Cpt(
|
||||
EpicsSignal,
|
||||
|
||||
435
csaxs_bec/devices/epics/eps.py
Normal file
435
csaxs_bec/devices/epics/eps.py
Normal file
@@ -0,0 +1,435 @@
|
||||
"""EPS module for cSAXS beamline: defines the EPS device with its components and methods."""
|
||||
|
||||
# fmt: off
|
||||
# Disable Black formatting for this file to preserve an easier readable structure for the component definitions.
|
||||
|
||||
# pylint: disable=line-too-long
|
||||
from __future__ import annotations
|
||||
|
||||
import time
|
||||
|
||||
from bec_lib.logger import bec_logger
|
||||
from ophyd import Component as Cpt
|
||||
from ophyd import Device, EpicsSignal, EpicsSignalRO, Kind
|
||||
from ophyd_devices import PSIDeviceBase
|
||||
|
||||
logger = bec_logger.logger
|
||||
|
||||
# ---------------------------
|
||||
# Registry: sections/channels
|
||||
# ---------------------------
|
||||
|
||||
|
||||
class EPSSubDevices(Device):
|
||||
"""Base class for EPS sub-device components (e.g. alarms, valves, shutters). with common methods if needed."""
|
||||
|
||||
def describe(self) -> dict:
|
||||
desc = super().describe()
|
||||
for walk in self.walk_signals():
|
||||
if walk.item.attr_name not in desc:
|
||||
desc[walk.item.attr_name] = walk.item.describe()
|
||||
return desc
|
||||
|
||||
|
||||
class EPSAlarms(EPSSubDevices):
|
||||
"""EPS alarms at the cSAXS beamline."""
|
||||
|
||||
eps_alarm_cnt = Cpt(EpicsSignalRO, read_pv="X12SA-EPS-PLC:AlarmCnt_EPS", add_prefix=("",), name="eps_alarm_cnt", kind=Kind.omitted, doc="X12SA EPS Alarm count", auto_monitor=True, labels={"alarm"})
|
||||
mis_alarm_cnt = Cpt(EpicsSignalRO, read_pv="ARS00-MIS-PLC-01:AlarmCnt_Frontends", add_prefix=("",), name="mis_alarm_cnt", kind=Kind.omitted, doc="FrontEnd MIS Alarm count", auto_monitor=True, labels={"alarm"})
|
||||
|
||||
|
||||
class ValvesFrontend(EPSSubDevices):
|
||||
"""Valves frontend at the cSAXS beamline."""
|
||||
|
||||
|
||||
fe_vvpg_0000 = Cpt(EpicsSignalRO, read_pv="X12SA-FE-VVPG-0000:PLC_OPEN", add_prefix=("",), name="fevvpg0000", kind=Kind.omitted, doc="FE-VVPG-0000", auto_monitor=True, labels={"valve"})
|
||||
fe_vvpg_1010 = Cpt(EpicsSignalRO, read_pv="X12SA-FE-VVPG-1010:PLC_OPEN", add_prefix=("",), name="fevvpg1010", kind=Kind.omitted, doc="FE-VVPG-1010", auto_monitor=True, labels={"valve"})
|
||||
fe_vvfv_2010 = Cpt(EpicsSignalRO, read_pv="X12SA-FE-VVFV-2010:PLC_OPEN", add_prefix=("",), name="fevvfv2010", kind=Kind.omitted, doc="FE-VVFV-2010", auto_monitor=True, labels={"valve"})
|
||||
fe_vvpg_2010 = Cpt(EpicsSignalRO, read_pv="X12SA-FE-VVPG-2010:PLC_OPEN", add_prefix=("",), name="fevvpg2010", kind=Kind.omitted, doc="FE-VVPG-2010", auto_monitor=True, labels={"valve"})
|
||||
|
||||
class ValvesOptics(EPSSubDevices):
|
||||
"""Valves at the optics hutch."""
|
||||
|
||||
op_vvpg_1010 = Cpt(EpicsSignalRO, read_pv="X12SA-OP-VVPG-1010:PLC_OPEN", add_prefix=("",), name="opvvpg1010", kind=Kind.omitted, doc="OP-VVPG-1010", auto_monitor=True, labels={"valve"})
|
||||
op_vvpg_2010 = Cpt(EpicsSignalRO, read_pv="X12SA-OP-VVPG-2010:PLC_OPEN", add_prefix=("",), name="opvvpg2010", kind=Kind.omitted, doc="OP-VVPG-2010", auto_monitor=True, labels={"valve"})
|
||||
op_vvpg_3010 = Cpt(EpicsSignalRO, read_pv="X12SA-OP-VVPG-3010:PLC_OPEN", add_prefix=("",), name="opvvpg3010", kind=Kind.omitted, doc="OP-VVPG-3010", auto_monitor=True, labels={"valve"})
|
||||
op_vvpg_3020 = Cpt(EpicsSignalRO, read_pv="X12SA-OP-VVPG-3020:PLC_OPEN", add_prefix=("",), name="opvvpg3020", kind=Kind.omitted, doc="OP-VVPG-3020", auto_monitor=True, labels={"valve"})
|
||||
op_vvpg_4010 = Cpt(EpicsSignalRO, read_pv="X12SA-OP-VVPG-4010:PLC_OPEN", add_prefix=("",), name="opvvpg4010", kind=Kind.omitted, doc="OP-VVPG-4010", auto_monitor=True, labels={"valve"})
|
||||
op_vvpg_5010 = Cpt(EpicsSignalRO, read_pv="X12SA-OP-VVPG-5010:PLC_OPEN", add_prefix=("",), name="opvvpg5010", kind=Kind.omitted, doc="OP-VVPG-5010", auto_monitor=True, labels={"valve"})
|
||||
op_vvpg_6010 = Cpt(EpicsSignalRO, read_pv="X12SA-OP-VVPG-6010:PLC_OPEN", add_prefix=("",), name="opvvpg6010", kind=Kind.omitted, doc="OP-VVPG-6010", auto_monitor=True, labels={"valve"})
|
||||
op_vvpg_7010 = Cpt(EpicsSignalRO, read_pv="X12SA-OP-VVPG-7010:PLC_OPEN", add_prefix=("",), name="opvvpg7010", kind=Kind.omitted, doc="OP-VVPG-7010", auto_monitor=True, labels={"valve"})
|
||||
|
||||
|
||||
class ValvesEndstation(EPSSubDevices):
|
||||
"""Endstation valves at the cSAXS beamline."""
|
||||
|
||||
es_vvpg_1010 = Cpt(EpicsSignalRO, read_pv="X12SA-ES-VVPG-1010:PLC_OPEN", add_prefix=("",), name="esvvpg1010", kind=Kind.omitted, doc="ES-VVPG-1010", auto_monitor=True, labels={"valve"})
|
||||
|
||||
|
||||
class ShuttersFrontend(EPSSubDevices):
|
||||
"""Shutters frontend."""
|
||||
|
||||
fe_psh1 = Cpt(EpicsSignalRO, read_pv="X12SA-FE-PSH1-EMLS-0010:OPEN", add_prefix=("",), name="fepsh1", kind=Kind.omitted, doc="FE-PSH1-EMLS-0010", auto_monitor=True, labels={"shutter"})
|
||||
fe_sto1 = Cpt(EpicsSignalRO, read_pv="X12SA-FE-STO1-EMLS-0010:OPEN", add_prefix=("",), name="festo1", kind=Kind.omitted, doc="FE-STO1-EMLS-0010", auto_monitor=True, labels={"shutter"})
|
||||
|
||||
|
||||
class ShuttersEndstation(EPSSubDevices):
|
||||
"""Shutters at the endstation."""
|
||||
|
||||
es_psh17010 = Cpt(EpicsSignalRO, read_pv="X12SA-OP-PSH1-EMLS-7010:OPEN", add_prefix=("",), name="espsh17010", kind=Kind.omitted, doc="OP-PSH1-EMLS-7010", auto_monitor=True, labels={"shutter"})
|
||||
|
||||
|
||||
class DMMMonochromator(EPSSubDevices):
|
||||
"""DMM monochromator signals at the cSAXS beamline."""
|
||||
|
||||
dmm_temp_surface_1 = Cpt(EpicsSignalRO, read_pv="X12SA-OP-DMM-ETTC-3010:TEMP", add_prefix=("",), name="dmm_temp_surface_1", kind=Kind.omitted, doc="DMM Temp Surface 1", auto_monitor=True, labels={"temp"})
|
||||
dmm_temp_surface_2 = Cpt(EpicsSignalRO, read_pv="X12SA-OP-DMM-ETTC-3020:TEMP", add_prefix=("",), name="dmm_temp_surface_2", kind=Kind.omitted, doc="DMM Temp Surface 2", auto_monitor=True, labels={"temp"})
|
||||
dmm_temp_shield_1_disaster = Cpt(EpicsSignalRO, read_pv="X12SA-OP-DMM-ETTC-3030:TEMP", add_prefix=("",), name="dmm_temp_shield_1_disaster", kind=Kind.omitted, doc="DMM Temp Shield 1 (disaster)", auto_monitor=True, labels={"temp"})
|
||||
dmm_temp_shield_2_disaster = Cpt(EpicsSignalRO, read_pv="X12SA-OP-DMM-ETTC-3040:TEMP", add_prefix=("",), name="dmm_temp_shield_2_disaster", kind=Kind.omitted, doc="DMM Temp Shield 2 (disaster)", auto_monitor=True, labels={"temp"})
|
||||
|
||||
dmm_translation_thru = Cpt(EpicsSignalRO, read_pv="X12SA-OP-DMM-EMLS-3010:THRU", add_prefix=("",), name="dmm_translation_thru", kind=Kind.omitted, doc="DMM Translation ThruPos", auto_monitor=True, labels={"switch"})
|
||||
dmm_translation_in = Cpt(EpicsSignalRO, read_pv="X12SA-OP-DMM-EMLS-3020:IN", add_prefix=("",), name="dmm_translation_in", kind=Kind.omitted, doc="DMM Translation InPos", auto_monitor=True, labels={"switch"})
|
||||
dmm_bragg_thru = Cpt(EpicsSignalRO, read_pv="X12SA-OP-DMM-EMLS-3030:THRU", add_prefix=("",), name="dmm_bragg_thru", kind=Kind.omitted, doc="DMM Bragg ThruPos", auto_monitor=True, labels={"switch"})
|
||||
dmm_bragg_in = Cpt(EpicsSignalRO, read_pv="X12SA-OP-DMM-EMLS-3040:IN", add_prefix=("",), name="dmm_bragg_in", kind=Kind.omitted, doc="DMM Bragg InPos", auto_monitor=True, labels={"switch"})
|
||||
|
||||
dmm_heater_fault_xtal_1 = Cpt(EpicsSignalRO, read_pv="X12SA-OP-DMM-EMSW-3050:SWITCH", add_prefix=("",), name="dmm_heater_fault_xtal_1", kind=Kind.omitted, doc="DMM Heater Fault XTAL 1", auto_monitor=True, labels={"fault"})
|
||||
dmm_heater_fault_xtal_2 = Cpt(EpicsSignalRO, read_pv="X12SA-OP-DMM-EMSW-3060:SWITCH", add_prefix=("",), name="dmm_heater_fault_xtal_2", kind=Kind.omitted, doc="DMM Heater Fault XTAL 2", auto_monitor=True, labels={"fault"})
|
||||
dmm_heater_fault_support_1 = Cpt(EpicsSignalRO, read_pv="X12SA-OP-DMM-EMSW-3070:SWITCH", add_prefix=("",), name="dmm_heater_fault_support_1", kind=Kind.omitted, doc="DMM Heater Fault Support 1", auto_monitor=True, labels={"fault"})
|
||||
|
||||
dmm_energy = Cpt(EpicsSignalRO, read_pv="X12SA-OP-DMM1:ENERGY-GET", add_prefix=("",), name="dmm_energy", kind=Kind.omitted, doc="DMM Energy", auto_monitor=True, labels={"energy"})
|
||||
dmm_position = Cpt(EpicsSignalRO, read_pv="X12SA-OP-DMM1:POSITION", add_prefix=("",), name="dmm_position", kind=Kind.omitted, doc="DMM Position", auto_monitor=True, labels={"string"})
|
||||
dmm_stripe = Cpt(EpicsSignalRO, read_pv="X12SA-OP-DMM1:STRIPE", add_prefix=("",), name="dmm_stripe", kind=Kind.omitted, doc="DMM Stripe", auto_monitor=True, labels={"string"})
|
||||
|
||||
|
||||
class CCMMonochromator(EPSSubDevices):
|
||||
"""CCM monochromator signals at the cSAXS beamline."""
|
||||
|
||||
ccm_temp_crystal = Cpt(EpicsSignalRO, read_pv="X12SA-OP-CCM-ETTC-4010:TEMP", add_prefix=("",), name="ccm_temp_crystal", kind=Kind.omitted, doc="CCM Temp Crystal", auto_monitor=True, labels={"temp"})
|
||||
ccm_temp_shield_disaster = Cpt(EpicsSignalRO, read_pv="X12SA-OP-CCM-ETTC-4020:TEMP", add_prefix=("",), name="ccm_temp_shield_disaster", kind=Kind.omitted, doc="CCM Temp Shield (disaster)", auto_monitor=True, labels={"temp"})
|
||||
|
||||
ccm_heater_fault_1 = Cpt(EpicsSignalRO, read_pv="X12SA-OP-CCM-EMSW-4010:SWITCH", add_prefix=("",), name="ccm_heater_fault_1", kind=Kind.omitted, doc="CCM Heater Fault 1", auto_monitor=True, labels={"fault"})
|
||||
ccm_heater_fault_2 = Cpt(EpicsSignalRO, read_pv="X12SA-OP-CCM-EMSW-4020:SWITCH", add_prefix=("",), name="ccm_heater_fault_2", kind=Kind.omitted, doc="CCM Heater Fault 2", auto_monitor=True, labels={"fault"})
|
||||
ccm_heater_fault_3 = Cpt(EpicsSignalRO, read_pv="X12SA-OP-CCM-EMSW-4030:SWITCH", add_prefix=("",), name="ccm_heater_fault_3", kind=Kind.omitted, doc="CCM Heater Fault 3", auto_monitor=True, labels={"fault"})
|
||||
|
||||
ccm_energy = Cpt(EpicsSignalRO, read_pv="X12SA-OP-CCM1:ENERGY-GET", add_prefix=("",), name="ccm_energy", kind=Kind.omitted, doc="CCM Energy", auto_monitor=True, labels={"energy"})
|
||||
ccm_position = Cpt(EpicsSignalRO, read_pv="X12SA-OP-CCM1:POSITION", add_prefix=("",), name="ccm_position", kind=Kind.omitted, doc="CCM Position", auto_monitor=True, labels={"string"})
|
||||
|
||||
|
||||
class CoolingWater(EPSSubDevices):
|
||||
"""Cooling water signals at the cSAXS beamline."""
|
||||
|
||||
op_sl1_efsw_2010_flow = Cpt(EpicsSignalRO, read_pv="X12SA-OP-SL1-EFSW-2010:FLOW", add_prefix=("",), name="op_sl1_efsw_2010_flow", kind=Kind.omitted, doc="OP-SL1-EFSW-2010", auto_monitor=True, labels={"flow"})
|
||||
op_sl2_efsw_2010_flow = Cpt(EpicsSignalRO, read_pv="X12SA-OP-SL2-EFSW-2010:FLOW", add_prefix=("",), name="op_sl2_efsw_2010_flow", kind=Kind.omitted, doc="OP-SL2-EFSW-2010", auto_monitor=True, labels={"flow"})
|
||||
op_eb1_efsw_5010_flow = Cpt(EpicsSignalRO, read_pv="X12SA-OP-EB1-EFSW-5010:FLOW", add_prefix=("",), name="op_eb1_efsw_5010_flow", kind=Kind.omitted, doc="OP-EB1-EFSW-5010", auto_monitor=True, labels={"flow"})
|
||||
op_eb1_efsw_5020_flow = Cpt(EpicsSignalRO, read_pv="X12SA-OP-EB1-EFSW-5020:FLOW", add_prefix=("",), name="op_eb1_efsw_5020_flow", kind=Kind.omitted, doc="OP-EB1-EFSW-5020", auto_monitor=True, labels={"flow"})
|
||||
op_sl3_efsw_5010_flow = Cpt(EpicsSignalRO, read_pv="X12SA-OP-SL3-EFSW-5010:FLOW", add_prefix=("",), name="op_sl3_efsw_5010_flow", kind=Kind.omitted, doc="OP-SL3-EFSW-5010", auto_monitor=True, labels={"flow"})
|
||||
op_kb_efsw_6010_flow = Cpt(EpicsSignalRO, read_pv="X12SA-OP-KB-EFSW-6010:FLOW", add_prefix=("",), name="op_kb_efsw_6010_flow", kind=Kind.omitted, doc="OP-KB-EFSW-6010", auto_monitor=True, labels={"flow"})
|
||||
op_psh1_efsw_7010_flow = Cpt(EpicsSignalRO, read_pv="X12SA-OP-PSH1-EFSW-7010:FLOW", add_prefix=("",), name="op_psh1_efsw_7010_flow", kind=Kind.omitted, doc="OP-PSH1-EFSW-7010", auto_monitor=True, labels={"flow"})
|
||||
es_eb2_efsw_1010_flow = Cpt(EpicsSignalRO, read_pv="X12SA-ES-EB2-EFSW-1010:FLOW", add_prefix=("",), name="es_eb2_efsw_1010_flow", kind=Kind.omitted, doc="ES-EB2-EFSW-1010", auto_monitor=True, labels={"flow"})
|
||||
|
||||
op_cs_ecvw_0010 = Cpt(EpicsSignalRO, read_pv="X12SA-OP-CS-ECVW-0010:PLC_OPEN", add_prefix=("",), name="op_cs_ecvw_0010", kind=Kind.omitted, doc="OP-CS-ECVW-0010", auto_monitor=True, labels={"valve"})
|
||||
op_cs_ecvw_0020 = Cpt(EpicsSignalRO, read_pv="X12SA-OP-CS-ECVW-0020:PLC_OPEN", add_prefix=("",), name="op_cs_ecvw_0020", kind=Kind.omitted, doc="OP-CS-ECVW-0020", auto_monitor=True, labels={"valve"})
|
||||
|
||||
|
||||
class EPS(PSIDeviceBase):
|
||||
"""EPS device for the cSAXS beamline."""
|
||||
USER_ACCESS = [
|
||||
"show_all",
|
||||
"water_cooling_op",
|
||||
]
|
||||
alarms = Cpt(EPSAlarms, name="alarms", doc="EPS Alarms")
|
||||
valves_frontend = Cpt(ValvesFrontend, name="valves_frontend", doc="Valves Frontend")
|
||||
valves_optics = Cpt(ValvesOptics, name="valves_optics", doc="Valves Optics Hutch")
|
||||
valves_es = Cpt(ValvesEndstation, name="valves_es", doc="Valves ES Hutch")
|
||||
shutters_frontend = Cpt(ShuttersFrontend, name="shutters_frontend", doc="Shutters Frontend")
|
||||
shutters_es = Cpt(ShuttersEndstation, name="shutters_es", doc="Shutters Endstation")
|
||||
dmm_monochromator = Cpt(DMMMonochromator, name="dmm_monochromator", doc="DMM Monochromator")
|
||||
ccm_monochromator = Cpt(CCMMonochromator, name="ccm_monochromator", doc="CCM Monochromator")
|
||||
cooling_water = Cpt(CoolingWater, name="cooling_water", doc="Cooling Water")
|
||||
|
||||
# Acknowledgment signals for PLC communication (if needed for future use)
|
||||
ackerr = Cpt(EpicsSignal, read_pv="X12SA-EPS-PLC:ACKERR-REQUEST", add_prefix=("",), name="ackerr", kind=Kind.omitted, doc="ACKERR request - OP-CS-ECVW-0020", auto_monitor=True, labels={"request"})
|
||||
request = Cpt(EpicsSignal, read_pv="X12SA-OP-CS-ECVW:PLC_REQUEST", add_prefix=("",), name="op_cs_ecvw_request", kind=Kind.omitted, doc="PLC request - OP-CS-ECVW-PLC_REQUEST", auto_monitor=True, labels={"request"})
|
||||
|
||||
def _notify(self, msg: str, show_as_client_msg: bool = True):
|
||||
"""Utility method to print a message, and optionally send it to the client UI if it should be shown also as a client message."""
|
||||
try:
|
||||
if show_as_client_msg:
|
||||
self.device_manager.connector.send_client_info(msg, scope="", show_asap=True)
|
||||
else:
|
||||
print(msg)
|
||||
except Exception:
|
||||
logger.error(f"Failed to send client message, falling back to print: {msg}")
|
||||
print(str(msg))
|
||||
|
||||
# ----------------------------------------------------------
|
||||
# Water cooling operation
|
||||
# ----------------------------------------------------------
|
||||
|
||||
def safe_get(self, sig, default=None):
|
||||
"""Helper method to safely get a signal value, returning a default if there's an error."""
|
||||
try:
|
||||
return sig.get()
|
||||
except Exception as ex:
|
||||
logger.warning(f"Failed to get signal {sig.pvname}: {ex}")
|
||||
return default
|
||||
|
||||
def water_cooling_op(self):
|
||||
"""
|
||||
Open ECVW valves, reset EPS alarms, monitor for 20s,
|
||||
then ensure stability (valves remain open) for 10s.
|
||||
All messages sent to client.
|
||||
"""
|
||||
|
||||
POLL_PERIOD = 2
|
||||
TIMEOUT = 20
|
||||
STABILITY = 15
|
||||
|
||||
self._notify("=== Water Cooling Operation ===")
|
||||
|
||||
# --- Signals ---
|
||||
eps_alarm_sig = self.alarms.eps_alarm_cnt
|
||||
ackerr = self.ackerr
|
||||
request = self.request
|
||||
|
||||
valves = [self.cooling_water.op_cs_ecvw_0010, self.cooling_water.op_cs_ecvw_0020]
|
||||
|
||||
# Flow channels list extracted from CHANNELS
|
||||
flow_items = [walk.item for walk in self.cooling_water.walk_signals() if "flow" in walk.item._ophyd_labels_]
|
||||
|
||||
# --- Step 1: EPS alarm reset ---
|
||||
alarm_value = self.safe_get(eps_alarm_sig, 0)
|
||||
if alarm_value and alarm_value > 0:
|
||||
self._notify(f"[WaterCooling] EPS alarms present ({alarm_value}) → resetting…")
|
||||
try:
|
||||
ackerr.put(1)
|
||||
except Exception as ex:
|
||||
self._notify(f"[WaterCooling] WARNING: ACKERR write failed: {ex}")
|
||||
time.sleep(0.3)
|
||||
else:
|
||||
self._notify("[WaterCooling] No EPS alarms detected.")
|
||||
|
||||
# --- Step 2: Issue open request ---
|
||||
self._notify("[WaterCooling] Sending cooling-valve OPEN request…")
|
||||
try:
|
||||
request.put(1)
|
||||
except Exception as ex:
|
||||
self._notify(f"[WaterCooling] ERROR: Failed to send OPEN request: {ex}")
|
||||
return False
|
||||
|
||||
# --- Step 3: Monitoring loop (clean client table output) ---
|
||||
start = time.time()
|
||||
end = start + TIMEOUT
|
||||
stable_until = None
|
||||
|
||||
# Print (server-side) header once
|
||||
print("Monitoring valves and flow sensors...")
|
||||
print(f" Valves: {valves[0].attr_name[-4:]}, {valves[1].attr_name[-4:]}")
|
||||
print(f" Note: stability requires valves to remain OPEN for {STABILITY} seconds.")
|
||||
|
||||
# One table header to the client (via device manager)
|
||||
# Fixed-width columns for alignment in monospaced UI
|
||||
table_header = f"{'Time':>6} | {'Valves':<21} | {'Flows (OK/FAIL/N/A)':<20}"
|
||||
self._notify(table_header)
|
||||
|
||||
def snapshot():
|
||||
# Valve snapshot
|
||||
v_states = [self.safe_get(v, None) for v in valves]
|
||||
v1 = f"{valves[0].attr_name[-4:]}=" + ("OPEN " if v_states[0] is True or v_states[0] == 1 else "CLOSED" if v_states[0] is False or v_states[0] == 0 else "N/A ")
|
||||
v2 = f"{valves[1].attr_name[-4:]}=" + ("OPEN " if v_states[1] is True or v_states[1] == 1 else "CLOSED" if v_states[1] is False or v_states[1] == 0 else "N/A ")
|
||||
# 2 valves with a single space between => width ~ 21
|
||||
valve_str = f"{v1} {v2}"
|
||||
|
||||
# Flow summary: OK/FAIL/N/A counts (compact)
|
||||
flow_states = []
|
||||
for fsig in flow_items:
|
||||
fval = self.safe_get(fsig, None)
|
||||
flow_states.append(True if fval in (1, True) else False if fval in (0, False) else None)
|
||||
|
||||
ok = sum(1 for f in flow_states if f is True)
|
||||
fail = sum(1 for f in flow_states if f is False)
|
||||
na = sum(1 for f in flow_states if f is None)
|
||||
flow_summary = f"{ok:>2} / {fail:>2} / {na:>2}"
|
||||
|
||||
return v_states, valve_str, flow_summary
|
||||
|
||||
while True:
|
||||
# TODO Consider adding a timeout to avoid infinite loop.
|
||||
now = time.time()
|
||||
elapsed = int(now - start)
|
||||
|
||||
if now > end:
|
||||
# One last line to client
|
||||
v_states, valves_s, flows_s = snapshot()
|
||||
self._notify(f"{elapsed:>6}s | {valves_s:<21} | {flows_s:<20}")
|
||||
print("→ TIMEOUT: Cooling valves failed to remain OPEN.")
|
||||
return False
|
||||
|
||||
# Live snapshot
|
||||
v_states, valves_s, flows_s = snapshot()
|
||||
# Exactly one concise line to client per cycle
|
||||
self._notify(f"{elapsed:>6}s | {valves_s:<21} | {flows_s:<20}")
|
||||
|
||||
both_open = all(s is not None and bool(s) for s in v_states)
|
||||
|
||||
if both_open:
|
||||
if stable_until is None:
|
||||
stable_until = now + STABILITY
|
||||
print(f"[WaterCooling] Both valves OPEN → starting {STABILITY}s stability window…")
|
||||
else:
|
||||
if now >= stable_until:
|
||||
print("→ SUCCESS: Valves remained OPEN during stability window.")
|
||||
return True
|
||||
else:
|
||||
if stable_until is not None:
|
||||
print("[WaterCooling] Valve closed again → restarting stability window.")
|
||||
stable_until = None
|
||||
|
||||
time.sleep(POLL_PERIOD)
|
||||
|
||||
def show_all(self):
|
||||
red = "\x1b[91m"
|
||||
green = "\x1b[92m"
|
||||
white = "\x1b[0m"
|
||||
bold = "\x1b[1m"
|
||||
cyan = "\x1b[96m"
|
||||
|
||||
# ---- New: enum maps for numeric -> string rendering ----
|
||||
POSITION_ENUM = {0: "out of beam", 1: "in beam"}
|
||||
STRIPE_ENUM = {0: "Stripe 1 W/B4C", 1: "Stripe 2 NiV/B4C"}
|
||||
POSITION_ATTRS = {self.dmm_monochromator.dmm_position.attr_name, self.ccm_monochromator.ccm_position.attr_name}
|
||||
STRIPE_ATTRS = {self.dmm_monochromator.dmm_stripe.attr_name}
|
||||
|
||||
def is_bool_like(v):
|
||||
return isinstance(v, (bool, int)) and v in (0, 1, True, False)
|
||||
|
||||
# ---- Changed: accept attr in formatter so we can apply enum mapping ----
|
||||
def fmt_value(value: any, signal: EpicsSignalRO):
|
||||
if value is None:
|
||||
return f"{red}MISSING{white}"
|
||||
|
||||
attr = signal.attr_name
|
||||
|
||||
# ---------- Explicit enum mappings by attribute ----------
|
||||
if attr in POSITION_ATTRS:
|
||||
# Position comes as numeric 0/1
|
||||
try:
|
||||
iv = int(value)
|
||||
return POSITION_ENUM.get(iv, f"{iv}")
|
||||
except Exception:
|
||||
# Fallback if it’s already a string or unexpected
|
||||
return f"{value}"
|
||||
|
||||
if attr in STRIPE_ATTRS:
|
||||
# Stripe comes as numeric 0/1
|
||||
try:
|
||||
iv = int(value)
|
||||
return STRIPE_ENUM.get(iv, f"{iv}")
|
||||
except Exception:
|
||||
return f"{value}"
|
||||
|
||||
# ------------------- TEMPERATURE -------------------
|
||||
if "temp" in signal._ophyd_labels_ and isinstance(value, (int, float)):
|
||||
return f"{value:.1f}"
|
||||
|
||||
# ------------------- ENERGY ------------------------
|
||||
if "energy" in signal._ophyd_labels_ and isinstance(value, (int, float)):
|
||||
return f"{value:.4f}"
|
||||
|
||||
# ------------------- STRINGS -----------------------
|
||||
if "string" in signal._ophyd_labels_ or "position" in signal._ophyd_labels_:
|
||||
# For other strings, just echo the value
|
||||
return f"{value}"
|
||||
|
||||
# ------------------- SWITCH (ACTIVE/INACTIVE) ------
|
||||
if "switch" in signal._ophyd_labels_ and is_bool_like(value):
|
||||
return f"{green+'ACTIVE'+white if value else red+'INACTIVE'+white}"
|
||||
|
||||
# ------------------- FAULT (OK/FAULT) --------------
|
||||
if "fault" in signal._ophyd_labels_ and is_bool_like(value):
|
||||
return f"{green+'OK'+white if not value else red+'FAULT'+white}"
|
||||
|
||||
# ------------------- VALVE/SHUTTER -----------------
|
||||
if ("valve" in signal._ophyd_labels_ or "shutter" in signal._ophyd_labels_) and is_bool_like(value):
|
||||
return f"{green+'OPEN'+white if value else red+'CLOSED'+white}"
|
||||
|
||||
# ------------------- FLOW (OK/FAIL) ----------------
|
||||
if "flow" in signal._ophyd_labels_ and is_bool_like(value):
|
||||
return f"{green}OK{white}" if bool(value) else f"{red}FAIL{white}"
|
||||
|
||||
# ------------------- FALLBACK -----------------------
|
||||
return f"{value}"
|
||||
|
||||
# ------------------- PRINT START ---------------------
|
||||
print(f"{bold}X12SA EPS status{white}")
|
||||
|
||||
for name, component in self._sig_attrs.items():
|
||||
sub_device = getattr(self, name)
|
||||
rows = []
|
||||
# Only print sub-devices, not individual request signals
|
||||
if not isinstance(sub_device, Device):
|
||||
continue
|
||||
print(f"\n{bold}{component.doc}{white}")
|
||||
for sub_walk in sub_device.walk_components():
|
||||
cpt: Cpt = sub_walk.item
|
||||
it: EpicsSignalRO = getattr(sub_device, cpt.attr)
|
||||
val = self.safe_get(it)
|
||||
rows.append((cpt.doc, val, it))
|
||||
|
||||
label_width = max(32, *(len(label) for (label, _, _) in rows))
|
||||
|
||||
for label, value, it in rows:
|
||||
fv = fmt_value(value, it) # <-- pass attr to formatter
|
||||
print(f" - {label:<{label_width}} {fv}")
|
||||
|
||||
if sub_device.attr_name == "cooling_water":
|
||||
v1 = self.safe_get(self.cooling_water.op_cs_ecvw_0010)
|
||||
v2 = self.safe_get(self.cooling_water.op_cs_ecvw_0020)
|
||||
|
||||
def closed(v):
|
||||
return is_bool_like(v) and not bool(v)
|
||||
|
||||
if closed(v1) and closed(v2):
|
||||
print(f"\n{cyan}Hint:{white} Both water cooling valves are CLOSED.\n" f"You can open them using: {bold}dev.x12saEPS.water_cooling_op(){white}")
|
||||
|
||||
# fmt: on
|
||||
# ----------------------------------------------------------
|
||||
# Consistency report
|
||||
# ----------------------------------------------------------
|
||||
# def consistency_report(self, *, verbose=True):
|
||||
# missing = []
|
||||
# dupes = []
|
||||
# seen = {}
|
||||
|
||||
# for sub_device in self.walk_components():
|
||||
# section = sub_device.name
|
||||
# for walk in sub_device.walk_components():
|
||||
# cpt: Cpt = walk.ancestors[-1]
|
||||
# it: EpicsSignalRO = walk.item
|
||||
# if not hasattr(self, it["attr"]):
|
||||
# missing.append((section, it["attr"], it["label"], it["pv"]))
|
||||
|
||||
# pv = it["pv"]
|
||||
# if pv in seen:
|
||||
# dupes.append((pv, seen[pv], (section, it["attr"], it["label"])))
|
||||
# else:
|
||||
# seen[pv] = (section, it["attr"], it["label"])
|
||||
|
||||
# if verbose:
|
||||
# print("=== Consistency Report ===")
|
||||
|
||||
# if missing:
|
||||
# print("\nMissing attributes:")
|
||||
# for sec, a, lbl, pv in missing:
|
||||
# print(f" - [{sec}] {a} {lbl} pv={pv}")
|
||||
# else:
|
||||
# print("\nNo missing attributes.")
|
||||
|
||||
# if dupes:
|
||||
# print("\nDuplicate PVs:")
|
||||
# for pv, f1, f2 in dupes:
|
||||
# print(f" {pv} → {f1} AND {f2}")
|
||||
# else:
|
||||
# print("\nNo duplicate PVs.")
|
||||
|
||||
# return {"missing_attrs": missing, "duplicate_pvs": dupes}
|
||||
67
csaxs_bec/devices/epics/fast_shutter.py
Normal file
67
csaxs_bec/devices/epics/fast_shutter.py
Normal file
@@ -0,0 +1,67 @@
|
||||
"""
|
||||
Shutter device for the cSAXS beamline with 2 PVs. One is connected to a
|
||||
signal can be set to control the shutter signal, and the other is a readback signal
|
||||
that can be monitored to check the shutter status as it may be controlled directly by
|
||||
the delay generator."""
|
||||
|
||||
from ophyd import Component as Cpt
|
||||
from ophyd import Device, EpicsSignal, EpicsSignalRO, Kind
|
||||
|
||||
|
||||
class cSAXSFastEpicsShutter(Device):
|
||||
"""
|
||||
Fast EPICS shutter with automatic PV selection based on host subnet. IOC prefix is 'X12SA-ES1-TTL:'
|
||||
"""
|
||||
|
||||
USER_ACCESS = ["fshopen", "fshclose", "fshstatus", "fshinfo", "fshstatus_readback", "help"]
|
||||
SUB_VALUE = "value"
|
||||
_default_sub = SUB_VALUE
|
||||
|
||||
# PVs
|
||||
shutter = Cpt(EpicsSignal, "OUT_01", kind=Kind.normal, auto_monitor=True)
|
||||
shutter_readback = Cpt(EpicsSignalRO, "INP_01", kind=Kind.normal, auto_monitor=True)
|
||||
|
||||
# -----------------------------------------------------
|
||||
# User-facing shutter control functions
|
||||
# -----------------------------------------------------
|
||||
|
||||
# pylint: disable=protetced-access
|
||||
def fshopen(self) -> None:
|
||||
"""Open the fast shutter."""
|
||||
self.shutter.set(1).wait(timeout=self.shutter._timeout) # 2s default for ES
|
||||
|
||||
# pylint: disable=protetced-access
|
||||
def fshclose(self) -> None:
|
||||
"""Close the fast shutter."""
|
||||
self.shutter.set(0).wait(timeout=self.shutter._timeout) # 2s default for ES
|
||||
|
||||
def fshstatus(self) -> int:
|
||||
"""Return the fast shutter control status (0=closed, 1=open)."""
|
||||
return self.shutter.get() # Ensure we have the latest value from EPICS
|
||||
|
||||
def fshstatus_readback(self) -> int:
|
||||
"""Return the fast shutter status (0=closed, 1=open)."""
|
||||
return self.shutter_readback.get() # Ensure we have the latest value from EPICS
|
||||
|
||||
def fshinfo(self) -> None:
|
||||
"""Print information about which EPICS PV channel is being used."""
|
||||
pvname = self.shutter.pvname
|
||||
shutter_readback_pvname = self.shutter_readback.pvname
|
||||
print(
|
||||
f"Fast shutter connected to EPICS channel: {pvname} with shutter readback: {shutter_readback_pvname}"
|
||||
)
|
||||
|
||||
def stop(self, *, success: bool = False) -> None:
|
||||
"""Stop the shutter device. Make sure to close it."""
|
||||
self.shutter.put(0)
|
||||
super().stop(success=success)
|
||||
|
||||
def help(self):
|
||||
"""Display available user methods."""
|
||||
print("Available methods:")
|
||||
for method in self.USER_ACCESS:
|
||||
print(f" - {method}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
fsh = cSAXSFastEpicsShutter(name="fsh", prefix="X12SA-ES1-TTL:")
|
||||
@@ -22,7 +22,13 @@ import numpy as np
|
||||
from bec_lib.logger import bec_logger
|
||||
from ophyd import Component as Cpt
|
||||
from ophyd import EpicsSignalRO, Kind
|
||||
from ophyd_devices import AsyncMultiSignal, CompareStatus, ProgressSignal, StatusBase
|
||||
from ophyd_devices import (
|
||||
AsyncMultiSignal,
|
||||
CompareStatus,
|
||||
ProgressSignal,
|
||||
StatusBase,
|
||||
TransitionStatus,
|
||||
)
|
||||
from ophyd_devices.interfaces.base_classes.psi_device_base import PSIDeviceBase
|
||||
|
||||
from csaxs_bec.devices.epics.mcs_card.mcs_card import (
|
||||
@@ -255,6 +261,7 @@ class MCSCardCSAXS(PSIDeviceBase, MCSCard):
|
||||
**kwargs: Additional keyword arguments from the subscription, including 'obj' (the EpicsSignalRO instance).
|
||||
"""
|
||||
with self._rlock:
|
||||
logger.info(f"Received update on mcs card {self.name}")
|
||||
if self._omit_mca_callbacks.is_set():
|
||||
return # Suppress callbacks when erasing all channels
|
||||
self._mca_counter_index += 1
|
||||
@@ -286,7 +293,7 @@ class MCSCardCSAXS(PSIDeviceBase, MCSCard):
|
||||
)
|
||||
|
||||
# Once we have received all channels, push data to BEC and reset for next accumulation
|
||||
logger.debug(
|
||||
logger.info(
|
||||
f"Received update for {attr_name}, index {self._mca_counter_index}/{self.NUM_MCA_CHANNELS}"
|
||||
)
|
||||
if len(self._current_data) == self.NUM_MCA_CHANNELS:
|
||||
@@ -310,10 +317,14 @@ class MCSCardCSAXS(PSIDeviceBase, MCSCard):
|
||||
old_value: Previous value of the signal.
|
||||
value: New value of the signal.
|
||||
"""
|
||||
scan_done = bool(value == self._num_total_triggers)
|
||||
self.progress.put(value=value, max_value=self._num_total_triggers, done=scan_done)
|
||||
if scan_done:
|
||||
self._scan_done_event.set()
|
||||
try:
|
||||
scan_done = bool(value == self._num_total_triggers)
|
||||
self.progress.put(value=value, max_value=self._num_total_triggers, done=scan_done)
|
||||
if scan_done:
|
||||
self._scan_done_event.set()
|
||||
except Exception:
|
||||
content = traceback.format_exc()
|
||||
logger.info(f"Device {self.name} error: {content}")
|
||||
|
||||
def on_stage(self) -> None:
|
||||
"""
|
||||
@@ -363,7 +374,10 @@ class MCSCardCSAXS(PSIDeviceBase, MCSCard):
|
||||
self._num_total_triggers = triggers * num_points
|
||||
self._acquisition_group = "monitored" if triggers == 1 else "burst_group"
|
||||
self.preset_real.set(0).wait(timeout=self._pv_timeout)
|
||||
self.num_use_all.set(triggers).wait(timeout=self._pv_timeout)
|
||||
if self.scan_info.msg.scan_type == "step":
|
||||
self.num_use_all.set(triggers).wait(timeout=self._pv_timeout)
|
||||
elif self.scan_info.msg.scan_type == "fly":
|
||||
self.num_use_all.set(self._num_total_triggers).wait(timeout=self._pv_timeout)
|
||||
|
||||
# Clear any previous data, just to be sure
|
||||
with self._rlock:
|
||||
@@ -385,6 +399,21 @@ class MCSCardCSAXS(PSIDeviceBase, MCSCard):
|
||||
self._omit_mca_callbacks.clear()
|
||||
|
||||
logger.info(f"MCS Card {self.name} on_stage completed in {time.time() - start_time:.3f}s.")
|
||||
# For a fly scan we need to start the mcs card ourselves
|
||||
if self.scan_info.msg.scan_type == "fly":
|
||||
self.erase_start.put(1)
|
||||
|
||||
def on_prescan(self) -> None | StatusBase:
|
||||
"""
|
||||
This method is called after on_stage and before the scan starts. For the MCS card, we need to make sure
|
||||
that the card is properly started for fly scans. For step scans, this will be handled by the DDG,
|
||||
so no action is required here.
|
||||
"""
|
||||
if self.scan_info.msg.scan_type == "fly":
|
||||
status_acquiring = CompareStatus(self.acquiring, ACQUIRING.ACQUIRING)
|
||||
self.cancel_on_stop(status_acquiring)
|
||||
return status_acquiring
|
||||
return None
|
||||
|
||||
def on_unstage(self) -> None:
|
||||
"""
|
||||
@@ -422,9 +451,16 @@ class MCSCardCSAXS(PSIDeviceBase, MCSCard):
|
||||
hasattr(self.scan_info.msg, "num_points")
|
||||
and self.scan_info.msg.num_points is not None
|
||||
):
|
||||
if self._current_data_index == self.scan_info.msg.num_points:
|
||||
for callback in self._scan_done_callbacks:
|
||||
callback(exception=None)
|
||||
if self.scan_info.msg.scan_type == "step":
|
||||
if self._current_data_index == self.scan_info.msg.num_points:
|
||||
for callback in self._scan_done_callbacks:
|
||||
callback(exception=None)
|
||||
else:
|
||||
logger.info(f"Current data index is {self._current_data_index}")
|
||||
if self._current_data_index >= 1:
|
||||
for callback in self._scan_done_callbacks:
|
||||
callback(exception=None)
|
||||
|
||||
time.sleep(0.02) # 20ms delay to avoid busy loop
|
||||
except Exception as exc: # pylint: disable=broad-except
|
||||
content = traceback.format_exc()
|
||||
@@ -452,7 +488,6 @@ class MCSCardCSAXS(PSIDeviceBase, MCSCard):
|
||||
"""Callback for status failure, the monitoring thread should be stopped."""
|
||||
# NOTE Check for status.done and status.success is important to avoid
|
||||
if status.done:
|
||||
|
||||
self._start_monitor_async_data_emission.clear() # Stop monitoring
|
||||
|
||||
def on_complete(self) -> CompareStatus:
|
||||
@@ -478,6 +513,13 @@ class MCSCardCSAXS(PSIDeviceBase, MCSCard):
|
||||
monitoring thread is stopped properly.
|
||||
|
||||
"""
|
||||
# NOTE For fly scans with EXT/EN enabled triggering, the MCS card needs to receive an
|
||||
# additional trigger at the end of the scan to advance the channel. This will ensure
|
||||
# that the acquisition finishes on the card and that data is emitted to BEC. If the acquisition
|
||||
# was already finished (i.e. normal step scan sends 1 extra pulse per burst cycle), this will
|
||||
# not have any effect as the card will already be in DONE state and signal.
|
||||
self.software_channel_advance.put(1)
|
||||
|
||||
# Prepare and register status callback for the async monitoring loop
|
||||
status_async_data = StatusBase(obj=self)
|
||||
self._scan_done_callbacks.append(partial(self._status_callback, status_async_data))
|
||||
@@ -491,7 +533,7 @@ class MCSCardCSAXS(PSIDeviceBase, MCSCard):
|
||||
|
||||
# Combine both statuses
|
||||
ret_status = status & status_async_data
|
||||
# Handle external stop/cancel, and stop monitoring
|
||||
# NOTE: Handle external stop/cancel, and stop monitoring
|
||||
ret_status.add_callback(self._status_failed_callback)
|
||||
self.cancel_on_stop(ret_status)
|
||||
return ret_status
|
||||
|
||||
48
csaxs_bec/devices/jungfraujoch/README.MD
Normal file
48
csaxs_bec/devices/jungfraujoch/README.MD
Normal file
@@ -0,0 +1,48 @@
|
||||
# Overview
|
||||
Integration module for Eiger detectors at the cSAXS beamline with JungfrauJoch backend.
|
||||
There are currently two supported Eiger detectors:
|
||||
- EIGER 1.5M
|
||||
- EIGER 9M
|
||||
|
||||
This module provides a base integration for both detectors. A short list of useful
|
||||
information is also provided below.
|
||||
|
||||
## JungfrauJoch Service
|
||||
The JungfrauJoch WEB UI is available on http://sls-jfjoch-001:8080. This is an interface
|
||||
to the broker which runs on sls-jfjoch-001.psi.ch. The writer service runs on
|
||||
xbl-daq-34.psi.ch. Permissions to get access to these machines and run systemctl or
|
||||
journalctl commands can be requested with the Infrastructure and Services group in AWI.
|
||||
Beamline scientists need to check if they have the necessary permissions to connect
|
||||
to these machines and run the commands below.
|
||||
|
||||
Useful commands for the broker service on sls-jfjoch-001.psi.ch:
|
||||
- sudo systemctl status jfjoch_broker # Check status
|
||||
- sudo systemctl start jfjoch_broker # Start service
|
||||
- sudo systemctl stop jfjoch_broker # Stop service
|
||||
- sudo systemctl restart jfjoch_broker # Restart service
|
||||
|
||||
For the writer service on xbl-daq-34.psi.ch:
|
||||
- sudo journalctl -u jfjoch_writer -f # streams live logs
|
||||
- sudo systemctl status jfjoch_writer # Check status
|
||||
- sudo systemctl start jfjoch_writer # Start service
|
||||
- sudo systemctl stop jfjoch_writer # Stop service
|
||||
- sudo systemctl restart jfjoch_writer # Restart service
|
||||
|
||||
More information about the JungfrauJoch and API client can be found at: (https://jungfraujoch.readthedocs.io/en/latest/index.html)
|
||||
|
||||
### JungfrauJoch API Client
|
||||
A thin wrapper for the JungfrauJoch API client is provided in the [jungfrau_joch_client](./jungfrau_joch_client.py).
|
||||
Details about the specific integration are provided in the code.
|
||||
|
||||
|
||||
## Eiger implementation
|
||||
The Eiger detector integration is provided in the [eiger.py](./eiger.py) module. It provides a base integration for both Eiger 1.5M and Eiger 9M detectors.
|
||||
Logic specific to each detector is implemented in the respective modules:
|
||||
- [eiger_1_5m.py](./eiger_1_5m.py)
|
||||
- [eiger_9m.py](./eiger_9m.py)
|
||||
|
||||
With the current implementation, the detector initialization should be done by a beamline scientist through the JungfrauJoch WEB UI by choosing the
|
||||
appropriate detector (1.5M or 9M) before loading the device config with BEC. BEC will check upon connecting if the selected detector matches the expected one.
|
||||
A preview stream for images is also provided which is forwarded and accessible through the `preview_image` signal.
|
||||
|
||||
For more specific details, please check the code documentation.
|
||||
@@ -1,34 +1,23 @@
|
||||
"""
|
||||
Generic integration of JungfrauJoch backend with Eiger detectors
|
||||
for the cSAXS beamline at the Swiss Light Source.
|
||||
|
||||
The WEB UI is available on http://sls-jfjoch-001:8080
|
||||
Integration module for Eiger detectors at the cSAXS beamline with JungfrauJoch backend.
|
||||
|
||||
NOTE: this may not be the best place to store this information. It should be migrated to
|
||||
beamline documentation for debugging of Eiger & JungfrauJoch.
|
||||
A few notes on setup and operation of the Eiger detectors through the JungfrauJoch broker:
|
||||
|
||||
The JungfrauJoch server for cSAXS runs on sls-jfjoch-001.psi.ch
|
||||
User with sufficient rights may use:
|
||||
- sudo systemctl restart jfjoch_broker
|
||||
- sudo systemctl status jfjoch_broker
|
||||
to check and/or restart the broker for the JungfrauJoch server.
|
||||
|
||||
Some extra notes for setting up the detector:
|
||||
- If the energy on JFJ is set via DetectorSettings, the variable in DatasetSettings will be ignored
|
||||
- Changes in energy may take time, good to implement logic that only resets energy if needed.
|
||||
- For the Eiger, the frame_time_us in DetectorSettings is ignored, only the frame_time_us in
|
||||
the DatasetSettings is relevant
|
||||
- The bit_depth will be adjusted automatically based on the exp_time. Here, we need to ensure
|
||||
that subsequent triggers properly
|
||||
consider the readout_time of the boards. For Jungfrau detectors, the difference between
|
||||
count_time_us and frame_time_us is the readout_time of the boards. For the Eiger, this needs
|
||||
to be taken into account during the integration.
|
||||
that subsequent triggers properly consider the readout_time of the boards. For the Eiger detectors
|
||||
at cSAXS, a readout time of 20us is configured through the JungfrauJoch deployment config. This
|
||||
setting is sufficiently large for the detectors if they run in parallel mode.
|
||||
- beam_center and detector settings are required input arguments, thus, they may be set to wrong
|
||||
values for acquisitions to start. Please keep this in mind.
|
||||
|
||||
Hardware related notes:
|
||||
- If there is an HW issue with the detector, power cycling may help.
|
||||
- The sls_detector package is available on console on /sls/X12SA/data/gac-x12sa/erik/micromamba
|
||||
- The sls_detector package is available on console on /sls/x12sa/applications/erik/micromamba
|
||||
- Run: source setup_9m.sh # Be careful, this connects to the detector, so it should not be
|
||||
used during operation
|
||||
- Useful commands:
|
||||
@@ -39,9 +28,6 @@ Hardware related notes:
|
||||
- cd power_control_user/
|
||||
- ./on
|
||||
- ./off
|
||||
|
||||
Further information that may be relevant for debugging:
|
||||
JungfrauJoch - one needs to connect to the jfj-server (sls-jfjoch-001)
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
@@ -84,10 +70,19 @@ class EigerError(Exception):
|
||||
|
||||
class Eiger(PSIDeviceBase):
|
||||
"""
|
||||
Base integration of the Eiger1.5M and Eiger9M at cSAXS. All relevant
|
||||
Base integration of the Eiger1.5M and Eiger9M at cSAXS.
|
||||
|
||||
Args:
|
||||
name (str) : Name of the device
|
||||
detector_name (str): Name of the detector. Supports ["EIGER 9M", "EIGER 8.5M (tmp)", "EIGER 1.5M"]
|
||||
host (str): Hostname of the Jungfrau Joch server.
|
||||
port (int): Port of the Jungfrau Joch server.
|
||||
scan_info (ScanInfo): The scan info to use.
|
||||
device_manager (DeviceManagerDS): The device manager to use.
|
||||
**kwargs: Additional keyword arguments.
|
||||
"""
|
||||
|
||||
USER_ACCESS = ["detector_distance", "beam_center"]
|
||||
USER_ACCESS = ["set_detector_distance", "set_beam_center"]
|
||||
|
||||
file_event = Cpt(FileEventSignal, name="file_event")
|
||||
preview_image = Cpt(PreviewSignal, name="preview_image", ndim=2)
|
||||
@@ -105,23 +100,12 @@ class Eiger(PSIDeviceBase):
|
||||
device_manager=None,
|
||||
**kwargs,
|
||||
):
|
||||
"""
|
||||
Initialize the PSI Device Base class.
|
||||
|
||||
Args:
|
||||
name (str) : Name of the device
|
||||
detector_name (str): Name of the detector. Supports ["EIGER 9M", "EIGER 8.5M (tmp)", "EIGER 1.5M"]
|
||||
host (str): Hostname of the Jungfrau Joch server.
|
||||
port (int): Port of the Jungfrau Joch server.
|
||||
scan_info (ScanInfo): The scan info to use.
|
||||
device_manager (DeviceManagerDS): The device manager to use.
|
||||
**kwargs: Additional keyword arguments.
|
||||
"""
|
||||
super().__init__(name=name, scan_info=scan_info, device_manager=device_manager, **kwargs)
|
||||
self._host = f"{host}:{port}"
|
||||
self.jfj_client = JungfrauJochClient(host=self._host, parent=self)
|
||||
# NOTE: fetch this information from JungfrauJochClient during on_connected!
|
||||
self.jfj_preview_client = JungfrauJochPreview(
|
||||
url="tcp://129.129.95.114:5400", cb=self.preview_image.put
|
||||
url="tcp://129.129.95.114:5400", cb=self._preview_callback
|
||||
) # IP of sls-jfjoch-001.psi.ch on port 5400 for ZMQ stream
|
||||
self.device_manager = device_manager
|
||||
self.detector_name = detector_name
|
||||
@@ -129,53 +113,102 @@ class Eiger(PSIDeviceBase):
|
||||
self._beam_center = beam_center
|
||||
self._readout_time = readout_time
|
||||
self._full_path = ""
|
||||
self._num_triggers = 0
|
||||
self._wait_for_on_complete = 20 # seconds
|
||||
if self.device_manager is not None:
|
||||
self.device_manager: DeviceManagerDS
|
||||
|
||||
def _preview_callback(self, message: dict) -> None:
|
||||
"""
|
||||
Callback method for handling preview messages as received from the JungfrauJoch preview stream.
|
||||
These messages are dictionary dumps as described in the JFJ ZMQ preview stream documentation.
|
||||
(https://jungfraujoch.readthedocs.io/en/latest/ZEROMQ_STREAM.html#preview-stream).
|
||||
|
||||
Args:
|
||||
message (dict): The message received from the preview stream.
|
||||
"""
|
||||
if message.get("type", "") == "image":
|
||||
data = message.get("data", {}).get("default", None)
|
||||
if data is None:
|
||||
logger.error(f"Received image message on device {self.name} without data.")
|
||||
return
|
||||
logger.info(f"Received preview image on device {self.name}")
|
||||
self.preview_image.put(data)
|
||||
|
||||
# pylint: disable=missing-function-docstring
|
||||
@property
|
||||
def detector_distance(self) -> float:
|
||||
"""The detector distance in mm."""
|
||||
return self._detector_distance
|
||||
|
||||
@detector_distance.setter
|
||||
def detector_distance(self, value: float) -> None:
|
||||
"""Set the detector distance in mm."""
|
||||
if value <= 0:
|
||||
raise ValueError("Detector distance must be a positive value.")
|
||||
self._detector_distance = value
|
||||
|
||||
def set_detector_distance(self, distance: float) -> None:
|
||||
"""
|
||||
Set the detector distance in mm.
|
||||
|
||||
Args:
|
||||
distance (float): The detector distance in mm.
|
||||
"""
|
||||
self.detector_distance = distance
|
||||
|
||||
# pylint: disable=missing-function-docstring
|
||||
@property
|
||||
def beam_center(self) -> tuple[float, float]:
|
||||
"""The beam center in pixels. (x,y)"""
|
||||
return self._beam_center
|
||||
|
||||
@beam_center.setter
|
||||
def beam_center(self, value: tuple[float, float]) -> None:
|
||||
"""Set the beam center in pixels. (x,y)"""
|
||||
if any(coord < 0 for coord in value):
|
||||
raise ValueError("Beam center coordinates must be non-negative.")
|
||||
self._beam_center = value
|
||||
|
||||
def on_init(self) -> None:
|
||||
def set_beam_center(self, x: float, y: float) -> None:
|
||||
"""
|
||||
Called when the device is initialized.
|
||||
Set the beam center coordinates in pixels.
|
||||
|
||||
No siganls are connected at this point,
|
||||
thus should not be set here but in on_connected instead.
|
||||
Args:
|
||||
x (float): The x coordinate of the beam center in pixels.
|
||||
y (float): The y coordinate of the beam center in pixels.
|
||||
"""
|
||||
self.beam_center = (x, y)
|
||||
|
||||
def on_init(self) -> None:
|
||||
"""Hook called during device initialization."""
|
||||
|
||||
# pylint: disable=arguments-differ
|
||||
def wait_for_connection(self, timeout: float = 10) -> None:
|
||||
"""
|
||||
Wait for the device to be connected to the JungfrauJoch backend.
|
||||
|
||||
Args:
|
||||
timeout (float): Timeout in seconds to wait for the connection.
|
||||
"""
|
||||
self.jfj_client.api.status_get(_request_timeout=timeout) # If connected, this responds
|
||||
|
||||
def on_connected(self) -> None:
|
||||
"""
|
||||
Hook called after the device is connected to through the device server.
|
||||
|
||||
Called after the device is connected and its signals are connected.
|
||||
Default values for signals should be set here.
|
||||
Default values for signals should be set here. Currently, the detector needs to be
|
||||
initialised manually through the WEB UI of JungfrauJoch. Once agreed upon, the automated
|
||||
initialisation can be re-enabled here (code commented below).
|
||||
"""
|
||||
start_time = time.time()
|
||||
logger.debug(f"On connected called for {self.name}")
|
||||
self.jfj_client.stop(request_timeout=3)
|
||||
# Check which detector is selected
|
||||
|
||||
# Get available detectors
|
||||
available_detectors = self.jfj_client.api.config_select_detector_get(_request_timeout=5)
|
||||
logger.debug(f"Available detectors {available_detectors}")
|
||||
# Get current detector
|
||||
current_detector_name = ""
|
||||
if available_detectors.current_id:
|
||||
if available_detectors.current_id is not None:
|
||||
detector_selection = [
|
||||
det.description
|
||||
for det in available_detectors.detectors
|
||||
@@ -190,8 +223,9 @@ class Eiger(PSIDeviceBase):
|
||||
raise RuntimeError(
|
||||
f"Detector {self.detector_name} is not in IDLE state, current state: {self.jfj_client.detector_state}. Please initialize the detector in the WEB UI: {self._host}."
|
||||
)
|
||||
# TODO - check again once Eiger should be initialized automatically, currently human initialization is expected
|
||||
# # Once the automation should be enabled, we may use here
|
||||
|
||||
# TODO - Currently the initialisation of the detector is done manually through the WEB UI. Once adjusted
|
||||
# this can be automated here again.
|
||||
# detector_selection = [
|
||||
# det for det in available_detectors.detectors if det.id == self.detector_name
|
||||
# ]
|
||||
@@ -207,41 +241,51 @@ class Eiger(PSIDeviceBase):
|
||||
|
||||
# Setup Detector settings, here we may also set the energy already as this might be time consuming
|
||||
settings = DetectorSettings(frame_time_us=int(500), timing=DetectorTiming.TRIGGER)
|
||||
self.jfj_client.set_detector_settings(settings, timeout=10)
|
||||
self.jfj_client.set_detector_settings(settings, timeout=5)
|
||||
|
||||
# Set the file writer to the appropriate output for the HDF5 file
|
||||
file_writer_settings = FileWriterSettings(overwrite=True, format=FileWriterFormat.NXMXVDS)
|
||||
logger.debug(
|
||||
f"Setting writer_settings: {yaml.dump(file_writer_settings.to_dict(), indent=4)}"
|
||||
)
|
||||
|
||||
# Setup the file writer settings
|
||||
self.jfj_client.api.config_file_writer_put(
|
||||
file_writer_settings=file_writer_settings, _request_timeout=10
|
||||
)
|
||||
|
||||
# Start the preview client
|
||||
self.jfj_preview_client.connect()
|
||||
self.jfj_preview_client.start()
|
||||
logger.info(f"Connected to JungfrauJoch preview stream at {self.jfj_preview_client.url}")
|
||||
logger.info(
|
||||
f"Device {self.name} initialized after {time.time()-start_time:.2f}s. Preview stream connected on url: {self.jfj_preview_client.url}"
|
||||
)
|
||||
|
||||
def on_stage(self) -> DeviceStatus | None:
|
||||
"""
|
||||
Called while staging the device.
|
||||
|
||||
Information about the upcoming scan can be accessed from the scan_info object.
|
||||
Hook called when staging the device. Information about the upcoming scan can be accessed from the scan_info object.
|
||||
scan_msg = self.scan_info.msg
|
||||
"""
|
||||
start_time = time.time()
|
||||
scan_msg = self.scan_info.msg
|
||||
# Set acquisition parameter
|
||||
# TODO add check of mono energy, this can then also be passed to DatasetSettings
|
||||
|
||||
# TODO: Check mono energy from device in BEC
|
||||
# Setting incident energy in keV
|
||||
incident_energy = 12.0
|
||||
# Setting up exp_time and num_triggers acquisition parameter
|
||||
exp_time = scan_msg.scan_parameters.get("exp_time", 0)
|
||||
if exp_time <= self._readout_time:
|
||||
if exp_time <= self._readout_time: # Exp_time must be at least the readout time
|
||||
raise ValueError(
|
||||
f"Receive scan request for scan {scan_msg.scan_name} with exp_time {exp_time}s, which must be larger than the readout time {self._readout_time}s of the detector {self.detector_name}."
|
||||
f"Value error on device {self.name}: Exposure time {exp_time}s is less than readout time {self._readout_time}s."
|
||||
)
|
||||
frame_time_us = exp_time #
|
||||
ntrigger = int(scan_msg.num_points * scan_msg.scan_parameters["frames_per_trigger"])
|
||||
# Fetch file path
|
||||
self._num_triggers = int(
|
||||
scan_msg.num_points * scan_msg.scan_parameters["frames_per_trigger"]
|
||||
)
|
||||
|
||||
# Setting up the full path for file writing
|
||||
self._full_path = get_full_path(scan_msg, name=f"{self.name}_master")
|
||||
self._full_path = os.path.abspath(os.path.expanduser(self._full_path))
|
||||
|
||||
# Inform BEC about upcoming file event
|
||||
self.file_event.put(
|
||||
file_path=self._full_path,
|
||||
@@ -249,11 +293,14 @@ class Eiger(PSIDeviceBase):
|
||||
successful=False,
|
||||
hinted_h5_entries={"data": "entry/data/data"},
|
||||
)
|
||||
|
||||
# JFJ adds _master.h5 automatically
|
||||
path = os.path.relpath(self._full_path, start="/sls/x12sa/data").removesuffix("_master.h5")
|
||||
|
||||
# Create dataset settings for API call.
|
||||
data_settings = DatasetSettings(
|
||||
image_time_us=int(frame_time_us * 1e6), # This is currently ignored
|
||||
ntrigger=ntrigger,
|
||||
image_time_us=int(exp_time * 1e6),
|
||||
ntrigger=self._num_triggers,
|
||||
file_prefix=path,
|
||||
beam_x_pxl=int(self._beam_center[0]),
|
||||
beam_y_pxl=int(self._beam_center[1]),
|
||||
@@ -261,11 +308,15 @@ class Eiger(PSIDeviceBase):
|
||||
incident_energy_ke_v=incident_energy,
|
||||
)
|
||||
logger.debug(f"Setting data_settings: {yaml.dump(data_settings.to_dict(), indent=4)}")
|
||||
prep_time = start_time - time.time()
|
||||
logger.debug(f"Prepared information for eiger to start acquisition in {prep_time:.2f}s")
|
||||
self.jfj_client.wait_for_idle(timeout=10, request_timeout=10) # Ensure we are in IDLE state
|
||||
prep_time = time.time()
|
||||
self.jfj_client.wait_for_idle(timeout=10) # Ensure we are in IDLE state
|
||||
self.jfj_client.start(settings=data_settings) # Takes around ~0.6s
|
||||
logger.debug(f"Wait for IDLE and start call took {time.time()-start_time-prep_time:.2f}s")
|
||||
|
||||
# Time the stage process
|
||||
logger.info(
|
||||
f"Device {self.name} staged for scan. Time spent {time.time()-start_time:.2f}s,"
|
||||
f" with {time.time()-prep_time:.2f}s spent with communication to JungfrauJoch."
|
||||
)
|
||||
|
||||
def on_unstage(self) -> DeviceStatus:
|
||||
"""Called while unstaging the device."""
|
||||
@@ -278,7 +329,9 @@ class Eiger(PSIDeviceBase):
|
||||
|
||||
def _file_event_callback(self, status: DeviceStatus) -> None:
|
||||
"""Callback to update the file_event signal when the acquisition is done."""
|
||||
logger.info(f"Acquisition done callback called for {self.name} for status {status.success}")
|
||||
logger.debug(
|
||||
f"File event callback on complete status for device {self.name}: done={status.done}, successful={status.success}"
|
||||
)
|
||||
self.file_event.put(
|
||||
file_path=self._full_path,
|
||||
done=status.done,
|
||||
@@ -287,19 +340,44 @@ class Eiger(PSIDeviceBase):
|
||||
)
|
||||
|
||||
def on_complete(self) -> DeviceStatus:
|
||||
"""Called to inquire if a device has completed a scans."""
|
||||
"""
|
||||
Called at the end of the scan. The method should implement an asynchronous wait for the
|
||||
device to complete the acquisition. A callback to update the file_event signal is
|
||||
attached that resolves the file event when the acquisition is done.
|
||||
|
||||
Returns:
|
||||
DeviceStatus: The status object representing the completion of the acquisition.
|
||||
"""
|
||||
|
||||
def wait_for_complete():
|
||||
start_time = time.time()
|
||||
timeout = 10
|
||||
for _ in range(timeout):
|
||||
if self.jfj_client.wait_for_idle(timeout=1, request_timeout=10):
|
||||
# NOTE: This adjust the time (s) that should be waited for completion of the scan.
|
||||
timeout = self._wait_for_on_complete
|
||||
while time.time() - start_time < timeout:
|
||||
if self.jfj_client.wait_for_idle(timeout=1, raise_on_timeout=False):
|
||||
# TODO: Once available, add check for
|
||||
statistics: MeasurementStatistics = (
|
||||
self.jfj_client.api.statistics_data_collection_get(_request_timeout=5)
|
||||
)
|
||||
if statistics.images_collected < self._num_triggers:
|
||||
raise EigerError(
|
||||
f"Device {self.name} acquisition incomplete. "
|
||||
f"Expected {self._num_triggers} triggers, "
|
||||
f"but only {statistics.images_collected} were collected."
|
||||
)
|
||||
return
|
||||
logger.info(
|
||||
f"Waiting for device {self.name} to finish complete, time elapsed: "
|
||||
f"{time.time() - start_time}."
|
||||
)
|
||||
statistics: MeasurementStatistics = self.jfj_client.api.statistics_data_collection_get(
|
||||
_request_timeout=5
|
||||
)
|
||||
broker_status = self.jfj_client.jfj_status
|
||||
raise TimeoutError(
|
||||
f"Timeout after waiting for detector {self.name} to complete for {time.time()-start_time:.2f}s, measurement statistics: {yaml.dump(statistics.to_dict(), indent=4)}"
|
||||
f"Timeout after waiting for device {self.name} to complete for {time.time()-start_time:.2f}s \n \n"
|
||||
f"Broker status: \n{yaml.dump(broker_status.to_dict(), indent=4)} \n \n"
|
||||
f"Measurement statistics: \n{yaml.dump(statistics.to_dict(), indent=4)}"
|
||||
)
|
||||
|
||||
status = self.task_handler.submit_task(wait_for_complete, run=True)
|
||||
@@ -312,7 +390,11 @@ class Eiger(PSIDeviceBase):
|
||||
|
||||
def on_stop(self) -> None:
|
||||
"""Called when the device is stopped."""
|
||||
self.jfj_client.stop(
|
||||
request_timeout=0.5
|
||||
) # Call should not block more than 0.5 seconds to stop all devices...
|
||||
self.jfj_client.stop(request_timeout=0.5)
|
||||
self.task_handler.shutdown()
|
||||
|
||||
def on_destroy(self):
|
||||
"""Called when the device is destroyed."""
|
||||
self.jfj_preview_client.stop()
|
||||
self.on_stop()
|
||||
return super().on_destroy()
|
||||
|
||||
@@ -21,18 +21,18 @@ if TYPE_CHECKING: # pragma no cover
|
||||
from bec_server.device_server.device_server import DeviceManagerDS
|
||||
|
||||
EIGER9M_READOUT_TIME_US = 500e-6 # 500 microseconds in s
|
||||
DETECTOR_NAME = "EIGER 8.5M (tmp)" # "EIGER 9M""
|
||||
DETECTOR_NAME = "EIGER 9M" # "EIGER 9M""
|
||||
|
||||
|
||||
# pylint:disable=invalid-name
|
||||
class Eiger9M(Eiger):
|
||||
"""
|
||||
Eiger 1.5M specific integration for the in-vaccum Eiger.
|
||||
EIGER 9M specific integration for the in-vaccum Eiger.
|
||||
|
||||
The logic implemented here is coupled to the DelayGenerator integration,
|
||||
repsonsible for the global triggering of all devices through a single Trigger logic.
|
||||
Please check the eiger.py class for more details about the integration of relevant backend
|
||||
services. The detector_name must be set to "EIGER 1.5M:
|
||||
services. The detector_name must be set to "EIGER 9M":
|
||||
"""
|
||||
|
||||
USER_ACCESS = Eiger.USER_ACCESS + [] # Add more user_access methods here.
|
||||
|
||||
@@ -1,13 +1,15 @@
|
||||
"""Module with client interface for the Jungfrau Joch detector API"""
|
||||
"""Module with a thin client wrapper around the Jungfrau Joch detector API"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import enum
|
||||
import threading
|
||||
import time
|
||||
import traceback
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import requests
|
||||
import yaml
|
||||
from bec_lib.logger import bec_logger
|
||||
from jfjoch_client.api.default_api import DefaultApi
|
||||
from jfjoch_client.api_client import ApiClient
|
||||
@@ -18,7 +20,7 @@ from jfjoch_client.models.detector_settings import DetectorSettings
|
||||
|
||||
logger = bec_logger.logger
|
||||
|
||||
if TYPE_CHECKING:
|
||||
if TYPE_CHECKING: # pragma: no cover
|
||||
from ophyd import Device
|
||||
|
||||
|
||||
@@ -29,7 +31,10 @@ class JungfrauJochClientError(Exception):
|
||||
|
||||
|
||||
class DetectorState(str, enum.Enum):
|
||||
"""Possible Detector states for Jungfrau Joch detector"""
|
||||
"""
|
||||
Enum states of the BrokerStatus state. The pydantic model validates in runtime,
|
||||
thus we keep the possible states here for a convenient overview and access.
|
||||
"""
|
||||
|
||||
INACTIVE = "Inactive"
|
||||
IDLE = "Idle"
|
||||
@@ -40,13 +45,15 @@ class DetectorState(str, enum.Enum):
|
||||
|
||||
|
||||
class JungfrauJochClient:
|
||||
"""Thin wrapper around the Jungfrau Joch API client.
|
||||
"""
|
||||
Jungfrau Joch API client wrapper. It provides a thin wrapper methods around the API client,
|
||||
that allow to connect, initialise, wait for state changes, set settings, start and stop
|
||||
acquisitions.
|
||||
|
||||
sudo systemctl restart jfjoch_broker
|
||||
sudo systemctl status jfjoch_broker
|
||||
|
||||
It looks as if the detector is not being stopped properly.
|
||||
One module remains running, how can we restart the detector?
|
||||
Args:
|
||||
host (str): Hostname of the Jungfrau Joch broker service.
|
||||
Default is "http://sls-jfjoch-001:8080"
|
||||
parent (Device, optional): Parent ophyd device, used for logging purposes.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
@@ -59,50 +66,63 @@ class JungfrauJochClient:
|
||||
self._parent_name = parent.name if parent else self.__class__.__name__
|
||||
|
||||
@property
|
||||
def jjf_state(self) -> BrokerStatus:
|
||||
"""Get the status of JungfrauJoch"""
|
||||
def jfj_status(self) -> BrokerStatus:
|
||||
"""Broker status of JungfrauJoch."""
|
||||
response = self.api.status_get()
|
||||
return BrokerStatus(**response.to_dict())
|
||||
|
||||
# pylint: disable=missing-function-docstring
|
||||
@property
|
||||
def initialised(self) -> bool:
|
||||
"""Check if jfj is connected and ready to receive commands"""
|
||||
return self._initialised
|
||||
|
||||
@initialised.setter
|
||||
def initialised(self, value: bool) -> None:
|
||||
"""Set the connected status"""
|
||||
self._initialised = value
|
||||
|
||||
# TODO this is not correct, as it may be that the state in INACTIVE. Models are not in sync...
|
||||
# REMOVE all model enums as most of the validation takes place in the Pydantic models, i.e. BrokerStatus here..
|
||||
# pylint: disable=missing-function-docstring
|
||||
@property
|
||||
def detector_state(self) -> DetectorState:
|
||||
"""Get the status of JungfrauJoch"""
|
||||
return DetectorState(self.jjf_state.state)
|
||||
return DetectorState(self.jfj_status.state)
|
||||
|
||||
def connect_and_initialise(self, timeout: int = 10, **kwargs) -> None:
|
||||
"""Check if JungfrauJoch is connected and ready to receive commands"""
|
||||
def connect_and_initialise(self, timeout: int = 10) -> None:
|
||||
"""
|
||||
Connect and initialise the JungfrauJoch detector. The detector must be in
|
||||
IDLE state to become initialised. This is a blocking call, the timeout parameter
|
||||
will be passed to the HTTP requests timeout method of the wait_for_idle method.
|
||||
|
||||
Args:
|
||||
timeout (int): Timeout in seconds for the initialisation and waiting for IDLE state.
|
||||
"""
|
||||
status = self.detector_state
|
||||
# TODO: #135 Check if the detector has to be in INACTIVE state before initialisation
|
||||
if status != DetectorState.IDLE:
|
||||
self.api.initialize_post() # This is a blocking call....
|
||||
self.wait_for_idle(timeout, request_timeout=timeout) # Blocking call
|
||||
self.api.initialize_post()
|
||||
self.wait_for_idle(timeout)
|
||||
self.initialised = True
|
||||
|
||||
def set_detector_settings(self, settings: dict | DetectorSettings, timeout: int = 10) -> None:
|
||||
"""Set the detector settings. JungfrauJoch must be in IDLE, Error or Inactive state.
|
||||
Note, the full settings have to be provided, otherwise the settings will be overwritten with default values.
|
||||
"""
|
||||
Set the detector settings. The state of JungfrauJoch must be in IDLE,
|
||||
Error or Inactive state. Please note: a full set of setttings has to be provided,
|
||||
otherwise the settings will be overwritten with default values.
|
||||
|
||||
Args:
|
||||
settings (dict): dictionary of settings
|
||||
timeout (int): Timeout in seconds for the HTTP request to set the settings.
|
||||
"""
|
||||
state = self.detector_state
|
||||
if state not in [DetectorState.IDLE, DetectorState.ERROR, DetectorState.INACTIVE]:
|
||||
logger.info(
|
||||
f"JungfrauJoch backend fo device {self._parent_name} is not in IDLE state,"
|
||||
" waiting 1s before retrying..."
|
||||
)
|
||||
time.sleep(1) # Give the detector 1s to become IDLE, retry
|
||||
state = self.detector_state
|
||||
if state not in [DetectorState.IDLE, DetectorState.ERROR, DetectorState.INACTIVE]:
|
||||
raise JungfrauJochClientError(
|
||||
f"Error in {self._parent_name}. Detector must be in IDLE, ERROR or INACTIVE state to set settings. Current state: {state}"
|
||||
f"Error on {self._parent_name}. Detector must be in IDLE, ERROR or INACTIVE"
|
||||
" state to set settings. Current state: {state}"
|
||||
)
|
||||
|
||||
if isinstance(settings, dict):
|
||||
@@ -110,28 +130,36 @@ class JungfrauJochClient:
|
||||
try:
|
||||
self.api.config_detector_put(detector_settings=settings, _request_timeout=timeout)
|
||||
except requests.exceptions.Timeout:
|
||||
raise TimeoutError(f"Timeout while setting detector settings for {self._parent_name}")
|
||||
raise TimeoutError(
|
||||
f"Timeout on device {self._parent_name} while setting detector settings:\n "
|
||||
f"{yaml.dump(settings, indent=4)}."
|
||||
)
|
||||
except Exception:
|
||||
content = traceback.format_exc()
|
||||
logger.error(
|
||||
f"Error on device {self._parent_name} while setting detector settings:\n "
|
||||
f"{yaml.dump(settings, indent=4)}. Error traceback: {content}"
|
||||
)
|
||||
raise JungfrauJochClientError(
|
||||
f"Error while setting detector settings for {self._parent_name}: {content}"
|
||||
f"Error on device {self._parent_name} while setting detector settings:\n "
|
||||
f"{yaml.dump(settings, indent=4)}. Full traceback: {content}."
|
||||
)
|
||||
|
||||
def start(self, settings: dict | DatasetSettings, request_timeout: float = 10) -> None:
|
||||
"""Start the mesaurement. DatasetSettings must be provided, and JungfrauJoch must be in IDLE state.
|
||||
The method call is blocking and JungfrauJoch will be ready to measure after the call resolves.
|
||||
"""
|
||||
Start the acquisition with the provided dataset settings.
|
||||
The detector must be in IDLE state. Settings must always provide a full set of
|
||||
parameters, missing parameters will be set to default values.
|
||||
|
||||
Args:
|
||||
settings (dict): dictionary of settings
|
||||
|
||||
Please check the DataSettings class for the available settings. Minimum required settings are
|
||||
beam_x_pxl, beam_y_pxl, detector_distance_mm, incident_energy_keV.
|
||||
|
||||
settings (dict | DatasetSettings): Dataset settings to start the acquisition with.
|
||||
request_timeout (float): Timeout in sec for the HTTP request to start the acquisition.
|
||||
"""
|
||||
state = self.detector_state
|
||||
if state != DetectorState.IDLE:
|
||||
raise JungfrauJochClientError(
|
||||
f"Error in {self._parent_name}. Detector must be in IDLE state to set settings. Current state: {state}"
|
||||
f"Error on device {self._parent_name}. "
|
||||
f"Detector must be in IDLE state to start acquisition. Current state: {state}"
|
||||
)
|
||||
|
||||
if isinstance(settings, dict):
|
||||
@@ -141,46 +169,80 @@ class JungfrauJochClient:
|
||||
dataset_settings=settings, _request_timeout=request_timeout
|
||||
)
|
||||
except requests.exceptions.Timeout:
|
||||
content = traceback.format_exc()
|
||||
logger.error(
|
||||
f"Timeout error after {request_timeout} seconds on device {self._parent_name} "
|
||||
f"during 'start' call with dataset settings: {yaml.dump(settings, indent=4)}. \n"
|
||||
f"Traceback: {content}"
|
||||
)
|
||||
raise TimeoutError(
|
||||
f"TimeoutError in JungfrauJochClient for parent device {self._parent_name} for 'start' call"
|
||||
f"Timeout error after {request_timeout} seconds on device {self._parent_name} "
|
||||
f"during 'start' call with dataset settings: {yaml.dump(settings, indent=4)}."
|
||||
)
|
||||
except Exception:
|
||||
content = traceback.format_exc()
|
||||
logger.error(
|
||||
f"Error on device {self._parent_name} during 'start' post with dataset settings: \n"
|
||||
f"{yaml.dump(settings, indent=4)}. \nTraceback: {content}"
|
||||
)
|
||||
raise JungfrauJochClientError(
|
||||
f"Error in JungfrauJochClient for parent device {self._parent_name} during 'start' call: {content}"
|
||||
f"Error on device {self._parent_name} during 'start' post with dataset settings: \n"
|
||||
f"{yaml.dump(settings, indent=4)}. \nTraceback: {content}."
|
||||
)
|
||||
|
||||
def stop(self, request_timeout: float = 0.5) -> None:
|
||||
"""Stop the acquisition, this only logs errors and is not raising."""
|
||||
try:
|
||||
self.api.cancel_post_with_http_info(_request_timeout=request_timeout)
|
||||
except requests.exceptions.Timeout:
|
||||
content = traceback.format_exc()
|
||||
logger.error(
|
||||
f"Timeout in JungFrauJochClient for device {self._parent_name} during stop: {content}"
|
||||
)
|
||||
except Exception:
|
||||
content = traceback.format_exc()
|
||||
logger.error(
|
||||
f"Error in JungFrauJochClient for device {self._parent_name} during stop: {content}"
|
||||
)
|
||||
|
||||
def wait_for_idle(self, timeout: int = 10, request_timeout: float | None = None) -> bool:
|
||||
"""Wait for JungfrauJoch to be in Idle state. Blocking call with timeout.
|
||||
def _stop_call(self):
|
||||
try:
|
||||
self.api.cancel_post_with_http_info() # (_request_timeout=request_timeout)
|
||||
except requests.exceptions.Timeout:
|
||||
content = traceback.format_exc()
|
||||
logger.error(
|
||||
f"Timeout error after {request_timeout} seconds on device {self._parent_name} "
|
||||
f"during stop: {content}"
|
||||
)
|
||||
except Exception:
|
||||
content = traceback.format_exc()
|
||||
logger.error(f"Error on device {self._parent_name} during stop: {content}")
|
||||
|
||||
thread = threading.Thread(
|
||||
target=_stop_call, daemon=True, args=(self,), name="stop_jungfraujoch_thread"
|
||||
)
|
||||
thread.start()
|
||||
|
||||
def wait_for_idle(self, timeout: int = 10, raise_on_timeout: bool = True) -> bool:
|
||||
"""
|
||||
Method to wait until the detector is in IDLE state. This is a blocking call with a
|
||||
timeout that can be specified. The additional parameter raise_on_timeout can be used to
|
||||
raise an exception on timeout instead of returning boolean True/False.
|
||||
|
||||
Args:
|
||||
timeout (int): timeout in seconds
|
||||
raise_on_timeout (bool): If True, raises an exception on timeout. Default is True.
|
||||
Returns:
|
||||
bool: True if the detector is in IDLE state, False if timeout occurred
|
||||
"""
|
||||
if request_timeout is None:
|
||||
request_timeout = timeout
|
||||
try:
|
||||
self.api.wait_till_done_post(timeout=timeout, _request_timeout=request_timeout)
|
||||
self.api.wait_till_done_post(timeout=timeout, _request_timeout=timeout)
|
||||
except requests.exceptions.Timeout:
|
||||
raise TimeoutError(f"HTTP request timeout in wait_for_idle for {self._parent_name}")
|
||||
except Exception:
|
||||
content = traceback.format_exc()
|
||||
logger.debug(f"Waiting for device {self._parent_name} to become IDLE: {content}")
|
||||
logger.info(
|
||||
f"Timeout after {timeout} seconds on device {self._parent_name} in wait_for_idle: {content}"
|
||||
)
|
||||
if raise_on_timeout:
|
||||
raise TimeoutError(
|
||||
f"Timeout after {timeout} seconds on device {self._parent_name} in wait_for_idle."
|
||||
)
|
||||
return False
|
||||
except Exception as exc:
|
||||
content = traceback.format_exc()
|
||||
logger.info(
|
||||
f"Error on device {self._parent_name} in wait_for_idle. Full traceback: {content}"
|
||||
)
|
||||
if raise_on_timeout:
|
||||
raise JungfrauJochClientError(
|
||||
f"Error on device {self._parent_name} in wait_for_idle: {content}"
|
||||
) from exc
|
||||
return False
|
||||
return True
|
||||
|
||||
@@ -1,22 +1,136 @@
|
||||
"""Module for the Eiger preview ZMQ stream."""
|
||||
"""
|
||||
Module for the JungfrauJoch preview ZMQ stream for the Eiger detector at cSAXS.
|
||||
The Preview client is implemented for the JungfrauJoch ZMQ PUB-SUB interface, and
|
||||
should be independent of the EIGER detector type.
|
||||
|
||||
The client connects to the ZMQ PUB-SUB preview stream and calls a user provided callback
|
||||
function with the decompressed messages received from the stream. The callback needs to be
|
||||
able to deal with the different message types sent by the JungfrauJoch server ("start",
|
||||
"image", "end") as described in the JungfrauJoch ZEROMQ preview stream documentation.
|
||||
(https://jungfraujoch.readthedocs.io/en/latest/ZEROMQ_STREAM.html#preview-stream).
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import threading
|
||||
import time
|
||||
from typing import Callable
|
||||
|
||||
import cbor2
|
||||
import numpy as np
|
||||
import zmq
|
||||
from bec_lib.logger import bec_logger
|
||||
from dectris.compression import decompress
|
||||
|
||||
logger = bec_logger.logger
|
||||
|
||||
ZMQ_TOPIC_FILTER = b""
|
||||
###############################
|
||||
###### CBOR TAG DECODERS ######
|
||||
###############################
|
||||
# Dectris specific CBOR tags and decoders for Jungfrau data
|
||||
# Reference:
|
||||
# https://github.com/dectris/documentation/blob/main/stream_v2/examples/client.py
|
||||
|
||||
|
||||
def decode_multi_dim_array(tag: cbor2.CBORTag, column_major: bool = False):
|
||||
"""Decode a multi-dimensional array from a CBOR tag."""
|
||||
dimensions, contents = tag.value
|
||||
if isinstance(contents, list):
|
||||
array = np.empty((len(contents),), dtype=object)
|
||||
array[:] = contents
|
||||
elif isinstance(contents, (np.ndarray, np.generic)):
|
||||
array = contents
|
||||
else:
|
||||
raise cbor2.CBORDecodeValueError("expected array or typed array")
|
||||
return array.reshape(dimensions, order="F" if column_major else "C")
|
||||
|
||||
|
||||
def decode_typed_array(tag: cbor2.CBORTag, dtype: str):
|
||||
"""Decode a typed array from a CBOR tag."""
|
||||
if not isinstance(tag.value, bytes):
|
||||
raise cbor2.CBORDecodeValueError("expected byte string in typed array")
|
||||
return np.frombuffer(tag.value, dtype=dtype)
|
||||
|
||||
|
||||
def decode_dectris_compression(tag: cbor2.CBORTag):
|
||||
"""Decode a Dectris compressed array from a CBOR tag."""
|
||||
algorithm, elem_size, encoded = tag.value
|
||||
return decompress(encoded, algorithm, elem_size=elem_size)
|
||||
|
||||
|
||||
#########################################
|
||||
#### Dectris CBOR TAG Extensions ########
|
||||
#########################################
|
||||
|
||||
# Mapping of various additional CBOR tags from Dectris to decoder functions
|
||||
tag_decoders = {
|
||||
40: lambda tag: decode_multi_dim_array(tag, column_major=False),
|
||||
64: lambda tag: decode_typed_array(tag, dtype="u1"),
|
||||
65: lambda tag: decode_typed_array(tag, dtype=">u2"),
|
||||
66: lambda tag: decode_typed_array(tag, dtype=">u4"),
|
||||
67: lambda tag: decode_typed_array(tag, dtype=">u8"),
|
||||
68: lambda tag: decode_typed_array(tag, dtype="u1"),
|
||||
69: lambda tag: decode_typed_array(tag, dtype="<u2"),
|
||||
70: lambda tag: decode_typed_array(tag, dtype="<u4"),
|
||||
71: lambda tag: decode_typed_array(tag, dtype="<u8"),
|
||||
72: lambda tag: decode_typed_array(tag, dtype="i1"),
|
||||
73: lambda tag: decode_typed_array(tag, dtype=">i2"),
|
||||
74: lambda tag: decode_typed_array(tag, dtype=">i4"),
|
||||
75: lambda tag: decode_typed_array(tag, dtype=">i8"),
|
||||
77: lambda tag: decode_typed_array(tag, dtype="<i2"),
|
||||
78: lambda tag: decode_typed_array(tag, dtype="<i4"),
|
||||
79: lambda tag: decode_typed_array(tag, dtype="<i8"),
|
||||
80: lambda tag: decode_typed_array(tag, dtype=">f2"),
|
||||
81: lambda tag: decode_typed_array(tag, dtype=">f4"),
|
||||
82: lambda tag: decode_typed_array(tag, dtype=">f8"),
|
||||
83: lambda tag: decode_typed_array(tag, dtype=">f16"),
|
||||
84: lambda tag: decode_typed_array(tag, dtype="<f2"),
|
||||
85: lambda tag: decode_typed_array(tag, dtype="<f4"),
|
||||
86: lambda tag: decode_typed_array(tag, dtype="<f8"),
|
||||
87: lambda tag: decode_typed_array(tag, dtype="<f16"),
|
||||
1040: lambda tag: decode_multi_dim_array(tag, column_major=True),
|
||||
56500: lambda tag: decode_dectris_compression(tag), # pylint: disable=unnecessary-lambda
|
||||
}
|
||||
|
||||
|
||||
def tag_hook(decoder, tag: int):
|
||||
"""
|
||||
Tag hook for the cbor2.loads method. Both arguments "decoder" and "tag" mus be present.
|
||||
We use the tag to choose the respective decoder from the tag_decoders registry if available.
|
||||
"""
|
||||
tag_decoder = tag_decoders.get(tag.tag)
|
||||
return tag_decoder(tag) if tag_decoder else tag
|
||||
|
||||
|
||||
######################
|
||||
#### ZMQ Settings ####
|
||||
######################
|
||||
|
||||
ZMQ_TOPIC_FILTER = b"" # Subscribe to all topics
|
||||
ZMQ_CONFLATE_SETTING = 1 # Keep only the most recent message
|
||||
ZMQ_RCVHWM_SETTING = 1 # Set high water mark to 1, this configures the max number of queue messages
|
||||
|
||||
|
||||
#################################
|
||||
#### Jungfrau Preview Client ####
|
||||
#################################
|
||||
|
||||
|
||||
class JungfrauJochPreview:
|
||||
"""
|
||||
Preview client for the JungfrauJoch ZMQ preview stream. The client is started with
|
||||
a URL to receive the data from the JungfrauJoch PUB-SUB preview interface, and a
|
||||
callback function that is called with messages received from the preview stream.
|
||||
The callback needs to be able to deal with the different message types sent
|
||||
by the JungfrauJoch server ("start", "image", "end") as described in the
|
||||
JungfrauJoch ZEROMQ preview stream documentation. Messages are dictionary dumps.
|
||||
(https://jungfraujoch.readthedocs.io/en/latest/ZEROMQ_STREAM.html#preview-stream).
|
||||
|
||||
Args:
|
||||
url (str): ZMQ PUB-SUB preview stream URL.
|
||||
cb (Callable): Callback function called with messages received from the stream.
|
||||
"""
|
||||
|
||||
USER_ACCESS = ["start", "stop"]
|
||||
|
||||
def __init__(self, url: str, cb: Callable):
|
||||
@@ -27,16 +141,18 @@ class JungfrauJochPreview:
|
||||
self._on_update_callback = cb
|
||||
|
||||
def connect(self):
|
||||
"""Connect to the JungfrauJoch PUB-SUB streaming interface
|
||||
|
||||
JungfrauJoch may reject connection for a few seconds when it restarts,
|
||||
so if it fails, wait a bit and try to connect again.
|
||||
"""
|
||||
Connect to the JungfrauJoch PUB-SUB streaming interface. If the connection is refused
|
||||
it will reattempt a second time after a one second delay.
|
||||
"""
|
||||
# pylint: disable=no-member
|
||||
|
||||
context = zmq.Context()
|
||||
self._socket = context.socket(zmq.SUB)
|
||||
self._socket.setsockopt(zmq.CONFLATE, ZMQ_CONFLATE_SETTING)
|
||||
self._socket.setsockopt(zmq.SUBSCRIBE, ZMQ_TOPIC_FILTER)
|
||||
self._socket.setsockopt(zmq.RCVHWM, ZMQ_RCVHWM_SETTING)
|
||||
|
||||
try:
|
||||
self._socket.connect(self.url)
|
||||
except ConnectionRefusedError:
|
||||
@@ -44,17 +160,26 @@ class JungfrauJochPreview:
|
||||
self._socket.connect(self.url)
|
||||
|
||||
def start(self):
|
||||
"""Start the ZMQ update loop in a background thread."""
|
||||
self._zmq_thread = threading.Thread(
|
||||
target=self._zmq_update_loop, daemon=True, name="JungfrauJoch_live_preview"
|
||||
)
|
||||
self._zmq_thread.start()
|
||||
|
||||
def stop(self):
|
||||
"""Stop the ZMQ update loop and wait for the thread to finish."""
|
||||
self._shutdown_event.set()
|
||||
if self._zmq_thread:
|
||||
self._zmq_thread.join()
|
||||
self._zmq_thread.join(timeout=1.0)
|
||||
|
||||
def _zmq_update_loop(self):
|
||||
def _zmq_update_loop(self, poll_interval: float = 0.2):
|
||||
"""
|
||||
ZMQ update loop running in a background thread. The polling is throttled by
|
||||
the poll_interval parameter.
|
||||
|
||||
Args:
|
||||
poll_interval (float): Time in seconds to wait between polling attempts.
|
||||
"""
|
||||
while not self._shutdown_event.is_set():
|
||||
if self._socket is None:
|
||||
self.connect()
|
||||
@@ -64,18 +189,21 @@ class JungfrauJochPreview:
|
||||
# Happens when ZMQ partially delivers the multipart message
|
||||
pass
|
||||
except zmq.error.Again:
|
||||
# Happens when receive queue is empty
|
||||
time.sleep(0.1)
|
||||
logger.debug(
|
||||
f"ZMQ Again exception, receive queue is empty for JFJ preview at {self.url}."
|
||||
)
|
||||
finally:
|
||||
# We throttle the polling to avoid heavy load on the device server
|
||||
time.sleep(poll_interval)
|
||||
|
||||
def _poll(self):
|
||||
"""
|
||||
Poll the ZMQ socket for new data. It will throttle the data update and
|
||||
only subscribe to the topic for a single update. This is not very nice
|
||||
but it seems like there is currently no option to set the update rate on
|
||||
the backend.
|
||||
Poll the ZMQ socket for new data. We are currently subscribing and unsubscribing
|
||||
for each poll loop to avoid receiving too many messages. Throttling of the update
|
||||
loop is handled in the _zmq_update_loop method.
|
||||
"""
|
||||
|
||||
if self._shutdown_event.wait(0.2):
|
||||
if self._shutdown_event.is_set():
|
||||
return
|
||||
|
||||
try:
|
||||
@@ -90,7 +218,19 @@ class JungfrauJochPreview:
|
||||
# Unsubscribe from the topic
|
||||
self._socket.setsockopt(zmq.UNSUBSCRIBE, ZMQ_TOPIC_FILTER)
|
||||
|
||||
def _parse_data(self, data):
|
||||
# TODO decode and parse the data
|
||||
# self._on_update_callback(data)
|
||||
pass
|
||||
def _parse_data(self, bytes_list: list[bytes]):
|
||||
"""
|
||||
Parse the received ZMQ data from the JungfrauJoch preview stream.
|
||||
We will call the _on_update_callback with the decompressed messages as a dictionary.
|
||||
|
||||
The callback needs to be able to deal with the different message types sent
|
||||
by the JungfrauJoch server ("start", "image", "end") as described in the
|
||||
JungfrauJoch ZEROMQ preview stream documentation. Messages are dictionary dumps.
|
||||
(https://jungfraujoch.readthedocs.io/en/latest/ZEROMQ_STREAM.html#preview-stream).
|
||||
|
||||
Args:
|
||||
bytes_list (list[bytes]): List of byte messages received from ZMQ recv_multipart.
|
||||
"""
|
||||
for byte_msg in bytes_list:
|
||||
msg = cbor2.loads(byte_msg, tag_hook=tag_hook)
|
||||
self._on_update_callback(msg)
|
||||
|
||||
@@ -4,6 +4,7 @@ This module contains the base class for Galil controllers as well as the signals
|
||||
|
||||
import functools
|
||||
import time
|
||||
from typing import Any
|
||||
|
||||
from bec_lib import bec_logger
|
||||
from ophyd.utils import ReadOnlyError
|
||||
@@ -347,7 +348,7 @@ class GalilSignalBase(SocketSignal):
|
||||
def __init__(self, signal_name, **kwargs):
|
||||
self.signal_name = signal_name
|
||||
super().__init__(**kwargs)
|
||||
self.controller = self.parent.controller
|
||||
self.controller = self.root.controller if hasattr(self.root, "controller") else None
|
||||
|
||||
|
||||
class GalilSignalRO(GalilSignalBase):
|
||||
|
||||
@@ -1,13 +1,47 @@
|
||||
"""
|
||||
Module for the Galil RIO (RIO-471xx) controller interface. The controller is a compact PLC
|
||||
with Ethernet. It has digital and analog I/O as well as counters and timers.
|
||||
|
||||
Link to the Galil RIO vendor page:
|
||||
https://www.galil.com/plcs/remote-io/rio-471xx
|
||||
|
||||
This module provides the GalilRIOController for communication with the RIO controller
|
||||
over TCP/IP. It also provides a device integration that interfaces to its
|
||||
8 analog channels, and 16 digital output channels. Some PLCs may have 24 digital output channels,
|
||||
which can be easily supported by changing the _NUM_DIGITAL_OUTPUT_CHANNELS variable.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING, Literal
|
||||
|
||||
from bec_lib.logger import bec_logger
|
||||
from ophyd import DynamicDeviceComponent as DDC
|
||||
from ophyd import Kind
|
||||
from ophyd.utils import ReadOnlyError
|
||||
from ophyd_devices import PSIDeviceBase
|
||||
from ophyd_devices.utils.controller import Controller, threadlocked
|
||||
from ophyd_devices.utils.socket import SocketSignal
|
||||
from ophyd_devices.utils.socket import SocketIO
|
||||
|
||||
from csaxs_bec.devices.omny.galil.galil_ophyd import GalilCommunicationError, retry_once
|
||||
from csaxs_bec.devices.omny.galil.galil_ophyd import (
|
||||
GalilCommunicationError,
|
||||
GalilSignalBase,
|
||||
retry_once,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING: # pragma: no cover
|
||||
from bec_lib.devicemanager import ScanInfo
|
||||
from bec_server.device_server.devices.devicemanager import DeviceManagerDS
|
||||
|
||||
logger = bec_logger.logger
|
||||
|
||||
|
||||
class GalilRIO(Controller):
|
||||
class GalilRIOController(Controller):
|
||||
"""Controller Class for Galil RIO controller communication."""
|
||||
|
||||
@threadlocked
|
||||
def socket_put(self, val: str) -> None:
|
||||
"""Socker put method."""
|
||||
self.sock.put(f"{val}\r".encode())
|
||||
|
||||
@retry_once
|
||||
@@ -28,8 +62,229 @@ class GalilRIO(Controller):
|
||||
)
|
||||
|
||||
|
||||
class GalilRIOSignalBase(SocketSignal):
|
||||
def __init__(self, signal_name, **kwargs):
|
||||
self.signal_name = signal_name
|
||||
super().__init__(**kwargs)
|
||||
self.rio_controller = self.parent.rio_controller
|
||||
class GalilRIOAnalogSignalRO(GalilSignalBase):
|
||||
"""
|
||||
Signal for reading analog input channels of the Galil RIO controller. This signal is read-only, so
|
||||
the set method raises a ReadOnlyError. The get method retrieves the values of all analog
|
||||
channels in a single socket command. The readback values of all channels are updated based
|
||||
on the response, and subscriptions are run for all channels. Readings are cached as implemented
|
||||
in the SocketSignal class, so that multiple reads of the same channel within an update cycle do
|
||||
not result in multiple socket calls.
|
||||
|
||||
Args:
|
||||
signal_name (str): The name of the signal, e.g. "ch0", "ch1", ..., "ch7"
|
||||
channel (int): The channel number corresponding to the signal, e.g. 0 for "ch0", 1 for "ch1", ...
|
||||
parent (GalilRIO): The parent device instance that this signal belongs to.
|
||||
"""
|
||||
|
||||
_NUM_ANALOG_CHANNELS = 8
|
||||
|
||||
def __init__(self, signal_name: str, channel: int, parent: GalilRIO, **kwargs):
|
||||
super().__init__(signal_name=signal_name, parent=parent, **kwargs)
|
||||
self._channel = channel
|
||||
self._metadata["connected"] = False
|
||||
self._metadata["write_access"] = False
|
||||
|
||||
def _socket_set(self, val):
|
||||
"""Read-only signal, so set method raises an error."""
|
||||
raise ReadOnlyError(f"Signal {self.name} is read-only.")
|
||||
|
||||
def _socket_get(self) -> float:
|
||||
"""Get command for the readback signal"""
|
||||
cmd = "MG@" + ", @".join([f"AN[{ii}]" for ii in range(self._NUM_ANALOG_CHANNELS)])
|
||||
ret = self.controller.socket_put_and_receive(cmd)
|
||||
values = [float(val) for val in ret.strip().split(" ")]
|
||||
# Run updates for all channels. This also updates the _readback and metadata timestamp
|
||||
# value of this channel.
|
||||
self._update_all_channels(values)
|
||||
return self._readback
|
||||
|
||||
# pylint: disable=protected-access
|
||||
def _update_all_channels(self, values: list[float]) -> None:
|
||||
"""
|
||||
Method to receive a list of readback values for channels 0 to 7. Updates for each channel idx
|
||||
are applied to the corresponding GalilRIOAnalogSignalRO signal with matching attr_name "ch{idx}".
|
||||
|
||||
We also update the _last_readback attribute of each of the signals, to avoid multiple socket calls,
|
||||
but rather use the cached value of the combined reading for all channels.
|
||||
|
||||
Args:
|
||||
values (list[float]): List of new readback values for all channels, where the
|
||||
index corresponds to the channel number (0-7).
|
||||
"""
|
||||
updates: dict[str, tuple[float, float]] = {} # attr_name -> (new_val, old_val)
|
||||
# Update all readbacks first
|
||||
for walk in self.parent.walk_signals():
|
||||
if isinstance(walk.item, GalilRIOAnalogSignalRO):
|
||||
idx = int(walk.item.attr_name[-1])
|
||||
if 0 <= idx < len(values):
|
||||
old_val = walk.item._readback
|
||||
new_val = values[idx]
|
||||
walk.item._metadata["timestamp"] = self._last_readback
|
||||
walk.item._last_readback = self._last_readback
|
||||
walk.item._readback = new_val
|
||||
if (
|
||||
idx != self._channel
|
||||
): # Only run subscriptions on other channels, not on itself
|
||||
# as this is handled by the SocketSignal and we want to avoid running multiple
|
||||
# subscriptions for the same channel update
|
||||
updates[walk.item.attr_name] = (new_val, old_val)
|
||||
else:
|
||||
logger.warning(
|
||||
f"Received {len(values)} values but found channel index {idx} in signal {walk.item.name}. Skipping update for this signal."
|
||||
)
|
||||
|
||||
# Run subscriptions after all readbacks have been updated
|
||||
# on all channels except the one that triggered the update
|
||||
for walk in self.parent.walk_signals():
|
||||
if walk.item.attr_name in updates:
|
||||
new_val, old_val = updates[walk.item.attr_name]
|
||||
walk.item._run_subs(
|
||||
sub_type=walk.item.SUB_VALUE,
|
||||
old_value=old_val,
|
||||
value=new_val,
|
||||
timestamp=self._last_readback,
|
||||
)
|
||||
|
||||
|
||||
class GalilRIODigitalOutSignal(GalilSignalBase):
|
||||
"""Signal for controlling digital outputs of the Galil RIO controller."""
|
||||
|
||||
_NUM_DIGITAL_OUTPUT_CHANNELS = 16
|
||||
|
||||
def __init__(self, signal_name: str, channel: int, parent: GalilRIO, **kwargs):
|
||||
super().__init__(signal_name, parent=parent, **kwargs)
|
||||
self._channel = channel
|
||||
self._metadata["connected"] = False
|
||||
|
||||
def _socket_get(self) -> float:
|
||||
"""Get command for the readback signal"""
|
||||
cmd = f"MG@OUT[{self._channel}]"
|
||||
ret = self.controller.socket_put_and_receive(cmd)
|
||||
self._readback = float(ret.strip())
|
||||
return self._readback
|
||||
|
||||
def _socket_set(self, val: Literal[0, 1]) -> None:
|
||||
"""Set command for the digital output signal. Value should be 0 or 1."""
|
||||
|
||||
if val not in (0, 1):
|
||||
raise ValueError("Digital output value must be 0 or 1.")
|
||||
cmd = f"SB{self._channel}" if val == 1 else f"CB{self._channel}"
|
||||
self.controller.socket_put_confirmed(cmd)
|
||||
|
||||
|
||||
def _create_analog_channels(num_channels: int) -> dict[str, tuple]:
|
||||
"""
|
||||
Helper method to create a dictionary of analog channel definitions for the DynamicDeviceComponent.
|
||||
|
||||
Args:
|
||||
num_channels (int): The number of analog channels to create.
|
||||
"""
|
||||
an_channels = {}
|
||||
for i in range(0, num_channels):
|
||||
an_channels[f"ch{i}"] = (
|
||||
GalilRIOAnalogSignalRO,
|
||||
f"ch{i}",
|
||||
{"kind": Kind.normal, "notify_bec": True, "channel": i, "doc": f"Analog channel {i}."},
|
||||
)
|
||||
return an_channels
|
||||
|
||||
|
||||
def _create_digital_output_channels(num_channels: int) -> dict[str, tuple]:
|
||||
"""
|
||||
Helper method to create a dictionary of digital output channel definitions for the DynamicDeviceComponent.
|
||||
|
||||
Args:
|
||||
num_channels (int): The number of digital output channels to create.
|
||||
"""
|
||||
di_out_channels = {}
|
||||
for i in range(0, num_channels):
|
||||
di_out_channels[f"ch{i}"] = (
|
||||
GalilRIODigitalOutSignal,
|
||||
f"ch{i}",
|
||||
{
|
||||
"kind": Kind.config,
|
||||
"notify_bec": True,
|
||||
"channel": i,
|
||||
"doc": f"Digital output channel {i}.",
|
||||
},
|
||||
)
|
||||
return di_out_channels
|
||||
|
||||
|
||||
class GalilRIO(PSIDeviceBase):
|
||||
"""
|
||||
Galil RIO controller integration with 16 digital output channels and 8 analog input channels.
|
||||
The default port for the controller is 23.
|
||||
|
||||
Args:
|
||||
host (str): Hostname or IP address of the Galil RIO controller.
|
||||
port (int, optional): Port number for the TCP/IP connection. Defaults to 23.
|
||||
socket_cls (type[SocketIO], optional): Socket class to use for communication. Defaults to SocketIO.
|
||||
scan_info (ScanInfo, optional): ScanInfo object for the device.
|
||||
device_manager (DeviceManagerDS): The device manager instance that manages this device.
|
||||
**kwargs: Additional keyword arguments passed to the PSIDeviceBase constructor.
|
||||
"""
|
||||
|
||||
SUB_CONNECTION_CHANGE = "connection_change"
|
||||
|
||||
#############################
|
||||
### Analog input channels ###
|
||||
#############################
|
||||
|
||||
analog_in = DDC(_create_analog_channels(GalilRIOAnalogSignalRO._NUM_ANALOG_CHANNELS))
|
||||
digital_out = DDC(
|
||||
_create_digital_output_channels(GalilRIODigitalOutSignal._NUM_DIGITAL_OUTPUT_CHANNELS)
|
||||
)
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
*,
|
||||
name: str,
|
||||
host: str,
|
||||
device_manager: DeviceManagerDS,
|
||||
port: int | None = None,
|
||||
socket_cls: type[SocketIO] = SocketIO,
|
||||
scan_info: ScanInfo | None = None,
|
||||
**kwargs,
|
||||
):
|
||||
if port is None:
|
||||
port = 23 # Default port for Galil RIO controller
|
||||
self.controller = GalilRIOController(
|
||||
name=f"GalilRIOController_{name}",
|
||||
socket_cls=socket_cls,
|
||||
socket_host=host,
|
||||
socket_port=port,
|
||||
device_manager=device_manager,
|
||||
)
|
||||
self._readback_metadata: dict[str, float] = {"last_readback": 0.0}
|
||||
super().__init__(name=name, device_manager=device_manager, scan_info=scan_info, **kwargs)
|
||||
self.controller.subscribe(
|
||||
self._update_connection_state, event_type=self.SUB_CONNECTION_CHANGE
|
||||
)
|
||||
|
||||
# pylint: disable=arguments-differ
|
||||
def wait_for_connection(self, timeout: float = 30.0) -> None:
|
||||
"""Wait for the RIO controller to be connected within timeout period."""
|
||||
self.controller.on(timeout=timeout)
|
||||
|
||||
def destroy(self) -> None:
|
||||
"""Make sure to turn off the controller socket on destroy."""
|
||||
self.controller.off(update_config=False)
|
||||
return super().destroy()
|
||||
|
||||
# pylint: disable=protected-access
|
||||
def _update_connection_state(self, **kwargs):
|
||||
for walk in self.walk_signals():
|
||||
walk.item._metadata["connected"] = self.controller.connected
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
HOST_NAME = "129.129.122.14"
|
||||
from bec_server.device_server.tests.utils import DMMock
|
||||
|
||||
dm = DMMock()
|
||||
rio = GalilRIO(name="rio", host=HOST_NAME, device_manager=dm)
|
||||
rio.wait_for_connection(timeout=10)
|
||||
print("Connected:", rio.an_ch1.read())
|
||||
print("All channels:", rio.read())
|
||||
|
||||
@@ -25,6 +25,34 @@ logger = bec_logger.logger
|
||||
|
||||
|
||||
class LamniGalilController(GalilController):
|
||||
|
||||
|
||||
# ============================================================
|
||||
# Error status
|
||||
# ============================================================
|
||||
|
||||
caperr_bits = {
|
||||
0x01: "Cap1 outside expected left-stop range (early check)",
|
||||
0x02: "Cap2 outside expected left-stop range (early check)",
|
||||
0x04: "Cap1 too low during pressure-off check (near right boundary)",
|
||||
0x08: "Cap2 too low during pressure-off check (near right boundary)",
|
||||
0x10: "Cap1 exceeded allowed left-stop boundary during movement",
|
||||
0x20: "Cap2 exceeded allowed left-stop boundary during movement (disabled in code)",
|
||||
0x40: "Cap1 did not respond to test movement",
|
||||
0x80: "Cap2 did not respond to test movement"
|
||||
}
|
||||
|
||||
allaxrer_table = {
|
||||
1: "Not all axes referenced after reference search",
|
||||
2: "Pressure-loss emergency stop (pressure 14/15 active while motor C off)",
|
||||
3: "Unexpected pressure OFF while soft-limits not yet set",
|
||||
4: "Pressure valve mismatch (OUT13=0 but IN13=1)",
|
||||
5: "Capacitive sensor boundary violations (caperr > 0)",
|
||||
6: "Emergency Stop triggered (IN[5]=0)",
|
||||
7: "Following error detected on one or more axes"
|
||||
}
|
||||
|
||||
|
||||
USER_ACCESS = [
|
||||
"describe",
|
||||
"show_running_threads",
|
||||
@@ -37,6 +65,8 @@ class LamniGalilController(GalilController):
|
||||
"get_motor_limit_switch",
|
||||
"is_motor_on",
|
||||
"all_axes_referenced",
|
||||
"lamni_lights_off",
|
||||
"lamni_lights_on"
|
||||
]
|
||||
|
||||
def show_status_other(self):
|
||||
@@ -60,6 +90,47 @@ class LamniGalilController(GalilController):
|
||||
print("There is air pressure at the outer rotation radial.")
|
||||
swver = float(self.socket_put_and_receive("MGswver"))
|
||||
print(f"Lgalil LAMNI firmware version {swver:2.0f}.")
|
||||
allaxref = int(float(self.socket_put_and_receive("MGallaxref")))
|
||||
print(f"Error statuts:")
|
||||
if allaxref == 1:
|
||||
print(f"Allaxref = 1, all OK.")
|
||||
else:
|
||||
print(f"Allaxref = {allaxref}. Not all axes are referenced or error introduced preventing motion.")
|
||||
allaxrer = int(float(self.socket_put_and_receive("MGallaxrer")))
|
||||
print("\nallaxrer =", allaxrer)
|
||||
print(self.decode_allaxrer(allaxrer))
|
||||
caperr = int(float(self.socket_put_and_receive("MGcaperr")))
|
||||
print("\nDecoding caperr =", caperr)
|
||||
self.visualize_caperr(caperr)
|
||||
|
||||
def decode_allaxrer(self, code: int) -> str:
|
||||
"""Return human-readable meaning of allaxrer code."""
|
||||
return self.allaxrer_table.get(code, "Unknown allaxrer code")
|
||||
|
||||
def visualize_caperr(self, mask: int):
|
||||
"""Pretty-print a bitmask visualization for caperr."""
|
||||
print("\n=== CAPERR BITMASK VISUALIZER ===")
|
||||
print(f"Raw value: {mask} (0x{mask:02X})")
|
||||
print("----------------------------------\n")
|
||||
|
||||
print("Bit | Hex | Active | Meaning")
|
||||
print("----------------------------------")
|
||||
|
||||
for bit, meaning in self.caperr_bits.items():
|
||||
active = "YES" if mask & bit else "no"
|
||||
print(f"{bit:3d} | 0x{bit:02X} | {active:6} | {meaning}")
|
||||
|
||||
print("\nActive flags:")
|
||||
active_flags = [meaning for bit, meaning in self.caperr_bits.items() if mask & bit]
|
||||
|
||||
if active_flags:
|
||||
for f in active_flags:
|
||||
print(" ✓", f)
|
||||
else:
|
||||
print(" (none)")
|
||||
|
||||
print("\n==================================\n")
|
||||
|
||||
|
||||
def lamni_lights_off(self):
|
||||
self.socket_put_confirmed("SB1")
|
||||
@@ -93,7 +164,7 @@ class LamniGalilReadbackSignal(GalilSignalRO):
|
||||
val = super().read()
|
||||
if self.parent.axis_Id_numeric == 2:
|
||||
try:
|
||||
rt = self.parent.device_manager.devices[self.parent.rtx]
|
||||
rt = self.parent.device_manager.devices[self.parent.rt]
|
||||
if rt.enabled:
|
||||
rt.obj.controller.set_rotation_angle(val[self.parent.name]["value"])
|
||||
except KeyError:
|
||||
@@ -147,7 +218,7 @@ class LamniGalilMotor(Device, PositionerBase):
|
||||
raise BECConfigError(
|
||||
"device_mapping has been specified but the device_manager cannot be accessed."
|
||||
)
|
||||
self.rt = self.device_mapping.get("rt")
|
||||
self.rt = self.device_mapping.get("rt", "rtx")
|
||||
|
||||
super().__init__(
|
||||
prefix,
|
||||
|
||||
@@ -498,6 +498,9 @@ class RtFlomniController(Controller):
|
||||
)
|
||||
# while scan is running
|
||||
while mode > 0:
|
||||
|
||||
#TODO here?: scan abortion if no progress in scan *raise error
|
||||
|
||||
# logger.info(f"Current scan position {current_position_in_scan} out of {number_of_positions_planned}")
|
||||
mode, number_of_positions_planned, current_position_in_scan = self.get_scan_status()
|
||||
time.sleep(0.01)
|
||||
@@ -629,6 +632,8 @@ class RtFlomniMotor(Device, PositionerBase):
|
||||
SUB_CONNECTION_CHANGE = "connection_change"
|
||||
_default_sub = SUB_READBACK
|
||||
|
||||
connectionTimeout = 20
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
axis_Id,
|
||||
|
||||
@@ -11,6 +11,7 @@ from ophyd.status import wait as status_wait
|
||||
from ophyd.utils import LimitError, ReadOnlyError
|
||||
from ophyd_devices.utils.controller import Controller, threadlocked
|
||||
from ophyd_devices.utils.socket import SocketIO, SocketSignal, raise_if_disconnected
|
||||
from prettytable import PrettyTable
|
||||
|
||||
from csaxs_bec.devices.omny.rt.rt_ophyd import RtCommunicationError, RtError
|
||||
|
||||
@@ -51,6 +52,7 @@ class RtLamniController(Controller):
|
||||
_axes_per_controller = 3
|
||||
USER_ACCESS = [
|
||||
"socket_put_and_receive",
|
||||
"socket_put",
|
||||
"set_rotation_angle",
|
||||
"feedback_disable",
|
||||
"feedback_enable_without_reset",
|
||||
@@ -62,6 +64,11 @@ class RtLamniController(Controller):
|
||||
"_set_axis_velocity_maximum_speed",
|
||||
"_position_sampling_single_read",
|
||||
"_position_sampling_single_reset_and_start_sampling",
|
||||
"show_signal_strength_interferometer",
|
||||
"show_interferometer_positions",
|
||||
"show_analog_signals",
|
||||
"show_feedback_status",
|
||||
|
||||
]
|
||||
|
||||
def __init__(
|
||||
@@ -208,8 +215,9 @@ class RtLamniController(Controller):
|
||||
|
||||
@threadlocked
|
||||
def start_scan(self):
|
||||
interferometer_feedback_not_running = int((self.socket_put_and_receive("J2")).split(",")[0])
|
||||
if interferometer_feedback_not_running == 1:
|
||||
# interferometer_feedback_not_running = int((self.socket_put_and_receive("J2")).split(",")[0])
|
||||
# if interferometer_feedback_not_running == 1:
|
||||
if not self.feedback_is_running():
|
||||
logger.error(
|
||||
"Cannot start scan because feedback loop is not running or there is an interferometer error."
|
||||
)
|
||||
@@ -270,6 +278,44 @@ class RtLamniController(Controller):
|
||||
"average_lamni_angle": {"value": self.average_lamni_angle / (int(return_table[0]) + 1)},
|
||||
}
|
||||
return signals
|
||||
|
||||
def feedback_is_running(self) -> bool:
|
||||
status = int(float((self.socket_put_and_receive("J2")).split(",")[0]))
|
||||
return status == 0 # 0 means running, 1 means error/disabled
|
||||
|
||||
def show_feedback_status(self):
|
||||
if self.feedback_is_running():
|
||||
print("Loop is running, no error on interferometer.")
|
||||
else:
|
||||
print("Loop is not running, either it is turned off or an interferometer error occurred.")
|
||||
|
||||
|
||||
def show_analog_signals(self) -> dict:
|
||||
self.socket_put("As") # start sampling
|
||||
time.sleep(0.01)
|
||||
return_table = (self.socket_put_and_receive("Ar")).split(",")
|
||||
|
||||
number_of_samples = int(float(return_table[0]))
|
||||
signals = {
|
||||
"number_of_samples": number_of_samples,
|
||||
"piezo_0": float(return_table[1]),
|
||||
"piezo_1": float(return_table[2]),
|
||||
"cap_0": float(return_table[3]),
|
||||
"cap_1": float(return_table[4]),
|
||||
"cap_2": float(return_table[5]),
|
||||
"cap_3": float(return_table[6]),
|
||||
"cap_4": float(return_table[7]),
|
||||
}
|
||||
|
||||
t = PrettyTable()
|
||||
t.title = f"LamNI Analog Signals ({number_of_samples} samples)"
|
||||
t.field_names = ["Signal", "Value"]
|
||||
for key, val in signals.items():
|
||||
if key != "number_of_samples":
|
||||
t.add_row([key, f"{val:.4f}"])
|
||||
print(t)
|
||||
|
||||
return
|
||||
|
||||
def read_positions_from_sampler(self):
|
||||
# this was for reading after the scan completed
|
||||
@@ -347,6 +393,48 @@ class RtLamniController(Controller):
|
||||
)
|
||||
return bool(return_table[0])
|
||||
|
||||
def show_signal_strength_interferometer(self):
|
||||
# trigger SSI averaging before reading
|
||||
self.socket_put("J3")
|
||||
time.sleep(0.05)
|
||||
return_table = (self.socket_put_and_receive("J2")).split(",")
|
||||
ssi_0 = float(return_table[1])
|
||||
ssi_1 = float(return_table[2])
|
||||
|
||||
return_table_angle = (self.socket_put_and_receive("J7")).split(",")
|
||||
angle_running = bool(int(float(return_table_angle[0])))
|
||||
angle_position = float(return_table_angle[1])
|
||||
angle_signal = float(return_table_angle[2])
|
||||
|
||||
t = PrettyTable()
|
||||
t.title = "Interferometer signal strength"
|
||||
t.field_names = ["Axis", "Description", "Value", "Running"]
|
||||
t.add_row([0, "ST FZP horizontal", ssi_0, "-"])
|
||||
t.add_row([1, "ST FZP vertical", ssi_1, "-"])
|
||||
t.add_row([2, "Angle interferometer", angle_signal, angle_running])
|
||||
print(t)
|
||||
|
||||
if angle_running:
|
||||
print(f"Angle interferometer position: {angle_position:.4f} um")
|
||||
else:
|
||||
print("Warning: angle interferometer is not running.")
|
||||
|
||||
def show_interferometer_positions(self) -> dict:
|
||||
return_table = (self.socket_put_and_receive("J4")).split(",")
|
||||
loop_status = bool(int(float(return_table[0])))
|
||||
pos_y = float(return_table[1])
|
||||
pos_x = float(return_table[2])
|
||||
|
||||
t = PrettyTable()
|
||||
t.title = "LamNI Interferometer Positions"
|
||||
t.field_names = ["Axis", "Description", "Position (um)"]
|
||||
t.add_row([0, "X", f"{pos_x:.4f}"])
|
||||
t.add_row([1, "Y", f"{pos_y:.4f}"])
|
||||
print(t)
|
||||
print(f"Feedback loop running: {loop_status}")
|
||||
|
||||
return {"x": pos_x, "y": pos_y, "loop_running": loop_status}
|
||||
|
||||
def feedback_enable_with_reset(self):
|
||||
if not self.feedback_status_angle_lamni():
|
||||
self.feedback_disable_and_even_reset_lamni_angle_interferometer()
|
||||
|
||||
@@ -1,82 +1,87 @@
|
||||
import time
|
||||
import socket
|
||||
"""
|
||||
Fast Shutter control for OMNY setup. If started with a config file in which the device_manager
|
||||
has a 'fsh' device (cSAXSFastEpicsShutter), this device will be used as the shutter.
|
||||
Otherwise, the device will create a dummy shutter device that will log warnings when shutter
|
||||
methods are called, but will not raise exceptions.
|
||||
"""
|
||||
|
||||
from bec_lib.logger import bec_logger
|
||||
from ophyd import Component as Cpt
|
||||
from ophyd import Device
|
||||
from ophyd import EpicsSignal
|
||||
from ophyd import Device, Signal
|
||||
from ophyd_devices import PSIDeviceBase
|
||||
|
||||
logger = bec_logger.logger
|
||||
|
||||
|
||||
class OMNYFastEpicsShutterError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
def _detect_host_pv():
|
||||
"""Detect host subnet and return appropriate PV name."""
|
||||
try:
|
||||
hostname = socket.gethostname()
|
||||
local_ip = socket.gethostbyname(hostname)
|
||||
if local_ip.startswith("129.129.122."):
|
||||
return "X12SA-ES1-TTL:OUT_01"
|
||||
else:
|
||||
return "XOMNYI-XEYE-DUMMYSHUTTER:0"
|
||||
except Exception as ex:
|
||||
print(f"Warning: could not detect IP subnet ({ex}), using dummy shutter.")
|
||||
return "XOMNYI-XEYE-DUMMYSHUTTER:0"
|
||||
|
||||
|
||||
class OMNYFastEpicsShutter(Device):
|
||||
class OMNYFastShutter(PSIDeviceBase, Device):
|
||||
"""
|
||||
Fast EPICS shutter with automatic PV selection based on host subnet.
|
||||
Fast Shutter control for OMNY setup. If started with at the beamline, it will expose
|
||||
the shutter control methods (fshopen, fshclose, fshstatus, fshinfo) from the
|
||||
cSAXSFastEpicsShutter device. The device is identified by the 'fsh' name in the device manager.
|
||||
If the 'fsh' device is not found in the device manager, this device will create a dummy shutter
|
||||
and log warnings when shutter methods are called, but will not raise exceptions.
|
||||
"""
|
||||
|
||||
USER_ACCESS = ["fshopen", "fshclose", "fshstatus", "fshinfo", "help"]
|
||||
USER_ACCESS = ["fshopen", "fshclose", "fshstatus", "fshinfo", "help", "fshstatus_readback"]
|
||||
SUB_VALUE = "value"
|
||||
_default_sub = SUB_VALUE
|
||||
|
||||
# PV is detected dynamically at import time
|
||||
shutter = Cpt(EpicsSignal, name="shutter", read_pv=_detect_host_pv(), auto_monitor=True)
|
||||
|
||||
def __init__(self, prefix="", *, name, **kwargs):
|
||||
super().__init__(prefix, name=name, **kwargs)
|
||||
self.shutter.subscribe(self._emit_value)
|
||||
|
||||
def _emit_value(self, **kwargs):
|
||||
timestamp = kwargs.pop("timestamp", time.time())
|
||||
self.wait_for_connection()
|
||||
self._run_subs(sub_type=self.SUB_VALUE, timestamp=timestamp, obj=self)
|
||||
shutter = Cpt(Signal, name="shutter")
|
||||
|
||||
# -----------------------------------------------------
|
||||
# User-facing shutter control functions
|
||||
# -----------------------------------------------------
|
||||
|
||||
# pylint: disable=invalid-name
|
||||
def _check_if_cSAXS_shutter_exists_in_config(self) -> bool:
|
||||
"""
|
||||
Check on the device manager if the shutter device exists.
|
||||
|
||||
Returns:
|
||||
bool: True if the 'fsh' device exists in the device manager, False otherwise
|
||||
"""
|
||||
if self.device_manager.devices.get("fsh", None) is None:
|
||||
logger.warning(f"Fast shutter device not found for {self.name}.")
|
||||
return False
|
||||
return True
|
||||
|
||||
def fshopen(self):
|
||||
"""Open the fast shutter."""
|
||||
try:
|
||||
self.shutter.put(1, wait=True)
|
||||
except Exception as ex:
|
||||
raise OMNYFastEpicsShutterError(f"Failed to open shutter: {ex}")
|
||||
if self._check_if_cSAXS_shutter_exists_in_config():
|
||||
return self.device_manager.devices["fsh"].fshopen()
|
||||
else:
|
||||
self.shutter.put(1)
|
||||
|
||||
def fshclose(self):
|
||||
"""Close the fast shutter."""
|
||||
try:
|
||||
self.shutter.put(0, wait=True)
|
||||
except Exception as ex:
|
||||
raise OMNYFastEpicsShutterError(f"Failed to close shutter: {ex}")
|
||||
if self._check_if_cSAXS_shutter_exists_in_config():
|
||||
return self.device_manager.devices["fsh"].fshclose()
|
||||
else:
|
||||
self.shutter.put(0)
|
||||
|
||||
def fshstatus(self):
|
||||
"""Return the fast shutter status (0=closed, 1=open)."""
|
||||
try:
|
||||
if self._check_if_cSAXS_shutter_exists_in_config():
|
||||
return self.device_manager.devices["fsh"].fshstatus()
|
||||
else:
|
||||
return self.shutter.get()
|
||||
except Exception as ex:
|
||||
raise OMNYFastEpicsShutterError(f"Failed to read shutter status: {ex}")
|
||||
|
||||
def fshinfo(self):
|
||||
"""Print information about which EPICS PV channel is being used."""
|
||||
pvname = self.shutter.pvname
|
||||
print(f"Fast shutter connected to EPICS channel: {pvname}")
|
||||
return pvname
|
||||
if self._check_if_cSAXS_shutter_exists_in_config():
|
||||
return self.device_manager.devices["fsh"].fshinfo()
|
||||
else:
|
||||
print("Using dummy fast shutter device. No EPICS channel is connected.")
|
||||
|
||||
def help(self):
|
||||
"""Display available user methods."""
|
||||
print("Available methods:")
|
||||
for method in self.USER_ACCESS:
|
||||
print(f" - {method}")
|
||||
|
||||
def fshstatus_readback(self):
|
||||
"""Return the fast shutter status (0=closed, 1=open) from the readback signal."""
|
||||
if self._check_if_cSAXS_shutter_exists_in_config():
|
||||
return self.device_manager.devices["fsh"].fshstatus_readback()
|
||||
else:
|
||||
self.shutter.get()
|
||||
|
||||
@@ -27,6 +27,7 @@ from bec_lib import bec_logger, messages
|
||||
from bec_lib.endpoints import MessageEndpoints
|
||||
from bec_server.scan_server.errors import ScanAbortion
|
||||
from bec_server.scan_server.scans import SyncFlyScanBase
|
||||
from csaxs_bec.devices.epics.delay_generator_csaxs.delay_generator_csaxs import TRIGGERSOURCE
|
||||
|
||||
logger = bec_logger.logger
|
||||
|
||||
@@ -73,14 +74,13 @@ class FlomniFermatScan(SyncFlyScanBase):
|
||||
>>> scans.flomni_fermat_scan(fovx=20, fovy=25, cenx=0.02, ceny=0, zshift=0, angle=0, step=0.5, exp_time=0.01)
|
||||
"""
|
||||
|
||||
super().__init__(parameter=parameter, **kwargs)
|
||||
super().__init__(parameter=parameter, exp_time=exp_time, **kwargs)
|
||||
self.show_live_table = False
|
||||
self.axis = []
|
||||
self.fovx = fovx
|
||||
self.fovy = fovy
|
||||
self.cenx = cenx
|
||||
self.ceny = ceny
|
||||
self.exp_time = exp_time
|
||||
self.step = step
|
||||
self.zshift = zshift
|
||||
self.angle = angle
|
||||
@@ -151,6 +151,9 @@ class FlomniFermatScan(SyncFlyScanBase):
|
||||
yield from self.stubs.send_rpc_and_wait("rty", "set", self.positions[0][1])
|
||||
|
||||
def _prepare_setup_part2(self):
|
||||
# Prepare DDG1 to use
|
||||
yield from self.stubs.send_rpc_and_wait("ddg1", "set_trigger", TRIGGERSOURCE.EXT_RISING_EDGE.value)
|
||||
|
||||
if self.flomni_rotation_status:
|
||||
self.flomni_rotation_status.wait()
|
||||
|
||||
@@ -307,6 +310,10 @@ class FlomniFermatScan(SyncFlyScanBase):
|
||||
|
||||
logger.warning("No positions found to return to start")
|
||||
|
||||
def cleanup(self):
|
||||
yield from self.stubs.send_rpc_and_wait("ddg1", "set_trigger", TRIGGERSOURCE.SINGLE_SHOT.value)
|
||||
yield from super().cleanup()
|
||||
|
||||
def run(self):
|
||||
self.initialize()
|
||||
yield from self.read_scan_motors()
|
||||
|
||||
@@ -25,6 +25,8 @@ dependencies = [
|
||||
"bec_widgets",
|
||||
"zmq",
|
||||
"opencv-python",
|
||||
"dectris-compression", # for JFJ preview stream decompression
|
||||
"cbor2",
|
||||
]
|
||||
|
||||
[project.optional-dependencies]
|
||||
|
||||
@@ -282,10 +282,11 @@ def test_ddg1_stage(mock_ddg1: DDG1):
|
||||
|
||||
mock_ddg1.scan_info.msg.scan_parameters["exp_time"] = exp_time
|
||||
mock_ddg1.scan_info.msg.scan_parameters["frames_per_trigger"] = frames_per_trigger
|
||||
mock_ddg1.fast_shutter_control._read_pv.mock_data = 0 # Simulate shutter control
|
||||
|
||||
mock_ddg1.stage()
|
||||
|
||||
shutter_width = 2e-3 + exp_time * frames_per_trigger + 1e-3
|
||||
shutter_width = mock_ddg1._shutter_to_open_delay + exp_time * frames_per_trigger
|
||||
|
||||
assert np.isclose(mock_ddg1.burst_mode.get(), 1) # burst mode is enabled
|
||||
assert np.isclose(mock_ddg1.burst_delay.get(), 0)
|
||||
@@ -302,6 +303,25 @@ def test_ddg1_stage(mock_ddg1: DDG1):
|
||||
assert np.isclose(mock_ddg1.ef.width.get(), 1e-6)
|
||||
|
||||
assert mock_ddg1.staged == ophyd.Staged.yes
|
||||
mock_ddg1.unstage()
|
||||
|
||||
# Test if shutter is kept open..
|
||||
mock_ddg1.fast_shutter_control._read_pv.mock_data = 1 # Simulate shutter control is kept open
|
||||
# Test method
|
||||
mock_ddg1.keep_shutter_open_during_scan(True)
|
||||
shutter_width = mock_ddg1._shutter_to_open_delay + exp_time * frames_per_trigger
|
||||
assert np.isclose(
|
||||
shutter_width, exp_time * frames_per_trigger
|
||||
) # Shutter to open delay is not added as shutter is kept open
|
||||
# Simulate fly scan, so no extra trigger for MCS card.
|
||||
mock_ddg1.scan_info.msg.scan_type = "fly"
|
||||
mock_ddg1.stage()
|
||||
# Shutter channel cd
|
||||
assert np.isclose(mock_ddg1.cd.delay.get(), 0)
|
||||
assert np.isclose(mock_ddg1.cd.width.get(), shutter_width)
|
||||
# MCS channel ef or gate
|
||||
assert np.isclose(mock_ddg1.ef.delay.get(), 0)
|
||||
assert np.isclose(mock_ddg1.ef.width.get(), 0) # No triggering of MCS due to shutter fly scan
|
||||
|
||||
|
||||
def test_ddg1_on_trigger(mock_ddg1: DDG1):
|
||||
@@ -331,9 +351,28 @@ def test_ddg1_on_trigger(mock_ddg1: DDG1):
|
||||
#################################
|
||||
with mock.patch.object(ddg, "_prepare_mcs_on_trigger") as mock_prepare_mcs:
|
||||
mock_prepare_mcs.return_value = ophyd.StatusBase(done=True, success=True)
|
||||
# MCS card is present and enabled, should call prepare_mcs_on_trigger
|
||||
# and the status should resolve once acuiring goes from 1 to 0.
|
||||
status = ddg.trigger()
|
||||
assert status.done is False
|
||||
mcs = ddg.device_manager.devices.get("mcs", None)
|
||||
assert mcs is not None
|
||||
mcs.acquiring._read_pv.mock_data = 1 # Simulate acquiring started
|
||||
assert status.done is False
|
||||
mcs.acquiring._read_pv.mock_data = 0 # Simulate acquiring stopped
|
||||
status.wait(timeout=1) # Wait for the status to be done
|
||||
assert status.done is True
|
||||
assert status.success is True
|
||||
mock_prepare_mcs.assert_called_once()
|
||||
|
||||
# Now we disable the mcs card, and trigger again. This should not call prepare_mcs_on_trigger
|
||||
# and should fallback to polling the DDG for END_OF_BURST status bit.
|
||||
|
||||
# Disable mcs card
|
||||
mcs.enabled = False
|
||||
status = ddg.trigger()
|
||||
# Check that the poll thread run event is set
|
||||
# Careful in debugger, there is a timeout based on the exp_time + 5s default
|
||||
assert ddg._poll_thread_run_event.is_set()
|
||||
assert not ddg._poll_thread_poll_loop_done.is_set()
|
||||
|
||||
|
||||
@@ -5,6 +5,7 @@ from time import time
|
||||
from typing import TYPE_CHECKING, Generator
|
||||
from unittest import mock
|
||||
|
||||
import numpy as np
|
||||
import pytest
|
||||
from bec_lib.messages import FileMessage, ScanStatusMessage
|
||||
from jfjoch_client.models.broker_status import BrokerStatus
|
||||
@@ -78,7 +79,7 @@ def detector_list(request) -> Generator[DetectorList, None, None]:
|
||||
),
|
||||
DetectorListElement(
|
||||
id=2,
|
||||
description="EIGER 8.5M (tmp)",
|
||||
description="EIGER 9M",
|
||||
serial_number="123456",
|
||||
base_ipv4_addr="192.168.0.1",
|
||||
udp_interface_count=1,
|
||||
@@ -103,7 +104,11 @@ def eiger_1_5m(mock_scan_info) -> Generator[Eiger1_5M, None, None]:
|
||||
name = "eiger_1_5m"
|
||||
dev = Eiger1_5M(name=name, beam_center=(256, 256), detector_distance=100.0)
|
||||
dev.scan_info.msg = mock_scan_info
|
||||
yield dev
|
||||
try:
|
||||
yield dev
|
||||
finally:
|
||||
if dev._destroyed is False:
|
||||
dev.destroy()
|
||||
|
||||
|
||||
@pytest.fixture(scope="function")
|
||||
@@ -113,7 +118,19 @@ def eiger_9m(mock_scan_info) -> Generator[Eiger9M, None, None]:
|
||||
name = "eiger_9m"
|
||||
dev = Eiger9M(name=name)
|
||||
dev.scan_info.msg = mock_scan_info
|
||||
yield dev
|
||||
try:
|
||||
yield dev
|
||||
finally:
|
||||
if dev._destroyed is False:
|
||||
dev.destroy()
|
||||
|
||||
|
||||
def test_eiger_wait_for_connection(eiger_1_5m, eiger_9m):
|
||||
"""Test the wait_for_connection metho is calling status_get on the JFJ API client."""
|
||||
for eiger in (eiger_1_5m, eiger_9m):
|
||||
with mock.patch.object(eiger.jfj_client.api, "status_get") as mock_status_get:
|
||||
eiger.wait_for_connection(timeout=1)
|
||||
mock_status_get.assert_called_once_with(_request_timeout=1)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("detector_state", ["Idle", "Inactive"])
|
||||
@@ -141,7 +158,7 @@ def test_eiger_1_5m_on_connected(eiger_1_5m, detector_list, detector_state):
|
||||
else:
|
||||
eiger.on_connected()
|
||||
assert mock_set_det.call_args == mock.call(
|
||||
DetectorSettings(frame_time_us=500, timing=DetectorTiming.TRIGGER), timeout=10
|
||||
DetectorSettings(frame_time_us=500, timing=DetectorTiming.TRIGGER), timeout=5
|
||||
)
|
||||
assert mock_file_writer.call_args == mock.call(
|
||||
file_writer_settings=FileWriterSettings(
|
||||
@@ -179,7 +196,7 @@ def test_eiger_9m_on_connected(eiger_9m, detector_list, detector_state):
|
||||
else:
|
||||
eiger.on_connected()
|
||||
assert mock_set_det.call_args == mock.call(
|
||||
DetectorSettings(frame_time_us=500, timing=DetectorTiming.TRIGGER), timeout=10
|
||||
DetectorSettings(frame_time_us=500, timing=DetectorTiming.TRIGGER), timeout=5
|
||||
)
|
||||
assert mock_file_writer.call_args == mock.call(
|
||||
file_writer_settings=FileWriterSettings(
|
||||
@@ -216,11 +233,39 @@ def test_eiger_on_stop(eiger_1_5m):
|
||||
stop_event.wait(timeout=5) # Thread should be killed from task_handler
|
||||
|
||||
|
||||
def test_eiger_on_destroy(eiger_1_5m):
|
||||
"""Test the on_destroy logic of the Eiger detector. This is equivalent for 9M and 1_5M."""
|
||||
eiger = eiger_1_5m
|
||||
start_event = threading.Event()
|
||||
stop_event = threading.Event()
|
||||
|
||||
def tmp_task():
|
||||
start_event.set()
|
||||
try:
|
||||
while True:
|
||||
time.sleep(0.1)
|
||||
finally:
|
||||
stop_event.set()
|
||||
|
||||
eiger.task_handler.submit_task(tmp_task)
|
||||
start_event.wait(timeout=5)
|
||||
|
||||
with (
|
||||
mock.patch.object(eiger.jfj_preview_client, "stop") as mock_jfj_preview_client_stop,
|
||||
mock.patch.object(eiger.jfj_client, "stop") as mock_jfj_client_stop,
|
||||
):
|
||||
eiger.on_destroy()
|
||||
mock_jfj_preview_client_stop.assert_called_once()
|
||||
mock_jfj_client_stop.assert_called_once()
|
||||
stop_event.wait(timeout=5)
|
||||
|
||||
|
||||
@pytest.mark.timeout(25)
|
||||
@pytest.mark.parametrize("raise_timeout", [True, False])
|
||||
def test_eiger_on_complete(eiger_1_5m, raise_timeout):
|
||||
"""Test the on_complete logic of the Eiger detector. This is equivalent for 9M and 1_5M."""
|
||||
eiger = eiger_1_5m
|
||||
eiger._wait_for_on_complete = 1 # reduce wait time for testing
|
||||
|
||||
callback_completed_event = threading.Event()
|
||||
|
||||
@@ -230,7 +275,7 @@ def test_eiger_on_complete(eiger_1_5m, raise_timeout):
|
||||
|
||||
unblock_wait_for_idle = threading.Event()
|
||||
|
||||
def mock_wait_for_idle(timeout: int, request_timeout: float):
|
||||
def mock_wait_for_idle(timeout: float, raise_on_timeout: bool) -> bool:
|
||||
if unblock_wait_for_idle.wait(timeout):
|
||||
if raise_timeout:
|
||||
return False
|
||||
@@ -238,11 +283,18 @@ def test_eiger_on_complete(eiger_1_5m, raise_timeout):
|
||||
return False
|
||||
|
||||
with (
|
||||
mock.patch.object(
|
||||
eiger.jfj_client.api, "status_get", return_value=BrokerStatus(state="Idle")
|
||||
),
|
||||
mock.patch.object(eiger.jfj_client, "wait_for_idle", side_effect=mock_wait_for_idle),
|
||||
mock.patch.object(
|
||||
eiger.jfj_client.api,
|
||||
"statistics_data_collection_get",
|
||||
return_value=MeasurementStatistics(run_number=1),
|
||||
return_value=MeasurementStatistics(
|
||||
run_number=1,
|
||||
images_collected=eiger.scan_info.msg.num_points
|
||||
* eiger.scan_info.msg.scan_parameters["frames_per_trigger"],
|
||||
),
|
||||
),
|
||||
):
|
||||
status = eiger.complete()
|
||||
@@ -284,7 +336,7 @@ def test_eiger_file_event_callback(eiger_1_5m, tmp_path):
|
||||
assert file_msg.hinted_h5_entries == {"data": "entry/data/data"}
|
||||
|
||||
|
||||
def test_eiger_on_sage(eiger_1_5m):
|
||||
def test_eiger_on_stage(eiger_1_5m):
|
||||
"""Test the on_stage and on_unstage logic of the Eiger detector. This is equivalent for 9M and 1_5M."""
|
||||
eiger = eiger_1_5m
|
||||
scan_msg = eiger.scan_info.msg
|
||||
@@ -316,3 +368,35 @@ def test_eiger_on_sage(eiger_1_5m):
|
||||
)
|
||||
assert mock_start.call_args == mock.call(settings=data_settings)
|
||||
assert eiger.staged is Staged.yes
|
||||
|
||||
|
||||
def test_eiger_set_det_distance_test_beam_center(eiger_1_5m):
|
||||
"""Test the set_detector_distance and set_beam_center methods. Equivalent for 9M and 1_5M."""
|
||||
eiger = eiger_1_5m
|
||||
old_distance = eiger.detector_distance
|
||||
new_distance = old_distance + 100
|
||||
old_beam_center = eiger.beam_center
|
||||
new_beam_center = (old_beam_center[0] + 20, old_beam_center[1] + 50)
|
||||
eiger.set_detector_distance(new_distance)
|
||||
assert eiger.detector_distance == new_distance
|
||||
eiger.set_beam_center(x=new_beam_center[0], y=new_beam_center[1])
|
||||
assert eiger.beam_center == new_beam_center
|
||||
with pytest.raises(ValueError):
|
||||
eiger.set_beam_center(x=-10, y=100) # Cannot set negative beam center
|
||||
with pytest.raises(ValueError):
|
||||
eiger.detector_distance = -50 # Cannot set negative detector distance
|
||||
|
||||
|
||||
def test_eiger_preview_callback(eiger_1_5m):
|
||||
"""Preview callback test for the Eiger detector. This is equivalent for 9M and 1_5M."""
|
||||
eiger = eiger_1_5m
|
||||
# NOTE: I don't find models for the CBOR messages used by JFJ, currently using a dummay dict.
|
||||
# Please adjust once the proper model is found.
|
||||
for msg_type in ["start", "end", "image", "calibration", "metadata"]:
|
||||
msg = {"type": msg_type, "data": {"default": np.array([[1, 2], [3, 4]])}}
|
||||
with mock.patch.object(eiger.preview_image, "put") as mock_preview_put:
|
||||
eiger._preview_callback(msg)
|
||||
if msg_type == "image":
|
||||
mock_preview_put.assert_called_once_with(msg["data"]["default"])
|
||||
else:
|
||||
mock_preview_put.assert_not_called()
|
||||
|
||||
37
tests/tests_devices/test_epics_devices.py
Normal file
37
tests/tests_devices/test_epics_devices.py
Normal file
@@ -0,0 +1,37 @@
|
||||
"""Module to test epics devices."""
|
||||
|
||||
import pytest
|
||||
from ophyd_devices.tests.utils import patched_device
|
||||
|
||||
from csaxs_bec.devices.epics.fast_shutter import cSAXSFastEpicsShutter
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def fast_shutter_device():
|
||||
"""Fixture to create a patched cSAXSFastEpicsShutter device for testing."""
|
||||
with patched_device(cSAXSFastEpicsShutter, name="fsh", prefix="X12SA-ES1-TTL:") as device:
|
||||
yield device
|
||||
|
||||
|
||||
def test_fast_shutter_methods(fast_shutter_device):
    """Exercise the user-facing open/close/status API of the cSAXSFastEpicsShutter device."""
    device = fast_shutter_device
    assert device.name == "fsh", "Device name should be 'fsh'"
    assert device.prefix == "X12SA-ES1-TTL:", "Device prefix is 'X12SA-ES1-TTL:'"

    # Opening must be reflected in both the setpoint PV and fshstatus().
    device.fshopen()
    assert device.shutter.get() == 1, "Shutter should be open (1) after fshopen()"
    assert device.fshstatus() == 1, "fshstatus should return 1 when shutter is open"

    # Closing must do the inverse.
    device.fshclose()
    assert device.shutter.get() == 0, "Shutter should be closed (0) after fshclose()"
    assert device.fshstatus() == 0, "fshstatus should return 0 when shutter is closed"

    # shutter_readback is connected to a separate PV, so drive its mock directly.
    device.shutter_readback._read_pv.mock_data = 1  # Simulate readback showing open
    assert (
        device.fshstatus_readback() == 1
    ), "fshstatus_readback should return 1 when shutter readback shows open"
    device.shutter_readback._read_pv.mock_data = 0  # Simulate readback showing closed
    assert (
        device.fshstatus_readback() == 0
    ), "fshstatus_readback should return 0 when shutter readback shows closed"
|
||||
99
tests/tests_devices/test_eps.py
Normal file
99
tests/tests_devices/test_eps.py
Normal file
@@ -0,0 +1,99 @@
|
||||
# pylint: skip-file
|
||||
from __future__ import annotations
|
||||
|
||||
import pytest
|
||||
from ophyd_devices.tests.utils import patched_device
|
||||
|
||||
from csaxs_bec.devices.epics.eps import EPS
|
||||
|
||||
# Complete set of EPICS PV names the EPS device is expected to expose;
# compared against the walked signals in test_eps_has_signals below.
ALL_PVS = [
    # ALARMS
    "X12SA-EPS-PLC:AlarmCnt_EPS",
    "ARS00-MIS-PLC-01:AlarmCnt_Frontends",
    # FRONTEND VALVES
    "X12SA-FE-VVPG-0000:PLC_OPEN",
    "X12SA-FE-VVPG-1010:PLC_OPEN",
    "X12SA-FE-VVFV-2010:PLC_OPEN",
    "X12SA-FE-VVPG-2010:PLC_OPEN",
    # Optics VALVES
    "X12SA-OP-VVPG-1010:PLC_OPEN",
    "X12SA-OP-VVPG-2010:PLC_OPEN",
    "X12SA-OP-VVPG-3010:PLC_OPEN",
    "X12SA-OP-VVPG-3020:PLC_OPEN",
    "X12SA-OP-VVPG-4010:PLC_OPEN",
    "X12SA-OP-VVPG-5010:PLC_OPEN",
    "X12SA-OP-VVPG-6010:PLC_OPEN",
    "X12SA-OP-VVPG-7010:PLC_OPEN",
    # Endstation VALVES
    "X12SA-ES-VVPG-1010:PLC_OPEN",
    # Frontend SHUTTERS
    "X12SA-FE-PSH1-EMLS-0010:OPEN",
    "X12SA-FE-STO1-EMLS-0010:OPEN",
    # Optics SHUTTERS
    "X12SA-OP-PSH1-EMLS-7010:OPEN",
    # DMM Monochromator
    "X12SA-OP-DMM-ETTC-3010:TEMP",
    "X12SA-OP-DMM-ETTC-3020:TEMP",
    "X12SA-OP-DMM-ETTC-3030:TEMP",
    "X12SA-OP-DMM-ETTC-3040:TEMP",
    "X12SA-OP-DMM-EMLS-3010:THRU",
    "X12SA-OP-DMM-EMLS-3020:IN",
    "X12SA-OP-DMM-EMLS-3030:THRU",
    "X12SA-OP-DMM-EMLS-3040:IN",
    "X12SA-OP-DMM-EMSW-3050:SWITCH",
    "X12SA-OP-DMM-EMSW-3060:SWITCH",
    "X12SA-OP-DMM-EMSW-3070:SWITCH",
    "X12SA-OP-DMM1:ENERGY-GET",
    "X12SA-OP-DMM1:POSITION",
    "X12SA-OP-DMM1:STRIPE",
    # CCM Monochromator
    "X12SA-OP-CCM-ETTC-4010:TEMP",
    "X12SA-OP-CCM-ETTC-4020:TEMP",
    "X12SA-OP-CCM-EMSW-4010:SWITCH",
    "X12SA-OP-CCM-EMSW-4020:SWITCH",
    "X12SA-OP-CCM-EMSW-4030:SWITCH",
    "X12SA-OP-CCM1:ENERGY-GET",
    "X12SA-OP-CCM1:POSITION",
    # Water Cooling
    "X12SA-OP-SL1-EFSW-2010:FLOW",
    "X12SA-OP-SL2-EFSW-2010:FLOW",
    "X12SA-OP-EB1-EFSW-5010:FLOW",
    "X12SA-OP-EB1-EFSW-5020:FLOW",
    "X12SA-OP-SL3-EFSW-5010:FLOW",
    "X12SA-OP-KB-EFSW-6010:FLOW",
    "X12SA-OP-PSH1-EFSW-7010:FLOW",
    "X12SA-ES-EB2-EFSW-1010:FLOW",
    "X12SA-OP-CS-ECVW-0010:PLC_OPEN",
    "X12SA-OP-CS-ECVW-0020:PLC_OPEN",
    # Request PVs
    "X12SA-EPS-PLC:ACKERR-REQUEST",
    "X12SA-OP-CS-ECVW:PLC_REQUEST",
]
|
||||
|
||||
|
||||
@pytest.fixture
def eps():
    """Yield an EPS device whose EPICS connections are replaced by mocks."""
    with patched_device(EPS, name="EPS") as device:
        yield device
|
||||
|
||||
|
||||
def test_eps_has_signals(eps):
    """Test that all expected PVs are present in the eps device."""
    found_pvs = []
    for walk in eps.walk_signals():
        found_pvs.append(walk.item._read_pv.pvname)
    # Compare as sets: signal order is irrelevant, only coverage matters.
    assert set(found_pvs) == set(
        ALL_PVS
    ), f"Expected PVs {ALL_PVS} but found {set(ALL_PVS) - set(found_pvs)}"
|
||||
|
||||
|
||||
# pylint: disable=line-too-long
# Exact console dump (including ANSI colour escapes) expected from EPS.show_all()
# on a freshly mocked device where every PV reads zero/closed.
expected_show_all_output = "\x1b[1mX12SA EPS status\x1b[0m\n\n\x1b[1mEPS Alarms\x1b[0m\n - X12SA EPS Alarm count 0\n - FrontEnd MIS Alarm count 0\n\n\x1b[1mValves Frontend\x1b[0m\n - FE-VVPG-0000 \x1b[91mCLOSED\x1b[0m\n - FE-VVPG-1010 \x1b[91mCLOSED\x1b[0m\n - FE-VVFV-2010 \x1b[91mCLOSED\x1b[0m\n - FE-VVPG-2010 \x1b[91mCLOSED\x1b[0m\n\n\x1b[1mValves Optics Hutch\x1b[0m\n - OP-VVPG-1010 \x1b[91mCLOSED\x1b[0m\n - OP-VVPG-2010 \x1b[91mCLOSED\x1b[0m\n - OP-VVPG-3010 \x1b[91mCLOSED\x1b[0m\n - OP-VVPG-3020 \x1b[91mCLOSED\x1b[0m\n - OP-VVPG-4010 \x1b[91mCLOSED\x1b[0m\n - OP-VVPG-5010 \x1b[91mCLOSED\x1b[0m\n - OP-VVPG-6010 \x1b[91mCLOSED\x1b[0m\n - OP-VVPG-7010 \x1b[91mCLOSED\x1b[0m\n\n\x1b[1mValves ES Hutch\x1b[0m\n - ES-VVPG-1010 \x1b[91mCLOSED\x1b[0m\n\n\x1b[1mShutters Frontend\x1b[0m\n - FE-PSH1-EMLS-0010 \x1b[91mCLOSED\x1b[0m\n - FE-STO1-EMLS-0010 \x1b[91mCLOSED\x1b[0m\n\n\x1b[1mShutters Endstation\x1b[0m\n - OP-PSH1-EMLS-7010 \x1b[91mCLOSED\x1b[0m\n\n\x1b[1mDMM Monochromator\x1b[0m\n - DMM Temp Surface 1 0.0\n - DMM Temp Surface 2 0.0\n - DMM Temp Shield 1 (disaster) 0.0\n - DMM Temp Shield 2 (disaster) 0.0\n - DMM Translation ThruPos \x1b[91mINACTIVE\x1b[0m\n - DMM Translation InPos \x1b[91mINACTIVE\x1b[0m\n - DMM Bragg ThruPos \x1b[91mINACTIVE\x1b[0m\n - DMM Bragg InPos \x1b[91mINACTIVE\x1b[0m\n - DMM Heater Fault XTAL 1 \x1b[92mOK\x1b[0m\n - DMM Heater Fault XTAL 2 \x1b[92mOK\x1b[0m\n - DMM Heater Fault Support 1 \x1b[92mOK\x1b[0m\n - DMM Energy 0.0000\n - DMM Position out of beam\n - DMM Stripe Stripe 1 W/B4C\n\n\x1b[1mCCM Monochromator\x1b[0m\n - CCM Temp Crystal 0.0\n - CCM Temp Shield (disaster) 0.0\n - CCM Heater Fault 1 \x1b[92mOK\x1b[0m\n - CCM Heater Fault 2 \x1b[92mOK\x1b[0m\n - CCM Heater Fault 3 \x1b[92mOK\x1b[0m\n - CCM Energy 0.0000\n - CCM Position out of beam\n\n\x1b[1mCooling Water\x1b[0m\n - OP-SL1-EFSW-2010 \x1b[91mFAIL\x1b[0m\n - OP-SL2-EFSW-2010 \x1b[91mFAIL\x1b[0m\n - OP-EB1-EFSW-5010 \x1b[91mFAIL\x1b[0m\n - OP-EB1-EFSW-5020 \x1b[91mFAIL\x1b[0m\n - OP-SL3-EFSW-5010 \x1b[91mFAIL\x1b[0m\n - OP-KB-EFSW-6010 \x1b[91mFAIL\x1b[0m\n - OP-PSH1-EFSW-7010 \x1b[91mFAIL\x1b[0m\n - ES-EB2-EFSW-1010 \x1b[91mFAIL\x1b[0m\n - OP-CS-ECVW-0010 \x1b[91mCLOSED\x1b[0m\n - OP-CS-ECVW-0020 \x1b[91mCLOSED\x1b[0m\n\n\x1b[96mHint:\x1b[0m Both water cooling valves are CLOSED.\nYou can open them using: \x1b[1mdev.x12saEPS.water_cooling_op()\x1b[0m\n"
||||
|
||||
|
||||
def test_eps_show_all(eps, capsys):
    """Test that the show_all method outputs the expected status."""
    eps.show_all()
    captured = capsys.readouterr().out
    # Byte-for-byte comparison against the reference dump, ANSI escapes included.
    assert (
        captured == expected_show_all_output
    ), f"Expected output does not match actual output.\nExpected:\n{expected_show_all_output}\nActual:\n{captured}"
|
||||
@@ -2,6 +2,7 @@ from unittest import mock
|
||||
|
||||
import pytest
|
||||
from ophyd_devices.tests.utils import SocketMock
|
||||
from ophyd_devices.utils.socket import SocketSignal
|
||||
|
||||
from csaxs_bec.devices.omny.galil.fupr_ophyd import FuprGalilController, FuprGalilMotor
|
||||
|
||||
@@ -17,6 +18,11 @@ def fsamroy(dm_with_devices):
|
||||
socket_cls=SocketMock,
|
||||
device_manager=dm_with_devices,
|
||||
)
|
||||
for walk in fsamroy_motor.walk_signals():
|
||||
if isinstance(walk.item, SocketSignal):
|
||||
walk.item._readback_timeout = (
|
||||
0.0 # Set the readback timeout to 0 to avoid waiting during tests
|
||||
)
|
||||
fsamroy_motor.controller.on()
|
||||
assert isinstance(fsamroy_motor.controller, FuprGalilController)
|
||||
yield fsamroy_motor
|
||||
|
||||
@@ -1,13 +1,19 @@
|
||||
import copy
|
||||
import inspect
|
||||
from unittest import mock
|
||||
|
||||
import numpy as np
|
||||
import pytest
|
||||
from bec_server.device_server.tests.utils import DMMock
|
||||
from ophyd_devices.tests.utils import SocketMock
|
||||
|
||||
from csaxs_bec.devices.npoint.npoint import NPointAxis, NPointController
|
||||
from csaxs_bec.devices.omny.galil.fgalil_ophyd import FlomniGalilController, FlomniGalilMotor
|
||||
from csaxs_bec.devices.omny.galil.fupr_ophyd import FuprGalilController, FuprGalilMotor
|
||||
from csaxs_bec.devices.omny.galil.galil_rio import (
|
||||
GalilRIO,
|
||||
GalilRIOAnalogSignalRO,
|
||||
GalilRIOController,
|
||||
)
|
||||
from csaxs_bec.devices.omny.galil.lgalil_ophyd import LamniGalilController, LamniGalilMotor
|
||||
from csaxs_bec.devices.omny.galil.ogalil_ophyd import OMNYGalilController, OMNYGalilMotor
|
||||
from csaxs_bec.devices.omny.galil.sgalil_ophyd import GalilController, SGalilMotor
|
||||
@@ -173,9 +179,9 @@ def test_find_reference(leyex, axis_nr, socket_put_messages, socket_get_messages
|
||||
assert leyex.controller.sock.buffer_put == socket_put_messages
|
||||
|
||||
|
||||
def test_wait_for_connection_called():
|
||||
def test_wait_for_connection_called(dm_with_devices):
|
||||
"""Test that wait_for_connection is called on all motors that have a socket controller."""
|
||||
dm = DMMock()
|
||||
dm = dm_with_devices
|
||||
testable_connections = [
|
||||
(NPointAxis, NPointController),
|
||||
(FlomniGalilMotor, FlomniGalilController),
|
||||
@@ -187,6 +193,7 @@ def test_wait_for_connection_called():
|
||||
(RtLamniMotor, RtLamniController),
|
||||
(RtOMNYMotor, RtOMNYController),
|
||||
(SmaractMotor, SmaractController),
|
||||
(GalilRIO, GalilRIOController),
|
||||
]
|
||||
for motor_cls, controller_cls in testable_connections:
|
||||
# Store values to restore later
|
||||
@@ -195,14 +202,20 @@ def test_wait_for_connection_called():
|
||||
controller_cls._reset_controller()
|
||||
controller_cls._axes_per_controller = 3
|
||||
|
||||
motor = motor_cls(
|
||||
"C",
|
||||
name="test_motor",
|
||||
host="mpc2680.psi.ch",
|
||||
port=8081,
|
||||
socket_cls=SocketMock,
|
||||
device_manager=dm,
|
||||
)
|
||||
inspect_args = inspect.getfullargspec(motor_cls.__init__).args
|
||||
inspect_kwargs = inspect.getfullargspec(motor_cls.__init__).kwonlyargs
|
||||
if len(inspect_args) > 1:
|
||||
args = ("C",)
|
||||
else:
|
||||
args = ()
|
||||
kwargs = {
|
||||
"name": "test_motor",
|
||||
"host": "mpc2680.psi.ch",
|
||||
"port": 8081,
|
||||
"device_manager": dm,
|
||||
"socket_cls": SocketMock,
|
||||
}
|
||||
motor = motor_cls(*args, **kwargs)
|
||||
with mock.patch.object(motor.controller, "on") as mock_on:
|
||||
|
||||
motor.wait_for_connection(timeout=5.0)
|
||||
@@ -219,3 +232,172 @@ def test_wait_for_connection_called():
|
||||
finally:
|
||||
controller_cls._reset_controller()
|
||||
controller_cls._axes_per_controller = ctrl_axis_backup
|
||||
|
||||
|
||||
########################
|
||||
#### Test Galil RIO ####
|
||||
########################
|
||||
|
||||
|
||||
@pytest.fixture
def galil_rio(dm_with_devices):
    """Yield a GalilRIO wired to a SocketMock; always destroy the device afterwards."""
    try:
        device = GalilRIO(
            name="galil_rio",
            host="129.129.0.1",
            socket_cls=SocketMock,
            device_manager=dm_with_devices,
        )
        device.wait_for_connection()
        yield device
    finally:
        device.destroy()
|
||||
|
||||
|
||||
def test_galil_rio_initialization(galil_rio):
    """
    Test that the Galil RIO signal can establish a connection.
    """
    controller = galil_rio.controller
    assert controller.connected is True
    # Every walked signal must inherit the controller's connection state.
    for node in galil_rio.walk_signals():
        assert node.item.connected is True

    assert controller._socket_host == "129.129.0.1"
    assert controller._socket_port == 23  # Default port
|
||||
|
||||
|
||||
def test_galil_rio_signal_read(galil_rio):
    """
    Test that the Galil RIO signal can read values correctly.

    Covers three paths: a full read of all analog channels, a cached
    single-channel read, and a forced (uncached) read that triggers the
    value-change subscription callback.
    """
    ###########
    ## Test read of all channels
    ###########

    assert galil_rio.analog_in.ch0._readback_timeout == 0.1  # Default read timeout of 100ms
    # Mock the socket to return specific values for the 8 analog channels
    analog_bufffer = b" 1.234 2.345 3.456 4.567 5.678 6.789 7.890 8.901\r\n"
    galil_rio.controller.sock.buffer_recv = []  # Clear any existing buffer
    galil_rio.controller.sock.buffer_recv.append(analog_bufffer)
    read_values = galil_rio.read()
    assert len(read_values) == 8  # 8 channels

    expected_values = {
        galil_rio.analog_in.ch0.name: {"value": 1.234},
        galil_rio.analog_in.ch1.name: {"value": 2.345},
        galil_rio.analog_in.ch2.name: {"value": 3.456},
        galil_rio.analog_in.ch3.name: {"value": 4.567},
        galil_rio.analog_in.ch4.name: {"value": 5.678},
        galil_rio.analog_in.ch5.name: {"value": 6.789},
        galil_rio.analog_in.ch6.name: {"value": 7.890},
        galil_rio.analog_in.ch7.name: {"value": 8.901},
    }
    # All timestamps should be the same (one socket transaction serves all channels)
    assert all(
        ret["timestamp"] == read_values[galil_rio.analog_in.ch0.name]["timestamp"]
        for signal_name, ret in read_values.items()
    )
    # Check values
    for signal_name, expected in expected_values.items():
        assert np.isclose(read_values[signal_name]["value"], expected["value"])
        assert "timestamp" in read_values[signal_name]

    # Check communication command sent to the socket
    assert galil_rio.controller.sock.buffer_put == [
        b"MG@AN[0], @AN[1], @AN[2], @AN[3], @AN[4], @AN[5], @AN[6], @AN[7]\r"
    ]

    ###########
    ## Test read of single channel with callback
    ###########

    # Add callback to update readback
    value_callback_buffer: list[tuple] = []

    def value_callback(value, old_value, **kwargs):
        # Walk back up signal -> component -> device to trigger a full read
        obj = kwargs.get("obj")
        galil = obj.parent.parent
        readback = galil.read()
        value_callback_buffer.append(readback)

    galil_rio.analog_in.ch0.subscribe(value_callback, run=False)
    galil_rio.controller.sock.buffer_recv = [b" 2.5 2.6 2.7 2.8 2.9 3.0 3.1 3.2"]
    expected_values = [2.5, 2.6, 2.7, 2.8, 2.9, 3.0, 3.1, 3.2]

    ##################
    ## Test cached readback
    ##################

    # Should have used the cached value
    for walk in galil_rio.walk_signals():
        walk.item._readback_timeout = 10  # Make sure cached read is used
    ret = galil_rio.analog_in.ch0.read()

    # Should not trigger callback since value did not change
    assert np.isclose(ret[galil_rio.analog_in.ch0.name]["value"], 1.234)
    # Same timestamp as for another channel as this is cached read
    assert np.isclose(
        ret[galil_rio.analog_in.ch0.name]["timestamp"], galil_rio.analog_in.ch7.timestamp
    )
    assert len(value_callback_buffer) == 0

    ##################
    ## Test uncached read from controller
    ##################

    # Now force a read from the controller
    galil_rio.analog_in.ch0._last_readback = 0  # Force read from controller
    ret = galil_rio.analog_in.ch0.read()

    assert np.isclose(ret[galil_rio.analog_in.ch0.name]["value"], 2.5)

    # Check callback invocation, but only 1 callback even with galil_rio.read() call in callback
    assert len(value_callback_buffer) == 1
    values = [value["value"] for value in value_callback_buffer[0].values()]
    assert np.isclose(values, expected_values).all()
    assert all(
        [
            value["timestamp"]
            == value_callback_buffer[0][galil_rio.analog_in.ch0.name]["timestamp"]
            for value in value_callback_buffer[0].values()
        ]
    )
|
||||
|
||||
|
||||
def test_galil_rio_digital_out_signal(galil_rio):
    """
    Test that the Galil RIO digital output signal can be set correctly.
    """
    ## Read back every digital output channel
    num_channels = galil_rio.digital_out.ch0._NUM_DIGITAL_OUTPUT_CHANNELS
    expected_put_buffer = [f"MG@OUT[{idx}]\r".encode() for idx in range(num_channels)]
    mock_responses = [" 1.000".encode() for _ in range(num_channels)]

    galil_rio.controller.sock.buffer_recv = mock_responses  # Mock response for readback

    digital_read = galil_rio.read_configuration()  # Read to populate readback values

    for walk in galil_rio.digital_out.walk_signals():
        assert np.isclose(digital_read[walk.item.name]["value"], 1.0)

    assert galil_rio.controller.sock.buffer_put == expected_put_buffer

    ## Write to digital output channels: high emits SB<n>, low emits CB<n>
    galil_rio.controller.sock.buffer_put = []  # Clear buffer put
    galil_rio.controller.sock.buffer_recv = [b":"]  # Mock response for readback

    # Set digital output channel 0 to high
    galil_rio.digital_out.ch0.put(1)
    assert galil_rio.controller.sock.buffer_put == [b"SB0\r"]

    # Set digital output channel 0 to low
    galil_rio.controller.sock.buffer_put = []  # Clear buffer put
    galil_rio.controller.sock.buffer_recv = [b":"]  # Mock response for readback
    galil_rio.digital_out.ch0.put(0)
    assert galil_rio.controller.sock.buffer_put == [b"CB0\r"]
|
||||
|
||||
Reference in New Issue
Block a user