refactor: std daq integration
Major refactor of the std daq integration for the PCO Edge camera and Gigafrost camera. New live processing capabilities have been added, and the code has been cleaned up for better maintainability.
This commit is contained in:
@@ -33,6 +33,7 @@ dev = [
|
||||
"ophyd_devices",
|
||||
"bec_server",
|
||||
"requests-mock",
|
||||
"fakeredis",
|
||||
]
|
||||
|
||||
[project.entry-points."bec"]
|
||||
|
||||
337
tests/tests_devices/test_gfcam.py
Normal file
337
tests/tests_devices/test_gfcam.py
Normal file
@@ -0,0 +1,337 @@
|
||||
from unittest import mock
|
||||
|
||||
import pytest
|
||||
|
||||
from tomcat_bec.devices.gigafrost.gigafrost_base import GigaFrostBase
|
||||
from tomcat_bec.devices.gigafrost.gigafrostcamera import GigaFrostCamera, default_config
|
||||
from tomcat_bec.devices.std_daq.std_daq_client import StdDaqClient
|
||||
from tomcat_bec.devices.std_daq.std_daq_preview import StdDaqPreview
|
||||
|
||||
|
||||
@pytest.fixture()
|
||||
def gfcam_base():
|
||||
gfcam = GigaFrostCamera(
|
||||
"X02DA-CAM-GF2:",
|
||||
name="gfcam",
|
||||
std_daq_rest="http://example.com/rest",
|
||||
std_daq_ws="ws://example.com/ws",
|
||||
)
|
||||
for component in gfcam.component_names:
|
||||
type.__setattr__(GigaFrostCamera, component, mock.MagicMock())
|
||||
|
||||
yield gfcam
|
||||
|
||||
|
||||
def test_gfcam_init_raises_without_rest_ws():
|
||||
with pytest.raises(ValueError) as excinfo:
|
||||
GigaFrostCamera("X02DA-CAM-GF2:", name="gfcam")
|
||||
excinfo.match("std_daq_rest and std_daq_ws must be provided")
|
||||
|
||||
|
||||
def test_gfcam_init():
|
||||
gfcam = GigaFrostCamera(
|
||||
"X02DA-CAM-GF2:",
|
||||
name="gfcam",
|
||||
std_daq_rest="http://example.com/rest",
|
||||
std_daq_ws="ws://example.com/ws",
|
||||
)
|
||||
assert gfcam.name == "gfcam"
|
||||
assert isinstance(gfcam.backend, StdDaqClient)
|
||||
assert gfcam.live_preview is None
|
||||
|
||||
|
||||
def test_gfcam_init_with_live_preview():
|
||||
gfcam = GigaFrostCamera(
|
||||
"X02DA-CAM-GF2:",
|
||||
name="gfcam",
|
||||
std_daq_rest="http://example.com/rest",
|
||||
std_daq_ws="ws://example.com/ws",
|
||||
std_daq_live="http://example.com/live_preview",
|
||||
)
|
||||
assert gfcam.live_preview is not None
|
||||
assert isinstance(gfcam.live_preview, StdDaqPreview)
|
||||
|
||||
|
||||
def test_gfcam_configure(gfcam_base):
|
||||
with mock.patch.object(gfcam_base, "stop_camera") as stop_camera:
|
||||
with mock.patch.object(gfcam_base.backend, "set_config") as set_config:
|
||||
with mock.patch.object(GigaFrostBase, "configure") as base_configure:
|
||||
gfcam_base.configure({})
|
||||
stop_camera.assert_called_once()
|
||||
stop_camera().wait.assert_called_once()
|
||||
|
||||
set_config.assert_not_called()
|
||||
config = default_config()
|
||||
base_configure.assert_called_once_with(config)
|
||||
|
||||
|
||||
def test_gfcam_default_config_copies():
|
||||
assert isinstance(default_config(), dict)
|
||||
assert id(default_config()) != id(default_config())
|
||||
|
||||
|
||||
def test_gfcam_configure_sets_exp_time_in_ms(gfcam_base):
|
||||
with mock.patch.object(gfcam_base, "stop_camera") as stop_camera:
|
||||
with mock.patch.object(gfcam_base.backend, "set_config") as set_config:
|
||||
with mock.patch.object(GigaFrostBase, "configure") as base_configure:
|
||||
gfcam_base.configure({"exp_time": 0.1})
|
||||
stop_camera.assert_called_once()
|
||||
stop_camera().wait.assert_called_once()
|
||||
|
||||
set_config.assert_not_called()
|
||||
config = default_config()
|
||||
config.update({"exposure": 100}) # in ms
|
||||
base_configure.assert_called_once_with(config)
|
||||
|
||||
|
||||
def test_gfcam_set_acquisition_mode_invalid(gfcam_base):
|
||||
"""Test setting invalid acquisition mode"""
|
||||
with pytest.raises(RuntimeError) as excinfo:
|
||||
gfcam_base.set_acquisition_mode("invalid_mode")
|
||||
|
||||
excinfo.match("Unsupported acquisition mode: invalid_mode")
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"mode_soft, mode_external, mode_always, expected_result",
|
||||
[
|
||||
(0, 0, 0, None), # No enable mode set
|
||||
(1, 0, 0, "soft"), # Only soft mode enabled
|
||||
(0, 1, 0, "external"), # Only external mode enabled
|
||||
(1, 1, 0, "soft+ext"), # Both soft and external enabled
|
||||
(0, 0, 1, "always"), # Always mode enabled
|
||||
(1, 0, 1, "always"), # Always overrides soft
|
||||
(0, 1, 1, "always"), # Always overrides external
|
||||
(1, 1, 1, "always"), # Always overrides both soft and external
|
||||
],
|
||||
)
|
||||
def test_gfcam_enable_mode_property(
|
||||
gfcam_base, mode_soft, mode_external, mode_always, expected_result
|
||||
):
|
||||
"""Test that the enable_mode property returns the correct mode based on signal values"""
|
||||
# Configure the mock return values for the mode signals
|
||||
gfcam_base.mode_endbl_soft.get.return_value = mode_soft
|
||||
gfcam_base.mode_enbl_ext.get.return_value = mode_external
|
||||
gfcam_base.mode_enbl_auto.get.return_value = mode_always
|
||||
|
||||
# Check that the property returns the expected result
|
||||
assert gfcam_base.enable_mode == expected_result
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"mode,expected_settings",
|
||||
[
|
||||
("soft", {"mode_enbl_ext": 0, "mode_endbl_soft": 1, "mode_enbl_auto": 0}),
|
||||
("external", {"mode_enbl_ext": 1, "mode_endbl_soft": 0, "mode_enbl_auto": 0}),
|
||||
("soft+ext", {"mode_enbl_ext": 1, "mode_endbl_soft": 1, "mode_enbl_auto": 0}),
|
||||
("always", {"mode_enbl_ext": 0, "mode_endbl_soft": 0, "mode_enbl_auto": 1}),
|
||||
],
|
||||
)
|
||||
def test_gfcam_enable_mode_setter(gfcam_base, mode, expected_settings):
|
||||
"""Test setting the enable mode of the GigaFRoST camera"""
|
||||
# Mock the const.gf_valid_enable_modes to avoid importing the constants
|
||||
with mock.patch(
|
||||
"tomcat_bec.devices.gigafrost.gigafrostcamera.const.gf_valid_enable_modes",
|
||||
["soft", "external", "soft+ext", "always"],
|
||||
):
|
||||
# Set the enable mode
|
||||
gfcam_base.enable_mode = mode
|
||||
|
||||
# Verify the correct signals were set
|
||||
gfcam_base.mode_enbl_ext.set.assert_called_once_with(expected_settings["mode_enbl_ext"])
|
||||
gfcam_base.mode_endbl_soft.set.assert_called_once_with(expected_settings["mode_endbl_soft"])
|
||||
gfcam_base.mode_enbl_auto.set.assert_called_once_with(expected_settings["mode_enbl_auto"])
|
||||
|
||||
# Verify wait was called on each set operation
|
||||
gfcam_base.mode_enbl_ext.set().wait.assert_called_once()
|
||||
gfcam_base.mode_endbl_soft.set().wait.assert_called_once()
|
||||
gfcam_base.mode_enbl_auto.set().wait.assert_called_once()
|
||||
|
||||
# Verify parameters were committed
|
||||
gfcam_base.set_param.set.assert_called_once_with(1)
|
||||
gfcam_base.set_param.set().wait.assert_called_once()
|
||||
|
||||
|
||||
def test_gfcam_enable_mode_setter_invalid(gfcam_base):
|
||||
"""Test setting an invalid enable mode raises an error"""
|
||||
# Mock the const.gf_valid_enable_modes to avoid importing the constants
|
||||
with mock.patch(
|
||||
"tomcat_bec.devices.gigafrost.gigafrostcamera.const.gf_valid_enable_modes",
|
||||
["soft", "external", "soft+ext", "always"],
|
||||
):
|
||||
with pytest.raises(ValueError) as excinfo:
|
||||
gfcam_base.enable_mode = "invalid_mode"
|
||||
|
||||
assert "Invalid enable mode invalid_mode!" in str(excinfo.value)
|
||||
assert "Valid modes are:" in str(excinfo.value)
|
||||
|
||||
# Verify no signals were set
|
||||
gfcam_base.mode_enbl_ext.set.assert_not_called()
|
||||
gfcam_base.mode_endbl_soft.set.assert_not_called()
|
||||
gfcam_base.mode_enbl_auto.set.assert_not_called()
|
||||
gfcam_base.set_param.set.assert_not_called()
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"mode_auto, mode_soft, mode_timer, mode_external, expected_result",
|
||||
[
|
||||
(0, 0, 0, 0, None), # No trigger mode set
|
||||
(1, 0, 0, 0, "auto"), # Only auto mode enabled
|
||||
(0, 1, 0, 0, "soft"), # Only soft mode enabled
|
||||
(0, 0, 1, 0, "timer"), # Only timer mode enabled
|
||||
(0, 0, 0, 1, "external"), # Only external mode enabled
|
||||
(1, 1, 0, 0, "auto"), # Auto takes precedence over soft
|
||||
(1, 0, 1, 0, "auto"), # Auto takes precedence over timer
|
||||
(1, 0, 0, 1, "auto"), # Auto takes precedence over external
|
||||
(0, 1, 1, 0, "soft"), # Soft takes precedence over timer
|
||||
(0, 1, 0, 1, "soft"), # Soft takes precedence over external
|
||||
(0, 0, 1, 1, "timer"), # Timer takes precedence over external
|
||||
(1, 1, 1, 1, "auto"), # Auto takes precedence over all
|
||||
],
|
||||
)
|
||||
def test_gfcam_trigger_mode_property(
|
||||
gfcam_base, mode_auto, mode_soft, mode_timer, mode_external, expected_result
|
||||
):
|
||||
"""Test that the trigger_mode property returns the correct mode based on signal values"""
|
||||
# Configure the mock return values for the mode signals
|
||||
gfcam_base.mode_trig_auto.get.return_value = mode_auto
|
||||
gfcam_base.mode_trig_soft.get.return_value = mode_soft
|
||||
gfcam_base.mode_trig_timer.get.return_value = mode_timer
|
||||
gfcam_base.mode_trig_ext.get.return_value = mode_external
|
||||
|
||||
# Check that the property returns the expected result
|
||||
assert gfcam_base.trigger_mode == expected_result
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"mode,expected_settings",
|
||||
[
|
||||
(
|
||||
"auto",
|
||||
{"mode_trig_auto": 1, "mode_trig_soft": 0, "mode_trig_timer": 0, "mode_trig_ext": 0},
|
||||
),
|
||||
(
|
||||
"soft",
|
||||
{"mode_trig_auto": 0, "mode_trig_soft": 1, "mode_trig_timer": 0, "mode_trig_ext": 0},
|
||||
),
|
||||
(
|
||||
"timer",
|
||||
{"mode_trig_auto": 0, "mode_trig_soft": 0, "mode_trig_timer": 1, "mode_trig_ext": 0},
|
||||
),
|
||||
(
|
||||
"external",
|
||||
{"mode_trig_auto": 0, "mode_trig_soft": 0, "mode_trig_timer": 0, "mode_trig_ext": 1},
|
||||
),
|
||||
],
|
||||
)
|
||||
def test_gfcam_trigger_mode_setter(gfcam_base, mode, expected_settings):
|
||||
"""Test setting the trigger mode of the GigaFRoST camera"""
|
||||
# Set the trigger mode
|
||||
gfcam_base.trigger_mode = mode
|
||||
|
||||
# Verify the correct signals were set
|
||||
gfcam_base.mode_trig_auto.set.assert_called_with(expected_settings["mode_trig_auto"])
|
||||
gfcam_base.mode_trig_soft.set.assert_called_with(expected_settings["mode_trig_soft"])
|
||||
gfcam_base.mode_trig_timer.set.assert_called_with(expected_settings["mode_trig_timer"])
|
||||
gfcam_base.mode_trig_ext.set.assert_called_with(expected_settings["mode_trig_ext"])
|
||||
|
||||
# Verify wait was called on each set operation
|
||||
gfcam_base.mode_trig_auto.set().wait.assert_called_once()
|
||||
gfcam_base.mode_trig_soft.set().wait.assert_called_once()
|
||||
gfcam_base.mode_trig_timer.set().wait.assert_called_once()
|
||||
gfcam_base.mode_trig_ext.set().wait.assert_called_once()
|
||||
|
||||
# Verify parameters were committed
|
||||
gfcam_base.set_param.set.assert_called_once_with(1)
|
||||
gfcam_base.set_param.set().wait.assert_called_once()
|
||||
|
||||
|
||||
def test_gfcam_trigger_mode_setter_invalid(gfcam_base):
|
||||
"""Test setting an invalid trigger mode raises an error"""
|
||||
with pytest.raises(ValueError) as excinfo:
|
||||
gfcam_base.trigger_mode = "invalid_mode"
|
||||
|
||||
assert "Invalid trigger mode!" in str(excinfo.value)
|
||||
assert "Valid modes are: ['auto', 'external', 'timer', 'soft']" in str(excinfo.value)
|
||||
|
||||
# Verify no signals were set
|
||||
gfcam_base.mode_trig_auto.set.assert_not_called()
|
||||
gfcam_base.mode_trig_soft.set.assert_not_called()
|
||||
gfcam_base.mode_trig_timer.set.assert_not_called()
|
||||
gfcam_base.mode_trig_ext.set.assert_not_called()
|
||||
gfcam_base.set_param.set.assert_not_called()
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"start_bit, end_bit, expected_result",
|
||||
[
|
||||
(0, 0, "off"), # Both bits off
|
||||
(1, 0, "start"), # Only start bit on
|
||||
(0, 1, "end"), # Only end bit on
|
||||
(1, 1, "start+end"), # Both bits on
|
||||
],
|
||||
)
|
||||
def test_gfcam_fix_nframes_mode_property(gfcam_base, start_bit, end_bit, expected_result):
|
||||
"""Test that the fix_nframes_mode property returns the correct mode based on bit values"""
|
||||
# Configure the mock return values for the bits
|
||||
gfcam_base.cnt_startbit.get.return_value = start_bit
|
||||
# Note: The original code has a bug here - it calls cnt_startbit.get() twice instead of cnt_endbit.get()
|
||||
# For testing purposes, we'll mock both appropriately
|
||||
gfcam_base.cnt_endbit.get.return_value = end_bit
|
||||
|
||||
# Check that the property returns the expected result
|
||||
assert gfcam_base.fix_nframes_mode == expected_result
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"mode, expected_settings",
|
||||
[
|
||||
("off", {"cnt_startbit": 0, "cnt_endbit": 0}),
|
||||
("start", {"cnt_startbit": 1, "cnt_endbit": 0}),
|
||||
("end", {"cnt_startbit": 0, "cnt_endbit": 1}),
|
||||
("start+end", {"cnt_startbit": 1, "cnt_endbit": 1}),
|
||||
],
|
||||
)
|
||||
def test_gfcam_fix_nframes_mode_setter(gfcam_base, mode, expected_settings):
|
||||
"""Test setting the fixed number of frames mode of the GigaFRoST camera"""
|
||||
# Mock the const.gf_valid_fix_nframe_modes to avoid importing the constants
|
||||
with mock.patch(
|
||||
"tomcat_bec.devices.gigafrost.gigafrostcamera.const.gf_valid_fix_nframe_modes",
|
||||
["off", "start", "end", "start+end"],
|
||||
):
|
||||
# Set the mode
|
||||
gfcam_base.fix_nframes_mode = mode
|
||||
|
||||
# Verify the class attribute was set
|
||||
assert gfcam_base._fix_nframes_mode == mode
|
||||
|
||||
# Verify the correct signals were set
|
||||
gfcam_base.cnt_startbit.set.assert_called_once_with(expected_settings["cnt_startbit"])
|
||||
gfcam_base.cnt_endbit.set.assert_called_once_with(expected_settings["cnt_endbit"])
|
||||
|
||||
# Verify wait was called on each set operation
|
||||
gfcam_base.cnt_startbit.set().wait.assert_called_once()
|
||||
gfcam_base.cnt_endbit.set().wait.assert_called_once()
|
||||
|
||||
# Verify parameters were committed
|
||||
gfcam_base.set_param.set.assert_called_once_with(1)
|
||||
gfcam_base.set_param.set().wait.assert_called_once()
|
||||
|
||||
|
||||
def test_gfcam_fix_nframes_mode_setter_invalid(gfcam_base):
|
||||
"""Test setting an invalid fixed number of frames mode raises an error"""
|
||||
# Mock the const.gf_valid_fix_nframe_modes to avoid importing the constants
|
||||
with mock.patch(
|
||||
"tomcat_bec.devices.gigafrost.gigafrostcamera.const.gf_valid_fix_nframe_modes",
|
||||
["off", "start", "end", "start+end"],
|
||||
):
|
||||
with pytest.raises(ValueError) as excinfo:
|
||||
gfcam_base.fix_nframes_mode = "invalid_mode"
|
||||
|
||||
assert "Invalid fixed frame number mode!" in str(excinfo.value)
|
||||
assert "Valid modes are:" in str(excinfo.value)
|
||||
|
||||
# Verify no signals were set
|
||||
gfcam_base.cnt_startbit.set.assert_not_called()
|
||||
gfcam_base.cnt_endbit.set.assert_not_called()
|
||||
gfcam_base.set_param.set.assert_not_called()
|
||||
25
tests/tests_devices/test_pcocam.py
Normal file
25
tests/tests_devices/test_pcocam.py
Normal file
@@ -0,0 +1,25 @@
|
||||
from unittest import mock
|
||||
|
||||
import pytest
|
||||
|
||||
from tomcat_bec.devices.pco_edge.pcoedgecamera import PcoEdge5M
|
||||
|
||||
|
||||
@pytest.fixture()
|
||||
def pcocam_base():
|
||||
gfcam = PcoEdge5M(
|
||||
"X02DA-CAM-GF2:",
|
||||
name="pco_edge_camera",
|
||||
std_daq_rest="http://example.com/rest",
|
||||
std_daq_ws="ws://example.com/ws",
|
||||
)
|
||||
for component in gfcam.component_names:
|
||||
type.__setattr__(PcoEdge5M, component, mock.MagicMock())
|
||||
|
||||
yield gfcam
|
||||
|
||||
|
||||
def test_pcocam_init_raises_without_rest_ws():
|
||||
with pytest.raises(ValueError) as excinfo:
|
||||
PcoEdge5M("X02DA-CAM-GF2:", name="pco_edge_camera")
|
||||
excinfo.match("std_daq_rest and std_daq_ws must be provided")
|
||||
150
tests/tests_devices/test_std_daq_live_processing.py
Normal file
150
tests/tests_devices/test_std_daq_live_processing.py
Normal file
@@ -0,0 +1,150 @@
|
||||
from unittest import mock
|
||||
|
||||
import fakeredis
|
||||
import h5py
|
||||
import numpy as np
|
||||
import pytest
|
||||
from bec_lib.redis_connector import RedisConnector
|
||||
from ophyd_devices.interfaces.base_classes.psi_device_base import PSIDeviceBase
|
||||
from typeguard import TypeCheckError
|
||||
|
||||
from tomcat_bec.devices.std_daq.std_daq_live_processing import StdDaqLiveProcessing
|
||||
|
||||
|
||||
def fake_redis_server(host, port):
|
||||
redis = fakeredis.FakeRedis()
|
||||
return redis
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def connected_connector():
|
||||
connector = RedisConnector("localhost:1", redis_cls=fake_redis_server) # type: ignore
|
||||
connector._redis_conn.flushall()
|
||||
try:
|
||||
yield connector
|
||||
finally:
|
||||
connector.shutdown()
|
||||
|
||||
|
||||
class MockPSIDeviceBase(PSIDeviceBase):
|
||||
def __init__(self, *args, device_manager=None, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.device_manager = device_manager
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_device(connected_connector):
|
||||
device_manager = mock.Mock()
|
||||
device_manager.connector = connected_connector
|
||||
device = MockPSIDeviceBase(name="mock_device", device_manager=device_manager)
|
||||
yield device
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def std_daq_live_processing(mock_device):
|
||||
signal = mock.Mock()
|
||||
signal2 = mock.Mock()
|
||||
live_processing = StdDaqLiveProcessing(mock_device, signal, signal2)
|
||||
yield live_processing
|
||||
|
||||
|
||||
def test_std_daq_live_processing_set_mode(std_daq_live_processing):
|
||||
|
||||
std_daq_live_processing.set_mode("sum")
|
||||
assert std_daq_live_processing.get_mode() == "sum"
|
||||
with pytest.raises(TypeCheckError):
|
||||
std_daq_live_processing.set_mode("average")
|
||||
with pytest.raises(TypeCheckError):
|
||||
std_daq_live_processing.set_mode(123)
|
||||
|
||||
|
||||
@pytest.fixture(params=["flat", "dark"])
|
||||
def reference_type(request):
|
||||
return request.param
|
||||
|
||||
|
||||
def test_std_daq_live_processing_flat_default(std_daq_live_processing, reference_type):
|
||||
with mock.patch.object(
|
||||
std_daq_live_processing, "_get_from_redis", return_value=None
|
||||
) as mock_get_from_redis:
|
||||
get_method = (
|
||||
std_daq_live_processing.get_flat
|
||||
if reference_type == "flat"
|
||||
else std_daq_live_processing.get_dark
|
||||
)
|
||||
out = get_method((100, 100))
|
||||
mock_get_from_redis.assert_called_once_with(
|
||||
std_daq_live_processing._redis_endpoint_name(ref_type=reference_type, shape=(100, 100))
|
||||
)
|
||||
assert isinstance(out, np.ndarray)
|
||||
assert out.shape == (100, 100)
|
||||
if reference_type == "flat":
|
||||
assert np.all(out == 1), "Default should be all ones"
|
||||
else:
|
||||
assert np.all(out == 0), "Default should be all zeros"
|
||||
|
||||
|
||||
@pytest.mark.parametrize("value", [np.random.rand(100, 100), np.random.rand(3, 100, 100)])
|
||||
def test_std_daq_live_processing_fetch(tmp_path, std_daq_live_processing, value, reference_type):
|
||||
|
||||
with h5py.File(tmp_path / "test_data.h5", "w") as f:
|
||||
f.create_dataset("tomcat-pco/data", data=value)
|
||||
|
||||
status = std_daq_live_processing.update_reference_with_file(
|
||||
reference_type, tmp_path / "test_data.h5", "tomcat-pco/data"
|
||||
)
|
||||
status.wait()
|
||||
|
||||
get_method = (
|
||||
std_daq_live_processing.get_flat
|
||||
if reference_type == "flat"
|
||||
else std_daq_live_processing.get_dark
|
||||
)
|
||||
|
||||
out = get_method((100, 100))
|
||||
assert isinstance(out, np.ndarray)
|
||||
assert out.shape == (100, 100)
|
||||
|
||||
# Check that the data is cached locally
|
||||
assert np.array_equal(
|
||||
std_daq_live_processing.references[f"{reference_type}_(100, 100)"], out
|
||||
), "Cached flat data should match fetched data"
|
||||
|
||||
redis_data = std_daq_live_processing._get_from_redis(
|
||||
std_daq_live_processing._redis_endpoint_name(ref_type=reference_type, shape=(100, 100))
|
||||
)
|
||||
assert isinstance(redis_data, np.ndarray)
|
||||
assert redis_data.shape == (100, 100)
|
||||
assert np.array_equal(redis_data, out), "Redis data should match the locally cached data"
|
||||
|
||||
|
||||
def test_std_daq_live_processing_apply_flat_dark_correction(std_daq_live_processing):
|
||||
# Create a mock image
|
||||
image = np.random.rand(100, 100)
|
||||
|
||||
# Set flat and dark references
|
||||
std_daq_live_processing.references["flat_(100, 100)"] = np.ones((100, 100))
|
||||
std_daq_live_processing.references["dark_(100, 100)"] = np.zeros((100, 100))
|
||||
|
||||
# Apply flat and dark correction
|
||||
corrected_image = std_daq_live_processing.apply_flat_dark_correction(image)
|
||||
assert isinstance(corrected_image, np.ndarray)
|
||||
assert corrected_image.shape == (100, 100)
|
||||
assert np.all(corrected_image >= 0), "Corrected image should not have negative values"
|
||||
|
||||
|
||||
def test_std_daq_live_processing_apply_flat_dark_correction_with_dark(std_daq_live_processing):
|
||||
# Create a mock image
|
||||
image = np.random.rand(100, 100) * 1000 # Scale to simulate a realistic image
|
||||
|
||||
dark = np.random.rand(100, 100) * 100 # Simulate a dark reference
|
||||
image += dark # Add dark to the image to simulate a realistic scenario
|
||||
|
||||
# Set flat and dark references
|
||||
std_daq_live_processing.references["flat_(100, 100)"] = np.ones((100, 100))
|
||||
std_daq_live_processing.references["dark_(100, 100)"] = dark
|
||||
# Apply flat and dark correction
|
||||
corrected_image = std_daq_live_processing.apply_flat_dark_correction(image)
|
||||
assert isinstance(corrected_image, np.ndarray)
|
||||
assert corrected_image.shape == (100, 100)
|
||||
assert np.all(corrected_image >= 0), "Corrected image should not have negative values"
|
||||
@@ -1,353 +1,361 @@
|
||||
# import json
|
||||
# from unittest import mock
|
||||
|
||||
# import pytest
|
||||
# import requests
|
||||
# import requests_mock
|
||||
# import typeguard
|
||||
# from ophyd import StatusBase
|
||||
# from websockets import WebSocketException
|
||||
|
||||
# from tomcat_bec.devices.gigafrost.std_daq_client import StdDaqClient, StdDaqError, StdDaqStatus
|
||||
|
||||
|
||||
# @pytest.fixture
|
||||
# def client():
|
||||
# parent_device = mock.MagicMock()
|
||||
# _client = StdDaqClient(
|
||||
# parent=parent_device, ws_url="ws://localhost:5001", rest_url="http://localhost:5000"
|
||||
# )
|
||||
# yield _client
|
||||
# _client.shutdown()
|
||||
|
||||
|
||||
# @pytest.fixture
|
||||
# def full_config():
|
||||
# full_config = dict(
|
||||
# detector_name="tomcat-gf",
|
||||
# detector_type="gigafrost",
|
||||
# n_modules=8,
|
||||
# bit_depth=16,
|
||||
# image_pixel_height=2016,
|
||||
# image_pixel_width=2016,
|
||||
# start_udp_port=2000,
|
||||
# writer_user_id=18600,
|
||||
# max_number_of_forwarders_spawned=8,
|
||||
# use_all_forwarders=True,
|
||||
# module_sync_queue_size=4096,
|
||||
# number_of_writers=12,
|
||||
# module_positions={},
|
||||
# ram_buffer_gb=150,
|
||||
# delay_filter_timeout=10,
|
||||
# live_stream_configs={
|
||||
# "tcp://129.129.95.111:20000": {"type": "periodic", "config": [1, 5]},
|
||||
# "tcp://129.129.95.111:20001": {"type": "periodic", "config": [1, 5]},
|
||||
# "tcp://129.129.95.38:20000": {"type": "periodic", "config": [1, 1]},
|
||||
# },
|
||||
# )
|
||||
# return full_config
|
||||
|
||||
|
||||
# def test_stddaq_client(client):
|
||||
# assert client is not None
|
||||
|
||||
|
||||
# def test_stddaq_client_get_daq_config(client, full_config):
|
||||
# with requests_mock.Mocker() as m:
|
||||
# response = full_config
|
||||
# m.get("http://localhost:5000/api/config/get?user=ioc", json=response.model_dump())
|
||||
# out = client.get_config()
|
||||
|
||||
# # Check that the response is simply the json response
|
||||
# assert out == response.model_dump()
|
||||
|
||||
# assert client._config == response
|
||||
|
||||
|
||||
# def test_stddaq_client_set_config_pydantic(client, full_config):
|
||||
# """Test setting configurations through the StdDAQ client"""
|
||||
# with requests_mock.Mocker() as m:
|
||||
# m.post("http://localhost:5000/api/config/set?user=ioc")
|
||||
|
||||
# # Test with StdDaqConfig object
|
||||
# config = full_config
|
||||
# with mock.patch.object(client, "_pre_restart"), mock.patch.object(client, "_post_restart"):
|
||||
# client.set_config(config)
|
||||
|
||||
# # Verify the last request
|
||||
# assert m.last_request.json() == full_config.model_dump()
|
||||
|
||||
|
||||
# def test_std_daq_client_set_config_dict(client, full_config):
|
||||
# """
|
||||
# Test setting configurations through the StdDAQ client with a dictionary input.
|
||||
# """
|
||||
import json
|
||||
from unittest import mock
|
||||
|
||||
import pytest
|
||||
import requests
|
||||
import requests_mock
|
||||
import typeguard
|
||||
from ophyd import StatusBase
|
||||
from websockets import WebSocketException
|
||||
|
||||
from tomcat_bec.devices.std_daq.std_daq_client import (
|
||||
StdDaqClient,
|
||||
StdDaqConfig,
|
||||
StdDaqError,
|
||||
StdDaqStatus,
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def client():
|
||||
parent_device = mock.MagicMock()
|
||||
_client = StdDaqClient(
|
||||
parent=parent_device, ws_url="http://localhost:5000", rest_url="http://localhost:5000"
|
||||
)
|
||||
yield _client
|
||||
_client.shutdown()
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def full_config():
|
||||
full_config = StdDaqConfig(
|
||||
detector_name="tomcat-gf",
|
||||
detector_type="gigafrost",
|
||||
n_modules=8,
|
||||
bit_depth=16,
|
||||
image_pixel_height=2016,
|
||||
image_pixel_width=2016,
|
||||
start_udp_port=2000,
|
||||
writer_user_id=18600,
|
||||
max_number_of_forwarders_spawned=8,
|
||||
use_all_forwarders=True,
|
||||
module_sync_queue_size=4096,
|
||||
number_of_writers=12,
|
||||
module_positions={},
|
||||
ram_buffer_gb=150,
|
||||
delay_filter_timeout=10,
|
||||
live_stream_configs={
|
||||
"tcp://129.129.95.111:20000": {"type": "periodic", "config": [1, 5]},
|
||||
"tcp://129.129.95.111:20001": {"type": "periodic", "config": [1, 5]},
|
||||
"tcp://129.129.95.38:20000": {"type": "periodic", "config": [1, 1]},
|
||||
},
|
||||
)
|
||||
return full_config
|
||||
|
||||
|
||||
def test_stddaq_client(client):
|
||||
assert client is not None
|
||||
|
||||
|
||||
def test_stddaq_client_get_daq_config(client, full_config):
|
||||
with requests_mock.Mocker() as m:
|
||||
response = full_config
|
||||
m.get(
|
||||
"http://localhost:5000/api/config/get?user=ioc",
|
||||
json=response.model_dump(exclude_defaults=True),
|
||||
)
|
||||
out = client.get_config()
|
||||
|
||||
# Check that the response is simply the json response
|
||||
assert out == response.model_dump(exclude_defaults=True)
|
||||
|
||||
assert client._config == response
|
||||
|
||||
|
||||
def test_stddaq_client_set_config_pydantic(client, full_config):
|
||||
"""Test setting configurations through the StdDAQ client"""
|
||||
with requests_mock.Mocker() as m:
|
||||
m.post("http://localhost:5000/api/config/set?user=ioc")
|
||||
|
||||
# Test with StdDaqConfig object
|
||||
config = full_config
|
||||
with mock.patch.object(client, "_pre_restart"), mock.patch.object(client, "_post_restart"):
|
||||
client.set_config(config)
|
||||
|
||||
# Verify the last request
|
||||
assert m.last_request.json() == full_config.model_dump(exclude_defaults=True)
|
||||
|
||||
|
||||
def test_std_daq_client_set_config_dict(client, full_config):
|
||||
"""
|
||||
Test setting configurations through the StdDAQ client with a dictionary input.
|
||||
"""
|
||||
|
||||
with requests_mock.Mocker() as m:
|
||||
m.post("http://localhost:5000/api/config/set?user=ioc")
|
||||
|
||||
# Test with dictionary input
|
||||
config_dict = full_config.model_dump()
|
||||
with mock.patch.object(client, "_pre_restart"), mock.patch.object(client, "_post_restart"):
|
||||
client.set_config(config_dict)
|
||||
assert m.last_request.json() == full_config.model_dump(exclude_defaults=True)
|
||||
|
||||
|
||||
def test_stddaq_client_set_config_ignores_extra_keys(client, full_config):
|
||||
"""
|
||||
Test that the set_config method ignores extra keys in the input dictionary.
|
||||
"""
|
||||
|
||||
with requests_mock.Mocker() as m:
|
||||
m.post("http://localhost:5000/api/config/set?user=ioc")
|
||||
|
||||
# Test with dictionary input
|
||||
config_dict = full_config.model_dump()
|
||||
config_dict["extra_key"] = "extra_value"
|
||||
with mock.patch.object(client, "_pre_restart"), mock.patch.object(client, "_post_restart"):
|
||||
client.set_config(config_dict)
|
||||
assert m.last_request.json() == full_config.model_dump(exclude_defaults=True)
|
||||
|
||||
|
||||
def test_stddaq_client_set_config_error(client, full_config):
|
||||
"""
|
||||
Test error handling in the set_config method.
|
||||
"""
|
||||
with requests_mock.Mocker() as m:
|
||||
config = full_config
|
||||
m.post("http://localhost:5000/api/config/set?user=ioc", status_code=500)
|
||||
with mock.patch.object(client, "_pre_restart"), mock.patch.object(client, "_post_restart"):
|
||||
with pytest.raises(requests.exceptions.HTTPError):
|
||||
client.set_config(config)
|
||||
|
||||
# with requests_mock.Mocker() as m:
|
||||
# m.post("http://localhost:5000/api/config/set?user=ioc")
|
||||
|
||||
# # Test with dictionary input
|
||||
# config_dict = full_config.model_dump()
|
||||
# with mock.patch.object(client, "_pre_restart"), mock.patch.object(client, "_post_restart"):
|
||||
# client.set_config(config_dict)
|
||||
# assert m.last_request.json() == full_config.model_dump()
|
||||
|
||||
def test_stddaq_client_get_config_cached(client, full_config):
|
||||
"""
|
||||
Test that the client returns the cached configuration if it is available.
|
||||
"""
|
||||
|
||||
# def test_stddaq_client_set_config_ignores_extra_keys(client, full_config):
|
||||
# """
|
||||
# Test that the set_config method ignores extra keys in the input dictionary.
|
||||
# """
|
||||
|
||||
# with requests_mock.Mocker() as m:
|
||||
# m.post("http://localhost:5000/api/config/set?user=ioc")
|
||||
# Set the cached configuration
|
||||
config = full_config
|
||||
client._config = config
|
||||
|
||||
# # Test with dictionary input
|
||||
# config_dict = full_config.model_dump()
|
||||
# config_dict["extra_key"] = "extra_value"
|
||||
# with mock.patch.object(client, "_pre_restart"), mock.patch.object(client, "_post_restart"):
|
||||
# client.set_config(config_dict)
|
||||
# assert m.last_request.json() == full_config.model_dump()
|
||||
# Test that the client returns the cached configuration
|
||||
assert client.get_config(cached=True) == config
|
||||
|
||||
|
||||
# def test_stddaq_client_set_config_error(client, full_config):
|
||||
# """
|
||||
# Test error handling in the set_config method.
|
||||
# """
|
||||
# with requests_mock.Mocker() as m:
|
||||
# config = full_config
|
||||
# m.post("http://localhost:5000/api/config/set?user=ioc", status_code=500)
|
||||
# with mock.patch.object(client, "_pre_restart"), mock.patch.object(client, "_post_restart"):
|
||||
# with pytest.raises(requests.exceptions.HTTPError):
|
||||
# client.set_config(config)
|
||||
def test_stddaq_client_status(client):
|
||||
client._status = StdDaqStatus.FILE_CREATED
|
||||
assert client.status == StdDaqStatus.FILE_CREATED
|
||||
|
||||
|
||||
# def test_stddaq_client_get_config_cached(client, full_config):
|
||||
# """
|
||||
# Test that the client returns the cached configuration if it is available.
|
||||
# """
|
||||
def test_stddaq_client_start(client):
|
||||
|
||||
# # Set the cached configuration
|
||||
# config = full_config
|
||||
# client._config = config
|
||||
with mock.patch("tomcat_bec.devices.std_daq.std_daq_client.StatusBase") as StatusBase:
|
||||
client.start(file_path="test_file_path", file_prefix="test_file_prefix", num_images=10)
|
||||
out = client._send_queue.get()
|
||||
assert out == {
|
||||
"command": "start",
|
||||
"path": "test_file_path",
|
||||
"file_prefix": "test_file_prefix",
|
||||
"n_image": 10,
|
||||
}
|
||||
StatusBase().wait.assert_called_once()
|
||||
|
||||
# # Test that the client returns the cached configuration
|
||||
# assert client.get_config(cached=True) == config
|
||||
|
||||
def test_stddaq_client_start_type_error(client):
|
||||
with pytest.raises(typeguard.TypeCheckError):
|
||||
client.start(file_path="test_file_path", file_prefix="test_file_prefix", num_images="10")
|
||||
|
||||
# def test_stddaq_client_status(client):
|
||||
# client._status = StdDaqStatus.FILE_CREATED
|
||||
# assert client.status == StdDaqStatus.FILE_CREATED
|
||||
|
||||
def test_stddaq_client_stop(client):
|
||||
"""
|
||||
Check that the stop method puts the stop command in the send queue.
|
||||
"""
|
||||
client.stop()
|
||||
client._send_queue.get() == {"command": "stop"}
|
||||
|
||||
# def test_stddaq_client_start(client):
|
||||
|
||||
# with mock.patch("tomcat_bec.devices.gigafrost.std_daq_client.StatusBase") as StatusBase:
|
||||
# client.start(file_path="test_file_path", file_prefix="test_file_prefix", num_images=10)
|
||||
# out = client._send_queue.get()
|
||||
# assert out == {
|
||||
# "command": "start",
|
||||
# "path": "test_file_path",
|
||||
# "file_prefix": "test_file_prefix",
|
||||
# "n_image": 10,
|
||||
# }
|
||||
# StatusBase().wait.assert_called_once()
|
||||
def test_stddaq_client_update_config(client, full_config):
|
||||
"""
|
||||
Test that the update_config method updates the configuration with the provided dictionary.
|
||||
"""
|
||||
|
||||
config = full_config
|
||||
with requests_mock.Mocker() as m:
|
||||
m.get("http://localhost:5000/api/config/get?user=ioc", json=config.model_dump())
|
||||
|
||||
# def test_stddaq_client_start_type_error(client):
|
||||
# with pytest.raises(typeguard.TypeCheckError):
|
||||
# client.start(file_path="test_file_path", file_prefix="test_file_prefix", num_images="10")
|
||||
# Update the configuration
|
||||
update_dict = {"detector_name": "new_name"}
|
||||
with mock.patch.object(client, "set_config") as set_config:
|
||||
client.update_config(update_dict)
|
||||
|
||||
assert set_config.call_count == 1
|
||||
|
||||
# def test_stddaq_client_stop(client):
|
||||
# """
|
||||
# Check that the stop method puts the stop command in the send queue.
|
||||
# """
|
||||
# client.stop()
|
||||
# client._send_queue.get() == {"command": "stop"}
|
||||
|
||||
|
||||
# def test_stddaq_client_update_config(client, full_config):
|
||||
# """
|
||||
# Test that the update_config method updates the configuration with the provided dictionary.
|
||||
# """
|
||||
|
||||
# config = full_config
|
||||
# with requests_mock.Mocker() as m:
|
||||
# m.get("http://localhost:5000/api/config/get?user=ioc", json=config.model_dump())
|
||||
|
||||
# # Update the configuration
|
||||
# update_dict = {"detector_name": "new_name"}
|
||||
# with mock.patch.object(client, "set_config") as set_config:
|
||||
# client.update_config(update_dict)
|
||||
|
||||
# assert set_config.call_count == 1
|
||||
|
||||
|
||||
# def test_stddaq_client_updates_only_changed_configs(client, full_config):
|
||||
# """
|
||||
# Test that the update_config method only updates the configuration if the config has changed.
|
||||
# """
|
||||
|
||||
# config = full_config
|
||||
# with requests_mock.Mocker() as m:
|
||||
# m.get("http://localhost:5000/api/config/get?user=ioc", json=config.model_dump())
|
||||
|
||||
# # Update the configuration
|
||||
# update_dict = {"detector_name": "tomcat-gf"}
|
||||
# with mock.patch.object(client, "set_config") as set_config:
|
||||
# client.update_config(update_dict)
|
||||
|
||||
# assert set_config.call_count == 0
|
||||
|
||||
|
||||
# def test_stddaq_client_updates_only_changed_configs_empty(client, full_config):
|
||||
# """
|
||||
# Test that the update_config method only updates the configuration if the config has changed.
|
||||
# """
|
||||
|
||||
# config = full_config
|
||||
# with requests_mock.Mocker() as m:
|
||||
# m.get("http://localhost:5000/api/config/get?user=ioc", json=config.model_dump())
|
||||
|
||||
# # Update the configuration
|
||||
# update_dict = {}
|
||||
# with mock.patch.object(client, "set_config") as set_config:
|
||||
# client.update_config(update_dict)
|
||||
|
||||
# assert set_config.call_count == 0
|
||||
|
||||
|
||||
# def test_stddaq_client_pre_restart(client):
|
||||
# """
|
||||
# Test that the pre_restart method sets the status to RESTARTING.
|
||||
# """
|
||||
# # let's assume the websocket loop is already idle
|
||||
# client._ws_idle_event.set()
|
||||
# client.ws_client = mock.MagicMock()
|
||||
# client._pre_restart()
|
||||
# client.ws_client.close.assert_called_once()
|
||||
|
||||
|
||||
# def test_stddaq_client_post_restart(client):
|
||||
# """
|
||||
# Test that the post_restart method sets the status to IDLE.
|
||||
# """
|
||||
# with mock.patch.object(client, "wait_for_connection") as wait_for_connection:
|
||||
# client._post_restart()
|
||||
# wait_for_connection.assert_called_once()
|
||||
# assert client._daq_is_running.is_set()
|
||||
|
||||
|
||||
# def test_stddaq_client_reset(client):
|
||||
# """
|
||||
# Test that the reset method calls get_config and set_config.
|
||||
# """
|
||||
# with (
|
||||
# mock.patch.object(client, "get_config") as get_config,
|
||||
# mock.patch.object(client, "set_config") as set_config,
|
||||
# ):
|
||||
# client.reset()
|
||||
# get_config.assert_called_once()
|
||||
# set_config.assert_called_once()
|
||||
|
||||
|
||||
# def test_stddaq_client_run_status_callbacks(client):
|
||||
# """
|
||||
# Test that the run_status_callback method runs the status callback.
|
||||
# """
|
||||
# status = StatusBase()
|
||||
# client.add_status_callback(status, success=[StdDaqStatus.FILE_CREATED], error=[])
|
||||
# client._status = StdDaqStatus.FILE_CREATED
|
||||
# client._run_status_callbacks()
|
||||
# status.wait()
|
||||
|
||||
# assert len(status._callbacks) == 0
|
||||
|
||||
|
||||
# def test_stddaq_client_run_status_callbacks_error(client):
|
||||
# """
|
||||
# Test that the run_status_callback method runs the status callback.
|
||||
# """
|
||||
# status = StatusBase()
|
||||
# client.add_status_callback(status, success=[], error=[StdDaqStatus.FILE_CREATED])
|
||||
# client._status = StdDaqStatus.FILE_CREATED
|
||||
# client._run_status_callbacks()
|
||||
# with pytest.raises(StdDaqError):
|
||||
# status.wait()
|
||||
|
||||
# assert len(status._callbacks) == 0
|
||||
|
||||
|
||||
# @pytest.mark.parametrize(
|
||||
# "msg, updated",
|
||||
# [({"status": "IDLE"}, False), (json.dumps({"status": "waiting_for_first_image"}), True)],
|
||||
# )
|
||||
# def test_stddaq_client_on_received_ws_message(client, msg, updated):
|
||||
# """
|
||||
# Test that the on_received_ws_message method runs the status callback.
|
||||
# """
|
||||
# client._status = None
|
||||
# with mock.patch.object(client, "_run_status_callbacks") as run_status_callbacks:
|
||||
# client._on_received_ws_message(msg)
|
||||
# if updated:
|
||||
# run_status_callbacks.assert_called_once()
|
||||
# assert client._status == StdDaqStatus.WAITING_FOR_FIRST_IMAGE
|
||||
# else:
|
||||
# run_status_callbacks.assert_not_called()
|
||||
# assert client._status is None
|
||||
|
||||
|
||||
# def test_stddaq_client_ws_send_and_receive(client):
|
||||
|
||||
# client.ws_client = mock.MagicMock()
|
||||
# client._send_queue.put({"command": "test"})
|
||||
# client._ws_send_and_receive()
|
||||
# # queue is not empty, so we should send the message
|
||||
# client.ws_client.send.assert_called_once()
|
||||
# client.ws_client.recv.assert_called_once()
|
||||
|
||||
# client.ws_client.reset_mock()
|
||||
# client._ws_send_and_receive()
|
||||
# # queue is empty, so we should not send the message
|
||||
# client.ws_client.send.assert_not_called()
|
||||
# client.ws_client.recv.assert_called_once()
|
||||
|
||||
|
||||
# def test_stddaq_client_ws_send_and_receive_websocket_error(client):
|
||||
# """
|
||||
# Test that the ws_send_and_receive method handles websocket errors.
|
||||
# """
|
||||
# client.ws_client = mock.MagicMock()
|
||||
# client.ws_client.send.side_effect = WebSocketException()
|
||||
# client._send_queue.put({"command": "test"})
|
||||
# with mock.patch.object(client, "wait_for_connection") as wait_for_connection:
|
||||
# client._ws_send_and_receive()
|
||||
# wait_for_connection.assert_called_once()
|
||||
|
||||
|
||||
# def test_stddaq_client_ws_send_and_receive_timeout_error(client):
|
||||
# """
|
||||
# Test that the ws_send_and_receive method handles timeout errors.
|
||||
# """
|
||||
# client.ws_client = mock.MagicMock()
|
||||
# client.ws_client.recv.side_effect = TimeoutError()
|
||||
# client._send_queue.put({"command": "test"})
|
||||
# with mock.patch.object(client, "wait_for_connection") as wait_for_connection:
|
||||
# client._ws_send_and_receive()
|
||||
# wait_for_connection.assert_not_called()
|
||||
|
||||
|
||||
# def test_stddaq_client_ws_update_loop(client):
|
||||
# """
|
||||
# Test that the ws_update_loop method runs the status callback.
|
||||
# """
|
||||
# client._shutdown_event = mock.MagicMock()
|
||||
# client._shutdown_event.is_set.side_effect = [False, True]
|
||||
# with (
|
||||
# mock.patch.object(client, "_ws_send_and_receive") as ws_send_and_receive,
|
||||
# mock.patch.object(client, "_wait_for_server_running") as wait_for_server_running,
|
||||
# ):
|
||||
# client._ws_update_loop()
|
||||
|
||||
# ws_send_and_receive.assert_called_once()
|
||||
# wait_for_server_running.assert_called_once()
|
||||
def test_stddaq_client_updates_only_changed_configs(client, full_config):
|
||||
"""
|
||||
Test that the update_config method only updates the configuration if the config has changed.
|
||||
"""
|
||||
|
||||
config = full_config
|
||||
with requests_mock.Mocker() as m:
|
||||
m.get("http://localhost:5000/api/config/get?user=ioc", json=config.model_dump())
|
||||
|
||||
# Update the configuration
|
||||
update_dict = {"detector_name": "tomcat-gf"}
|
||||
with mock.patch.object(client, "set_config") as set_config:
|
||||
client.update_config(update_dict)
|
||||
|
||||
assert set_config.call_count == 0
|
||||
|
||||
|
||||
def test_stddaq_client_updates_only_changed_configs_empty(client, full_config):
|
||||
"""
|
||||
Test that the update_config method only updates the configuration if the config has changed.
|
||||
"""
|
||||
|
||||
config = full_config
|
||||
with requests_mock.Mocker() as m:
|
||||
m.get("http://localhost:5000/api/config/get?user=ioc", json=config.model_dump())
|
||||
|
||||
# Update the configuration
|
||||
update_dict = {}
|
||||
with mock.patch.object(client, "set_config") as set_config:
|
||||
client.update_config(update_dict)
|
||||
|
||||
assert set_config.call_count == 0
|
||||
|
||||
|
||||
def test_stddaq_client_pre_restart(client):
|
||||
"""
|
||||
Test that the pre_restart method sets the status to RESTARTING.
|
||||
"""
|
||||
# let's assume the websocket loop is already idle
|
||||
client._ws_idle_event.set()
|
||||
client.ws_client = mock.MagicMock()
|
||||
client._pre_restart()
|
||||
client.ws_client.close.assert_called_once()
|
||||
|
||||
|
||||
def test_stddaq_client_post_restart(client):
|
||||
"""
|
||||
Test that the post_restart method sets the status to IDLE.
|
||||
"""
|
||||
with mock.patch.object(client, "wait_for_connection") as wait_for_connection:
|
||||
client._post_restart()
|
||||
wait_for_connection.assert_called_once()
|
||||
assert client._daq_is_running.is_set()
|
||||
|
||||
|
||||
def test_stddaq_client_reset(client):
|
||||
"""
|
||||
Test that the reset method calls get_config and set_config.
|
||||
"""
|
||||
with (
|
||||
mock.patch.object(client, "get_config") as get_config,
|
||||
mock.patch.object(client, "set_config") as set_config,
|
||||
):
|
||||
client.reset()
|
||||
get_config.assert_called_once()
|
||||
set_config.assert_called_once()
|
||||
|
||||
|
||||
def test_stddaq_client_run_status_callbacks(client):
|
||||
"""
|
||||
Test that the run_status_callback method runs the status callback.
|
||||
"""
|
||||
status = StatusBase()
|
||||
client.add_status_callback(status, success=[StdDaqStatus.FILE_CREATED], error=[])
|
||||
client._status = StdDaqStatus.FILE_CREATED
|
||||
client._run_status_callbacks()
|
||||
status.wait()
|
||||
|
||||
assert len(status._callbacks) == 0
|
||||
|
||||
|
||||
def test_stddaq_client_run_status_callbacks_error(client):
|
||||
"""
|
||||
Test that the run_status_callback method runs the status callback.
|
||||
"""
|
||||
status = StatusBase()
|
||||
client.add_status_callback(status, success=[], error=[StdDaqStatus.FILE_CREATED])
|
||||
client._status = StdDaqStatus.FILE_CREATED
|
||||
client._run_status_callbacks()
|
||||
with pytest.raises(StdDaqError):
|
||||
status.wait()
|
||||
|
||||
assert len(status._callbacks) == 0
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"msg, updated",
|
||||
[({"status": "IDLE"}, False), (json.dumps({"status": "waiting_for_first_image"}), True)],
|
||||
)
|
||||
def test_stddaq_client_on_received_ws_message(client, msg, updated):
|
||||
"""
|
||||
Test that the on_received_ws_message method runs the status callback.
|
||||
"""
|
||||
client._status = None
|
||||
with mock.patch.object(client, "_run_status_callbacks") as run_status_callbacks:
|
||||
client._on_received_ws_message(msg)
|
||||
if updated:
|
||||
run_status_callbacks.assert_called_once()
|
||||
assert client._status == StdDaqStatus.WAITING_FOR_FIRST_IMAGE
|
||||
else:
|
||||
run_status_callbacks.assert_not_called()
|
||||
assert client._status is None
|
||||
|
||||
|
||||
def test_stddaq_client_ws_send_and_receive(client):
|
||||
|
||||
client.ws_client = mock.MagicMock()
|
||||
client._send_queue.put({"command": "test"})
|
||||
client._ws_send_and_receive()
|
||||
# queue is not empty, so we should send the message
|
||||
client.ws_client.send.assert_called_once()
|
||||
client.ws_client.recv.assert_called_once()
|
||||
|
||||
client.ws_client.reset_mock()
|
||||
client._ws_send_and_receive()
|
||||
# queue is empty, so we should not send the message
|
||||
client.ws_client.send.assert_not_called()
|
||||
client.ws_client.recv.assert_called_once()
|
||||
|
||||
|
||||
def test_stddaq_client_ws_send_and_receive_websocket_error(client):
|
||||
"""
|
||||
Test that the ws_send_and_receive method handles websocket errors.
|
||||
"""
|
||||
client.ws_client = mock.MagicMock()
|
||||
client.ws_client.send.side_effect = WebSocketException()
|
||||
client._send_queue.put({"command": "test"})
|
||||
with mock.patch.object(client, "wait_for_connection") as wait_for_connection:
|
||||
client._ws_send_and_receive()
|
||||
wait_for_connection.assert_called_once()
|
||||
|
||||
|
||||
def test_stddaq_client_ws_send_and_receive_timeout_error(client):
|
||||
"""
|
||||
Test that the ws_send_and_receive method handles timeout errors.
|
||||
"""
|
||||
client.ws_client = mock.MagicMock()
|
||||
client.ws_client.recv.side_effect = TimeoutError()
|
||||
client._send_queue.put({"command": "test"})
|
||||
with mock.patch.object(client, "wait_for_connection") as wait_for_connection:
|
||||
client._ws_send_and_receive()
|
||||
wait_for_connection.assert_not_called()
|
||||
|
||||
|
||||
def test_stddaq_client_ws_update_loop(client):
|
||||
"""
|
||||
Test that the ws_update_loop method runs the status callback.
|
||||
"""
|
||||
client._shutdown_event = mock.MagicMock()
|
||||
client._shutdown_event.is_set.side_effect = [False, True]
|
||||
with (
|
||||
mock.patch.object(client, "_ws_send_and_receive") as ws_send_and_receive,
|
||||
mock.patch.object(client, "_wait_for_server_running") as wait_for_server_running,
|
||||
):
|
||||
client._ws_update_loop()
|
||||
|
||||
ws_send_and_receive.assert_called_once()
|
||||
wait_for_server_running.assert_called_once()
|
||||
|
||||
@@ -40,7 +40,7 @@ femto_mean_curr:
|
||||
|
||||
es1_roty:
|
||||
readoutPriority: monitored
|
||||
description: 'Test rotation stage'
|
||||
description: "Test rotation stage"
|
||||
deviceClass: ophyd.EpicsMotor
|
||||
deviceConfig:
|
||||
prefix: X02DA-ES1-SMP1:ROTY
|
||||
@@ -49,11 +49,11 @@ es1_roty:
|
||||
onFailure: buffer
|
||||
enabled: true
|
||||
readOnly: false
|
||||
softwareTrigger: false
|
||||
softwareTrigger: false
|
||||
|
||||
es1_trx:
|
||||
readoutPriority: monitored
|
||||
description: 'Test translation stage'
|
||||
description: "Test translation stage"
|
||||
deviceClass: ophyd.EpicsMotor
|
||||
deviceConfig:
|
||||
prefix: X02DA-ES1-SMP1:TRX
|
||||
@@ -62,15 +62,15 @@ es1_trx:
|
||||
onFailure: buffer
|
||||
enabled: true
|
||||
readOnly: false
|
||||
softwareTrigger: false
|
||||
softwareTrigger: false
|
||||
|
||||
es1_ismc:
|
||||
description: 'Automation1 iSMC interface'
|
||||
deviceClass: tomcat_bec.devices.aa1Controller
|
||||
deviceConfig:
|
||||
prefix: 'X02DA-ES1-SMP1:CTRL:'
|
||||
description: "Automation1 iSMC interface"
|
||||
deviceClass: tomcat_bec.devices.aa1Controller
|
||||
deviceConfig:
|
||||
prefix: "X02DA-ES1-SMP1:CTRL:"
|
||||
deviceTags:
|
||||
- es1
|
||||
- es1
|
||||
enabled: true
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
@@ -78,24 +78,23 @@ es1_ismc:
|
||||
softwareTrigger: false
|
||||
|
||||
es1_tasks:
|
||||
description: 'Automation1 task management interface'
|
||||
description: "Automation1 task management interface"
|
||||
deviceClass: tomcat_bec.devices.aa1Tasks
|
||||
deviceConfig:
|
||||
prefix: 'X02DA-ES1-SMP1:TASK:'
|
||||
prefix: "X02DA-ES1-SMP1:TASK:"
|
||||
deviceTags:
|
||||
- es1
|
||||
- es1
|
||||
enabled: false
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: monitored
|
||||
softwareTrigger: false
|
||||
|
||||
|
||||
es1_psod:
|
||||
description: 'AA1 PSO output interface (trigger)'
|
||||
deviceClass: tomcat_bec.devices.aa1AxisPsoDistance
|
||||
description: "AA1 PSO output interface (trigger)"
|
||||
deviceClass: tomcat_bec.devices.aa1AxisPsoDistance
|
||||
deviceConfig:
|
||||
prefix: 'X02DA-ES1-SMP1:ROTY:PSO:'
|
||||
prefix: "X02DA-ES1-SMP1:ROTY:PSO:"
|
||||
deviceTags:
|
||||
- es1
|
||||
enabled: true
|
||||
@@ -104,12 +103,11 @@ es1_psod:
|
||||
readoutPriority: monitored
|
||||
softwareTrigger: false
|
||||
|
||||
|
||||
es1_ddaq:
|
||||
description: 'Automation1 position recording interface'
|
||||
description: "Automation1 position recording interface"
|
||||
deviceClass: tomcat_bec.devices.aa1AxisDriveDataCollection
|
||||
deviceConfig:
|
||||
prefix: 'X02DA-ES1-SMP1:ROTY:DDC:'
|
||||
prefix: "X02DA-ES1-SMP1:ROTY:DDC:"
|
||||
deviceTags:
|
||||
- es1
|
||||
enabled: true
|
||||
@@ -118,7 +116,6 @@ es1_ddaq:
|
||||
readoutPriority: monitored
|
||||
softwareTrigger: false
|
||||
|
||||
|
||||
#camera:
|
||||
# description: Grashopper Camera
|
||||
# deviceClass: tomcat_bec.devices.GrashopperTOMCAT
|
||||
@@ -132,7 +129,6 @@ es1_ddaq:
|
||||
# readoutPriority: monitored
|
||||
# softwareTrigger: true
|
||||
|
||||
|
||||
# gfcam:
|
||||
# description: GigaFrost camera client
|
||||
# deviceClass: tomcat_bec.devices.GigaFrostCamera
|
||||
@@ -150,27 +146,25 @@ es1_ddaq:
|
||||
# readoutPriority: monitored
|
||||
# softwareTrigger: true
|
||||
|
||||
|
||||
gfcam:
|
||||
description: GigaFrost camera client
|
||||
deviceClass: tomcat_bec.devices.GigaFrostCamera
|
||||
deviceConfig:
|
||||
prefix: 'X02DA-CAM-GF2:'
|
||||
backend_url: 'http://sls-daq-001:8080'
|
||||
prefix: "X02DA-CAM-GF2:"
|
||||
backend_url: "http://sls-daq-001:8080"
|
||||
auto_soft_enable: true
|
||||
std_daq_live: 'tcp://129.129.95.111:20000'
|
||||
std_daq_ws: 'ws://129.129.95.111:8080'
|
||||
std_daq_rest: 'http://129.129.95.111:5000'
|
||||
std_daq_live: "tcp://129.129.95.111:20000"
|
||||
std_daq_ws: "ws://129.129.95.111:8080"
|
||||
std_daq_rest: "http://129.129.95.111:5000"
|
||||
deviceTags:
|
||||
- camera
|
||||
- trigger
|
||||
- gfcam
|
||||
- camera
|
||||
- trigger
|
||||
- gfcam
|
||||
enabled: true
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: monitored
|
||||
softwareTrigger: true
|
||||
|
||||
# gfdaq:
|
||||
# description: GigaFrost stdDAQ client
|
||||
# deviceClass: tomcat_bec.devices.StdDaqClient
|
||||
@@ -201,7 +195,6 @@ gfcam:
|
||||
# readoutPriority: monitored
|
||||
# softwareTrigger: false
|
||||
|
||||
|
||||
# pcocam:
|
||||
# description: PCO.edge camera client
|
||||
# deviceClass: tomcat_bec.devices.PcoEdge5M
|
||||
@@ -217,24 +210,23 @@ gfcam:
|
||||
# readoutPriority: monitored
|
||||
# softwareTrigger: true
|
||||
|
||||
|
||||
pcocam:
|
||||
description: PCO.edge camera client
|
||||
deviceClass: tomcat_bec.devices.PcoEdge5M
|
||||
deviceConfig:
|
||||
prefix: 'X02DA-CCDCAM2:'
|
||||
std_daq_live: 'tcp://129.129.95.111:20010'
|
||||
std_daq_ws: 'ws://129.129.95.111:8081'
|
||||
std_daq_rest: 'http://129.129.95.111:5010'
|
||||
deviceTags:
|
||||
- camera
|
||||
- trigger
|
||||
- pcocam
|
||||
enabled: true
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: monitored
|
||||
softwareTrigger: true
|
||||
# pcocam:
|
||||
# description: PCO.edge camera client
|
||||
# deviceClass: tomcat_bec.devices.PcoEdge5M
|
||||
# deviceConfig:
|
||||
# prefix: 'X02DA-CCDCAM2:'
|
||||
# std_daq_live: 'tcp://129.129.95.111:20010'
|
||||
# std_daq_ws: 'ws://129.129.95.111:8081'
|
||||
# std_daq_rest: 'http://129.129.95.111:5010'
|
||||
# deviceTags:
|
||||
# - camera
|
||||
# - trigger
|
||||
# - pcocam
|
||||
# enabled: true
|
||||
# onFailure: buffer
|
||||
# readOnly: false
|
||||
# readoutPriority: monitored
|
||||
# softwareTrigger: true
|
||||
|
||||
# pcodaq:
|
||||
# description: GigaFrost stdDAQ client
|
||||
|
||||
@@ -21,7 +21,7 @@ eyex:
|
||||
# onFailure: buffer
|
||||
# enabled: true
|
||||
# readOnly: false
|
||||
# softwareTrigger: false
|
||||
# softwareTrigger: false
|
||||
# eyez:
|
||||
# readoutPriority: baseline
|
||||
# description: X-ray eye axis Z
|
||||
@@ -50,7 +50,7 @@ femto_mean_curr:
|
||||
|
||||
es1_roty:
|
||||
readoutPriority: baseline
|
||||
description: 'Test rotation stage'
|
||||
description: "Test rotation stage"
|
||||
deviceClass: tomcat_bec.devices.psimotor.EpicsMotorMR
|
||||
deviceConfig:
|
||||
prefix: X02DA-ES1-SMP1:ROTY
|
||||
@@ -59,15 +59,15 @@ es1_roty:
|
||||
onFailure: buffer
|
||||
enabled: false
|
||||
readOnly: false
|
||||
softwareTrigger: false
|
||||
softwareTrigger: false
|
||||
|
||||
es1_ismc:
|
||||
description: 'Automation1 iSMC interface'
|
||||
deviceClass: tomcat_bec.devices.aa1Controller
|
||||
deviceConfig:
|
||||
prefix: 'X02DA-ES1-SMP1:CTRL:'
|
||||
description: "Automation1 iSMC interface"
|
||||
deviceClass: tomcat_bec.devices.aa1Controller
|
||||
deviceConfig:
|
||||
prefix: "X02DA-ES1-SMP1:CTRL:"
|
||||
deviceTags:
|
||||
- es1
|
||||
- es1
|
||||
enabled: true
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
@@ -75,47 +75,44 @@ es1_ismc:
|
||||
softwareTrigger: false
|
||||
|
||||
es1_tasks:
|
||||
description: 'Automation1 task management interface'
|
||||
deviceClass: tomcat_bec.devices.aa1Tasks
|
||||
deviceConfig:
|
||||
prefix: 'X02DA-ES1-SMP1:TASK:'
|
||||
description: "Automation1 task management interface"
|
||||
deviceClass: tomcat_bec.devices.aa1Tasks
|
||||
deviceConfig:
|
||||
prefix: "X02DA-ES1-SMP1:TASK:"
|
||||
deviceTags:
|
||||
- es1
|
||||
- es1
|
||||
enabled: false
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: monitored
|
||||
softwareTrigger: false
|
||||
|
||||
|
||||
es1_psod:
|
||||
description: 'AA1 PSO output interface (trigger)'
|
||||
deviceClass: tomcat_bec.devices.aa1AxisPsoDistance
|
||||
description: "AA1 PSO output interface (trigger)"
|
||||
deviceClass: tomcat_bec.devices.aa1AxisPsoDistance
|
||||
deviceConfig:
|
||||
prefix: 'X02DA-ES1-SMP1:ROTY:PSO:'
|
||||
prefix: "X02DA-ES1-SMP1:ROTY:PSO:"
|
||||
deviceTags:
|
||||
- es1
|
||||
- es1
|
||||
enabled: true
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: monitored
|
||||
softwareTrigger: true
|
||||
|
||||
|
||||
es1_ddaq:
|
||||
description: 'Automation1 position recording interface'
|
||||
deviceClass: tomcat_bec.devices.aa1AxisDriveDataCollection
|
||||
description: "Automation1 position recording interface"
|
||||
deviceClass: tomcat_bec.devices.aa1AxisDriveDataCollection
|
||||
deviceConfig:
|
||||
prefix: 'X02DA-ES1-SMP1:ROTY:DDC:'
|
||||
prefix: "X02DA-ES1-SMP1:ROTY:DDC:"
|
||||
deviceTags:
|
||||
- es1
|
||||
- es1
|
||||
enabled: false
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: monitored
|
||||
softwareTrigger: false
|
||||
|
||||
|
||||
#camera:
|
||||
# description: Grashopper Camera
|
||||
# deviceClass: tomcat_bec.devices.GrashopperTOMCAT
|
||||
@@ -133,54 +130,53 @@ gfcam:
|
||||
description: GigaFrost camera client
|
||||
deviceClass: tomcat_bec.devices.GigaFrostCamera
|
||||
deviceConfig:
|
||||
prefix: 'X02DA-CAM-GF2:'
|
||||
backend_url: 'http://sls-daq-001:8080'
|
||||
prefix: "X02DA-CAM-GF2:"
|
||||
backend_url: "http://sls-daq-001:8080"
|
||||
auto_soft_enable: true
|
||||
deviceTags:
|
||||
- camera
|
||||
- trigger
|
||||
- camera
|
||||
- trigger
|
||||
enabled: true
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: monitored
|
||||
softwareTrigger: true
|
||||
# gfdaq:
|
||||
# description: GigaFrost stdDAQ client
|
||||
# deviceClass: tomcat_bec.devices.StdDaqClient
|
||||
# deviceConfig:
|
||||
# ws_url: 'ws://129.129.95.111:8080'
|
||||
# rest_url: 'http://129.129.95.111:5000'
|
||||
# deviceTags:
|
||||
# - std-daq
|
||||
# enabled: true
|
||||
# onFailure: buffer
|
||||
# readOnly: false
|
||||
# readoutPriority: monitored
|
||||
# softwareTrigger: false
|
||||
|
||||
gfdaq:
|
||||
description: GigaFrost stdDAQ client
|
||||
deviceClass: tomcat_bec.devices.StdDaqClient
|
||||
deviceConfig:
|
||||
ws_url: 'ws://129.129.95.111:8080'
|
||||
rest_url: 'http://129.129.95.111:5000'
|
||||
deviceTags:
|
||||
- std-daq
|
||||
enabled: true
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: monitored
|
||||
softwareTrigger: false
|
||||
# daq_stream0:
|
||||
# description: stdDAQ preview (2 every 555)
|
||||
# deviceClass: tomcat_bec.devices.StdDaqPreviewDetector
|
||||
# deviceConfig:
|
||||
# url: 'tcp://129.129.95.111:20000'
|
||||
# deviceTags:
|
||||
# - std-daq
|
||||
# enabled: true
|
||||
# onFailure: buffer
|
||||
# readOnly: false
|
||||
# readoutPriority: monitored
|
||||
# softwareTrigger: false
|
||||
|
||||
daq_stream0:
|
||||
description: stdDAQ preview (2 every 555)
|
||||
deviceClass: tomcat_bec.devices.StdDaqPreviewDetector
|
||||
deviceConfig:
|
||||
url: 'tcp://129.129.95.111:20000'
|
||||
deviceTags:
|
||||
- std-daq
|
||||
enabled: true
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: monitored
|
||||
softwareTrigger: false
|
||||
|
||||
daq_stream1:
|
||||
description: stdDAQ preview (1 at 5 Hz)
|
||||
deviceClass: tomcat_bec.devices.StdDaqPreviewDetector
|
||||
deviceConfig:
|
||||
url: 'tcp://129.129.95.111:20001'
|
||||
deviceTags:
|
||||
- std-daq
|
||||
enabled: true
|
||||
onFailure: buffer
|
||||
readOnly: false
|
||||
readoutPriority: monitored
|
||||
softwareTrigger: false
|
||||
# daq_stream1:
|
||||
# description: stdDAQ preview (1 at 5 Hz)
|
||||
# deviceClass: tomcat_bec.devices.StdDaqPreviewDetector
|
||||
# deviceConfig:
|
||||
# url: 'tcp://129.129.95.111:20001'
|
||||
# deviceTags:
|
||||
# - std-daq
|
||||
# enabled: true
|
||||
# onFailure: buffer
|
||||
# readOnly: false
|
||||
# readoutPriority: monitored
|
||||
# softwareTrigger: false
|
||||
|
||||
@@ -7,8 +7,6 @@ from .aerotech import (
|
||||
aa1Tasks,
|
||||
)
|
||||
from .gigafrost.gigafrostcamera import GigaFrostCamera
|
||||
from .gigafrost.pcoedgecamera import PcoEdge5M
|
||||
from .gigafrost.stddaq_client import StdDaqClient
|
||||
from .gigafrost.stddaq_preview import StdDaqPreviewDetector
|
||||
from .grashopper_tomcat import GrashopperTOMCAT
|
||||
from .pco_edge.pcoedgecamera import PcoEdge5M
|
||||
from .psimotor import EpicsMotorEC, EpicsMotorMR
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
"""
|
||||
This module contains the PV definitions for the Gigafrost camera at Tomcat. It
|
||||
This module contains the PV definitions for the Gigafrost camera at Tomcat. It
|
||||
does not contain any logic to control the camera.
|
||||
"""
|
||||
|
||||
@@ -7,7 +7,57 @@ from ophyd import Component as Cpt
|
||||
from ophyd import Device, DynamicDeviceComponent, EpicsSignal, EpicsSignalRO, Kind, Signal
|
||||
|
||||
import tomcat_bec.devices.gigafrost.gfconstants as const
|
||||
from tomcat_bec.devices.gigafrost.gfutils import extend_header_table
|
||||
|
||||
|
||||
class GigaFrostSignalWithValidation(EpicsSignal):
|
||||
"""
|
||||
Custom EpicsSignal class that validates the value with the specified validator
|
||||
before setting the value.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
read_pv,
|
||||
write_pv=None,
|
||||
*,
|
||||
put_complete=False,
|
||||
string=False,
|
||||
limits=False,
|
||||
name=None,
|
||||
validator=None,
|
||||
**kwargs,
|
||||
):
|
||||
self._validator = validator
|
||||
super().__init__(
|
||||
read_pv,
|
||||
write_pv,
|
||||
put_complete=put_complete,
|
||||
string=string,
|
||||
limits=limits,
|
||||
name=name,
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
def check_value(self, value):
|
||||
if self._validator is not None:
|
||||
self._validator(value)
|
||||
return super().check_value(value)
|
||||
|
||||
|
||||
def check_image_width(value):
|
||||
"""
|
||||
The Gigafrost camera requires the image width to be a multiple of 48.
|
||||
"""
|
||||
if value % 48 != 0:
|
||||
raise ValueError("Image width must be a multiple of 48")
|
||||
|
||||
|
||||
def check_image_height(value):
|
||||
"""
|
||||
The Gigafrost camera requires the image height to be a multiple of 16.
|
||||
"""
|
||||
if value % 16 != 0:
|
||||
raise ValueError("Image height must be a multiple of 16")
|
||||
|
||||
|
||||
class GigaFrostBase(Device):
|
||||
@@ -36,51 +86,62 @@ class GigaFrostBase(Device):
|
||||
|
||||
# pylint: disable=too-many-instance-attributes
|
||||
|
||||
# Standard camera configs
|
||||
acquire = Cpt(EpicsSignal, "START_CAM", put_complete=True, kind=Kind.omitted)
|
||||
acquire_time = Cpt(
|
||||
EpicsSignal, "EXPOSURE", put_complete=True, auto_monitor=True, kind=Kind.config
|
||||
)
|
||||
acquire_period = Cpt(
|
||||
EpicsSignal, "FRAMERATE", put_complete=True, auto_monitor=True, kind=Kind.config
|
||||
)
|
||||
num_exposures = Cpt(
|
||||
EpicsSignal, "CNT_NUM", put_complete=True, auto_monitor=True, kind=Kind.config
|
||||
)
|
||||
busy_stat = Cpt(EpicsSignalRO, "BUSY_STAT", auto_monitor=True)
|
||||
sync_flag = Cpt(EpicsSignalRO, "SYNC_FLAG", auto_monitor=True)
|
||||
sync_swhw = Cpt(EpicsSignal, "SYNC_SWHW.PROC", put_complete=True, kind=Kind.omitted)
|
||||
start_cam = Cpt(EpicsSignal, "START_CAM", put_complete=True, kind=Kind.omitted)
|
||||
set_param = Cpt(EpicsSignal, "SET_PARAM.PROC", put_complete=True, kind=Kind.omitted)
|
||||
acqmode = Cpt(EpicsSignal, "ACQMODE", auto_monitor=True, put_complete=True, kind=Kind.config)
|
||||
|
||||
array_size = DynamicDeviceComponent(
|
||||
{
|
||||
"array_size_x": (EpicsSignal, "ROIX", {"auto_monitor": True, "put_complete": True}),
|
||||
"array_size_y": (EpicsSignal, "ROIY", {"auto_monitor": True, "put_complete": True}),
|
||||
"array_size_x": (EpicsSignalRO, "ROIX", {"auto_monitor": True}),
|
||||
"array_size_y": (EpicsSignalRO, "ROIY", {"auto_monitor": True}),
|
||||
},
|
||||
doc="Size of the array in the XY dimensions",
|
||||
)
|
||||
|
||||
# DAQ parameters
|
||||
file_path = Cpt(Signal, kind=Kind.config, value="/gpfs/test/test-beamline")
|
||||
file_prefix = Cpt(Signal, kind=Kind.config, value="scan_")
|
||||
num_images = Cpt(Signal, kind=Kind.config, value=1000)
|
||||
num_images_counter = Cpt(Signal, kind=Kind.hinted, value=0)
|
||||
# UDP header
|
||||
ports = Cpt(EpicsSignal, "PORTS", auto_monitor=True, put_complete=True, kind=Kind.config)
|
||||
framenum = Cpt(EpicsSignal, "FRAMENUM", auto_monitor=True, put_complete=True, kind=Kind.config)
|
||||
ht_offset = Cpt(
|
||||
EpicsSignal, "HT_OFFSET", auto_monitor=True, put_complete=True, kind=Kind.config
|
||||
)
|
||||
write_srv = Cpt(
|
||||
EpicsSignal, "WRITE_SRV.PROC", auto_monitor=True, put_complete=True, kind=Kind.omitted
|
||||
)
|
||||
|
||||
# GF specific interface
|
||||
acquire_block = Cpt(Signal, kind=Kind.config, value=0)
|
||||
busy_stat = Cpt(EpicsSignalRO, "BUSY_STAT", auto_monitor=True)
|
||||
sync_flag = Cpt(EpicsSignalRO, "SYNC_FLAG", auto_monitor=True)
|
||||
sync_swhw = Cpt(EpicsSignal, "SYNC_SWHW.PROC", put_complete=True, kind=Kind.omitted)
|
||||
set_param = Cpt(EpicsSignal, "SET_PARAM.PROC", put_complete=True, kind=Kind.omitted)
|
||||
acqmode = Cpt(EpicsSignal, "ACQMODE", put_complete=True, kind=Kind.config)
|
||||
# Standard camera configs
|
||||
exposure = Cpt(EpicsSignal, "EXPOSURE", put_complete=True, auto_monitor=True, kind=Kind.config)
|
||||
framerate = Cpt(
|
||||
EpicsSignal, "FRAMERATE", put_complete=True, auto_monitor=True, kind=Kind.config
|
||||
)
|
||||
roix = Cpt(
|
||||
GigaFrostSignalWithValidation,
|
||||
"ROIX",
|
||||
put_complete=True,
|
||||
auto_monitor=True,
|
||||
kind=Kind.config,
|
||||
validator=check_image_width,
|
||||
)
|
||||
roiy = Cpt(
|
||||
GigaFrostSignalWithValidation,
|
||||
"ROIY",
|
||||
put_complete=True,
|
||||
auto_monitor=True,
|
||||
kind=Kind.config,
|
||||
validator=check_image_height,
|
||||
)
|
||||
scan_id = Cpt(EpicsSignal, "SCAN_ID", put_complete=True, auto_monitor=True, kind=Kind.config)
|
||||
cnt_num = Cpt(EpicsSignal, "CNT_NUM", put_complete=True, auto_monitor=True, kind=Kind.config)
|
||||
corr_mode = Cpt(
|
||||
EpicsSignal, "CORR_MODE", put_complete=True, auto_monitor=True, kind=Kind.config
|
||||
)
|
||||
|
||||
# Software signals
|
||||
soft_enable = Cpt(EpicsSignal, "SOFT_ENABLE", put_complete=True)
|
||||
soft_enable = Cpt(EpicsSignal, "SOFT_ENABLE", put_complete=True, auto_monitor=True)
|
||||
soft_trig = Cpt(EpicsSignal, "SOFT_TRIG.PROC", put_complete=True, kind=Kind.omitted)
|
||||
soft_exp = Cpt(EpicsSignal, "SOFT_EXP", put_complete=True)
|
||||
|
||||
###############################################################################################
|
||||
# Automatically set modes on camera init
|
||||
auto_soft_enable = Cpt(Signal, kind=Kind.config, metadata={"write_access": False})
|
||||
soft_exp = Cpt(EpicsSignal, "SOFT_EXP", put_complete=True, auto_monitor=True)
|
||||
|
||||
###############################################################################################
|
||||
# Enable schemes
|
||||
@@ -89,6 +150,7 @@ class GigaFrostBase(Device):
|
||||
EpicsSignal,
|
||||
"MODE_ENBL_EXP_RBV",
|
||||
write_pv="MODE_ENBL_EXP",
|
||||
auto_monitor=True,
|
||||
put_complete=True,
|
||||
kind=Kind.config,
|
||||
)
|
||||
@@ -97,6 +159,7 @@ class GigaFrostBase(Device):
|
||||
EpicsSignal,
|
||||
"MODE_ENBL_EXT_RBV",
|
||||
write_pv="MODE_ENBL_EXT",
|
||||
auto_monitor=True,
|
||||
put_complete=True,
|
||||
kind=Kind.config,
|
||||
)
|
||||
@@ -104,6 +167,7 @@ class GigaFrostBase(Device):
|
||||
EpicsSignal,
|
||||
"MODE_ENBL_SOFT_RBV",
|
||||
write_pv="MODE_ENBL_SOFT",
|
||||
auto_monitor=True,
|
||||
put_complete=True,
|
||||
kind=Kind.config,
|
||||
)
|
||||
@@ -111,6 +175,7 @@ class GigaFrostBase(Device):
|
||||
EpicsSignal,
|
||||
"MODE_ENBL_AUTO_RBV",
|
||||
write_pv="MODE_ENBL_AUTO",
|
||||
auto_monitor=True,
|
||||
put_complete=True,
|
||||
kind=Kind.config,
|
||||
)
|
||||
@@ -121,6 +186,7 @@ class GigaFrostBase(Device):
|
||||
EpicsSignal,
|
||||
"MODE_TRIG_EXT_RBV",
|
||||
write_pv="MODE_TRIG_EXT",
|
||||
auto_monitor=True,
|
||||
put_complete=True,
|
||||
kind=Kind.config,
|
||||
)
|
||||
@@ -128,6 +194,7 @@ class GigaFrostBase(Device):
|
||||
EpicsSignal,
|
||||
"MODE_TRIG_SOFT_RBV",
|
||||
write_pv="MODE_TRIG_SOFT",
|
||||
auto_monitor=True,
|
||||
put_complete=True,
|
||||
kind=Kind.config,
|
||||
)
|
||||
@@ -135,6 +202,7 @@ class GigaFrostBase(Device):
|
||||
EpicsSignal,
|
||||
"MODE_TRIG_TIMER_RBV",
|
||||
write_pv="MODE_TRIG_TIMER",
|
||||
auto_monitor=True,
|
||||
put_complete=True,
|
||||
kind=Kind.config,
|
||||
)
|
||||
@@ -142,6 +210,7 @@ class GigaFrostBase(Device):
|
||||
EpicsSignal,
|
||||
"MODE_TRIG_AUTO_RBV",
|
||||
write_pv="MODE_TRIG_AUTO",
|
||||
auto_monitor=True,
|
||||
put_complete=True,
|
||||
kind=Kind.config,
|
||||
)
|
||||
@@ -153,6 +222,7 @@ class GigaFrostBase(Device):
|
||||
EpicsSignal,
|
||||
"MODE_EXP_EXT_RBV",
|
||||
write_pv="MODE_EXP_EXT",
|
||||
auto_monitor=True,
|
||||
put_complete=True,
|
||||
kind=Kind.config,
|
||||
)
|
||||
@@ -160,6 +230,7 @@ class GigaFrostBase(Device):
|
||||
EpicsSignal,
|
||||
"MODE_EXP_SOFT_RBV",
|
||||
write_pv="MODE_EXP_SOFT",
|
||||
auto_monitor=True,
|
||||
put_complete=True,
|
||||
kind=Kind.config,
|
||||
)
|
||||
@@ -167,6 +238,7 @@ class GigaFrostBase(Device):
|
||||
EpicsSignal,
|
||||
"MODE_EXP_TIMER_RBV",
|
||||
write_pv="MODE_EXP_TIMER",
|
||||
auto_monitor=True,
|
||||
put_complete=True,
|
||||
kind=Kind.config,
|
||||
)
|
||||
@@ -178,11 +250,31 @@ class GigaFrostBase(Device):
|
||||
EpicsSignal,
|
||||
"CNT_STARTBIT_RBV",
|
||||
write_pv="CNT_STARTBIT",
|
||||
auto_monitor=True,
|
||||
put_complete=True,
|
||||
kind=Kind.config,
|
||||
)
|
||||
cnt_endbit = Cpt(
|
||||
EpicsSignal, "CNT_ENDBIT_RBV", write_pv="CNT_ENDBIT", put_complete=True, kind=Kind.config
|
||||
EpicsSignal,
|
||||
"CNT_ENDBIT_RBV",
|
||||
write_pv="CNT_ENDBIT",
|
||||
auto_monitor=True,
|
||||
put_complete=True,
|
||||
kind=Kind.config,
|
||||
)
|
||||
|
||||
# Line swap selection
|
||||
ls_sw = Cpt(EpicsSignal, "LS_SW", put_complete=True, auto_monitor=True, kind=Kind.config)
|
||||
ls_nw = Cpt(EpicsSignal, "LS_NW", put_complete=True, auto_monitor=True, kind=Kind.config)
|
||||
ls_se = Cpt(EpicsSignal, "LS_SE", put_complete=True, auto_monitor=True, kind=Kind.config)
|
||||
ls_ne = Cpt(EpicsSignal, "LS_NE", put_complete=True, auto_monitor=True, kind=Kind.config)
|
||||
conn_parm = Cpt(
|
||||
EpicsSignal,
|
||||
"CONN_PARM",
|
||||
string=True,
|
||||
auto_monitor=True,
|
||||
put_complete=True,
|
||||
kind=Kind.config,
|
||||
)
|
||||
|
||||
# HW settings as read only
|
||||
@@ -197,121 +289,32 @@ class GigaFrostBase(Device):
|
||||
bnc5_rbv = Cpt(EpicsSignalRO, "BNC5_RBV", auto_monitor=True, kind=Kind.config)
|
||||
t_board = Cpt(EpicsSignalRO, "T_BOARD", auto_monitor=True)
|
||||
|
||||
### HW configuration parameters
|
||||
# TODO: Only used at INIT, signals not needed
|
||||
# UDP header configuration parameters
|
||||
auto_soft_enable = Cpt(Signal, kind=Kind.config)
|
||||
backend_url = Cpt(Signal, kind=Kind.config)
|
||||
mac_north = Cpt(Signal, kind=Kind.config)
|
||||
mac_south = Cpt(Signal, kind=Kind.config)
|
||||
ip_north = Cpt(Signal, kind=Kind.config)
|
||||
ip_south = Cpt(Signal, kind=Kind.config)
|
||||
udp_backend_url = Cpt(Signal, kind=Kind.config, metadata={"write_access": False})
|
||||
udp_ports = Cpt(EpicsSignal, "PORTS", put_complete=True, kind=Kind.config)
|
||||
udp_framenum = Cpt(EpicsSignal, "FRAMENUM", put_complete=True, kind=Kind.config)
|
||||
udp_ht_offset = Cpt(EpicsSignal, "HT_OFFSET", put_complete=True, kind=Kind.config)
|
||||
udp_write_srv = Cpt(EpicsSignal, "WRITE_SRV.PROC", put_complete=True, kind=Kind.omitted)
|
||||
conn_parm = Cpt(EpicsSignal, "CONN_PARM", string=True, put_complete=True, kind=Kind.config)
|
||||
|
||||
# Line swap selection
|
||||
ls_sw = Cpt(EpicsSignal, "LS_SW", put_complete=True, kind=Kind.config)
|
||||
ls_nw = Cpt(EpicsSignal, "LS_NW", put_complete=True, kind=Kind.config)
|
||||
ls_se = Cpt(EpicsSignal, "LS_SE", put_complete=True, kind=Kind.config)
|
||||
ls_ne = Cpt(EpicsSignal, "LS_NE", put_complete=True, kind=Kind.config)
|
||||
file_path = Cpt(Signal, kind=Kind.config, value="")
|
||||
file_prefix = Cpt(Signal, kind=Kind.config, value="")
|
||||
num_images = Cpt(Signal, kind=Kind.config, value=1)
|
||||
|
||||
# pylint: disable=protected-access
|
||||
def _define_backend_ip(self):
|
||||
"""Select backend IP address for UDP stream"""
|
||||
if self.udp_backend_url.get() == const.BE3_DAFL_CLIENT: # xbl-daq-33
|
||||
if self.backend_url.get() == const.BE3_DAFL_CLIENT: # xbl-daq-33
|
||||
return const.BE3_NORTH_IP, const.BE3_SOUTH_IP
|
||||
if self.udp_backend_url.get() == const.BE999_DAFL_CLIENT:
|
||||
if self.backend_url.get() == const.BE999_DAFL_CLIENT:
|
||||
return const.BE999_NORTH_IP, const.BE999_SOUTH_IP
|
||||
|
||||
raise RuntimeError(f"Backend {self.udp_backend_url.get()} not recognized.")
|
||||
raise RuntimeError(f"Backend {self.backend_url.get()} not recognized.")
|
||||
|
||||
def _define_backend_mac(self):
|
||||
"""Select backend MAC address for UDP stream"""
|
||||
if self.udp_backend_url.get() == const.BE3_DAFL_CLIENT: # xbl-daq-33
|
||||
if self.backend_url.get() == const.BE3_DAFL_CLIENT: # xbl-daq-33
|
||||
return const.BE3_NORTH_MAC, const.BE3_SOUTH_MAC
|
||||
if self.udp_backend_url.get() == const.BE999_DAFL_CLIENT:
|
||||
if self.backend_url.get() == const.BE999_DAFL_CLIENT:
|
||||
return const.BE999_NORTH_MAC, const.BE999_SOUTH_MAC
|
||||
|
||||
raise RuntimeError(f"Backend {self.udp_backend_url.get()} not recognized.")
|
||||
|
||||
def _build_udp_header_table(self):
|
||||
"""Build the header table for the UDP communication"""
|
||||
udp_header_table = []
|
||||
|
||||
for i in range(0, 64, 1):
|
||||
for j in range(0, 8, 1):
|
||||
dest_port = 2000 + 8 * i + j
|
||||
source_port = 3000 + j
|
||||
if j < 4:
|
||||
extend_header_table(
|
||||
udp_header_table,
|
||||
self.mac_south.get(),
|
||||
self.ip_south.get(),
|
||||
dest_port,
|
||||
source_port,
|
||||
)
|
||||
else:
|
||||
extend_header_table(
|
||||
udp_header_table,
|
||||
self.mac_north.get(),
|
||||
self.ip_north.get(),
|
||||
dest_port,
|
||||
source_port,
|
||||
)
|
||||
return udp_header_table
|
||||
|
||||
def initialize_gigafrost(self) -> None:
|
||||
"""Initialize the camera, set channel values"""
|
||||
# Stop acquisition
|
||||
self.acquire.set(0).wait()
|
||||
|
||||
# set entry to UDP table
|
||||
# number of UDP ports to use
|
||||
self.udp_ports.set(2).wait()
|
||||
# number of images to send to each UDP port before switching to next
|
||||
self.udp_framenum.set(5).wait()
|
||||
# offset in UDP table - where to find the first entry
|
||||
self.udp_ht_offset.set(0).wait()
|
||||
# activate changes
|
||||
self.udp_write_srv.set(1).wait()
|
||||
|
||||
# Configure triggering if needed
|
||||
if self.auto_soft_enable.get():
|
||||
# Set modes
|
||||
# self.fix_nframes_mode = "start"
|
||||
self.cnt_startbit.set(1).wait()
|
||||
self.cnt_endbit.set(0).wait()
|
||||
# self.enable_mode = "soft"
|
||||
self.mode_enbl_ext.set(0).wait()
|
||||
self.mode_endbl_soft.set(1).wait()
|
||||
self.mode_enbl_auto.set(0).wait()
|
||||
# self.trigger_mode = "auto"
|
||||
self.mode_trig_auto.set(1).wait()
|
||||
self.mode_trig_soft.set(0).wait()
|
||||
self.mode_trig_timer.set(0).wait()
|
||||
self.mode_trig_ext.set(0).wait()
|
||||
# self.exposure_mode = "timer"
|
||||
self.mode_exp_ext.set(0).wait()
|
||||
self.mode_exp_soft.set(0).wait()
|
||||
self.mode_exp_timer.set(1).wait()
|
||||
|
||||
# line swap - on for west, off for east
|
||||
self.ls_sw.set(1).wait()
|
||||
self.ls_nw.set(1).wait()
|
||||
self.ls_se.set(0).wait()
|
||||
self.ls_ne.set(0).wait()
|
||||
|
||||
# Commit parameters
|
||||
self.set_param.set(1).wait()
|
||||
|
||||
# Initialize data backend
|
||||
n, s = self._define_backend_ip()
|
||||
self.ip_north.put(n, force=True)
|
||||
self.ip_south.put(s, force=True)
|
||||
n, s = self._define_backend_mac()
|
||||
self.mac_north.put(n, force=True)
|
||||
self.mac_south.put(s, force=True)
|
||||
# Set udp header table (data communication parameters)
|
||||
self.conn_parm.set(self._build_udp_header_table()).wait()
|
||||
raise RuntimeError(f"Backend {self.backend_url.get()} not recognized.")
|
||||
|
||||
@@ -1,34 +1,36 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
GigaFrost camera class module
|
||||
from __future__ import annotations
|
||||
|
||||
Created on Thu Jun 27 17:28:43 2024
|
||||
import os
|
||||
from typing import TYPE_CHECKING, Literal, cast
|
||||
|
||||
@author: mohacsi_i
|
||||
"""
|
||||
from time import sleep, time
|
||||
import numpy as np
|
||||
from bec_lib.logger import bec_logger
|
||||
from ophyd import DeviceStatus
|
||||
from ophyd import Component as Cpt
|
||||
from ophyd import DeviceStatus, Kind, Signal, StatusBase
|
||||
from ophyd_devices.interfaces.base_classes.psi_device_base import PSIDeviceBase
|
||||
from ophyd_devices.utils.bec_signals import PreviewSignal, ProgressSignal
|
||||
|
||||
import tomcat_bec.devices.gigafrost.gfconstants as const
|
||||
from tomcat_bec.devices.gigafrost.gfutils import extend_header_table
|
||||
from tomcat_bec.devices.gigafrost.gigafrost_base import GigaFrostBase
|
||||
from tomcat_bec.devices.gigafrost.std_daq_preview import StdDaqPreview
|
||||
from tomcat_bec.devices.gigafrost.std_daq_client import StdDaqClient, StdDaqStatus
|
||||
|
||||
from tomcat_bec.devices.std_daq.std_daq_client import (
|
||||
StdDaqClient,
|
||||
StdDaqConfigPartial,
|
||||
StdDaqStatus,
|
||||
)
|
||||
from tomcat_bec.devices.std_daq.std_daq_live_processing import StdDaqLiveProcessing
|
||||
from tomcat_bec.devices.std_daq.std_daq_preview import StdDaqPreview
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from bec_lib.devicemanager import DeviceManagerBase
|
||||
logger = bec_logger.logger
|
||||
|
||||
|
||||
"""
|
||||
TBD:
|
||||
- Why is mode_enbl_exp not set during the enable_mode setter, only in the set_acquisition_mode?
|
||||
- Why is set_acquisition_mode a method and not a property?
|
||||
- When is a call to set_param necessary?
|
||||
- Which access pattern is more common, setting the signal directly or using the method / property?
|
||||
If the latter, we may want to change the inheritance structure to 'hide' the signals in a sub-component.
|
||||
"""
|
||||
def default_config() -> dict:
|
||||
"""
|
||||
Minimal configuration for the GigaFrost camera.
|
||||
"""
|
||||
return {"corr_mode": 5, "scan_id": 0} # default correction mode # default scan id
|
||||
|
||||
|
||||
class GigaFrostCamera(PSIDeviceBase, GigaFrostBase):
|
||||
@@ -57,7 +59,6 @@ class GigaFrostCamera(PSIDeviceBase, GigaFrostBase):
|
||||
|
||||
# pylint: disable=too-many-instance-attributes
|
||||
USER_ACCESS = [
|
||||
"complete",
|
||||
"exposure_mode",
|
||||
"fix_nframes_mode",
|
||||
"trigger_mode",
|
||||
@@ -65,13 +66,14 @@ class GigaFrostCamera(PSIDeviceBase, GigaFrostBase):
|
||||
"backend",
|
||||
"acq_done",
|
||||
"live_preview",
|
||||
"arm",
|
||||
"disarm",
|
||||
"live_processing",
|
||||
]
|
||||
_initialized = False
|
||||
|
||||
# Placeholders for stdDAQ and livestream clients
|
||||
backend = None
|
||||
live_preview = None
|
||||
analysis_signal = Cpt(Signal, name="analysis_signal", kind=Kind.hinted, doc="Analysis Signal")
|
||||
analysis_signal2 = Cpt(Signal, name="analysis_signal2", kind=Kind.hinted, doc="Analysis Signal")
|
||||
preview = Cpt(PreviewSignal, name="preview", ndim=2, doc="Preview signal of the gfcam")
|
||||
progress = Cpt(ProgressSignal, name="progress")
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
@@ -88,8 +90,14 @@ class GigaFrostCamera(PSIDeviceBase, GigaFrostBase):
|
||||
std_daq_rest: str | None = None,
|
||||
std_daq_ws: str | None = None,
|
||||
std_daq_live: str | None = None,
|
||||
device_manager: DeviceManagerBase | None = None,
|
||||
**kwargs,
|
||||
):
|
||||
self.device_manager = device_manager
|
||||
self._signals_to_be_set = {}
|
||||
self._signals_to_be_set["auto_soft_enable"] = auto_soft_enable
|
||||
self._signals_to_be_set["backend_url"] = backend_url
|
||||
|
||||
# super() will call the mixin class
|
||||
super().__init__(
|
||||
prefix=prefix,
|
||||
@@ -101,95 +109,83 @@ class GigaFrostCamera(PSIDeviceBase, GigaFrostBase):
|
||||
scan_info=scan_info,
|
||||
**kwargs,
|
||||
)
|
||||
# Configure the stdDAQ client
|
||||
if std_daq_rest is None or std_daq_ws is None:
|
||||
# raise ValueError("Both std_daq_rest and std_daq_ws must be provided")
|
||||
logger.error("No stdDAQ address provided, launching without data backend!")
|
||||
else:
|
||||
self.backend = StdDaqClient(parent=self, ws_url=std_daq_ws, rest_url=std_daq_rest)
|
||||
# Configure image preview
|
||||
raise ValueError("Both std_daq_rest and std_daq_ws must be provided")
|
||||
self.live_processing = StdDaqLiveProcessing(
|
||||
parent=self, signal=self.analysis_signal, signal2=self.analysis_signal2
|
||||
)
|
||||
self.backend = StdDaqClient(parent=self, ws_url=std_daq_ws, rest_url=std_daq_rest)
|
||||
self.backend.add_count_callback(self._on_count_update)
|
||||
self.live_preview = None
|
||||
self.acq_configs = {}
|
||||
if std_daq_live is not None:
|
||||
self.live_preview = StdDaqPreview(url=std_daq_live, cb=self._on_preview_update)
|
||||
else:
|
||||
logger.error("No stdDAQ stream address provided, launching without preview!")
|
||||
# Configure camera backend
|
||||
self.auto_soft_enable.put(auto_soft_enable, force=True)
|
||||
self.udp_backend_url.put(backend_url, force=True)
|
||||
|
||||
def configure(self, d: dict = None):
|
||||
def configure(self, d: dict | None = None):
|
||||
"""Configure the next scan with the GigaFRoST camera
|
||||
|
||||
Parameters as 'd' dictionary
|
||||
----------------------------
|
||||
num_exposures : int, optional
|
||||
Number of images to be taken during each scan. Set to -1 for unlimited
|
||||
number of images (limited by the ringbuffer size and backend speed).
|
||||
num_images : int, optional
|
||||
Number of images to be taken during each scan. Set to -1 for an
|
||||
unlimited number of images (limited by the ringbuffer size and
|
||||
backend speed). (default = 10)
|
||||
exposure_time_ms : float, optional
|
||||
Exposure time [ms].
|
||||
Exposure time [ms]. (default = 0.2)
|
||||
exposure_period_ms : float, optional
|
||||
Exposure period [ms], ignored in soft trigger mode.
|
||||
Exposure period [ms], ignored in soft trigger mode. (default = 1.0)
|
||||
image_width : int, optional
|
||||
ROI size in the x-direction [pixels] (max. 2016)
|
||||
ROI size in the x-direction [pixels] (default = 2016)
|
||||
image_height : int, optional
|
||||
ROI size in the y-direction [pixels] (max. 2016)
|
||||
ROI size in the y-direction [pixels] (default = 2016)
|
||||
scanid : int, optional
|
||||
Scan identification number to be associated with the scan data
|
||||
(default = 0)
|
||||
correction_mode : int, optional
|
||||
The correction to be applied to the imaging data. The following
|
||||
modes are available (default = 5):
|
||||
|
||||
* 0: Bypass. No corrections are applied to the data.
|
||||
* 1: Send correction factor A instead of pixel values
|
||||
* 2: Send correction factor B instead of pixel values
|
||||
* 3: Send correction factor C instead of pixel values
|
||||
* 4: Invert pixel values, but do not apply any linearity correction
|
||||
* 5: Apply the full linearity correction
|
||||
acq_mode : str, optional
|
||||
Select one of the pre-configured trigger behavior
|
||||
"""
|
||||
if d is None:
|
||||
return
|
||||
|
||||
# Stop acquisition
|
||||
self.disarm()
|
||||
self.stop_camera().wait(timeout=10)
|
||||
|
||||
# If Bluesky style configure
|
||||
if d:
|
||||
# Commonly changed settings
|
||||
if "exposure_num_burst" in d:
|
||||
self.num_exposures.set(d["exposure_num_burst"]).wait()
|
||||
if "num_exposures" in d:
|
||||
self.num_exposures.set(d["num_exposures"]).wait()
|
||||
if "exposure_time_ms" in d:
|
||||
self.acquire_time.set(d["exposure_time_ms"]).wait()
|
||||
if "exposure_period_ms" in d:
|
||||
self.acquire_period.set(d["exposure_period_ms"]).wait()
|
||||
if "image_width" in d:
|
||||
if d["image_width"] % 48 != 0:
|
||||
raise RuntimeError(f"[{self.name}] image_width must be divisible by 48")
|
||||
self.array_size.array_size_x.set(d["image_width"]).wait()
|
||||
if "image_height" in d:
|
||||
if d["image_height"] % 16 != 0:
|
||||
raise RuntimeError(f"[{self.name}] image_height must be divisible by 16")
|
||||
self.array_size.array_size_y.set(d["image_height"]).wait()
|
||||
backend_config = StdDaqConfigPartial(**d)
|
||||
self.backend.update_config(backend_config)
|
||||
|
||||
self.corr_mode.set(d.get("corr_mode", 5)).wait()
|
||||
self.scan_id.set(d.get("scan_id", 0)).wait()
|
||||
# Update all specified ophyd signals
|
||||
config = default_config()
|
||||
|
||||
# If a pre-configured acquisition mode is specified, set it
|
||||
if "acq_mode" in d:
|
||||
self.set_acquisition_mode(d["acq_mode"])
|
||||
for key in self.component_names:
|
||||
val = d.get(key)
|
||||
if val is not None:
|
||||
config[key] = val
|
||||
|
||||
# Commit parameters to GigaFrost
|
||||
if d.get("exp_time", 0) > 0:
|
||||
config["exposure"] = d["exp_time"] * 1000 # exposure time in ms
|
||||
|
||||
super().configure(config)
|
||||
|
||||
# If the acquisition mode is specified, set it
|
||||
if "acq_mode" in d:
|
||||
self.set_acquisition_mode(config["acq_mode"])
|
||||
|
||||
# Commit parameters
|
||||
self.set_param.set(1).wait()
|
||||
|
||||
# Backend stdDAQ configuration
|
||||
if d and self.backend is not None:
|
||||
daq_update = {}
|
||||
if "image_height" in d:
|
||||
daq_update["image_pixel_height"] = d["image_height"]
|
||||
if "image_width" in d:
|
||||
daq_update["image_pixel_width"] = d["image_width"]
|
||||
if "bit_depth" in d:
|
||||
daq_update["bit_depth"] = d["bit_depth"]
|
||||
if "number_of_writers" in d:
|
||||
daq_update["number_of_writers"] = d["number_of_writers"]
|
||||
|
||||
if daq_update:
|
||||
self.backend.set_config(daq_update, force=False)
|
||||
|
||||
def set_acquisition_mode(self, acq_mode):
|
||||
def set_acquisition_mode(
|
||||
self, acq_mode: Literal["default", "ext_enable", "soft", "ext", "external"]
|
||||
):
|
||||
"""Set acquisition mode
|
||||
|
||||
Utility function to quickly select between pre-configured and tested
|
||||
@@ -199,10 +195,11 @@ class GigaFrostCamera(PSIDeviceBase, GigaFrostBase):
|
||||
supplied signal. Use external enable instead, that works!
|
||||
"""
|
||||
|
||||
if acq_mode in ["default", "step"]:
|
||||
# NOTE: Software trigger via softEnable (actually works)
|
||||
if acq_mode == "default":
|
||||
# NOTE: Trigger using software events via softEnable (actually works)
|
||||
# Trigger parameters
|
||||
self.fix_nframes_mode = "start"
|
||||
|
||||
# Switch to physical enable signal
|
||||
self.mode_enbl_exp.set(0).wait()
|
||||
|
||||
@@ -270,34 +267,35 @@ class GigaFrostCamera(PSIDeviceBase, GigaFrostBase):
|
||||
return None
|
||||
|
||||
@exposure_mode.setter
|
||||
def exposure_mode(self, mode):
|
||||
def exposure_mode(self, exp_mode: Literal["external", "timer", "soft"]):
|
||||
"""Apply the exposure mode for the GigaFRoST camera.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
mode : {'external', 'timer', 'soft'}
|
||||
exp_mode : {'external', 'timer', 'soft'}
|
||||
The exposure mode to be set.
|
||||
"""
|
||||
if mode == "external":
|
||||
self.mode_exp_ext.set(1).wait()
|
||||
self.mode_exp_soft.set(0).wait()
|
||||
self.mode_exp_timer.set(0).wait()
|
||||
elif mode == "timer":
|
||||
self.mode_exp_ext.set(0).wait()
|
||||
self.mode_exp_soft.set(0).wait()
|
||||
self.mode_exp_timer.set(1).wait()
|
||||
elif mode == "soft":
|
||||
self.mode_exp_ext.set(0).wait()
|
||||
self.mode_exp_soft.set(1).wait()
|
||||
self.mode_exp_timer.set(0).wait()
|
||||
else:
|
||||
raise ValueError(f"Invalid exposure mode: {mode}!")
|
||||
|
||||
modes = {
|
||||
"external": self.mode_exp_ext,
|
||||
"timer": self.mode_exp_timer,
|
||||
"soft": self.mode_exp_soft,
|
||||
}
|
||||
|
||||
if exp_mode not in const.gf_valid_exposure_modes:
|
||||
raise ValueError(
|
||||
f"Invalid exposure mode! Valid modes are:\n{const.gf_valid_exposure_modes}"
|
||||
)
|
||||
|
||||
for key, attr in modes.items():
|
||||
# set the desired mode to 1, all others to 0
|
||||
attr.set(int(key == exp_mode)).wait()
|
||||
|
||||
# Commit parameters
|
||||
self.set_param.set(1).wait()
|
||||
|
||||
@property
|
||||
def fix_nframes_mode(self) -> str | None:
|
||||
def fix_nframes_mode(self) -> Literal["off", "start", "end", "start+end"] | None:
|
||||
"""Return the current fixed number of frames mode of the GigaFRoST camera.
|
||||
|
||||
Returns
|
||||
@@ -306,7 +304,7 @@ class GigaFrostCamera(PSIDeviceBase, GigaFrostBase):
|
||||
The camera's active fixed number of frames mode.
|
||||
"""
|
||||
start_bit = self.cnt_startbit.get()
|
||||
end_bit = self.cnt_startbit.get()
|
||||
end_bit = self.cnt_endbit.get()
|
||||
|
||||
if not start_bit and not end_bit:
|
||||
return "off"
|
||||
@@ -320,7 +318,7 @@ class GigaFrostCamera(PSIDeviceBase, GigaFrostBase):
|
||||
return None
|
||||
|
||||
@fix_nframes_mode.setter
|
||||
def fix_nframes_mode(self, mode: str):
|
||||
def fix_nframes_mode(self, mode: Literal["off", "start", "end", "start+end"]):
|
||||
"""Apply the fixed number of frames settings to the GigaFRoST camera.
|
||||
|
||||
Parameters
|
||||
@@ -328,26 +326,29 @@ class GigaFrostCamera(PSIDeviceBase, GigaFrostBase):
|
||||
mode : {'off', 'start', 'end', 'start+end'}
|
||||
The fixed number of frames mode to be applied.
|
||||
"""
|
||||
if mode == "off":
|
||||
self._fix_nframes_mode = mode
|
||||
if self._fix_nframes_mode == "off":
|
||||
self.cnt_startbit.set(0).wait()
|
||||
self.cnt_endbit.set(0).wait()
|
||||
elif mode == "start":
|
||||
elif self._fix_nframes_mode == "start":
|
||||
self.cnt_startbit.set(1).wait()
|
||||
self.cnt_endbit.set(0).wait()
|
||||
elif mode == "end":
|
||||
elif self._fix_nframes_mode == "end":
|
||||
self.cnt_startbit.set(0).wait()
|
||||
self.cnt_endbit.set(1).wait()
|
||||
elif mode == "start+end":
|
||||
elif self._fix_nframes_mode == "start+end":
|
||||
self.cnt_startbit.set(1).wait()
|
||||
self.cnt_endbit.set(1).wait()
|
||||
else:
|
||||
raise ValueError(f"Invalid fixed frame number mode: {mode}!")
|
||||
raise ValueError(
|
||||
f"Invalid fixed frame number mode! Valid modes are: {const.gf_valid_fix_nframe_modes}"
|
||||
)
|
||||
|
||||
# Commit parameters
|
||||
self.set_param.set(1).wait()
|
||||
|
||||
@property
|
||||
def trigger_mode(self) -> str | None:
|
||||
def trigger_mode(self) -> Literal["auto", "external", "timer", "soft"] | None:
|
||||
"""Method to detect the current trigger mode set in the GigaFRost camera.
|
||||
|
||||
Returns
|
||||
@@ -372,43 +373,34 @@ class GigaFrostCamera(PSIDeviceBase, GigaFrostBase):
|
||||
return None
|
||||
|
||||
@trigger_mode.setter
|
||||
def trigger_mode(self, mode: str):
|
||||
def trigger_mode(self, mode: Literal["auto", "external", "timer", "soft"]):
|
||||
"""
|
||||
Set the trigger mode for the GigaFRoST camera.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
mode : {'auto', 'external', 'timer', 'soft'}
|
||||
The trigger mode to be set.
|
||||
Args:
|
||||
mode(str): The trigger mode to be set. Valid arguments are: ['auto', 'external', 'timer', 'soft']
|
||||
"""
|
||||
if mode == "auto":
|
||||
self.mode_trig_auto.set(1).wait()
|
||||
self.mode_trig_soft.set(0).wait()
|
||||
self.mode_trig_timer.set(0).wait()
|
||||
self.mode_trig_ext.set(0).wait()
|
||||
elif mode == "soft":
|
||||
self.mode_trig_auto.set(0).wait()
|
||||
self.mode_trig_soft.set(1).wait()
|
||||
self.mode_trig_timer.set(0).wait()
|
||||
self.mode_trig_ext.set(0).wait()
|
||||
elif mode == "timer":
|
||||
self.mode_trig_auto.set(0).wait()
|
||||
self.mode_trig_soft.set(0).wait()
|
||||
self.mode_trig_timer.set(1).wait()
|
||||
self.mode_trig_ext.set(0).wait()
|
||||
elif mode == "external":
|
||||
self.mode_trig_auto.set(0).wait()
|
||||
self.mode_trig_soft.set(0).wait()
|
||||
self.mode_trig_timer.set(0).wait()
|
||||
self.mode_trig_ext.set(1).wait()
|
||||
else:
|
||||
raise ValueError(f"Invalid trigger mode: {mode}!")
|
||||
modes = {
|
||||
"auto": self.mode_trig_auto,
|
||||
"soft": self.mode_trig_soft,
|
||||
"timer": self.mode_trig_timer,
|
||||
"external": self.mode_trig_ext,
|
||||
}
|
||||
|
||||
if mode not in modes:
|
||||
raise ValueError(
|
||||
"Invalid trigger mode! Valid modes are: ['auto', 'external', 'timer', 'soft']"
|
||||
)
|
||||
|
||||
for key, attr in modes.items():
|
||||
# set the desired mode to 1, all others to 0
|
||||
attr.set(int(key == mode)).wait()
|
||||
|
||||
# Commit parameters
|
||||
self.set_param.set(1).wait()
|
||||
|
||||
@property
|
||||
def enable_mode(self) -> str | None:
|
||||
def enable_mode(self) -> Literal["soft", "external", "soft+ext", "always"] | None:
|
||||
"""Return the enable mode set in the GigaFRoST camera.
|
||||
|
||||
Returns
|
||||
@@ -430,7 +422,7 @@ class GigaFrostCamera(PSIDeviceBase, GigaFrostBase):
|
||||
return None
|
||||
|
||||
@enable_mode.setter
|
||||
def enable_mode(self, mode: str):
|
||||
def enable_mode(self, mode: Literal["soft", "external", "soft+ext", "always"]):
|
||||
"""
|
||||
Set the enable mode for the GigaFRoST camera.
|
||||
|
||||
@@ -442,17 +434,27 @@ class GigaFrostCamera(PSIDeviceBase, GigaFrostBase):
|
||||
The GigaFRoST enable mode. Valid arguments are:
|
||||
|
||||
* 'soft':
|
||||
The GigaFRoST enable signal is supplied through a software signal
|
||||
The GigaFRoST enable signal is supplied through a software
|
||||
signal
|
||||
* 'external':
|
||||
The GigaFRoST enable signal is supplied through an external TTL gating
|
||||
signal from the rotaiton stage or some other control unit
|
||||
The GigaFRoST enable signal is supplied through an external TTL
|
||||
gating signal from the rotaiton stage or some other control
|
||||
unit
|
||||
* 'soft+ext':
|
||||
The GigaFRoST enable signal can be supplied either via the software signal
|
||||
or externally. The two signals are combined with a logical OR gate.
|
||||
The GigaFRoST enable signal can be supplied either via the
|
||||
software signal or externally. The two signals are combined
|
||||
with a logical OR gate.
|
||||
* 'always':
|
||||
The GigaFRoST is always enabled.
|
||||
CAUTION: This mode is not compatible with the fixed number of frames modes!
|
||||
CAUTION: This mode is not compatible with the fixed number of
|
||||
frames modes!
|
||||
"""
|
||||
|
||||
if mode not in const.gf_valid_enable_modes:
|
||||
raise ValueError(
|
||||
f"Invalid enable mode {mode}! Valid modes are:\n{const.gf_valid_enable_modes}"
|
||||
)
|
||||
|
||||
if mode == "soft":
|
||||
self.mode_enbl_ext.set(0).wait()
|
||||
self.mode_endbl_soft.set(1).wait()
|
||||
@@ -469,30 +471,115 @@ class GigaFrostCamera(PSIDeviceBase, GigaFrostBase):
|
||||
self.mode_enbl_ext.set(0).wait()
|
||||
self.mode_endbl_soft.set(0).wait()
|
||||
self.mode_enbl_auto.set(1).wait()
|
||||
else:
|
||||
raise ValueError(f"Invalid enable mode {mode}!")
|
||||
# Commit parameters
|
||||
self.set_param.set(1).wait()
|
||||
|
||||
def arm(self) -> None:
|
||||
"""Prepare the camera to accept triggers"""
|
||||
self.acquire.set(1).wait()
|
||||
|
||||
def disarm(self):
|
||||
def set_idle(self):
|
||||
"""Set the camera to idle state"""
|
||||
self.acquire.set(0).wait()
|
||||
self.start_cam.set(0).wait()
|
||||
if self.auto_soft_enable.get():
|
||||
self.soft_enable.set(0).wait()
|
||||
|
||||
def initialize_gigafrost(self) -> None:
|
||||
"""Initialize the camera, set channel values"""
|
||||
# Stop acquisition
|
||||
self.start_cam.set(0).wait()
|
||||
|
||||
# set entry to UDP table
|
||||
# number of UDP ports to use
|
||||
self.ports.set(2).wait()
|
||||
# number of images to send to each UDP port before switching to next
|
||||
self.framenum.set(5).wait()
|
||||
# offset in UDP table - where to find the first entry
|
||||
self.ht_offset.set(0).wait()
|
||||
# activate changes
|
||||
self.write_srv.set(1).wait()
|
||||
|
||||
# Configure software triggering if needed
|
||||
if self.auto_soft_enable.get():
|
||||
# trigger modes
|
||||
self.cnt_startbit.set(1).wait()
|
||||
self.cnt_endbit.set(0).wait()
|
||||
|
||||
# set modes
|
||||
self.enable_mode = "soft"
|
||||
self.trigger_mode = "auto"
|
||||
self.exposure_mode = "timer"
|
||||
|
||||
# line swap - on for west, off for east
|
||||
self.ls_sw.set(1).wait()
|
||||
self.ls_nw.set(1).wait()
|
||||
self.ls_se.set(0).wait()
|
||||
self.ls_ne.set(0).wait()
|
||||
|
||||
# Commit parameters
|
||||
self.set_param.set(1).wait()
|
||||
|
||||
# Initialize data backend
|
||||
n, s = self._define_backend_ip()
|
||||
self.ip_north.put(n, force=True)
|
||||
self.ip_south.put(s, force=True)
|
||||
n, s = self._define_backend_mac()
|
||||
self.mac_north.put(n, force=True)
|
||||
self.mac_south.put(s, force=True)
|
||||
# Set udp header table
|
||||
self.set_udp_header_table()
|
||||
|
||||
def set_udp_header_table(self):
|
||||
"""Set the communication parameters for the camera module"""
|
||||
self.conn_parm.set(self._build_udp_header_table()).wait()
|
||||
|
||||
def destroy(self):
|
||||
if self.backend is not None:
|
||||
self.backend.shutdown()
|
||||
self.backend.shutdown()
|
||||
if self.live_preview:
|
||||
self.live_preview.stop()
|
||||
super().destroy()
|
||||
|
||||
def _on_preview_update(self, img: np.ndarray, header: dict):
|
||||
"""Send preview stream and update frame index counter"""
|
||||
self.num_images_counter.put(header["frame"], force=True)
|
||||
self._run_subs(sub_type=self.SUB_DEVICE_MONITOR_2D, obj=self, value=img)
|
||||
def _build_udp_header_table(self):
|
||||
"""Build the header table for the UDP communication"""
|
||||
udp_header_table = []
|
||||
|
||||
for i in range(0, 64, 1):
|
||||
for j in range(0, 8, 1):
|
||||
dest_port = 2000 + 8 * i + j
|
||||
source_port = 3000 + j
|
||||
if j < 4:
|
||||
extend_header_table(
|
||||
udp_header_table,
|
||||
self.mac_south.get(),
|
||||
self.ip_south.get(),
|
||||
dest_port,
|
||||
source_port,
|
||||
)
|
||||
else:
|
||||
extend_header_table(
|
||||
udp_header_table,
|
||||
self.mac_north.get(),
|
||||
self.ip_north.get(),
|
||||
dest_port,
|
||||
source_port,
|
||||
)
|
||||
|
||||
return udp_header_table
|
||||
|
||||
def _on_preview_update(self, img: np.ndarray):
|
||||
corrected_img = self.live_processing.apply_flat_dark_correction(img)
|
||||
self.live_processing.on_new_data(corrected_img)
|
||||
self.preview.put(corrected_img)
|
||||
self._run_subs(sub_type=self.SUB_DEVICE_MONITOR_2D, obj=self, value=corrected_img)
|
||||
|
||||
def _on_count_update(self, count: int):
|
||||
"""
|
||||
Callback for the count update from the backend.
|
||||
Updates the progress signal.
|
||||
|
||||
Args:
|
||||
count (int): The current count of images acquired by the camera.
|
||||
"""
|
||||
expected_counts = cast(int, self.num_images.get())
|
||||
self.progress.put(
|
||||
value=count, max_value=expected_counts, done=bool(count == expected_counts)
|
||||
)
|
||||
|
||||
def acq_done(self) -> DeviceStatus:
|
||||
"""
|
||||
@@ -504,16 +591,151 @@ class GigaFrostCamera(PSIDeviceBase, GigaFrostBase):
|
||||
DeviceStatus: The status of the acquisition
|
||||
"""
|
||||
status = DeviceStatus(self)
|
||||
if self.backend is not None:
|
||||
self.backend.add_status_callback(
|
||||
status,
|
||||
success=[StdDaqStatus.IDLE, StdDaqStatus.FILE_SAVED],
|
||||
error=[StdDaqStatus.REJECTED, StdDaqStatus.ERROR],
|
||||
)
|
||||
else:
|
||||
status.set_finished()
|
||||
|
||||
self.backend.add_status_callback(
|
||||
status,
|
||||
success=[StdDaqStatus.IDLE, StdDaqStatus.FILE_SAVED],
|
||||
error=[StdDaqStatus.REJECTED, StdDaqStatus.ERROR],
|
||||
)
|
||||
return status
|
||||
|
||||
def restart_with_new_config(
|
||||
self,
|
||||
name: str,
|
||||
file_path: str = "",
|
||||
file_prefix: str = "",
|
||||
num_images: int | None = None,
|
||||
frames_per_trigger: int | None = None,
|
||||
) -> DeviceStatus:
|
||||
"""
|
||||
Restart the camera with a new configuration.
|
||||
This method allows to change the file path, file prefix, and number of images.
|
||||
|
||||
Args:
|
||||
name (str): Name of the configuration to be saved.
|
||||
file_path (str): New file path for the acquisition. If empty, the current file path is used.
|
||||
file_prefix (str): New file prefix for the acquisition. If empty, the current file prefix is used.
|
||||
num_images (int | None): New number of images to acquire. If None, the current number of images is used.
|
||||
frames_per_trigger (int | None): New number of frames per trigger. If None, the current value is used.
|
||||
Returns:
|
||||
DeviceStatus: The status of the restart operation. It resolves when the camera is ready to receive the first image.
|
||||
"""
|
||||
self.acq_configs[name] = {}
|
||||
conf = {}
|
||||
if file_path:
|
||||
self.acq_configs[name]["file_path"] = self.file_path.get()
|
||||
conf["file_path"] = file_path
|
||||
if file_prefix:
|
||||
self.acq_configs[name]["file_prefix"] = self.file_prefix.get()
|
||||
conf["file_prefix"] = file_prefix
|
||||
if num_images is not None:
|
||||
self.acq_configs[name]["num_images"] = self.num_images.get()
|
||||
conf["num_images"] = num_images
|
||||
if frames_per_trigger is not None:
|
||||
self.acq_configs[name]["cnt_num"] = self.cnt_num.get()
|
||||
conf["cnt_num"] = frames_per_trigger
|
||||
|
||||
# Stop the camera and wait for it to become idle
|
||||
status = self.stop_camera()
|
||||
status.wait(timeout=10)
|
||||
|
||||
# update the configuration
|
||||
self.configure(conf)
|
||||
|
||||
# Restart the camera with the new configuration
|
||||
return self.start_camera()
|
||||
|
||||
def restore_config(self, name: str) -> None:
|
||||
"""
|
||||
Restore a previously saved configuration and restart the camera.
|
||||
|
||||
Args:
|
||||
name (str): Name of the configuration to restore.
|
||||
"""
|
||||
status = self.stop_camera()
|
||||
status.wait(timeout=10)
|
||||
config = self.acq_configs.pop(name, {})
|
||||
self.configure(config)
|
||||
|
||||
def update_live_processing_reference(
|
||||
self, reference_type: Literal["dark", "flat"]
|
||||
) -> StatusBase:
|
||||
"""
|
||||
Update the flat or dark reference for the live processing.
|
||||
|
||||
Args:
|
||||
reference_type (Literal["dark", "flat"]): Type of the reference to update.
|
||||
If 'dark', the dark reference will be updated, if 'flat', the flat reference will be updated.
|
||||
|
||||
Returns:
|
||||
StatusBase: The status of the update operation.
|
||||
"""
|
||||
if reference_type not in ["dark", "flat"]:
|
||||
raise ValueError("Invalid reference type! Must be 'dark' or 'flat'.")
|
||||
|
||||
# Use the current acquisition to update the reference
|
||||
if self.live_processing is None:
|
||||
raise RuntimeError("Live processing is not available. Cannot update reference.")
|
||||
status = self.live_processing.update_reference_with_file(
|
||||
reference_type=reference_type,
|
||||
file_path=self.target_file,
|
||||
entry="tomcat-gf/data", # type: ignore
|
||||
wait=False, # Do not wait for the update to finish
|
||||
)
|
||||
return status
|
||||
|
||||
def start_camera(self) -> DeviceStatus:
|
||||
"""
|
||||
Start the camera and the backend.
|
||||
|
||||
Returns:
|
||||
DeviceStatus: The status of the startup. It resolves when the backend is ready to receive the first image.
|
||||
"""
|
||||
status = DeviceStatus(self)
|
||||
self.backend.add_status_callback(
|
||||
status,
|
||||
success=[StdDaqStatus.WAITING_FOR_FIRST_IMAGE],
|
||||
error=[StdDaqStatus.REJECTED, StdDaqStatus.ERROR],
|
||||
)
|
||||
self.backend.start(
|
||||
file_path=self.file_path.get(), # type: ignore
|
||||
file_prefix=self.file_prefix.get(), # type: ignore
|
||||
num_images=self.num_images.get(), # type: ignore
|
||||
)
|
||||
self.start_cam.set(1).wait()
|
||||
|
||||
def _emit_file_event(_status: DeviceStatus):
|
||||
"""
|
||||
Emit a file event when the camera is ready.
|
||||
"""
|
||||
self._run_subs(
|
||||
sub_type=self.SUB_FILE_EVENT,
|
||||
file_path=self.target_file,
|
||||
done=False,
|
||||
successful=False,
|
||||
hinted_h5_entries={"data": "data"},
|
||||
)
|
||||
|
||||
status.add_callback(_emit_file_event)
|
||||
return status
|
||||
|
||||
def stop_camera(self) -> DeviceStatus:
|
||||
"""Stop the camera acquisition and set it to idle state."""
|
||||
self.set_idle()
|
||||
status = DeviceStatus(self)
|
||||
self.backend.add_status_callback(
|
||||
status, success=[StdDaqStatus.IDLE], error=[StdDaqStatus.REJECTED, StdDaqStatus.ERROR]
|
||||
)
|
||||
self.backend.stop()
|
||||
return status
|
||||
|
||||
@property
|
||||
def target_file(self) -> str:
|
||||
"""Return the target file path for the current acquisition."""
|
||||
file_path = cast(str, self.file_path.get())
|
||||
file_prefix = cast(str, self.file_prefix.get())
|
||||
return os.path.join(file_path, f"{file_prefix.removesuffix('_')}.h5")
|
||||
|
||||
########################################
|
||||
# Beamline Specific Implementations #
|
||||
########################################
|
||||
@@ -531,98 +753,87 @@ class GigaFrostCamera(PSIDeviceBase, GigaFrostBase):
|
||||
Called after the device is connected and its signals are connected.
|
||||
Default values for signals should be set here.
|
||||
"""
|
||||
# Perform a full initialization of the GigaFrost
|
||||
|
||||
# # TODO: check if this can be moved to the config file
|
||||
# # pylint: disable=protected-access
|
||||
# self.auto_soft_enable._metadata["write_access"] = False
|
||||
# self.backend_url._metadata["write_access"] = False
|
||||
self.auto_soft_enable.put(self._signals_to_be_set["auto_soft_enable"], force=True)
|
||||
self.backend_url.put(self._signals_to_be_set["backend_url"], force=True)
|
||||
|
||||
self.initialize_gigafrost()
|
||||
|
||||
self.backend.connect()
|
||||
|
||||
if self.live_preview:
|
||||
self.live_preview.start()
|
||||
|
||||
def on_stage(self) -> DeviceStatus | None:
|
||||
"""
|
||||
Called while staging the device.
|
||||
|
||||
Information about the upcoming scan can be accessed from the scan_info object.
|
||||
"""
|
||||
# Gigafrost can finish a run without explicit unstaging
|
||||
# If the camera is busy, stop it first
|
||||
if self.busy_stat.value:
|
||||
logger.warning("Camera is already running, unstaging it first!")
|
||||
self.unstage()
|
||||
sleep(0.5)
|
||||
self.stop_camera()
|
||||
|
||||
scan_msg = self.scan_info.msg
|
||||
if scan_msg is None or scan_msg.request_inputs is None or scan_msg.scan_parameters is None:
|
||||
# I don't think this can happen outside of tests, but just in case
|
||||
logger.warning(
|
||||
f"[{self.name}] Scan message is not available or incomplete. "
|
||||
"Cannot configure the GigaFrost camera."
|
||||
)
|
||||
self.acq_configs = {}
|
||||
return
|
||||
|
||||
# FIXME: I don't care about how we fish out config parameters from scan info
|
||||
scan_args = {
|
||||
**self.scan_info.msg.request_inputs["inputs"],
|
||||
**self.scan_info.msg.request_inputs["kwargs"],
|
||||
**self.scan_info.msg.scan_parameters,
|
||||
**scan_msg.request_inputs.get("inputs", {}),
|
||||
**scan_msg.request_inputs.get("kwargs", {}),
|
||||
**scan_msg.scan_parameters,
|
||||
}
|
||||
|
||||
d = {}
|
||||
if "image_width" in scan_args and scan_args["image_width"] is not None:
|
||||
d["image_width"] = scan_args["image_width"]
|
||||
if "image_height" in scan_args and scan_args["image_height"] is not None:
|
||||
d["image_height"] = scan_args["image_height"]
|
||||
if "exp_time" in scan_args and scan_args["exp_time"] is not None:
|
||||
d["exposure_time_ms"] = scan_args["exp_time"]
|
||||
if "exp_period" in scan_args and scan_args["exp_period"] is not None:
|
||||
d["exposure_period_ms"] = scan_args["exp_period"]
|
||||
if "acq_time" in scan_args and scan_args["acq_time"] is not None:
|
||||
d["exposure_time_ms"] = scan_args["acq_time"]
|
||||
if "acq_period" in scan_args and scan_args["acq_period"] is not None:
|
||||
d["exposure_period_ms"] = scan_args["acq_period"]
|
||||
if "exp_burst" in scan_args and scan_args["exp_burst"] is not None:
|
||||
d["exposure_num_burst"] = scan_args["exp_burst"]
|
||||
if "acq_mode" in scan_args and scan_args["acq_mode"] is not None:
|
||||
d["acq_mode"] = scan_args["acq_mode"]
|
||||
|
||||
if d:
|
||||
self.configure(d)
|
||||
if "file_path" not in scan_args:
|
||||
scan_args["file_path"] = (
|
||||
"/gpfs/test/test-beamline" # FIXME: This should be from the scan message
|
||||
)
|
||||
if "file_prefix" not in scan_args:
|
||||
scan_args["file_prefix"] = scan_msg.info["file_components"][0].split("/")[-1] + "_"
|
||||
self.configure(scan_args)
|
||||
|
||||
# Sync if out of sync
|
||||
if self.sync_flag.value == 0:
|
||||
self.sync_swhw.set(1).wait()
|
||||
|
||||
# stdDAQ backend parameters
|
||||
num_points = (
|
||||
1
|
||||
* scan_args.get("steps", 1)
|
||||
* scan_args.get("exp_burst", 1)
|
||||
* scan_args.get("repeats", 1)
|
||||
* scan_args.get("burst_at_each_point", 1)
|
||||
)
|
||||
self.num_images.set(num_points).wait()
|
||||
if "daq_file_path" in scan_args and scan_args["daq_file_path"] is not None:
|
||||
self.file_path.set(scan_args["daq_file_path"]).wait()
|
||||
if "daq_file_prefix" in scan_args and scan_args["daq_file_prefix"] is not None:
|
||||
self.file_prefix.set(scan_args["daq_file_prefix"]).wait()
|
||||
if "daq_num_images" in scan_args and scan_args["daq_num_images"] is not None:
|
||||
self.num_images.set(scan_args["daq_num_images"]).wait()
|
||||
# Start stdDAQ preview
|
||||
if self.live_preview is not None:
|
||||
self.live_preview.start()
|
||||
|
||||
# reset the acquisition configs
|
||||
self.acq_configs = {}
|
||||
|
||||
def on_unstage(self) -> DeviceStatus | None:
|
||||
"""Called while unstaging the device."""
|
||||
# Switch to idle
|
||||
self.disarm()
|
||||
if self.backend is not None:
|
||||
logger.info(f"StdDaq status before unstage: {self.backend.status}")
|
||||
self.backend.stop()
|
||||
logger.info(f"StdDaq status on unstage: {self.backend.status}")
|
||||
return self.stop_camera()
|
||||
|
||||
def on_pre_scan(self) -> DeviceStatus | None:
|
||||
"""Called right before the scan starts on all devices automatically."""
|
||||
# First start the stdDAQ
|
||||
if self.backend is not None:
|
||||
self.backend.start(
|
||||
file_path=self.file_path.get(),
|
||||
file_prefix=self.file_prefix.get(),
|
||||
num_images=self.num_images.get(),
|
||||
)
|
||||
# Then start the camera
|
||||
self.arm()
|
||||
# Switch to acquiring
|
||||
return self.start_camera()
|
||||
|
||||
def on_trigger(self) -> DeviceStatus | None:
|
||||
def on_trigger(self) -> DeviceStatus | StatusBase | None:
|
||||
"""Called when the device is triggered."""
|
||||
if self.busy_stat.get() in (0, "IDLE"):
|
||||
raise RuntimeError("GigaFrost must be running before triggering")
|
||||
|
||||
logger.warning(f"[{self.name}] SW triggering gigafrost")
|
||||
logger.info(f"[{self.name}] SW triggering gigafrost")
|
||||
|
||||
# Soft triggering based on operation mode
|
||||
if (
|
||||
@@ -632,38 +843,39 @@ class GigaFrostCamera(PSIDeviceBase, GigaFrostBase):
|
||||
):
|
||||
# BEC teststand operation mode: posedge of SoftEnable if Started
|
||||
self.soft_enable.set(0).wait()
|
||||
self.soft_enable.set(1).wait()
|
||||
return self.soft_enable.set(1)
|
||||
|
||||
if self.acquire_block.get() or self.backend is None:
|
||||
wait_time = 0.2 + 0.001 * self.num_exposures.value * max(
|
||||
self.acquire_time.value, self.acquire_period.value
|
||||
)
|
||||
logger.info(f"[{self.name}] Triggering set to block for {wait_time} seconds")
|
||||
return DeviceStatus(self, done=True, success=True, settle_time=wait_time)
|
||||
else:
|
||||
self.soft_trig.set(1).wait()
|
||||
return self.soft_trig.set(1)
|
||||
|
||||
def on_complete(self) -> DeviceStatus | None:
|
||||
"""Called to inquire if a device has completed a scans."""
|
||||
return self.acq_done()
|
||||
|
||||
def _create_dataset(_status: DeviceStatus):
|
||||
self.backend.create_virtual_datasets(
|
||||
self.file_path.get(), file_prefix=self.file_prefix.get() # type: ignore
|
||||
)
|
||||
self._run_subs(
|
||||
sub_type=self.SUB_FILE_EVENT,
|
||||
file_path=self.target_file,
|
||||
done=True,
|
||||
successful=True,
|
||||
hinted_location={"data": "data"},
|
||||
)
|
||||
|
||||
status = self.acq_done()
|
||||
status.add_callback(_create_dataset)
|
||||
return status
|
||||
|
||||
def on_kickoff(self) -> DeviceStatus | None:
|
||||
"""Called to kickoff a device for a fly scan. Has to be called explicitly."""
|
||||
|
||||
def on_stop(self) -> None:
|
||||
def on_stop(self) -> DeviceStatus:
|
||||
"""Called when the device is stopped."""
|
||||
return self.on_unstage()
|
||||
return self.stop_camera()
|
||||
|
||||
|
||||
# Automatically connect to MicroSAXS testbench if directly invoked
|
||||
if __name__ == "__main__":
|
||||
if __name__ == "__main__": # pragma: no cover
|
||||
gf = GigaFrostCamera(
|
||||
"X02DA-CAM-GF2:",
|
||||
name="gf2",
|
||||
backend_url="http://xbl-daq-28:8080",
|
||||
auto_soft_enable=True,
|
||||
std_daq_ws="ws://129.129.95.111:8080",
|
||||
std_daq_rest="http://129.129.95.111:5000",
|
||||
std_daq_live="tcp://129.129.95.111:20000",
|
||||
"X02DA-CAM-GF2:", name="gf2", backend_url="http://xbl-daq-28:8080", auto_soft_enable=True
|
||||
)
|
||||
gf.wait_for_connection()
|
||||
|
||||
@@ -1,158 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Standard DAQ preview image stream module
|
||||
|
||||
Created on Thu Jun 27 17:28:43 2024
|
||||
|
||||
@author: mohacsi_i
|
||||
"""
|
||||
from time import sleep, time
|
||||
import threading
|
||||
import zmq
|
||||
import json
|
||||
|
||||
ZMQ_TOPIC_FILTER = b""
|
||||
|
||||
|
||||
|
||||
class PcoTestConsumer:
|
||||
"""Detector wrapper class around the StdDaq preview image stream.
|
||||
|
||||
This was meant to provide live image stream directly from the StdDAQ.
|
||||
Note that the preview stream must be already throtled in order to cope
|
||||
with the incoming data and the python class might throttle it further.
|
||||
|
||||
You can add a preview widget to the dock by:
|
||||
cam_widget = gui.add_dock('cam_dock1').add_widget('BECFigure').image('daq_stream1')
|
||||
"""
|
||||
|
||||
# Subscriptions for plotting image
|
||||
_shutdown_event = threading.Event()
|
||||
_monitor_mutex = threading.Lock()
|
||||
_monitor_thread = None
|
||||
|
||||
# Status attributes
|
||||
_url = None
|
||||
_image = None
|
||||
_frame = None
|
||||
_socket = None
|
||||
|
||||
def __init__(self, url: str = "tcp://129.129.95.38:20000") -> None:
|
||||
super().__init__()
|
||||
self._url = url
|
||||
|
||||
def connect(self):
|
||||
"""Connect to te StDAQs PUB-SUB streaming interface"""
|
||||
# Socket to talk to server
|
||||
context = zmq.Context()
|
||||
self._socket = context.socket(zmq.PULL)
|
||||
try:
|
||||
self._socket.connect(self.url)
|
||||
except ConnectionRefusedError:
|
||||
sleep(1)
|
||||
self._socket.connect(self.url)
|
||||
|
||||
def disconnect(self):
|
||||
"""Disconnect"""
|
||||
try:
|
||||
if self._socket is not None:
|
||||
self._socket.disconnect(self.url)
|
||||
except zmq.ZMQError:
|
||||
pass
|
||||
finally:
|
||||
self._socket = None
|
||||
|
||||
@property
|
||||
def url(self):
|
||||
return self._url
|
||||
|
||||
@property
|
||||
def image(self):
|
||||
return self._image
|
||||
|
||||
@property
|
||||
def frame(self):
|
||||
return self._frame
|
||||
|
||||
# pylint: disable=protected-access
|
||||
def start(self):
|
||||
"""Start listening for preview data stream"""
|
||||
if self._monitor_mutex.locked():
|
||||
raise RuntimeError("Only one consumer permitted")
|
||||
|
||||
self.connect()
|
||||
self._mon = threading.Thread(target=self.poll, daemon=True)
|
||||
self._mon.start()
|
||||
|
||||
def stop(self):
|
||||
"""Stop a running preview"""
|
||||
self._shutdown_event.set()
|
||||
if self._mon is not None:
|
||||
self._stop_polling = True
|
||||
# Might hang on recv_multipart
|
||||
self._mon.join(timeout=1)
|
||||
# So also disconnect the socket
|
||||
self.disconnect()
|
||||
self._shutdown_event.clear()
|
||||
|
||||
def poll(self):
|
||||
"""Collect streamed updates"""
|
||||
try:
|
||||
t_last = time()
|
||||
print("Starting monitor")
|
||||
with self._monitor_mutex:
|
||||
while not self._shutdown_event.is_set():
|
||||
try:
|
||||
# pylint: disable=no-member
|
||||
r = self._socket.recv_multipart(flags=zmq.NOBLOCK)
|
||||
|
||||
# Length and throtling checks
|
||||
t_curr = time()
|
||||
t_elapsed = t_curr - t_last
|
||||
if t_elapsed < self.parent.throttle.get():
|
||||
continue
|
||||
# # Unpack the Array V1 reply to metadata and array data
|
||||
meta, data = r
|
||||
|
||||
# Update image and update subscribers
|
||||
header = json.loads(meta)
|
||||
self.header = header
|
||||
# if header["type"] == "uint16":
|
||||
# image = np.frombuffer(data, dtype=np.uint16)
|
||||
# if image.size != np.prod(header['shape']):
|
||||
# err = f"Unexpected array size of {image.size} for header: {header}"
|
||||
# raise ValueError(err)
|
||||
# image = image.reshape(header['shape'])
|
||||
|
||||
# # Update image and update subscribers
|
||||
# self._frame = header['frame']
|
||||
# self._image = image
|
||||
t_last = t_curr
|
||||
# print(
|
||||
# f"[{self.name}] Updated frame {header['frame']}\t"
|
||||
# f"Shape: {header['shape']}\tMean: {np.mean(image):.3f}"
|
||||
# )
|
||||
except ValueError:
|
||||
# Happens when ZMQ partially delivers the multipart message
|
||||
pass
|
||||
except zmq.error.Again:
|
||||
# Happens when receive queue is empty
|
||||
sleep(0.1)
|
||||
except Exception as ex:
|
||||
print(f"{str(ex)}")
|
||||
raise
|
||||
finally:
|
||||
try:
|
||||
self._socket.disconnect(self.url)
|
||||
except RuntimeError:
|
||||
pass
|
||||
self._monitor_thread = None
|
||||
print(f"Detaching monitor")
|
||||
|
||||
|
||||
# Automatically connect to MicroSAXS testbench if directly invoked
|
||||
if __name__ == "__main__":
|
||||
daq = PcoTestConsumer(url="tcp://10.4.0.82:8080")
|
||||
daq.start()
|
||||
sleep(500)
|
||||
daq.stop()
|
||||
@@ -1,192 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Standard DAQ preview image stream module
|
||||
|
||||
Created on Thu Jun 27 17:28:43 2024
|
||||
|
||||
@author: mohacsi_i
|
||||
"""
|
||||
from time import sleep, time
|
||||
from threading import Thread
|
||||
import zmq
|
||||
import json
|
||||
from ophyd import Device, Signal, Component, Kind
|
||||
from ophyd_devices.interfaces.base_classes.psi_detector_base import (
|
||||
CustomDetectorMixin,
|
||||
PSIDetectorBase,
|
||||
)
|
||||
|
||||
from bec_lib import bec_logger
|
||||
|
||||
logger = bec_logger.logger
|
||||
ZMQ_TOPIC_FILTER = b""
|
||||
|
||||
|
||||
class PcoTestConsumerMixin(CustomDetectorMixin):
|
||||
"""Setup class for the standard DAQ preview stream
|
||||
|
||||
Parent class: CustomDetectorMixin
|
||||
"""
|
||||
# pylint: disable=protected-access
|
||||
def on_stage(self):
|
||||
"""Start listening for preview data stream"""
|
||||
if self.parent._mon is not None:
|
||||
self.parent.unstage()
|
||||
sleep(0.5)
|
||||
|
||||
self.parent.connect()
|
||||
self._stop_polling = False
|
||||
self.parent._mon = Thread(target=self.poll, daemon=True)
|
||||
self.parent._mon.start()
|
||||
|
||||
def on_unstage(self):
|
||||
"""Stop a running preview"""
|
||||
if self.parent._mon is not None:
|
||||
self._stop_polling = True
|
||||
# Might hang on recv_multipart
|
||||
self.parent._mon.join(timeout=1)
|
||||
# So also disconnect the socket
|
||||
self.parent.disconnect()
|
||||
|
||||
def on_stop(self):
|
||||
"""Stop a running preview"""
|
||||
self.parent.disconnect()
|
||||
|
||||
def poll(self):
|
||||
"""Collect streamed updates"""
|
||||
try:
|
||||
t_last = time()
|
||||
print("Starting monitor")
|
||||
while True:
|
||||
try:
|
||||
# Exit loop and finish monitoring
|
||||
if self._stop_polling:
|
||||
logger.info(f"[{self.parent.name}]\tDetaching monitor")
|
||||
break
|
||||
|
||||
# pylint: disable=no-member
|
||||
r = self.parent._socket.recv_multipart(flags=zmq.NOBLOCK)
|
||||
|
||||
# Length and throtling checks
|
||||
t_curr = time()
|
||||
t_elapsed = t_curr - t_last
|
||||
if t_elapsed < self.parent.throttle.get():
|
||||
continue
|
||||
# # Unpack the Array V1 reply to metadata and array data
|
||||
meta, data = r
|
||||
|
||||
# Update image and update subscribers
|
||||
header = json.loads(meta)
|
||||
self.parent.header = header
|
||||
# if header["type"] == "uint16":
|
||||
# image = np.frombuffer(data, dtype=np.uint16)
|
||||
# if image.size != np.prod(header['shape']):
|
||||
# err = f"Unexpected array size of {image.size} for header: {header}"
|
||||
# raise ValueError(err)
|
||||
# image = image.reshape(header['shape'])
|
||||
|
||||
# # Update image and update subscribers
|
||||
# self.parent.frame.put(header['frame'], force=True)
|
||||
# self.parent.image_shape.put(header['shape'], force=True)
|
||||
# self.parent.image.put(image, force=True)
|
||||
# self.parent._last_image = image
|
||||
# self.parent._run_subs(sub_type=self.parent.SUB_MONITOR, value=image)
|
||||
t_last = t_curr
|
||||
# logger.info(
|
||||
# f"[{self.parent.name}] Updated frame {header['frame']}\t"
|
||||
# f"Shape: {header['shape']}\tMean: {np.mean(image):.3f}"
|
||||
# )
|
||||
print(f"[{self.parent.name}] Updated frame {header['frame']}\t")
|
||||
except ValueError:
|
||||
# Happens when ZMQ partially delivers the multipart message
|
||||
pass
|
||||
except zmq.error.Again:
|
||||
# Happens when receive queue is empty
|
||||
sleep(0.1)
|
||||
except Exception as ex:
|
||||
logger.info(f"[{self.parent.name}]\t{str(ex)}")
|
||||
raise
|
||||
finally:
|
||||
try:
|
||||
self.parent._socket.disconnect(self.parent.url.get())
|
||||
except RuntimeError:
|
||||
pass
|
||||
self.parent._mon = None
|
||||
logger.info(f"[{self.parent.name}]\tDetaching monitor")
|
||||
|
||||
|
||||
class PcoTestConsumer(PSIDetectorBase):
|
||||
"""Detector wrapper class around the StdDaq preview image stream.
|
||||
|
||||
This was meant to provide live image stream directly from the StdDAQ.
|
||||
Note that the preview stream must be already throtled in order to cope
|
||||
with the incoming data and the python class might throttle it further.
|
||||
|
||||
You can add a preview widget to the dock by:
|
||||
cam_widget = gui.add_dock('cam_dock1').add_widget('BECFigure').image('daq_stream1')
|
||||
"""
|
||||
|
||||
# Subscriptions for plotting image
|
||||
USER_ACCESS = ["get_last_image"]
|
||||
SUB_MONITOR = "device_monitor_2d"
|
||||
_default_sub = SUB_MONITOR
|
||||
|
||||
header = None
|
||||
|
||||
custom_prepare_cls = PcoTestConsumerMixin
|
||||
|
||||
# Status attributes
|
||||
url = Component(Signal, kind=Kind.config)
|
||||
throttle = Component(Signal, value=0.25, kind=Kind.config)
|
||||
frame = Component(Signal, kind=Kind.hinted)
|
||||
image_shape = Component(Signal, kind=Kind.normal)
|
||||
# FIXME: The BEC client caches the read()s from the last 50 scans
|
||||
image = Component(Signal, kind=Kind.omitted)
|
||||
_last_image = None
|
||||
_mon = None
|
||||
_socket = None
|
||||
|
||||
def __init__(
|
||||
self, *args, url: str = "tcp://129.129.95.38:20000", parent: Device = None, **kwargs
|
||||
) -> None:
|
||||
super().__init__(*args, parent=parent, **kwargs)
|
||||
self.url._metadata["write_access"] = False
|
||||
self.image._metadata["write_access"] = False
|
||||
self.frame._metadata["write_access"] = False
|
||||
self.image_shape._metadata["write_access"] = False
|
||||
self.url.set(url, force=True).wait()
|
||||
|
||||
def connect(self):
|
||||
"""Connect to te StDAQs PUB-SUB streaming interface
|
||||
|
||||
StdDAQ may reject connection for a few seconds when it restarts,
|
||||
so if it fails, wait a bit and try to connect again.
|
||||
"""
|
||||
# pylint: disable=no-member
|
||||
# Socket to talk to server
|
||||
context = zmq.Context()
|
||||
self._socket = context.socket(zmq.PULL)
|
||||
try:
|
||||
self._socket.connect(self.url.get())
|
||||
except ConnectionRefusedError:
|
||||
sleep(1)
|
||||
self._socket.connect(self.url.get())
|
||||
|
||||
def disconnect(self):
|
||||
"""Disconnect"""
|
||||
try:
|
||||
if self._socket is not None:
|
||||
self._socket.disconnect(self.url.get())
|
||||
except zmq.ZMQError:
|
||||
pass
|
||||
finally:
|
||||
self._socket = None
|
||||
|
||||
def get_image(self):
|
||||
return self._last_image
|
||||
|
||||
|
||||
# Automatically connect to MicroSAXS testbench if directly invoked
|
||||
if __name__ == "__main__":
|
||||
daq = PcoTestConsumerMixin(url="tcp://129.129.106.124:8080", name="preview")
|
||||
daq.wait_for_connection()
|
||||
@@ -1,415 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Created on Wed Dec 6 11:33:54 2023
|
||||
|
||||
@author: mohacsi_i
|
||||
"""
|
||||
import time
|
||||
import numpy as np
|
||||
from ophyd.status import SubscriptionStatus, DeviceStatus
|
||||
from ophyd_devices.interfaces.base_classes.psi_device_base import PSIDeviceBase
|
||||
|
||||
from bec_lib.logger import bec_logger
|
||||
|
||||
from tomcat_bec.devices.gigafrost.pcoedge_base import PcoEdgeBase
|
||||
from tomcat_bec.devices.gigafrost.std_daq_preview import StdDaqPreview
|
||||
from tomcat_bec.devices.gigafrost.std_daq_client import StdDaqClient, StdDaqStatus
|
||||
|
||||
|
||||
logger = bec_logger.logger
|
||||
|
||||
|
||||
# pylint: disable=too-many-instance-attributes
|
||||
class PcoEdge5M(PSIDeviceBase, PcoEdgeBase):
|
||||
"""Ophyd baseclass for Helge camera IOCs
|
||||
|
||||
This class provides wrappers for Helge's camera IOCs around SwissFEL and
|
||||
for high performance SLS 2.0 cameras. The IOC's operation is a bit arcane
|
||||
and there are different versions and cameras all around. So this device
|
||||
only covers the absolute basics.
|
||||
|
||||
Probably the most important part is the configuration state machine. As
|
||||
the SET_PARAMS takes care of buffer allocations it might take some time,
|
||||
as well as a full re-configuration is required every time we change the
|
||||
binning, roi, etc... This is automatically performed upon starting an
|
||||
exposure (if it heven't been done before).
|
||||
|
||||
The status flag state machine during re-configuration is:
|
||||
BUSY low, SET low -> BUSY high, SET low -> BUSY low, SET high -> BUSY low, SET low
|
||||
|
||||
|
||||
UPDATE: Data sending operation modes
|
||||
- Switch to ZMQ streaming by setting FILEFORMAT to ZEROMQ
|
||||
- Set SAVESTART and SAVESTOP to select a ROI of image indices
|
||||
- Start file transfer with FTRANSFER.
|
||||
The ZMQ connection operates in PUSH-PULL mode, i.e. it needs incoming connection.
|
||||
|
||||
STOREMODE sets the acquisition mode:
|
||||
if STOREMODE == Recorder
|
||||
Fills up the buffer with images. Here SAVESTART and SAVESTOP selects a ROI
|
||||
of image indices to be streamed out (i.e. maximum buffer_size number of images)
|
||||
|
||||
if STOREMODE == FIFO buffer
|
||||
Continously streams out data using the buffer as a FIFO queue.
|
||||
Here SAVESTART and SAVESTOP selects a ROI of image indices to be streamed continously
|
||||
(i.e. a large SAVESTOP streams indefinitely). Note that in FIFO mode buffer reads are
|
||||
destructive. to prevent this, we don't have EPICS preview
|
||||
"""
|
||||
|
||||
USER_ACCESS = ["complete", "backend", "live_preview", "arm", "disarm"]
|
||||
|
||||
# Placeholders for stdDAQ and livestream clients
|
||||
backend = None
|
||||
live_preview = None
|
||||
|
||||
# pylint: disable=too-many-arguments
|
||||
def __init__(
|
||||
self,
|
||||
prefix="",
|
||||
*,
|
||||
name,
|
||||
kind=None,
|
||||
read_attrs=None,
|
||||
configuration_attrs=None,
|
||||
parent=None,
|
||||
scan_info=None,
|
||||
std_daq_rest: str | None = None,
|
||||
std_daq_ws: str | None = None,
|
||||
std_daq_live: str | None = None,
|
||||
**kwargs,
|
||||
):
|
||||
# super() will call the mixin class
|
||||
super().__init__(
|
||||
prefix=prefix,
|
||||
name=name,
|
||||
kind=kind,
|
||||
read_attrs=read_attrs,
|
||||
configuration_attrs=configuration_attrs,
|
||||
parent=parent,
|
||||
scan_info=scan_info,
|
||||
**kwargs,
|
||||
)
|
||||
# Configure the stdDAQ client
|
||||
if std_daq_rest is None or std_daq_ws is None:
|
||||
# raise ValueError("Both std_daq_rest and std_daq_ws must be provided")
|
||||
logger.error("No stdDAQ address provided, launching without data backend!")
|
||||
else:
|
||||
self.backend = StdDaqClient(parent=self, ws_url=std_daq_ws, rest_url=std_daq_rest)
|
||||
# Configure image preview
|
||||
if std_daq_live is not None:
|
||||
self.live_preview = StdDaqPreview(url=std_daq_live, cb=self._on_preview_update)
|
||||
else:
|
||||
logger.error("No stdDAQ stream address provided, launching without preview!")
|
||||
|
||||
def configure(self, d: dict = None) -> tuple:
|
||||
"""Configure the base Helge camera device
|
||||
|
||||
Parameters as 'd' dictionary
|
||||
----------------------------
|
||||
num_images : int
|
||||
Number of images to be taken during each scan. Meaning depends on
|
||||
store mode.
|
||||
exposure_time_ms : float
|
||||
Exposure time [ms], usually gets set back to 20 ms
|
||||
exposure_period_ms : float
|
||||
Exposure period [ms], up to 200 ms.
|
||||
store_mode : str
|
||||
Buffer operation mode
|
||||
*'Recorder' to record in buffer
|
||||
*'FIFO buffer' for continous streaming
|
||||
data_format : str
|
||||
Usually set to 'ZEROMQ'
|
||||
acq_mode : str
|
||||
Store mode and data format according to preconfigured settings
|
||||
"""
|
||||
if self.state not in ("IDLE"):
|
||||
raise RuntimeError(f"Can't change configuration from state {self.state}")
|
||||
|
||||
# If Bluesky style configure
|
||||
if d is not None:
|
||||
# Commonly changed settings
|
||||
if "exposure_num_burst" in d:
|
||||
self.file_savestop.set(d["exposure_num_burst"]).wait()
|
||||
if "exposure_time_ms" in d:
|
||||
self.acquire_time.set(d["exposure_time_ms"]).wait()
|
||||
if "exposure_period_ms" in d:
|
||||
self.acquire_delay.set(d["exposure_period_ms"]).wait()
|
||||
if "exposure_period_ms" in d:
|
||||
self.acquire_delay.set(d["exposure_period_ms"]).wait()
|
||||
if "image_width" in d:
|
||||
self.array_size.array_size_x.set(d["image_width"]).wait()
|
||||
if "image_height" in d:
|
||||
self.array_size.array_size_y.set(d["image_height"]).wait()
|
||||
if "store_mode" in d:
|
||||
self.bufferStoreMode.set(d["store_mode"]).wait()
|
||||
if "data_format" in d:
|
||||
self.file_format.set(d["data_format"]).wait()
|
||||
|
||||
# If a pre-configured acquisition mode is specified, set it
|
||||
if "acq_mode" in d:
|
||||
self.set_acquisition_mode(d["acq_mode"])
|
||||
|
||||
# State machine
|
||||
# Initial: BUSY and SET both low
|
||||
# 0. Write 1 to SET_PARAM
|
||||
# 1. BUSY goes high, SET stays low
|
||||
# 2. BUSY goes low, SET goes high
|
||||
# 3. BUSY stays low, SET goes low
|
||||
# So we need a 'negedge' on SET_PARAM
|
||||
def negedge(*, old_value, value, timestamp, **_):
|
||||
return bool(old_value and not value)
|
||||
|
||||
# Subscribe and wait for update
|
||||
status = SubscriptionStatus(self.set_param, negedge, timeout=5, settle_time=0.5)
|
||||
|
||||
self.set_param.set(1).wait()
|
||||
status.wait()
|
||||
|
||||
def set_acquisition_mode(self, acq_mode):
|
||||
"""Set acquisition mode
|
||||
|
||||
Utility function to quickly select between pre-configured and tested
|
||||
acquisition modes.
|
||||
"""
|
||||
if acq_mode in ["default", "step"]:
|
||||
# NOTE: Trigger duration requires a consumer
|
||||
self.bufferStoreMode.set("FIFO Buffer").wait()
|
||||
if acq_mode in ["stream"]:
|
||||
# NOTE: Trigger duration requires a consumer
|
||||
self.bufferStoreMode.set("FIFO Buffer").wait()
|
||||
else:
|
||||
raise RuntimeError(f"Unsupported acquisition mode: {acq_mode}")
|
||||
|
||||
def arm(self):
|
||||
"""Bluesky style stage: arm the detector"""
|
||||
logger.warning("Staging PCO")
|
||||
# Acquisition is only allowed when the IOC is not busy
|
||||
if self.state in ("OFFLINE", "BUSY", "REMOVED", "RUNNING"):
|
||||
raise RuntimeError(f"Camera in in state: {self.state}")
|
||||
|
||||
if (
|
||||
self.bufferStoreMode.get() in ("Recorder", 0)
|
||||
and self.file_savestop.get() > self.buffer_size.get()
|
||||
):
|
||||
logger.warning(
|
||||
f"You'll send empty images, {self.file_savestop.get()} is above buffer size"
|
||||
)
|
||||
|
||||
# Start the acquisition (this sets parameers and starts acquisition)
|
||||
self.acquire.set("Running").wait()
|
||||
|
||||
# Subscribe and wait for update
|
||||
def is_running(*, value, timestamp, **_):
|
||||
return bool(value == 6)
|
||||
|
||||
status = SubscriptionStatus(self.camera_statuscode, is_running, timeout=5, settle_time=0.2)
|
||||
status.wait()
|
||||
|
||||
    def disarm(self) -> None:
        """Bluesky style unstage: stop the detector"""
        # Stop the acquisition first so no new frames enter the buffer
        self.acquire.set("Idle").wait()

        # Data streaming is stopped by setting the max index to 0
        # FIXME: This will interrupt data transfer
        self.file_savestop.set(0).wait()
|
||||
|
||||
def destroy(self):
|
||||
logger.warning("Destroy called")
|
||||
if self.backend is not None:
|
||||
self.backend.shutdown()
|
||||
super().destroy()
|
||||
|
||||
    def _on_preview_update(self, img: np.ndarray, header: dict) -> None:
        """Send preview stream and update frame index counter.

        Invoked by the StdDaqPreview monitor thread for every (throttled)
        decoded frame.

        Args:
            img: decoded preview image.
            header: stream metadata; 'frame' holds the running frame index.
        """
        # FIXME: There's also a recorded images counter provided by the stdDAQ writer
        self.num_images_counter.put(header["frame"], force=True)
        # Forward the image to BEC 2D-monitor subscribers
        self._run_subs(sub_type=self.SUB_DEVICE_MONITOR_2D, obj=self, value=img)
|
||||
|
||||
def acq_done(self) -> DeviceStatus:
|
||||
"""
|
||||
Check if the acquisition is done. For the GigaFrost camera, this is
|
||||
done by checking the status of the backend as the camera does not
|
||||
provide any feedback about its internal state.
|
||||
|
||||
Returns:
|
||||
DeviceStatus: The status of the acquisition
|
||||
"""
|
||||
status = DeviceStatus(self)
|
||||
if self.backend is not None:
|
||||
self.backend.add_status_callback(
|
||||
status,
|
||||
success=[StdDaqStatus.IDLE, StdDaqStatus.FILE_SAVED],
|
||||
error=[StdDaqStatus.REJECTED, StdDaqStatus.ERROR],
|
||||
)
|
||||
return status
|
||||
|
||||
########################################
|
||||
# Beamline Specific Implementations #
|
||||
########################################
|
||||
|
||||
# pylint: disable=protected-access
|
||||
def on_stage(self) -> None:
|
||||
"""Configure and arm PCO.Edge camera for acquisition"""
|
||||
|
||||
# PCO can finish a run without explicit unstaging
|
||||
if self.state not in ("IDLE"):
|
||||
logger.warning(
|
||||
f"Trying to stage the camera from state {self.state}, unstaging it first!"
|
||||
)
|
||||
self.unstage()
|
||||
time.sleep(0.5)
|
||||
|
||||
# Fish out our configuration from scaninfo (via explicit or generic addressing)
|
||||
scan_args = {
|
||||
**self.scan_info.msg.request_inputs["inputs"],
|
||||
**self.scan_info.msg.request_inputs["kwargs"],
|
||||
**self.scan_info.msg.scan_parameters,
|
||||
}
|
||||
|
||||
d = {}
|
||||
if "image_width" in scan_args and scan_args["image_width"] is not None:
|
||||
d["image_width"] = scan_args["image_width"]
|
||||
if "image_height" in scan_args and scan_args["image_height"] is not None:
|
||||
d["image_height"] = scan_args["image_height"]
|
||||
if "exp_time" in scan_args and scan_args["exp_time"] is not None:
|
||||
d["exposure_time_ms"] = scan_args["exp_time"]
|
||||
if "exp_period" in scan_args and scan_args["exp_period"] is not None:
|
||||
d["exposure_period_ms"] = scan_args["exp_period"]
|
||||
if "exp_burst" in scan_args and scan_args["exp_burst"] is not None:
|
||||
d["exposure_num_burst"] = scan_args["exp_burst"]
|
||||
if "acq_time" in scan_args and scan_args["acq_time"] is not None:
|
||||
d["exposure_time_ms"] = scan_args["acq_time"]
|
||||
if "acq_period" in scan_args and scan_args["acq_period"] is not None:
|
||||
d["exposure_period_ms"] = scan_args["acq_period"]
|
||||
if "acq_burst" in scan_args and scan_args["acq_burst"] is not None:
|
||||
d["exposure_num_burst"] = scan_args["acq_burst"]
|
||||
if "acq_mode" in scan_args and scan_args["acq_mode"] is not None:
|
||||
d["acq_mode"] = scan_args["acq_mode"]
|
||||
# elif self.scaninfo.scan_type == "step":
|
||||
# d['acq_mode'] = "default"
|
||||
if "pco_store_mode" in scan_args and scan_args["pco_store_mode"] is not None:
|
||||
d["store_mode"] = scan_args["pco_store_mode"]
|
||||
if "pco_data_format" in scan_args and scan_args["pco_data_format"] is not None:
|
||||
d["data_format"] = scan_args["pco_data_format"]
|
||||
|
||||
# Perform bluesky-style configuration
|
||||
if d:
|
||||
logger.warning(f"[{self.name}] Configuring with:\n{d}")
|
||||
self.configure(d=d)
|
||||
|
||||
# stdDAQ backend parameters
|
||||
num_points = (
|
||||
1
|
||||
* scan_args.get("steps", 1)
|
||||
* scan_args.get("exp_burst", 1)
|
||||
* scan_args.get("repeats", 1)
|
||||
* scan_args.get("burst_at_each_point", 1)
|
||||
)
|
||||
self.num_images.set(num_points).wait()
|
||||
if "daq_file_path" in scan_args and scan_args["daq_file_path"] is not None:
|
||||
self.file_path.set(scan_args["daq_file_path"]).wait()
|
||||
if "daq_file_prefix" in scan_args and scan_args["daq_file_prefix"] is not None:
|
||||
self.file_prefix.set(scan_args["daq_file_prefix"]).wait()
|
||||
if "daq_num_images" in scan_args and scan_args["daq_num_images"] is not None:
|
||||
self.num_images.set(scan_args["daq_num_images"]).wait()
|
||||
# Start stdDAQ preview
|
||||
if self.live_preview is not None:
|
||||
self.live_preview.start()
|
||||
|
||||
    def on_unstage(self) -> None:
        """Disarm the PCO.Edge camera"""
        # Stop the camera first so the writer does not receive further frames
        self.disarm()
        if self.backend is not None:
            logger.info(f"StdDaq status before unstage: {self.backend.status}")
            self.backend.stop()
|
||||
|
||||
def on_pre_scan(self) -> DeviceStatus | None:
|
||||
"""Called right before the scan starts on all devices automatically."""
|
||||
logger.warning("Called op_prescan on PCO camera")
|
||||
# First start the stdDAQ
|
||||
if self.backend is not None:
|
||||
self.backend.start(
|
||||
file_path=self.file_path.get(),
|
||||
file_prefix=self.file_prefix.get(),
|
||||
num_images=self.num_images.get(),
|
||||
)
|
||||
# Then start the camera
|
||||
self.arm()
|
||||
|
||||
    def on_trigger(self) -> None | DeviceStatus:
        """Trigger mode operation

        Use it to repeatedly record a fixed number of frames and send it to stdDAQ. The method waits
        for the acquisition and data transfer to complete.

        The sequence is order-sensitive: clear buffer -> wait until the buffer
        holds file_savestop frames -> start the transfer -> wait for the
        save-busy flag to drop (a 1 -> 0 edge newer than the transfer start).

        NOTE: Maciej confirmed that sparse data is no problem to the stdDAQ.
        TODO: Optimize data transfer to launch at end and check completion at the beginning.
        """
        # Ensure that previous data transfer finished
        # def sentIt(*args, value, timestamp, **kwargs):
        #     return value==0
        # status = SubscriptionStatus(self.file_savebusy, sentIt, timeout=120)
        # status.wait()

        # Not sure if it always sends the first batch of images or the newest
        self.buffer_clear.set(1, settle_time=0.1).wait()

        # Wait until the buffer fills up with enough images
        # (expected duration of one batch, used to scale the timeout below)
        t_expected = (self.acquire_time.get() + self.acquire_delay.get()) * self.file_savestop.get()

        def wait_acquisition(*, value, timestamp, **_):
            # Target frame count is re-read on each update in case it changed
            num_target = self.file_savestop.get()
            # logger.warning(f"{value} of {num_target}")
            return bool(value >= num_target)

        # At least 5 s, otherwise 5x the expected acquisition duration
        max_wait = max(5, 5 * t_expected)
        status = SubscriptionStatus(
            self.buffer_used, wait_acquisition, timeout=max_wait, settle_time=0.2
        )
        status.wait()

        # Then start file transfer (need to get the save busy flag update)
        # self.file_transfer.set(1, settle_time=0.2).wait()
        self.file_transfer.set(1).wait()

        # And wait until the images have been sent
        # NOTE: this does not wait for new value, the first check will be
        # against values from the previous cycle, i.e. pass automatically.
        t_start = time.time()

        def wait_sending(*, old_value, value, timestamp, **_):
            # Only accept busy-flag negedges that happened after the transfer
            # start, to ignore stale updates from the previous cycle
            t_elapsed = timestamp - t_start
            # logger.warning(f"{old_value}\t{value}\t{t_elapsed}")
            return old_value == 1 and value == 0 and t_elapsed > 0

        status = SubscriptionStatus(self.file_savebusy, wait_sending, timeout=120, settle_time=0.2)
        status.wait()
|
||||
|
||||
    def on_complete(self) -> DeviceStatus | None:
        """Called to inquire if a device has completed a scan."""
        # Completion is tracked via the stdDAQ backend status (see acq_done)
        return self.acq_done()
|
||||
|
||||
    def on_kickoff(self) -> DeviceStatus | None:
        """Start data transfer

        Kicks off the streaming of buffered images towards the stdDAQ.

        TODO: Need to revisit this once triggering is complete
        """
        self.file_transfer.set(1).wait()
|
||||
|
||||
    def on_stop(self) -> None:
        """Called when the device is stopped."""
        # Stopping is equivalent to unstaging: disarm camera, stop the writer
        return self.on_unstage()
|
||||
|
||||
|
||||
# Automatically connect to test camera if directly invoked
if __name__ == "__main__":

    # Drive data collection
    # NOTE: hard-coded test-bench endpoints; for interactive debugging only
    cam = PcoEdge5M(
        "X02DA-CCDCAM2:",
        name="mcpcam",
        std_daq_ws="ws://129.129.95.111:8081",
        std_daq_rest="http://129.129.95.111:5010",
        std_daq_live="tcp://129.129.95.111:20010",
    )
    cam.wait_for_connection()
|
||||
@@ -1,395 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import copy
|
||||
import enum
|
||||
import json
|
||||
import threading
|
||||
import time
|
||||
import traceback
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import requests
|
||||
from bec_lib.logger import bec_logger
|
||||
from ophyd import StatusBase
|
||||
from typeguard import typechecked
|
||||
from websockets import State
|
||||
from websockets.exceptions import WebSocketException
|
||||
import websockets.sync.client as ws
|
||||
|
||||
if TYPE_CHECKING: # pragma: no cover
|
||||
from ophyd import Device, DeviceStatus
|
||||
|
||||
|
||||
logger = bec_logger.logger
|
||||
|
||||
|
||||
class StdDaqError(Exception): ...  # Raised when the stdDAQ reports an error status to a callback
|
||||
|
||||
|
||||
class StdDaqStatus(str, enum.Enum):
    """
    Status of the StdDAQ.
    Extracted from https://git.psi.ch/controls-ci/std_detector_buffer/-/blob/master/source/std-det-driver/src/driver_state.hpp

    Inherits from str so members compare equal to the raw status strings
    received from the websocket status stream.
    """

    CREATING_FILE = "creating_file"
    ERROR = "error"
    FILE_CREATED = "file_created"
    FILE_SAVED = "file_saved"
    IDLE = "idle"
    RECORDING = "recording"
    REJECTED = "rejected"
    SAVING_FILE = "saving_file"
    STARTED = "started"
    STOP = "stop"
    UNDEFINED = "undefined"
    WAITING_FOR_FIRST_IMAGE = "waiting_for_first_image"
|
||||
|
||||
|
||||
# pylint: disable=too-many-instance-attributes
class StdDaqClient:
    """Standalone stdDAQ client class

    Controls the stdDAQ backend through its websocket API (start/stop commands
    and status stream) and its REST API (configuration, virtual datasets).
    A daemon thread consumes the websocket status stream, caches the latest
    status/count and resolves registered ophyd status objects.
    """

    USER_ACCESS = ["status", "count", "start", "stop", "get_config", "set_config", "reset"]

    # Class-level defaults hold only immutable values; mutable per-instance
    # state is created in __init__ to avoid sharing between instances.
    _ws_client: ws.ClientConnection | None = None
    _count: int = 0
    _status: str = "undefined"
    _status_timestamp: float | None = None
    _ws_monitor_thread: threading.Thread | None = None
    _config: dict | None = None

    def __init__(self, parent: Device, ws_url: str, rest_url: str):
        """
        Args:
            parent (Device): owning ophyd device (used only for naming)
            ws_url (str): websocket endpoint of the stdDAQ
            rest_url (str): REST endpoint of the stdDAQ
        """
        self.parent = parent
        self.ws_url = ws_url
        self.rest_url = rest_url
        self.name = self.parent.name if self.parent is not None else "None"
        # Must be here otherwise they're static (shared between class instances)
        self._ws_recv_mutex = threading.Lock()
        self._shutdown_event = threading.Event()
        self._ws_idle_event = threading.Event()
        self._daq_is_running = threading.Event()
        # BUG FIX: this dict used to be a class attribute and was therefore
        # shared between all client instances; keys are id(status) ints.
        self._status_callbacks: dict[
            int, tuple[DeviceStatus, list[StdDaqStatus], list[StdDaqStatus]]
        ] = {}

        # Connect to WS interface and start status monitoring
        self.wait_for_connection()
        self._daq_is_running.set()
        self._ws_monitor_thread = threading.Thread(
            target=self._ws_monitor_loop, name=f"{self.name}_ws_monitor", daemon=True
        )
        self._ws_monitor_thread.start()

    @property
    def status(self) -> StdDaqStatus:
        """
        Get the status of the StdDAQ (raw status string from the last
        websocket update).
        """
        return self._status

    @property
    def count(self) -> int:
        """Get the recorded frame count"""
        return self._count

    def add_status_callback(
        self, status: DeviceStatus, success: list[StdDaqStatus], error: list[StdDaqStatus]
    ):
        """
        Add a DeviceStatus callback for the StdDAQ. The status will be updated
        when the StdDAQ status changes and set to finished when the status
        matches one of the specified success statuses and to exception when the
        status matches one of the specified error statuses.

        Args:
            status (DeviceStatus): DeviceStatus object
            success (list[StdDaqStatus]): list of statuses that indicate success
            error (list[StdDaqStatus]): list of statuses that indicate error
        """
        self._status_callbacks[id(status)] = (status, success, error)

    @typechecked
    def start(
        self,
        file_path: str,
        file_prefix: str,
        num_images: int,
        timeout: float = 20,
        wait: bool = True,
    ) -> StatusBase | None:
        """Start acquisition on the StdDAQ.

        Args:
            file_path (str): path to save the files
            file_prefix (str): prefix of the files
            num_images (int): number of images to acquire
            timeout (float): timeout for the request
            wait (bool): block until the StdDAQ reached a final state
        Returns:
            status (StatusBase): Ophyd status object with attached monitor,
                or None when wait=True
        """
        # Ensure connection
        self.wait_for_connection()

        status = StatusBase()
        # NOTE: CREATING_FILE --> IDLE is a known error, the exact cause is unknown,
        # Might be botched overwrite protection (solved by changing file_prefix)
        # In previous versions there was also a mutex ownership problem
        self.add_status_callback(
            status, success=["waiting_for_first_image"], error=["rejected", "idle"]
        )
        message = {
            "command": "start",
            "path": file_path,
            "file_prefix": file_prefix,
            "n_image": num_images,
        }
        logger.info(f"Starting StdDaq backend. Current status: {self.status}. Message: {message}")
        self._ws_client.send(json.dumps(message))
        if wait:
            status.wait(timeout=timeout)
            return None
        return status

    @typechecked
    def stop(self, timeout: float = 5, wait=True, stop_cmd="stop") -> StatusBase | None:
        """Stop acquisition on the StdDAQ.

        Args:
            timeout (float): timeout for the request
            wait (bool): block until the StdDAQ reached a final state
            stop_cmd (str): command keyword sent over the websocket
        Returns:
            status (StatusBase): Ophyd status object with attached monitor,
                or None when wait=True
        """
        # Ensure connection
        self.wait_for_connection()

        logger.info(f"Stopping StdDaq backend. Current status: {self.status}")
        status = StatusBase()
        self.add_status_callback(status, success=["idle"], error=["error"])
        message = {"command": stop_cmd}

        self._ws_client.send(json.dumps(message))
        if wait:
            status.wait(timeout=timeout)
            return None
        return status

    def get_config(self, timeout: float = 2, cached=False) -> dict:
        """Get the current configuration of the StdDAQ.

        Args:
            timeout (float): timeout for the request
            cached (bool): return the last fetched configuration without a request
        Returns:
            config (dict): configuration of the StdDAQ
        """
        if cached:
            return self._config

        response = requests.get(
            self.rest_url + "/api/config/get", params={"user": "ioc"}, timeout=timeout
        )
        response.raise_for_status()
        self._config = response.json()
        return self._config

    def set_config(
        self, config: dict, timeout: float = 2, update: bool = True, force: bool = True
    ) -> None:
        """
        Set the configuration of the StdDAQ. This will overwrite the current configuration.

        Args:
            config (StdDaqConfig | dict): configuration to set
            timeout (float): timeout for the request
            update (bool): merge 'config' into the current configuration
            force (bool): push the configuration even if it is unchanged
        """
        old_config = self.get_config()
        if update:
            cfg = copy.deepcopy(self._config)
            cfg.update(config)
            new_config = cfg
        else:
            new_config = config

        # Escape unnecessary restarts
        if not force and new_config == old_config:
            return
        if not new_config:
            return

        self._pre_restart()

        logger.warning(new_config)
        response = requests.post(
            self.rest_url + "/api/config/set",
            params={"user": "ioc"},
            json=new_config,
            timeout=timeout,
        )
        response.raise_for_status()

        # Setting a new config will reboot the backend; we therefore have to restart the websocket
        self._post_restart()

    def _pre_restart(self):
        """Stop monitor before restart"""
        self._daq_is_running.clear()
        # Wait until the monitor loop acknowledges it went idle
        self._ws_idle_event.wait()
        if self._ws_client is not None:
            self._ws_client.close()

    def _post_restart(self):
        """Start monitor after a restart"""
        time.sleep(2)
        self.wait_for_connection()
        self._daq_is_running.set()

    def reset(self, min_wait: float = 5) -> None:
        """
        Reset the StdDAQ.

        Re-applying the current configuration reboots the backend.

        Args:
            min_wait (float): minimum wait time after reset
        """
        self.set_config(self.get_config())
        time.sleep(min_wait)

    def wait_for_connection(self, timeout: float = 20) -> None:
        """
        Wait for the connection to the StdDAQ to be established.

        Args:
            timeout (float): timeout for the request

        Raises:
            TimeoutError: if the websocket could not be opened within 'timeout'
        """
        start_time = time.time()
        while True:
            if self._ws_client is not None and self._ws_client.state == State.OPEN:
                return
            try:
                self._ws_client = ws.connect(self.ws_url)
                break
            except ConnectionRefusedError as exc:
                if time.time() - start_time > timeout:
                    raise TimeoutError("Timeout while waiting for connection to StdDAQ") from exc
                time.sleep(2)

    def create_virtual_datasets(self, file_path: str, file_prefix: str, timeout: float = 5) -> None:
        """
        Combine the stddaq written files in a given folder in an interleaved
        h5 virtual dataset.

        Args:
            file_path (str): path to the folder containing the files
            file_prefix (str): prefix of the files to combine
            timeout (float): timeout for the request
        """
        # TODO: Add wait for 'idle' state
        response = requests.post(
            self.rest_url + "/api/h5/create_interleaved_vds",
            params={"user": "ioc"},
            json={
                "base_path": file_path,
                "file_prefix": file_prefix,
                "output_file": file_prefix.rstrip("_") + ".h5",
            },
            timeout=timeout,
            headers={"Content-type": "application/json"},
        )
        response.raise_for_status()

    def shutdown(self):
        """
        Shutdown the StdDAQ client: stop the monitor thread and close the
        websocket connection.
        """
        # BUG FIX: log message used to read "sdtDAQ"
        logger.warning("Shutting down stdDAQ monitor")
        self._shutdown_event.set()
        if self._ws_monitor_thread is not None:
            self._ws_monitor_thread.join()
            logger.warning("Thread joined")
        if self._ws_client is not None:
            self._ws_client.close()
            self._ws_client = None
        logger.warning("Shutdown complete")

    def _wait_for_server_running(self):
        """
        Wait for the StdDAQ to be running. If the StdDaq is not running, the
        websocket loop will be set to idle (this unblocks _pre_restart).
        """
        while not self._shutdown_event.is_set():
            if self._daq_is_running.wait(0.1):
                self._ws_idle_event.clear()
                break
            self._ws_idle_event.set()

    def _ws_monitor_loop(self):
        """Loop to update the status property of the StdDAQ.

        This is a persistent monitor that updates the status and calls attached
        callbacks. It also handles stdDAQ restarts and reconnection by itself.
        """
        # Only one monitor loop per instance
        if self._ws_recv_mutex.locked():
            logger.warning(f"[{self.name}] stdDAQ WS monitor loop already locked")
            return

        with self._ws_recv_mutex:
            while not self._shutdown_event.is_set():
                self._wait_for_server_running()
                try:
                    msg = self._ws_client.recv(timeout=0.1)
                    msg_timestamp = time.time()
                except TimeoutError:
                    # No status update within the polling interval
                    continue
                except WebSocketException:
                    content = traceback.format_exc()
                    # TODO: this is expected to happen on every reconfiguration
                    logger.warning(f"Websocket connection closed unexpectedly: {content}")
                    self.wait_for_connection()
                    continue
                msg = json.loads(msg)
                if self._status != msg["status"]:
                    logger.warning(
                        f"[{self.name}] stdDAQ state transition: {self._status} --> {msg['status']}"
                    )
                if msg["status"] == "recording":
                    self._count = msg.get("count", 0)
                # Update status and run callbacks
                self._status = msg["status"]
                self._status_timestamp = msg_timestamp
                self._run_status_callbacks()

    def _run_status_callbacks(self):
        """
        Update the DeviceStatus objects based on the current status of the StdDAQ.
        If the status matches one of the success or error statuses, the DeviceStatus
        object will be set to finished or exception, respectively and removed from
        the list of callbacks.
        """
        status = self._status
        completed_callbacks = []
        for dev_status, success, error in self._status_callbacks.values():
            if status in success:
                dev_status.set_finished()
                logger.info(f"StdDaq status is {status}")
                completed_callbacks.append(dev_status)
            elif status in error:
                logger.warning(f"StdDaq status is {status}")
                dev_status.set_exception(StdDaqError(f"StdDaq status is {status}"))
                completed_callbacks.append(dev_status)

        for cb in completed_callbacks:
            self._status_callbacks.pop(id(cb))
|
||||
|
||||
|
||||
# Automatically connect to microXAS testbench if directly invoked
|
||||
if __name__ == "__main__":
|
||||
# pylint: disable=disallowed-name,too-few-public-methods
|
||||
class foo:
|
||||
"""Dummy"""
|
||||
|
||||
name = "bar"
|
||||
|
||||
daq = StdDaqClient(
|
||||
parent=foo(), ws_url="ws://129.129.95.111:8080", rest_url="http://129.129.95.111:5000"
|
||||
)
|
||||
@@ -1,133 +0,0 @@
|
||||
import json
|
||||
import threading
|
||||
import time
|
||||
from typing import Callable
|
||||
import traceback
|
||||
|
||||
import numpy as np
|
||||
import zmq
|
||||
from bec_lib.logger import bec_logger
|
||||
|
||||
logger = bec_logger.logger
|
||||
|
||||
ZMQ_TOPIC_FILTER = b""
|
||||
|
||||
|
||||
class StdDaqPreview:
    """Standalone stdDAQ preview class

    Subscribes to the stdDAQ's ZMQ PUB-SUB image stream in a background
    thread, decodes the frames and forwards them (throttled) to a user
    callback.
    """

    USER_ACCESS = ["start", "stop", "image", "frameno"]
    _socket = None
    _zmq_thread = None
    # Minimum time [s] between two processed frames (callback throttling)
    _throttle = 0.2
    # Last decoded image and its frame number
    image = None
    frameno = None

    def __init__(self, url: str, cb: Callable):
        """
        Args:
            url (str): ZMQ address of the stdDAQ preview stream
            cb (Callable): callback invoked as cb(image, header) per frame
        """
        self.url = url
        self._on_update_callback = cb
        # Must be here otherwise they're static (shared between class instances)
        self._monitor_mutex = threading.Lock()
        self._shutdown_event = threading.Event()

    def connect(self):
        """Connect to the StdDAQ's PUB-SUB streaming interface

        StdDAQ may reject connection for a few seconds when it restarts,
        so if it fails, wait a bit and try to connect again.
        """
        # pylint: disable=no-member
        context = zmq.Context()
        self._socket = context.socket(zmq.SUB)
        self._socket.setsockopt(zmq.SUBSCRIBE, ZMQ_TOPIC_FILTER)
        try:
            self._socket.connect(self.url)
        except ConnectionRefusedError:
            # One retry after a short grace period (backend may be restarting)
            time.sleep(1)
            self._socket.connect(self.url)

    def start(self):
        """Start the preview thread"""
        # Only one consumer thread
        if self._zmq_thread:
            self.stop()

        self._shutdown_event.clear()
        self._zmq_thread = threading.Thread(
            target=self._zmq_monitor, daemon=True, name="StdDaq_live_preview"
        )
        self._zmq_thread.start()

    def stop(self):
        """Stop the preview and disconnect from ZMQ stream"""
        self._shutdown_event.set()
        if self._zmq_thread:
            self._zmq_thread.join()
        self._zmq_thread = None

    def _zmq_monitor(self):
        """ZMQ stream monitor"""
        # Exit if another monitor is running
        if self._monitor_mutex.locked():
            return

        with self._monitor_mutex:
            # Open a new connection
            self.connect()

            try:
                # Run the monitor loop
                t_last = time.time()
                while not self._shutdown_event.is_set():
                    try:
                        # pylint: disable=no-member
                        r = self._socket.recv_multipart(flags=zmq.NOBLOCK)

                        # Throttle parsing and callbacks
                        t_curr = time.time()
                        if t_curr - t_last > self._throttle:
                            self._parse_data(r)
                            t_last = t_curr
                    except ValueError:
                        # Happens when the multipart message cannot be decoded
                        content = traceback.format_exc()
                        # BUG FIX: this used to be mislabelled as a websocket error
                        logger.warning(f"Failed to parse ZMQ preview message: {content}")
                        continue
                    except zmq.error.Again:
                        # Happens when receive queue is empty
                        time.sleep(0.1)
            finally:
                # Stop receiving incoming data
                self._socket.close()
                logger.warning("Detached live_preview monitoring")

    def _parse_data(self, data):
        """Decode one 'Array V1' multipart message and run the callback.

        Args:
            data: list of ZMQ message parts [json_metadata, raw_array_bytes]

        Raises:
            ValueError: for unsupported dtypes or shape/size mismatches.
        """
        # Length check
        if len(data) != 2:
            logger.warning(f"Received incomplete ZMQ message of length {len(data)}")
            # BUG FIX: previously fell through and crashed on the unpack below
            return

        # Unpack the Array V1 reply to metadata and array data
        meta, img_data = data

        # Update image and update subscribers
        header = json.loads(meta)
        if header["type"] == "uint16":
            image = np.frombuffer(img_data, dtype=np.uint16)
        elif header["type"] == "uint8":
            image = np.frombuffer(img_data, dtype=np.uint8)
        else:
            raise ValueError(f"Unexpected type {header['type']}")
        if image.size != np.prod(header["shape"]):
            err = f"Unexpected array size of {image.size} for header: {header}"
            raise ValueError(err)
        image = image.reshape(header["shape"])
        # Print diagnostics and run callback
        logger.info(
            f"Live update: frame {header['frame']}\tShape: {header['shape']}\t"
            f"Mean: {np.mean(image):.3f}"
        )
        self.image = image
        self.frameno = header["frame"]
        self._on_update_callback(image, header)
|
||||
@@ -1,512 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Standard DAQ control interface module through the websocket API
|
||||
|
||||
Created on Thu Jun 27 17:28:43 2024
|
||||
|
||||
@author: mohacsi_i
|
||||
"""
|
||||
import json
|
||||
from time import sleep
|
||||
from threading import Thread
|
||||
import requests
|
||||
import os
|
||||
|
||||
from ophyd import Signal, Component, Kind
|
||||
from ophyd.status import SubscriptionStatus
|
||||
from websockets.sync.client import connect, ClientConnection
|
||||
from websockets.exceptions import ConnectionClosedOK, ConnectionClosedError
|
||||
|
||||
from ophyd_devices.interfaces.base_classes.psi_detector_base import PSIDetectorBase as PSIDeviceBase
|
||||
from ophyd_devices.interfaces.base_classes.psi_detector_base import (
|
||||
CustomDetectorMixin as CustomDeviceMixin,
|
||||
)
|
||||
from bec_lib import bec_logger
|
||||
|
||||
logger = bec_logger.logger
|
||||
|
||||
|
||||
class StdDaqMixin(CustomDeviceMixin):
|
||||
# pylint: disable=protected-access
|
||||
_mon = None
|
||||
|
||||
def on_stage(self) -> None:
|
||||
"""Configuration and staging
|
||||
|
||||
In the BEC model ophyd devices must fish out their own configuration from the 'scaninfo'.
|
||||
I.e. they need to know which parameters are relevant for them at each scan.
|
||||
|
||||
NOTE: Tomcat might use multiple cameras with their own separate DAQ instances.
|
||||
"""
|
||||
# Fish out our configuration from scaninfo (via explicit or generic addressing)
|
||||
# NOTE: Scans don't have to fully configure the device
|
||||
d = {}
|
||||
if "kwargs" in self.parent.scaninfo.scan_msg.info:
|
||||
scanargs = self.parent.scaninfo.scan_msg.info["kwargs"]
|
||||
if "image_width" in scanargs and scanargs["image_width"] is not None:
|
||||
d["image_width"] = scanargs["image_width"]
|
||||
if "image_height" in scanargs and scanargs["image_height"] is not None:
|
||||
d["image_height"] = scanargs["image_height"]
|
||||
if "nr_writers" in scanargs and scanargs["nr_writers"] is not None:
|
||||
d["nr_writers"] = scanargs["nr_writers"]
|
||||
if "file_path" in scanargs and scanargs["file_path"] is not None:
|
||||
self.parent.file_path.set(scanargs["file_path"].replace("data", "gpfs")).wait()
|
||||
print(scanargs["file_path"])
|
||||
if os.path.isdir(scanargs["file_path"]):
|
||||
print("isdir")
|
||||
pass
|
||||
else:
|
||||
print("creating")
|
||||
try:
|
||||
os.makedirs(scanargs["file_path"], 0o777)
|
||||
os.system("chmod -R 777 " + scanargs["base_path"])
|
||||
except:
|
||||
print("Problem with creating folder")
|
||||
if "file_prefix" in scanargs and scanargs["file_prefix"] != None:
|
||||
print(scanargs["file_prefix"])
|
||||
self.parent.file_prefix.set(scanargs["file_prefix"]).wait()
|
||||
|
||||
if "daq_num_points" in scanargs:
|
||||
d["num_points_total"] = scanargs["daq_num_points"]
|
||||
else:
|
||||
# Try to figure out number of points
|
||||
num_points = 1
|
||||
points_valid = False
|
||||
if "steps" in scanargs and scanargs["steps"] is not None:
|
||||
num_points *= scanargs["steps"]
|
||||
points_valid = True
|
||||
if "exp_burst" in scanargs and scanargs["exp_burst"] is not None:
|
||||
num_points *= scanargs["exp_burst"]
|
||||
points_valid = True
|
||||
if "repeats" in scanargs and scanargs["repeats"] is not None:
|
||||
num_points *= scanargs["repeats"]
|
||||
points_valid = True
|
||||
if points_valid:
|
||||
d["num_points_total"] = num_points
|
||||
|
||||
# Perform bluesky-style configuration
|
||||
if len(d) > 0:
|
||||
# Configure new run (will restart the stdDAQ)
|
||||
logger.warning(f"[{self.parent.name}] stdDAQ needs reconfiguring with:\n{d}")
|
||||
self.parent.configure(d=d)
|
||||
# Wait for REST API to kill the DAQ
|
||||
sleep(0.5)
|
||||
|
||||
# Try to start a new run (reconnects)
|
||||
self.parent.bluestage()
|
||||
# And start status monitoring
|
||||
self._mon = Thread(target=self.monitor, daemon=True)
|
||||
self._mon.start()
|
||||
|
||||
def on_unstage(self):
|
||||
"""Stop a running acquisition and close connection"""
|
||||
print("Creating virtual dataset")
|
||||
self.parent.create_virtual_dataset()
|
||||
self.parent.blueunstage()
|
||||
|
||||
def on_stop(self):
|
||||
"""Stop a running acquisition and close connection"""
|
||||
self.parent.blueunstage()
|
||||
|
||||
def monitor(self) -> None:
|
||||
"""Monitor status messages while connection is open. This will block the reply monitoring
|
||||
to calling unstage() might throw. Status updates are sent every 1 seconds, but finishing
|
||||
acquisition means StdDAQ will close connection, so there's no idle state polling.
|
||||
"""
|
||||
try:
|
||||
sleep(0.2)
|
||||
for msg in self.parent._wsclient:
|
||||
message = json.loads(msg)
|
||||
self.parent.runstatus.put(message["status"], force=True)
|
||||
# logger.info(f"[{self.parent.name}] Pushed status: {message['status']}")
|
||||
except (ConnectionClosedError, ConnectionClosedOK, AssertionError):
|
||||
# Libraty throws theese after connection is closed
|
||||
return
|
||||
except Exception as ex:
|
||||
logger.warning(f"[{self.parent.name}] Exception in polling: {ex}")
|
||||
return
|
||||
finally:
|
||||
self._mon = None
|
||||
|
||||
|
||||
class StdDaqClient(PSIDeviceBase):
|
||||
"""StdDaq API
|
||||
|
||||
This class combines the new websocket and REST interfaces of the stdDAQ. The websocket
|
||||
interface starts and stops the acquisition and provides status, while the REST interface
|
||||
can read and write the JSON configuration file. The stdDAQ needs to restart all services
|
||||
to reconfigure with a new config, which might corrupt
|
||||
the currently written files (fix is underway).
|
||||
|
||||
Example:
|
||||
```
|
||||
daq = StdDaqClient(name="daq", ws_url="ws://xbl-daq-29:8080", rest_url="http://xbl-daq-29:5000")
|
||||
```
|
||||
"""
|
||||
|
||||
# pylint: disable=too-many-instance-attributes
|
||||
custom_prepare_cls = StdDaqMixin
|
||||
USER_ACCESS = [
|
||||
"set_daq_config",
|
||||
"get_daq_config",
|
||||
"nuke",
|
||||
"connect",
|
||||
"message",
|
||||
"state",
|
||||
"bluestage",
|
||||
"blueunstage",
|
||||
]
|
||||
_wsclient = None
|
||||
|
||||
# Status attributes
|
||||
ws_url = Component(Signal, kind=Kind.config, metadata={"write_access": False})
|
||||
runstatus = Component(
|
||||
Signal, value="unknown", kind=Kind.normal, metadata={"write_access": False}
|
||||
)
|
||||
num_images = Component(Signal, value=10000, kind=Kind.config)
|
||||
file_path = Component(Signal, value="/gpfs/test/test-beamline", kind=Kind.config)
|
||||
file_prefix = Component(Signal, value="file", kind=Kind.config)
|
||||
# Configuration attributes
|
||||
rest_url = Component(Signal, kind=Kind.config, metadata={"write_access": False})
|
||||
cfg_detector_name = Component(Signal, kind=Kind.config)
|
||||
cfg_detector_type = Component(Signal, kind=Kind.config)
|
||||
cfg_bit_depth = Component(Signal, kind=Kind.config)
|
||||
cfg_pixel_height = Component(Signal, kind=Kind.config)
|
||||
cfg_pixel_width = Component(Signal, kind=Kind.config)
|
||||
cfg_nr_writers = Component(Signal, kind=Kind.config)
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
prefix="",
|
||||
*,
|
||||
name,
|
||||
kind=None,
|
||||
read_attrs=None,
|
||||
configuration_attrs=None,
|
||||
parent=None,
|
||||
device_manager=None,
|
||||
ws_url: str = "ws://localhost:8080",
|
||||
rest_url: str = "http://localhost:5000",
|
||||
data_source_name=None,
|
||||
**kwargs,
|
||||
) -> None:
|
||||
super().__init__(
|
||||
prefix=prefix,
|
||||
name=name,
|
||||
kind=kind,
|
||||
read_attrs=read_attrs,
|
||||
configuration_attrs=configuration_attrs,
|
||||
parent=parent,
|
||||
device_manager=device_manager,
|
||||
**kwargs,
|
||||
)
|
||||
self.ws_url.set(ws_url, force=True).wait()
|
||||
self.rest_url.set(rest_url, force=True).wait()
|
||||
self.data_source_name = data_source_name
|
||||
|
||||
# Connect to the DAQ and initialize values
|
||||
try:
|
||||
self.get_daq_config(update=True)
|
||||
except Exception as ex:
|
||||
logger.error(f"Failed to connect to the stdDAQ REST API\n{ex}")
|
||||
|
||||
def connect(self) -> ClientConnection:
|
||||
"""Connect to the StdDAQ's websockets interface
|
||||
|
||||
StdDAQ may reject connection for a few seconds after restart, or when
|
||||
it wants so if it fails, wait a bit and try to connect again.
|
||||
"""
|
||||
num_retry = 0
|
||||
while num_retry < 5:
|
||||
try:
|
||||
logger.debug(f"[{self.name}] Connecting to stdDAQ at {self.ws_url.get()}")
|
||||
connection = connect(self.ws_url.get())
|
||||
logger.debug(f"[{self.name}] Connected to stdDAQ after {num_retry} tries")
|
||||
return connection
|
||||
except ConnectionRefusedError:
|
||||
num_retry += 1
|
||||
sleep(2)
|
||||
raise ConnectionRefusedError("The stdDAQ websocket interface refused connection 5 times.")
|
||||
|
||||
def message(self, message: dict, timeout=1, wait_reply=True, client=None) -> None | str:
|
||||
"""Send a message to the StdDAQ and receive a reply
|
||||
|
||||
Note: finishing acquisition means StdDAQ will close connection, so
|
||||
there's no idle state polling.
|
||||
"""
|
||||
# Prepare message
|
||||
msg = json.dumps(message) if isinstance(message, dict) else str(message)
|
||||
|
||||
# Connect if client was destroyed
|
||||
if self._wsclient is None:
|
||||
self._wsclient = self.connect()
|
||||
|
||||
# Send message (reopen connection if needed)
|
||||
msg = json.dumps(message) if isinstance(message, dict) else str(message)
|
||||
try:
|
||||
self._wsclient.send(msg)
|
||||
except (ConnectionClosedError, ConnectionClosedOK, AttributeError) as ex:
|
||||
# Re-connect if the connection was closed
|
||||
self._wsclient = self.connect()
|
||||
self._wsclient.send(msg)
|
||||
|
||||
# Wait for reply
|
||||
reply = None
|
||||
if wait_reply:
|
||||
try:
|
||||
reply = self._wsclient.recv(timeout)
|
||||
return reply
|
||||
except (ConnectionClosedError, ConnectionClosedOK) as ex:
|
||||
self._wsclient = None
|
||||
logger.error(f"[{self.name}] WS connection was closed before reply: {ex}")
|
||||
except (TimeoutError, RuntimeError) as ex:
|
||||
logger.error(f"[{self.name}] Error in receiving ws reply: {ex}")
|
||||
return reply
|
||||
|
||||
def configure(self, d: dict = None):
|
||||
"""Configure the next scan with the stdDAQ
|
||||
|
||||
Parameters as 'd' dictionary, the default is unchanged.
|
||||
----------------------------
|
||||
num_points_total : int, optional
|
||||
Number of images to be taken during each scan. Set to -1 for an unlimited number of
|
||||
images (limited by the ringbuffer size and backend speed).
|
||||
file_path: str, optional
|
||||
File path to save the data, usually GPFS.
|
||||
image_width : int, optional
|
||||
ROI size in the x-direction [pixels].
|
||||
image_height : int, optional
|
||||
ROI size in the y-direction [pixels].
|
||||
bit_depth: int, optional
|
||||
Image bit depth for cameras that can change it [int].
|
||||
nr_writers: int, optional
|
||||
Number of writers [int].
|
||||
"""
|
||||
|
||||
# Configuration parameters
|
||||
if "image_width" in d and d["image_width"] != None:
|
||||
self.cfg_pixel_width.set(d["image_width"]).wait()
|
||||
if "image_height" in d and d["image_height"] != None:
|
||||
self.cfg_pixel_height.set(d["image_height"]).wait()
|
||||
if "bit_depth" in d:
|
||||
self.cfg_bit_depth.set(d["bit_depth"]).wait()
|
||||
if "nr_writers" in d and d["nr_writers"] != None:
|
||||
self.cfg_nr_writers.set(d["nr_writers"]).wait()
|
||||
# Run parameters
|
||||
if "num_points_total" in d:
|
||||
self.num_images.set(d["num_points_total"]).wait()
|
||||
|
||||
# Restart the DAQ if resolution changed
|
||||
cfg = self.get_daq_config()
|
||||
if (
|
||||
cfg["image_pixel_height"] != self.cfg_pixel_height.get()
|
||||
or cfg["image_pixel_width"] != self.cfg_pixel_width.get()
|
||||
or cfg["bit_depth"] != self.cfg_bit_depth.get()
|
||||
or cfg["number_of_writers"] != self.cfg_nr_writers.get()
|
||||
):
|
||||
|
||||
# Stop if current status is not idle
|
||||
if self.state() != "idle":
|
||||
logger.warning(f"[{self.name}] stdDAQ reconfiguration might corrupt files")
|
||||
|
||||
# Update retrieved config
|
||||
cfg["image_pixel_height"] = int(self.cfg_pixel_height.get())
|
||||
cfg["image_pixel_width"] = int(self.cfg_pixel_width.get())
|
||||
cfg["bit_depth"] = int(self.cfg_bit_depth.get())
|
||||
cfg["number_of_writers"] = int(self.cfg_nr_writers.get())
|
||||
self.set_daq_config(cfg)
|
||||
sleep(1)
|
||||
self.get_daq_config(update=True)
|
||||
|
||||
def bluestage(self):
|
||||
"""Stages the stdDAQ
|
||||
|
||||
Opens a new connection to the stdDAQ, sends the start command with
|
||||
the current configuration. It waits for the first reply and checks
|
||||
it for obvious failures.
|
||||
"""
|
||||
# Can't stage into a running exposure
|
||||
if self.state() != "idle":
|
||||
raise RuntimeError(f"[{self.name}] stdDAQ can't stage from state: {self.state()}")
|
||||
|
||||
# Must make sure that image size matches the data source
|
||||
if self.data_source_name is not None:
|
||||
cam_img_w = self.device_manager.devices[self.data_source_name].cfgRoiX.get()
|
||||
cam_img_h = self.device_manager.devices[self.data_source_name].cfgRoiY.get()
|
||||
daq_img_w = self.cfg_pixel_width.get()
|
||||
daq_img_h = self.cfg_pixel_height.get()
|
||||
|
||||
if not (daq_img_w == cam_img_w and daq_img_h == cam_img_h):
|
||||
raise RuntimeError(
|
||||
f"[{self.name}] stdDAQ image resolution ({daq_img_w} , {daq_img_h}) does not match camera with ({cam_img_w} , {cam_img_h})"
|
||||
)
|
||||
else:
|
||||
logger.warning(
|
||||
f"[{self.name}] stdDAQ image resolution ({daq_img_w} , {daq_img_h}) matches camera with ({cam_img_w} , {cam_img_h})"
|
||||
)
|
||||
|
||||
file_path = self.file_path.get()
|
||||
num_images = self.num_images.get()
|
||||
file_prefix = self.file_prefix.get()
|
||||
print(file_prefix)
|
||||
|
||||
# New connection
|
||||
self._wsclient = self.connect()
|
||||
message = {
|
||||
"command": "start",
|
||||
"path": file_path,
|
||||
"file_prefix": file_prefix,
|
||||
"n_image": num_images,
|
||||
}
|
||||
reply = self.message(message)
|
||||
|
||||
if reply is not None:
|
||||
reply = json.loads(reply)
|
||||
self.runstatus.set(reply["status"], force=True).wait()
|
||||
logger.info(f"[{self.name}] Start DAQ reply: {reply}")
|
||||
|
||||
# Give it more time to reconfigure
|
||||
if reply["status"] in ("rejected"):
|
||||
# FIXME: running exposure is a nogo
|
||||
if reply["reason"] == "driver is busy!":
|
||||
raise RuntimeError(
|
||||
f"[{self.name}] Start stdDAQ command rejected: already running"
|
||||
)
|
||||
else:
|
||||
# Give it more time to consolidate
|
||||
sleep(1)
|
||||
else:
|
||||
# Success!!!
|
||||
print(f"[{self.name}] Started stdDAQ in: {reply['status']}")
|
||||
return
|
||||
|
||||
raise RuntimeError(
|
||||
f"[{self.name}] Failed to start the stdDAQ in 1 tries, reason: {reply['reason']}"
|
||||
)
|
||||
|
||||
def blueunstage(self):
|
||||
"""Unstages the stdDAQ
|
||||
|
||||
Opens a new connection to the stdDAQ, sends the stop command and
|
||||
waits for the idle state.
|
||||
"""
|
||||
ii = 0
|
||||
while ii < 10:
|
||||
# Stop the DAQ (will close connection) - reply is always "success"
|
||||
self._wsclient = self.connect()
|
||||
self.message({"command": "stop_all"}, wait_reply=False)
|
||||
|
||||
# Let it consolidate
|
||||
sleep(0.2)
|
||||
|
||||
# Check final status (from new connection)
|
||||
self._wsclient = self.connect()
|
||||
reply = self.message({"command": "status"})
|
||||
if reply is not None:
|
||||
logger.info(f"[{self.name}] DAQ status reply: {reply}")
|
||||
reply = json.loads(reply)
|
||||
|
||||
if reply["status"] in ("idle", "error"):
|
||||
# Only 'idle' state accepted
|
||||
print(f"DAQ stopped on try {ii}")
|
||||
return
|
||||
elif reply["status"] in ("stop"):
|
||||
# Give it more time to stop
|
||||
sleep(0.5)
|
||||
elif ii >= 6:
|
||||
raise RuntimeError(f"Failed to stop StdDAQ: {reply}")
|
||||
ii += 1
|
||||
raise RuntimeError(f"Failed to stop StdDAQ in time")
|
||||
|
||||
##########################################################################
|
||||
# Bluesky flyer interface
|
||||
def complete(self) -> SubscriptionStatus:
|
||||
"""Wait for current run. Must end in status 'file_saved'."""
|
||||
|
||||
def is_running(*args, value, timestamp, **kwargs):
|
||||
result = value in ["idle", "file_saved", "error"]
|
||||
return result
|
||||
|
||||
status = SubscriptionStatus(self.runstatus, is_running, settle_time=0.5)
|
||||
return status
|
||||
|
||||
def get_daq_config(self, update=False) -> dict:
|
||||
"""Read the current configuration from the DAQ"""
|
||||
r = requests.get(self.rest_url.get() + "/api/config/get", params={"user": "ioc"}, timeout=2)
|
||||
if r.status_code != 200:
|
||||
raise ConnectionError(f"[{self.name}] Error {r.status_code}:\t{r.text}")
|
||||
cfg = r.json()
|
||||
|
||||
if update:
|
||||
self.cfg_detector_name.set(cfg["detector_name"]).wait()
|
||||
self.cfg_detector_type.set(cfg["detector_type"]).wait()
|
||||
self.cfg_bit_depth.set(cfg["bit_depth"]).wait()
|
||||
self.cfg_pixel_height.set(cfg["image_pixel_height"]).wait()
|
||||
self.cfg_pixel_width.set(cfg["image_pixel_width"]).wait()
|
||||
self.cfg_nr_writers.set(cfg["number_of_writers"]).wait()
|
||||
return cfg
|
||||
|
||||
def set_daq_config(self, config, settle_time=1):
|
||||
"""Write a full configuration to the DAQ"""
|
||||
url = self.rest_url.get() + "/api/config/set"
|
||||
r = requests.post(
|
||||
url,
|
||||
params={"user": "ioc"},
|
||||
json=config,
|
||||
timeout=2,
|
||||
headers={"Content-Type": "application/json"},
|
||||
)
|
||||
if r.status_code != 200:
|
||||
raise ConnectionError(f"[{self.name}] Error {r.status_code}:\t{r.text}")
|
||||
# Wait for service to restart (and connect to make sure)
|
||||
# sleep(settle_time)
|
||||
self.connect()
|
||||
return r.json()
|
||||
|
||||
def create_virtual_dataset(self):
|
||||
"""Combine the stddaq written files in a given folder in an interleaved
|
||||
h5 virtual dataset
|
||||
"""
|
||||
url = self.rest_url.get() + "/api/h5/create_interleaved_vds"
|
||||
file_path = self.file_path.get()
|
||||
file_prefix = self.file_prefix.get()
|
||||
|
||||
r = requests.post(
|
||||
url,
|
||||
params={"user": "ioc"},
|
||||
json={
|
||||
"base_path": file_path,
|
||||
"file_prefix": file_prefix,
|
||||
"output_file": file_prefix.rstrip("_") + ".h5",
|
||||
},
|
||||
timeout=2,
|
||||
headers={"Content-type": "application/json"},
|
||||
)
|
||||
|
||||
def nuke(self, restarttime=5):
|
||||
"""Reconfigures the stdDAQ to restart the services. This causes
|
||||
systemd to kill the current DAQ service and restart it with the same
|
||||
configuration. Which might corrupt the currently written file...
|
||||
"""
|
||||
cfg = self.get_daq_config()
|
||||
self.set_daq_config(cfg)
|
||||
sleep(restarttime)
|
||||
|
||||
def state(self) -> str | None:
|
||||
"""Querry the current system status"""
|
||||
try:
|
||||
wsclient = self.connect()
|
||||
wsclient.send(json.dumps({"command": "status"}))
|
||||
r = wsclient.recv(timeout=1)
|
||||
r = json.loads(r)
|
||||
return r["status"]
|
||||
except ConnectionRefusedError:
|
||||
raise
|
||||
|
||||
|
||||
# Automatically connect to microXAS testbench if directly invoked
|
||||
if __name__ == "__main__":
|
||||
daq = StdDaqClient(
|
||||
name="daq", ws_url="ws://sls-daq-001:8080", rest_url="http://sls-daq-001:5000"
|
||||
)
|
||||
daq.wait_for_connection()
|
||||
@@ -1,196 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Standard DAQ preview image stream module
|
||||
|
||||
Created on Thu Jun 27 17:28:43 2024
|
||||
|
||||
@author: mohacsi_i
|
||||
"""
|
||||
import json
|
||||
import enum
|
||||
from time import sleep, time
|
||||
from threading import Thread
|
||||
import zmq
|
||||
import numpy as np
|
||||
from ophyd import Device, Signal, Component, Kind, DeviceStatus
|
||||
from ophyd_devices.interfaces.base_classes.psi_detector_base import (
|
||||
CustomDetectorMixin,
|
||||
PSIDetectorBase,
|
||||
)
|
||||
|
||||
from bec_lib import bec_logger
|
||||
logger = bec_logger.logger
|
||||
ZMQ_TOPIC_FILTER = b''
|
||||
|
||||
|
||||
class StdDaqPreviewState(enum.IntEnum):
|
||||
"""Standard DAQ ophyd device states"""
|
||||
UNKNOWN = 0
|
||||
DETACHED = 1
|
||||
MONITORING = 2
|
||||
|
||||
|
||||
class StdDaqPreviewMixin(CustomDetectorMixin):
|
||||
"""Setup class for the standard DAQ preview stream
|
||||
|
||||
Parent class: CustomDetectorMixin
|
||||
"""
|
||||
_mon = None
|
||||
|
||||
def on_stage(self):
|
||||
"""Start listening for preview data stream"""
|
||||
if self._mon is not None:
|
||||
self.parent.unstage()
|
||||
sleep(0.5)
|
||||
|
||||
self.parent.connect()
|
||||
self._stop_polling = False
|
||||
self._mon = Thread(target=self.poll, daemon=True)
|
||||
self._mon.start()
|
||||
|
||||
def on_unstage(self):
|
||||
"""Stop a running preview"""
|
||||
if self._mon is not None:
|
||||
self._stop_polling = True
|
||||
# Might hang on recv_multipart
|
||||
self._mon.join(timeout=1)
|
||||
# So also disconnect the socket
|
||||
self.parent._socket.disconnect(self.parent.url.get())
|
||||
|
||||
def on_stop(self):
|
||||
"""Stop a running preview"""
|
||||
self.on_unstage()
|
||||
|
||||
def poll(self):
|
||||
"""Collect streamed updates"""
|
||||
self.parent.status.set(StdDaqPreviewState.MONITORING, force=True)
|
||||
try:
|
||||
t_last = time()
|
||||
while True:
|
||||
try:
|
||||
# Exit loop and finish monitoring
|
||||
if self._stop_polling:
|
||||
logger.info(f"[{self.parent.name}]\tDetaching monitor")
|
||||
break
|
||||
|
||||
# pylint: disable=no-member
|
||||
r = self.parent._socket.recv_multipart(flags=zmq.NOBLOCK)
|
||||
|
||||
# Length and throtling checks
|
||||
if len(r) != 2:
|
||||
logger.warning(
|
||||
f"[{self.parent.name}] Received malformed array of length {len(r)}")
|
||||
t_curr = time()
|
||||
t_elapsed = t_curr - t_last
|
||||
if t_elapsed < self.parent.throttle.get():
|
||||
sleep(0.1)
|
||||
continue
|
||||
|
||||
# Unpack the Array V1 reply to metadata and array data
|
||||
meta, data = r
|
||||
|
||||
# Update image and update subscribers
|
||||
header = json.loads(meta)
|
||||
if header["type"] == "uint16":
|
||||
image = np.frombuffer(data, dtype=np.uint16)
|
||||
if image.size != np.prod(header['shape']):
|
||||
err = f"Unexpected array size of {image.size} for header: {header}"
|
||||
raise ValueError(err)
|
||||
image = image.reshape(header['shape'])
|
||||
|
||||
# Update image and update subscribers
|
||||
self.parent.frame.put(header['frame'], force=True)
|
||||
self.parent.image_shape.put(header['shape'], force=True)
|
||||
self.parent.image.put(image, force=True)
|
||||
self.parent._last_image = image
|
||||
self.parent._run_subs(sub_type=self.parent.SUB_MONITOR, value=image)
|
||||
t_last = t_curr
|
||||
logger.info(
|
||||
f"[{self.parent.name}] Updated frame {header['frame']}\t"
|
||||
f"Shape: {header['shape']}\tMean: {np.mean(image):.3f}"
|
||||
)
|
||||
except ValueError:
|
||||
# Happens when ZMQ partially delivers the multipart message
|
||||
pass
|
||||
except zmq.error.Again:
|
||||
# Happens when receive queue is empty
|
||||
sleep(0.1)
|
||||
except Exception as ex:
|
||||
logger.info(f"[{self.parent.name}]\t{str(ex)}")
|
||||
raise
|
||||
finally:
|
||||
self._mon = None
|
||||
self.parent.status.set(StdDaqPreviewState.DETACHED, force=True)
|
||||
logger.info(f"[{self.parent.name}]\tDetaching monitor")
|
||||
|
||||
|
||||
class StdDaqPreviewDetector(PSIDetectorBase):
|
||||
"""Detector wrapper class around the StdDaq preview image stream.
|
||||
|
||||
This was meant to provide live image stream directly from the StdDAQ.
|
||||
Note that the preview stream must be already throtled in order to cope
|
||||
with the incoming data and the python class might throttle it further.
|
||||
|
||||
You can add a preview widget to the dock by:
|
||||
cam_widget = gui.add_dock('cam_dock1').add_widget('BECFigure').image('daq_stream1')
|
||||
"""
|
||||
# Subscriptions for plotting image
|
||||
USER_ACCESS = ["kickoff", "get_last_image"]
|
||||
SUB_MONITOR = "device_monitor_2d"
|
||||
_default_sub = SUB_MONITOR
|
||||
|
||||
custom_prepare_cls = StdDaqPreviewMixin
|
||||
|
||||
# Status attributes
|
||||
url = Component(Signal, kind=Kind.config)
|
||||
throttle = Component(Signal, value=0.25, kind=Kind.config)
|
||||
status = Component(Signal, value=StdDaqPreviewState.UNKNOWN, kind=Kind.omitted)
|
||||
frame = Component(Signal, kind=Kind.hinted)
|
||||
image_shape = Component(Signal, kind=Kind.normal)
|
||||
# FIXME: The BEC client caches the read()s from the last 50 scans
|
||||
image = Component(Signal, kind=Kind.omitted)
|
||||
_last_image = None
|
||||
|
||||
def __init__(
|
||||
self, *args, url: str = "tcp://129.129.95.38:20000", parent: Device = None, **kwargs
|
||||
) -> None:
|
||||
super().__init__(*args, parent=parent, **kwargs)
|
||||
self.url._metadata["write_access"] = False
|
||||
self.status._metadata["write_access"] = False
|
||||
self.image._metadata["write_access"] = False
|
||||
self.frame._metadata["write_access"] = False
|
||||
self.image_shape._metadata["write_access"] = False
|
||||
self.url.set(url, force=True).wait()
|
||||
|
||||
# Connect ro the DAQ
|
||||
self.connect()
|
||||
|
||||
def connect(self):
|
||||
"""Connect to te StDAQs PUB-SUB streaming interface
|
||||
|
||||
StdDAQ may reject connection for a few seconds when it restarts,
|
||||
so if it fails, wait a bit and try to connect again.
|
||||
"""
|
||||
# pylint: disable=no-member
|
||||
# Socket to talk to server
|
||||
context = zmq.Context()
|
||||
self._socket = context.socket(zmq.SUB)
|
||||
self._socket.setsockopt(zmq.SUBSCRIBE, ZMQ_TOPIC_FILTER)
|
||||
try:
|
||||
self._socket.connect(self.url.get())
|
||||
except ConnectionRefusedError:
|
||||
sleep(1)
|
||||
self._socket.connect(self.url.get())
|
||||
|
||||
def get_image(self):
|
||||
return self._last_image
|
||||
|
||||
def kickoff(self) -> DeviceStatus:
|
||||
""" The DAQ was not meant to be toggled"""
|
||||
return DeviceStatus(self, done=True, success=True, settle_time=0.1)
|
||||
|
||||
|
||||
# Automatically connect to MicroSAXS testbench if directly invoked
|
||||
if __name__ == "__main__":
|
||||
daq = StdDaqPreviewDetector(url="tcp://129.129.95.111:20000", name="preview")
|
||||
daq.wait_for_connection()
|
||||
0
tomcat_bec/devices/pco_edge/__init__.py
Normal file
0
tomcat_bec/devices/pco_edge/__init__.py
Normal file
@@ -4,10 +4,45 @@ Created on Wed Dec 6 11:33:54 2023
|
||||
|
||||
@author: mohacsi_i
|
||||
"""
|
||||
import enum
|
||||
|
||||
from ophyd import Component as Cpt
|
||||
from ophyd import Device, DynamicDeviceComponent, EpicsSignal, EpicsSignalRO, Kind, Signal
|
||||
|
||||
|
||||
class TriggerMode(str, enum.Enum):
|
||||
AUTO_TRIGGER = "auto trigger"
|
||||
SOFT_TRIGGER = "soft trigger"
|
||||
EXTERNAL_EXP_TRIGGER = "ext.exp sfttrg"
|
||||
EXTERNAL_EXP_CONTR = "ext.exp contr"
|
||||
|
||||
|
||||
class CameraStatus(str, enum.Enum):
|
||||
OFFLINE = "Offline"
|
||||
IDLE = "Idle"
|
||||
RUNNING = "Running"
|
||||
|
||||
|
||||
class RecMode(str, enum.Enum):
|
||||
SEQUENCE = "Sequence"
|
||||
RING_BUFFER = "Ring buffer"
|
||||
|
||||
|
||||
class StoreMode(str, enum.Enum):
|
||||
RECORDER = "Recorder"
|
||||
FIFO_BUFFER = "FIFO buffer"
|
||||
|
||||
|
||||
class CameraInitStatus(str, enum.Enum):
|
||||
OFFLINE = "OFFLINE"
|
||||
INIT = "INIT"
|
||||
|
||||
|
||||
class CameraStatusCode(enum.IntEnum):
|
||||
IDLE = 2
|
||||
RUNNING = 6
|
||||
|
||||
|
||||
class PcoEdgeBase(Device):
|
||||
"""Ophyd baseclass for Helge camera IOCs
|
||||
|
||||
@@ -46,24 +81,48 @@ class PcoEdgeBase(Device):
|
||||
|
||||
# ########################################################################
|
||||
# General hardware info (in AD nomenclature)
|
||||
manufacturer = Cpt(EpicsSignalRO, "QUERY", kind=Kind.config, doc="Camera manufacturer info")
|
||||
model = Cpt(EpicsSignalRO, "BOARD", kind=Kind.omitted, doc="Camera board info")
|
||||
query = Cpt(EpicsSignalRO, "QUERY", kind=Kind.config, doc="Camera manufacturer info")
|
||||
board = Cpt(EpicsSignalRO, "BOARD", kind=Kind.omitted, doc="Camera board info")
|
||||
|
||||
# ########################################################################
|
||||
# Acquisition configuration (in AD nomenclature)
|
||||
acquire = Cpt(EpicsSignal, "CAMERASTATUS", put_complete=True, kind=Kind.omitted)
|
||||
acquire_time = Cpt(
|
||||
EpicsSignal, "EXPOSURE", put_complete=True, auto_monitor=True, kind=Kind.config
|
||||
camera_status = Cpt(
|
||||
EpicsSignal,
|
||||
"CAMERASTATUS",
|
||||
put_complete=True,
|
||||
kind=Kind.omitted,
|
||||
string=True,
|
||||
doc="Camera acquisition status, either 'Offline', 'Idle' or 'Running'",
|
||||
)
|
||||
acquire_delay = Cpt(
|
||||
EpicsSignal, "DELAY", put_complete=True, auto_monitor=True, kind=Kind.config
|
||||
exposure = Cpt(
|
||||
EpicsSignal,
|
||||
"EXPOSURE",
|
||||
put_complete=True,
|
||||
auto_monitor=True,
|
||||
kind=Kind.config,
|
||||
doc="Exposure time in milliseconds.",
|
||||
)
|
||||
delay = Cpt(
|
||||
EpicsSignal,
|
||||
"DELAY",
|
||||
put_complete=True,
|
||||
auto_monitor=True,
|
||||
kind=Kind.config,
|
||||
doc="Delay time in milliseconds.",
|
||||
)
|
||||
|
||||
# trigger_mode cannot be called 'trigger' as it is a reserved method in ophyd.Device
|
||||
# and it would override the Device.trigger() method
|
||||
trigger_mode = Cpt(
|
||||
EpicsSignal, "TRIGGER", put_complete=True, auto_monitor=True, kind=Kind.config
|
||||
EpicsSignal,
|
||||
"TRIGGER",
|
||||
put_complete=True,
|
||||
auto_monitor=True,
|
||||
kind=Kind.config,
|
||||
string=True,
|
||||
doc="Trigger mode. Must be either 'auto trigger', 'soft trigger', "
|
||||
"'ext.exp sfttrg' or 'ext.exp contr'",
|
||||
)
|
||||
# num_exposures = Cpt(
|
||||
# EpicsSignal, "CNT_NUM", put_complete=True, auto_monitor=True, kind=Kind.config
|
||||
# )
|
||||
|
||||
array_size = DynamicDeviceComponent(
|
||||
{
|
||||
@@ -73,15 +132,6 @@ class PcoEdgeBase(Device):
|
||||
doc="Size of the array in the XY dimensions",
|
||||
)
|
||||
|
||||
# DAQ parameters
|
||||
file_path = Cpt(Signal, kind=Kind.config, value="/gpfs/test/test-beamline")
|
||||
file_prefix = Cpt(Signal, kind=Kind.config, value="scan_")
|
||||
num_images = Cpt(Signal, kind=Kind.config, value=1000)
|
||||
num_images_counter = Cpt(Signal, kind=Kind.hinted, value=0)
|
||||
|
||||
# GF specific interface
|
||||
acquire_block = Cpt(Signal, kind=Kind.config, value=0)
|
||||
|
||||
# ########################################################################
|
||||
# Image size configuration (in AD nomenclature)
|
||||
bin_x = Cpt(EpicsSignal, "BINX", put_complete=True, auto_monitor=True, kind=Kind.config)
|
||||
@@ -90,8 +140,8 @@ class PcoEdgeBase(Device):
|
||||
# ########################################################################
|
||||
# Additional status info
|
||||
busy = Cpt(EpicsSignalRO, "BUSY", auto_monitor=True, kind=Kind.config)
|
||||
camState = Cpt(EpicsSignalRO, "SS_CAMERA", auto_monitor=True, kind=Kind.config)
|
||||
camProgress = Cpt(EpicsSignalRO, "CAMPROGRESS", auto_monitor=True, kind=Kind.config)
|
||||
ss_camera = Cpt(EpicsSignalRO, "SS_CAMERA", auto_monitor=True, kind=Kind.config)
|
||||
cam_progress = Cpt(EpicsSignalRO, "CAMPROGRESS", auto_monitor=True, kind=Kind.config)
|
||||
|
||||
# ########################################################################
|
||||
# Configuration state maschine with separate transition states
|
||||
@@ -104,36 +154,50 @@ class PcoEdgeBase(Device):
|
||||
kind=Kind.config,
|
||||
)
|
||||
|
||||
camera_statuscode = Cpt(EpicsSignalRO, "STATUSCODE", auto_monitor=True, kind=Kind.config)
|
||||
camera_init = Cpt(EpicsSignalRO, "INIT", auto_monitor=True, kind=Kind.config)
|
||||
statuscode = Cpt(EpicsSignalRO, "STATUSCODE", auto_monitor=True, kind=Kind.config, string=True)
|
||||
init = Cpt(
|
||||
EpicsSignalRO,
|
||||
"INIT",
|
||||
auto_monitor=True,
|
||||
kind=Kind.config,
|
||||
string=True,
|
||||
doc="Camera initialization status, either 'OFFLINE' or 'INIT'.",
|
||||
)
|
||||
camera_init_busy = Cpt(EpicsSignalRO, "BUSY_INIT", auto_monitor=True, kind=Kind.config)
|
||||
# camCamera = Cpt(EpicsSignalRO, "CAMERA", auto_monitor=True, kind=Kind.config)
|
||||
# camCameraBusy = Component(EpicsSignalRO, "BUSY_CAMERA", auto_monitor=True, kind=Kind.config)
|
||||
|
||||
# ########################################################################
|
||||
# Acquisition configuration
|
||||
acquire_mode = Cpt(EpicsSignalRO, "ACQMODE", auto_monitor=True, kind=Kind.config)
|
||||
acquire_trigger = Cpt(EpicsSignalRO, "TRIGGER", auto_monitor=True, kind=Kind.config)
|
||||
# acqTriggerSource = Component(
|
||||
# EpicsSignalRO, "TRIGGERSOURCE", auto_monitor=True, kind=Kind.config)
|
||||
# acqTriggerEdge = Component(EpicsSignalRO, "TRIGGEREDGE", auto_monitor=True, kind=Kind.config)
|
||||
acqmode = Cpt(EpicsSignalRO, "ACQMODE", auto_monitor=True, kind=Kind.config)
|
||||
|
||||
# ########################################################################
|
||||
# Buffer configuration
|
||||
bufferRecMode = Cpt(EpicsSignalRO, "RECMODE", auto_monitor=True, kind=Kind.config)
|
||||
bufferStoreMode = Cpt(EpicsSignal, "STOREMODE", auto_monitor=True, kind=Kind.config)
|
||||
fileRecMode = Cpt(EpicsSignalRO, "RECMODE", auto_monitor=True, kind=Kind.config)
|
||||
rec_mode = Cpt(
|
||||
EpicsSignalRO,
|
||||
"RECMODE",
|
||||
auto_monitor=True,
|
||||
kind=Kind.config,
|
||||
string=True,
|
||||
doc="Recording mode of the camera, either 'Sequence' or 'Ring buffer'",
|
||||
)
|
||||
store_mode = Cpt(
|
||||
EpicsSignal,
|
||||
"STOREMODE",
|
||||
auto_monitor=True,
|
||||
kind=Kind.config,
|
||||
string=True,
|
||||
doc="Store mode of the camera, either 'Recorder' or 'FIFO buffer'",
|
||||
)
|
||||
|
||||
buffer_used = Cpt(EpicsSignalRO, "PIC_BUFFER", auto_monitor=True, kind=Kind.normal)
|
||||
buffer_size = Cpt(EpicsSignalRO, "PIC_MAX", auto_monitor=True, kind=Kind.normal)
|
||||
buffer_clear = Cpt(EpicsSignal, "CLEARMEM", put_complete=True, kind=Kind.omitted)
|
||||
pic_buffer = Cpt(EpicsSignalRO, "PIC_BUFFER", auto_monitor=True, kind=Kind.normal)
|
||||
pic_max = Cpt(EpicsSignalRO, "PIC_MAX", auto_monitor=True, kind=Kind.normal)
|
||||
clear_mem = Cpt(EpicsSignal, "CLEARMEM", put_complete=True, kind=Kind.omitted)
|
||||
|
||||
# ########################################################################
|
||||
# File saving/streaming interface
|
||||
cam_data_rate = Cpt(EpicsSignalRO, "CAMRATE", auto_monitor=True, kind=Kind.normal)
|
||||
file_data_rate = Cpt(EpicsSignalRO, "FILERATE", auto_monitor=True, kind=Kind.normal)
|
||||
file_savestart = Cpt(EpicsSignal, "SAVESTART", put_complete=True, kind=Kind.config)
|
||||
file_savestop = Cpt(EpicsSignal, "SAVESTOP", put_complete=True, kind=Kind.config)
|
||||
cam_rate = Cpt(EpicsSignalRO, "CAMRATE", auto_monitor=True, kind=Kind.normal)
|
||||
file_rate = Cpt(EpicsSignalRO, "FILERATE", auto_monitor=True, kind=Kind.normal)
|
||||
save_start = Cpt(EpicsSignal, "SAVESTART", put_complete=True, kind=Kind.config)
|
||||
save_stop = Cpt(EpicsSignal, "SAVESTOP", put_complete=True, kind=Kind.config)
|
||||
file_format = Cpt(EpicsSignal, "FILEFORMAT", put_complete=True, kind=Kind.config)
|
||||
file_transfer = Cpt(EpicsSignal, "FTRANSFER", put_complete=True, kind=Kind.config)
|
||||
file_savebusy = Cpt(EpicsSignalRO, "FILESAVEBUSY", auto_monitor=True, kind=Kind.normal)
|
||||
@@ -147,17 +211,24 @@ class PcoEdgeBase(Device):
|
||||
camError = Cpt(EpicsSignalRO, "ERRCODE", auto_monitor=True, kind=Kind.config)
|
||||
camWarning = Cpt(EpicsSignalRO, "WARNCODE", auto_monitor=True, kind=Kind.config)
|
||||
|
||||
# DAQ parameters
|
||||
file_path = Cpt(Signal, kind=Kind.config, value="/gpfs/test/test-beamline")
|
||||
file_prefix = Cpt(Signal, kind=Kind.config, value="scan_")
|
||||
num_images = Cpt(Signal, kind=Kind.config, value=1000)
|
||||
frames_per_trigger = Cpt(Signal, kind=Kind.config, value=1)
|
||||
num_images_counter = Cpt(Signal, kind=Kind.hinted, value=0)
|
||||
|
||||
@property
|
||||
def state(self) -> str:
|
||||
"""Single word camera state"""
|
||||
if self.set_param.value:
|
||||
return "BUSY"
|
||||
if self.camera_statuscode.value == 2 and self.camera_init.value == 1:
|
||||
if self.statuscode.value == 2 and self.init.value == 1:
|
||||
return "IDLE"
|
||||
if self.camera_statuscode.value == 6 and self.camera_init.value == 1:
|
||||
if self.statuscode.value == 6 and self.init.value == 1:
|
||||
return "RUNNING"
|
||||
# if self.camRemoval.value==0 and self.camInit.value==0:
|
||||
if self.camera_init.value == 0:
|
||||
if self.init.value == 0:
|
||||
return "OFFLINE"
|
||||
# if self.camRemoval.value:
|
||||
# return "REMOVED"
|
||||
553
tomcat_bec/devices/pco_edge/pcoedgecamera.py
Normal file
553
tomcat_bec/devices/pco_edge/pcoedgecamera.py
Normal file
@@ -0,0 +1,553 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
from typing import TYPE_CHECKING, Literal, cast
|
||||
|
||||
import numpy as np
|
||||
from bec_lib.logger import bec_logger
|
||||
from ophyd import Component as Cpt
|
||||
from ophyd import Kind, Signal
|
||||
from ophyd.status import AndStatus, DeviceStatus, SubscriptionStatus
|
||||
from ophyd_devices.interfaces.base_classes.psi_device_base import PSIDeviceBase
|
||||
from ophyd_devices.utils.bec_signals import PreviewSignal, ProgressSignal
|
||||
|
||||
from tomcat_bec.devices.pco_edge.pcoedge_base import CameraStatus, CameraStatusCode, PcoEdgeBase
|
||||
from tomcat_bec.devices.std_daq.std_daq_client import (
|
||||
StdDaqClient,
|
||||
StdDaqConfigPartial,
|
||||
StdDaqStatus,
|
||||
)
|
||||
from tomcat_bec.devices.std_daq.std_daq_live_processing import StdDaqLiveProcessing
|
||||
from tomcat_bec.devices.std_daq.std_daq_preview import StdDaqPreview
|
||||
|
||||
logger = bec_logger.logger
|
||||
|
||||
|
||||
if TYPE_CHECKING: # pragma: no cover
|
||||
from bec_lib.devicemanager import DeviceManagerBase
|
||||
from ophyd import StatusBase
|
||||
|
||||
|
||||
# pylint: disable=too-many-instance-attributes
|
||||
class PcoEdge5M(PSIDeviceBase, PcoEdgeBase):
|
||||
"""Ophyd baseclass for Helge camera IOCs
|
||||
|
||||
This class provides wrappers for Helge's camera IOCs around SwissFEL and
|
||||
for high performance SLS 2.0 cameras. The IOC's operation is a bit arcane
|
||||
and there are different versions and cameras all around. So this device
|
||||
only covers the absolute basics.
|
||||
|
||||
Probably the most important part is the configuration state machine. As
|
||||
the SET_PARAMS takes care of buffer allocations it might take some time,
|
||||
as well as a full re-configuration is required every time we change the
|
||||
binning, roi, etc... This is automatically performed upon starting an
|
||||
exposure (if it heven't been done before).
|
||||
|
||||
The status flag state machine during re-configuration is:
|
||||
BUSY low, SET low -> BUSY high, SET low -> BUSY low, SET high -> BUSY low, SET low
|
||||
|
||||
|
||||
UPDATE: Data sending operation modes
|
||||
- Switch to ZMQ streaming by setting FILEFORMAT to ZEROMQ
|
||||
- Set SAVESTART and SAVESTOP to select a ROI of image indices
|
||||
- Start file transfer with FTRANSFER.
|
||||
The ZMQ connection operates in PUSH-PULL mode, i.e. it needs incoming connection.
|
||||
|
||||
STOREMODE sets the acquisition mode:
|
||||
if STOREMODE == Recorder
|
||||
Fills up the buffer with images. Here SAVESTART and SAVESTOP selects a ROI
|
||||
of image indices to be streamed out (i.e. maximum buffer_size number of images)
|
||||
|
||||
if STOREMODE == FIFO buffer
|
||||
Continously streams out data using the buffer as a FIFO queue.
|
||||
Here SAVESTART and SAVESTOP selects a ROI of image indices to be streamed continously
|
||||
(i.e. a large SAVESTOP streams indefinitely). Note that in FIFO mode buffer reads are
|
||||
destructive. to prevent this, we don't have EPICS preview
|
||||
"""
|
||||
|
||||
USER_ACCESS = ["complete", "backend", "live_preview", "arm", "disarm"]
|
||||
|
||||
analysis_signal = Cpt(Signal, name="analysis_signal", kind=Kind.hinted, doc="Analysis Signal")
|
||||
analysis_signal2 = Cpt(Signal, name="analysis_signal2", kind=Kind.hinted, doc="Analysis Signal")
|
||||
preview = Cpt(PreviewSignal, ndim=2, name="preview", doc="Camera raw data preview signal")
|
||||
progress = Cpt(
|
||||
ProgressSignal,
|
||||
name="progress",
|
||||
doc="Camera progress signal, used to monitor the acquisition progress",
|
||||
)
|
||||
|
||||
# pylint: disable=too-many-arguments
|
||||
def __init__(
    self,
    prefix="",
    *,
    name,
    kind=None,
    read_attrs=None,
    configuration_attrs=None,
    parent=None,
    scan_info=None,
    std_daq_rest: str | None = None,
    std_daq_ws: str | None = None,
    std_daq_live: str | None = None,
    device_manager: DeviceManagerBase | None = None,
    **kwargs,
):
    """Initialize the PCO Edge camera device and its std_daq integration.

    Args:
        prefix (str): EPICS PV prefix of the camera IOC.
        name (str): BEC device name.
        std_daq_rest (str | None): REST endpoint of the std_daq backend (required).
        std_daq_ws (str | None): Websocket URL of the std_daq backend (required).
        std_daq_live (str | None): Optional ZMQ live-stream URL; when given, a
            StdDaqPreview is created and preview frames are processed.
        device_manager (DeviceManagerBase | None): BEC device manager; its
            connector is reused when provided.

    Raises:
        ValueError: If std_daq_rest or std_daq_ws is missing.
    """
    # Store the device manager before super().__init__ so later hooks can rely on it.
    self.device_manager = device_manager
    self.connector = device_manager.connector if device_manager else None
    super().__init__(
        prefix=prefix,
        name=name,
        kind=kind,
        read_attrs=read_attrs,
        configuration_attrs=configuration_attrs,
        parent=parent,
        scan_info=scan_info,
        **kwargs,
    )
    # Configure the stdDAQ client
    if std_daq_rest is None or std_daq_ws is None:
        raise ValueError("Both std_daq_rest and std_daq_ws must be provided")

    self.live_processing = StdDaqLiveProcessing(
        parent=self, signal=self.analysis_signal, signal2=self.analysis_signal2
    )

    self.backend = StdDaqClient(parent=self, ws_url=std_daq_ws, rest_url=std_daq_rest)
    # Progress updates are driven by the backend's image counter (see _on_count_update).
    self.backend.add_count_callback(self._on_count_update)
    self.live_preview = None
    # Stash of superseded settings, keyed by name (see restart_with_new_config).
    self.acq_configs = {}
    if std_daq_live is not None:
        self.live_preview = StdDaqPreview(url=std_daq_live, cb=self._on_preview_update)
|
||||
|
||||
def configure(self, d: dict | None = None):
    """Configure the base Helge camera device

    NOTE(review): unlike ophyd's ``Device.configure``, this override returns
    ``None`` instead of an ``(old, new)`` tuple — confirm no caller relies on
    the return value.

    Parameters as 'd' dictionary
    ----------------------------
    num_images : int
        Number of images to be taken during each scan. Meaning depends on
        store mode.
    exposure_time_ms : float
        Exposure time [ms], usually gets set back to 20 ms
    exposure_period_ms : float
        Exposure period [ms], up to 200 ms.
    store_mode : str
        Buffer operation mode
        *'Recorder' to record in buffer
        *'FIFO buffer' for continous streaming
    data_format : str
        Usually set to 'ZEROMQ'
    acq_mode : str
        Store mode and data format according to preconfigured settings
    """
    if d is None:
        return

    # Stop acquisition
    self.stop_camera().wait(timeout=10)

    # Validate and forward backend-relevant keys to std_daq; the pydantic
    # model ignores keys it does not know about.
    backend_config = StdDaqConfigPartial(**d)
    self.backend.update_config(backend_config)

    config = {}

    # Keep only keys that correspond to actual components of this device.
    for key in self.component_names:
        val = d.get(key)
        if val is not None:
            config[key] = val

    if d.get("exp_time", 0) > 0:
        config["exposure"] = d["exp_time"] * 1000  # exposure time in ms

    super().configure(config)

    # If a pre-configured acquisition mode is specified, set it
    if "acq_mode" in d:
        self.set_acquisition_mode(d["acq_mode"])

    # State machine
    # Initial: BUSY and SET both low
    # 0. Write 1 to SET_PARAM
    # 1. BUSY goes high, SET stays low
    # 2. BUSY goes low, SET goes high
    # 3. BUSY stays low, SET goes low
    # So we need a 'negedge' on SET_PARAM
    def negedge(*, old_value, value, timestamp, **_):
        return bool(old_value and not value)

    # Subscribe and wait for update
    status = SubscriptionStatus(self.set_param, negedge, timeout=5, settle_time=0.5)

    self.set_param.set(1).wait()
    status.wait()
|
||||
|
||||
def set_acquisition_mode(self, acq_mode):
    """Set acquisition mode

    Utility function to quickly select between pre-configured and tested
    acquisition modes.

    Args:
        acq_mode (str): One of "default", "step" or "stream".

    Raises:
        RuntimeError: If acq_mode is not one of the supported modes.
    """
    # BUG FIX: the second branch was a separate `if`, so "default"/"step"
    # matched the first branch and then fell through to the `else` of the
    # second `if`, raising RuntimeError for valid modes. Use an elif chain.
    if acq_mode in ["default", "step"]:
        # NOTE: Trigger duration requires a consumer
        self.store_mode.set("FIFO Buffer").wait()
    elif acq_mode in ["stream"]:
        # NOTE: Trigger duration requires a consumer
        self.store_mode.set("FIFO Buffer").wait()
    else:
        raise RuntimeError(f"Unsupported acquisition mode: {acq_mode}")
|
||||
|
||||
def destroy(self):
    """Shut down the std_daq backend and the live preview, then destroy the device."""
    self.backend.shutdown()
    preview = self.live_preview
    if preview:
        preview.stop()
    super().destroy()
|
||||
|
||||
def _on_preview_update(self, img: np.ndarray):
    """Handle a raw preview frame from the std_daq live stream.

    Applies flat/dark correction, feeds the live-processing pipeline and
    publishes the corrected frame on the BEC preview signal.

    Args:
        img (np.ndarray): Raw preview frame (2D — the preview signal is
            declared with ndim=2).
    """
    corrected_img = self.live_processing.apply_flat_dark_correction(img)
    self.live_processing.on_new_data(corrected_img)
    self.preview.put(corrected_img)
    # Also notify legacy 2D-monitor subscribers with the corrected frame.
    self._run_subs(sub_type=self.SUB_DEVICE_MONITOR_2D, obj=self, value=corrected_img)
|
||||
|
||||
def _on_count_update(self, count: int):
    """
    Forward the backend image counter to the BEC progress signal.

    Args:
        count (int): The current count of images acquired by the camera.
    """
    total = cast(int, self.num_images.get())
    finished = count == total
    self.progress.put(value=count, max_value=total, done=bool(finished))
|
||||
|
||||
def acq_done(self) -> DeviceStatus:
    """
    Return a status that resolves when the acquisition is done.

    Completion is detected via the std_daq backend status (IDLE or
    FILE_SAVED), since the camera itself does not provide feedback about
    its internal acquisition state.

    Returns:
        DeviceStatus: The status of the acquisition
    """
    status = DeviceStatus(self)
    if self.backend is not None:
        # NOTE(review): if backend were ever None, the returned status would
        # never resolve — confirm whether that case can actually occur.
        self.backend.add_status_callback(
            status,
            success=[StdDaqStatus.IDLE, StdDaqStatus.FILE_SAVED],
            error=[StdDaqStatus.REJECTED, StdDaqStatus.ERROR],
        )
    return status
|
||||
|
||||
def restart_with_new_config(
    self,
    name: str,
    file_path: str = "",
    file_prefix: str = "",
    num_images: int | None = None,
    frames_per_trigger: int | None = None,
) -> StatusBase:
    """
    Restart the camera with a new configuration.

    The previous values of all overridden fields are remembered under *name*
    so they can be re-applied later with ``restore_config``.

    Args:
        name (str): Name under which the superseded settings are saved.
        file_path (str): New file path; the current one is kept when empty.
        file_prefix (str): New file prefix; the current one is kept when empty.
        num_images (int | None): New number of images; unchanged when None.
        frames_per_trigger (int | None): New frames per trigger; unchanged when None.

    Returns:
        DeviceStatus: The status of the restart operation. It resolves when the camera is ready to receive the first image.
    """
    # Normalize all overrides into one table; empty strings / None mean "keep".
    overrides = {
        "file_path": file_path if file_path else None,
        "file_prefix": file_prefix if file_prefix else None,
        "num_images": num_images,
        "frames_per_trigger": frames_per_trigger,
    }

    saved = {}
    conf = {}
    for field, new_value in overrides.items():
        if new_value is None:
            continue
        # Remember the value being replaced so restore_config can bring it back.
        saved[field] = getattr(self, field).get()
        conf[field] = new_value
    self.acq_configs[name] = saved

    # Stop the camera and wait for it to become idle
    self.stop_camera().wait(timeout=10)

    # update the configuration
    self.configure(conf)

    # Restart the camera with the new configuration
    return self.start_camera()
|
||||
|
||||
def restore_config(self, name: str) -> None:
    """
    Re-apply a configuration previously saved by ``restart_with_new_config``.

    Stops the camera, waits for it to become idle and applies the saved
    settings (a no-op configuration when *name* is unknown).

    Args:
        name (str): Name of the configuration to restore.
    """
    self.stop_camera().wait(timeout=10)
    self.configure(self.acq_configs.pop(name, {}))
|
||||
|
||||
def update_live_processing_reference(
    self, reference_type: Literal["dark", "flat"]
) -> StatusBase:
    """
    Update the flat or dark reference for the live processing.

    Args:
        reference_type (Literal["dark", "flat"]): Type of the reference to update.
            If 'dark', the dark reference will be updated, if 'flat', the flat reference will be updated.

    Returns:
        StatusBase: The status of the update operation.

    Raises:
        ValueError: If reference_type is not 'dark' or 'flat'.
        RuntimeError: If live processing is not available.
    """
    if reference_type not in ["dark", "flat"]:
        raise ValueError("Invalid reference type! Must be 'dark' or 'flat'.")

    # Use the current acquisition to update the reference
    if self.live_processing is None:
        raise RuntimeError("Live processing is not available. Cannot update reference.")
    # NOTE(review): the HDF5 entry name references "tomcat-gigafrost" although
    # this is the PCO Edge device — confirm the writer uses the same entry here.
    status = self.live_processing.update_reference_with_file(
        reference_type=reference_type,
        file_path=self.target_file,
        entry="tomcat-gigafrost/data",  # type: ignore
        wait=False,  # Do not wait for the update to finish
    )
    return status
|
||||
|
||||
def start_camera(self) -> StatusBase:
    """
    Start the camera and the backend.

    Returns:
        DeviceStatus: The status of the startup. It resolves when the backend is ready to receive the first image.
    """
    # Backend readiness: resolves when std_daq reports it is waiting for the first image.
    status = DeviceStatus(self)
    self.backend.add_status_callback(
        status,
        success=[StdDaqStatus.WAITING_FOR_FIRST_IMAGE],
        error=[StdDaqStatus.REJECTED, StdDaqStatus.ERROR],
    )
    self.backend.start(
        file_path=self.file_path.get(),  # type: ignore
        file_prefix=self.file_prefix.get(),  # type: ignore
        num_images=self.num_images.get(),  # type: ignore
    )
    # Request the camera IOC to start acquiring.
    self.camera_status.set(CameraStatus.RUNNING).wait()

    def is_running(*, value, timestamp, **_):
        return bool(value == CameraStatusCode.RUNNING)

    # Camera readiness: resolves when the IOC status code reports RUNNING.
    camera_running_status = SubscriptionStatus(
        self.statuscode, is_running, timeout=5, settle_time=0.2
    )
    # Ensure a device stop() aborts the wait instead of hanging until timeout.
    self.cancel_on_stop(camera_running_status)

    # Both the backend and the camera must be ready before triggering.
    return AndStatus(status, camera_running_status)
|
||||
|
||||
def set_idle(self) -> AndStatus:
    """Set the camera to idle state"""
    return AndStatus(
        self.camera_status.set(CameraStatus.IDLE),
        self.save_stop.set(0),
    )
|
||||
|
||||
def stop_camera(self) -> DeviceStatus:
    """Stop the camera acquisition and set it to idle state"""
    # Put the camera itself into idle first, then wait for the backend.
    self.set_idle().wait()
    backend_idle = DeviceStatus(self)
    self.backend.add_status_callback(
        backend_idle,
        success=[StdDaqStatus.IDLE],
        error=[StdDaqStatus.REJECTED, StdDaqStatus.ERROR],
    )
    self.backend.stop()
    return backend_idle
|
||||
|
||||
@property
def target_file(self) -> str:
    """Return the target file path for the current acquisition."""
    directory = cast(str, self.file_path.get())
    stem = cast(str, self.file_prefix.get()).removesuffix("_")
    return os.path.join(directory, stem + ".h5")
|
||||
|
||||
########################################
|
||||
# Beamline Specific Implementations #
|
||||
########################################
|
||||
|
||||
def on_init(self) -> None:
    """
    Called when the device is initialized.

    No signals are connected at this point,
    thus should not be set here but in on_connected instead.
    """
    # Intentionally a no-op: signal defaults are applied in on_connected.
|
||||
|
||||
def on_connected(self) -> None:
    """
    Called after the device is connected and its signals are connected.
    Default values for signals should be set here.
    """
    # Open the connection to the std_daq backend.
    self.backend.connect()

    # Start streaming preview frames only if a live-stream URL was configured.
    if self.live_preview:
        self.live_preview.start()
|
||||
|
||||
# pylint: disable=protected-access
|
||||
def on_stage(self) -> None:
    """Configure and arm PCO.Edge camera for acquisition.

    Derives the acquisition settings from the current scan message
    (request inputs and scan parameters), fills in fallback file path and
    prefix, applies the configuration and computes the total number of
    images for the scan.
    """

    # If the camera is busy, stop it first
    if self.statuscode.get() != CameraStatusCode.IDLE:
        self.stop_camera()

    scan_msg = self.scan_info.msg
    if scan_msg is None or scan_msg.request_inputs is None or scan_msg.scan_parameters is None:
        # I don't think this can happen outside of tests, but just in case
        # BUG FIX: the message wrongly referred to the "GigaFrost camera"
        # (copy-paste from the gigafrost module) — this is the PCO Edge.
        logger.warning(
            f"[{self.name}] Scan message is not available or incomplete. "
            "Cannot configure the PCO Edge camera."
        )
        self.acq_configs = {}
        return

    # Merge positional inputs, keyword inputs and scan parameters into one dict.
    scan_args = {
        **scan_msg.request_inputs.get("inputs", {}),
        **scan_msg.request_inputs.get("kwargs", {}),
        **scan_msg.scan_parameters,
    }

    if "file_path" not in scan_args:
        scan_args["file_path"] = (
            "/gpfs/test/test-beamline"  # FIXME: This should be from the scan message
        )
    if "file_prefix" not in scan_args:
        # Derive the prefix from the scan's file components (basename + "_").
        scan_args["file_prefix"] = scan_msg.info["file_components"][0].split("/")[-1] + "_"
    self.configure(scan_args)

    # Step scans trigger once per point; other scan types record a single burst.
    if scan_msg.scan_type == "step":
        num_points = self.frames_per_trigger.get() * scan_msg.num_points  # type: ignore
    else:
        num_points = self.frames_per_trigger.get()

    self.num_images.set(num_points).wait()

    # reset the acquisition configs
    self.acq_configs = {}
|
||||
|
||||
def on_unstage(self) -> DeviceStatus | None:
    """Called while unstaging the device.

    Stops the camera and backend; the returned status resolves once the
    backend reports idle.
    """
    return self.stop_camera()
|
||||
|
||||
def on_pre_scan(self) -> StatusBase:
    """Called right before the scan starts on all devices automatically.

    Arms camera and backend; the returned status resolves when both are
    ready to receive the first image.
    """
    return self.start_camera()
|
||||
|
||||
def on_trigger(self) -> None | DeviceStatus:
    """Trigger mode operation

    Use it to repeatedly record a fixed number of frames and send it to stdDAQ. The method waits
    for the acquisition and data transfer to complete.

    NOTE: Maciej confirmed that sparse data is no problem to the stdDAQ.
    TODO: Optimize data transfer to launch at end and check completion at the beginning.

    Raises:
        RuntimeError: For non-step scan types (fly-scan triggering not implemented).
    """
    # Ensure that previous data transfer finished
    # def sentIt(*args, value, timestamp, **kwargs):
    #     return value==0
    # status = SubscriptionStatus(self.file_savebusy, sentIt, timeout=120)
    # status.wait()
    scan_msg = self.scan_info.msg

    if scan_msg.scan_type == "step":
        # The PCO Edge does not support software triggering. As a result, we have to 'simulate'
        # the software triggering mechanism by leveraging the PCO's readout buffer: We limit the buffer
        # readout size (save_start/save_stop) to the number of frames we want per trigger, clear the
        # buffer and then wait for the buffer to fill up again before transfering the files to the
        # file writer (std_daq).

        # Set the readout per step scan point to the requested frames per trigger
        self.save_stop.set(self.frames_per_trigger.get()).wait()

        # Reset the buffer
        self.clear_mem.set(1, settle_time=0.1).wait()

        # Wait until the buffer fills up with enough images
        # NOTE(review): assumes exposure and delay are in seconds here —
        # confirm the PV units (exposure is handled in ms elsewhere).
        t_expected = (self.exposure.get() + self.delay.get()) * self.save_stop.get()

        def wait_acquisition(*, value, timestamp, **_):
            # Buffer counter reached the requested number of frames.
            num_target = self.save_stop.get()
            # logger.warning(f"{value} of {num_target}")
            return bool(value >= num_target)

        # Allow 5x the expected acquisition time, but at least 5 s.
        max_wait = max(5, 5 * t_expected)
        buffer_filled_status = SubscriptionStatus(
            self.pic_buffer, wait_acquisition, timeout=max_wait, settle_time=0.2
        )
        self.cancel_on_stop(buffer_filled_status)
        buffer_filled_status.wait()

        logger.info(f"file savebusy before: {self.file_savebusy.get()}")

        def wait_sending(*, old_value, value, timestamp, **_):
            # Falling edge of FILESAVEBUSY marks the end of the transfer.
            logger.info(f"old_value {old_value}, new value: {value}")
            return old_value == 1 and value == 0

        savebusy_status = SubscriptionStatus(
            self.file_savebusy, wait_sending, timeout=120, settle_time=0.2
        )
        self.cancel_on_stop(savebusy_status)

        # Kick off the transfer of the buffered frames to std_daq and wait.
        self.file_transfer.set(1).wait()
        savebusy_status.wait()
    else:
        raise RuntimeError("Triggering for fly scans is not yet implemented.")
|
||||
|
||||
def on_complete(self) -> DeviceStatus | None:
    """Called to inquire if a device has completed a scan.

    Returns a status that resolves once the std_daq backend reports the
    acquisition finished; virtual datasets are then assembled and a file
    event is emitted.
    """

    def _create_dataset(_status: DeviceStatus):
        # Assemble the per-writer files into virtual HDF5 datasets.
        # NOTE(review): status callbacks fire on failed completion as well —
        # confirm whether dataset creation should be skipped on error.
        self.backend.create_virtual_datasets(
            self.file_path.get(), file_prefix=self.file_prefix.get()  # type: ignore
        )
        # Notify BEC that the target file is complete and readable.
        self._run_subs(
            sub_type=self.SUB_FILE_EVENT,
            file_path=self.target_file,
            done=True,
            successful=True,
            hinted_location={"data": "data"},
        )

    status = self.acq_done()
    status.add_callback(_create_dataset)
    return status
|
||||
|
||||
def on_kickoff(self) -> DeviceStatus | None:
    """Called to kickoff a device for a fly scan. Has to be called explicitly."""
    # Intentionally a no-op: fly-scan support is not implemented yet
    # (on_trigger raises for non-step scans).
|
||||
|
||||
def on_stop(self) -> DeviceStatus:
    """Called when the device is stopped.

    Returns:
        DeviceStatus: Resolves once the backend reports idle.
    """
    return self.stop_camera()
|
||||
|
||||
|
||||
# Automatically connect to test camera if directly invoked
if __name__ == "__main__":

    # Drive data collection
    # NOTE: the addresses below are hard-coded development endpoints for the
    # X02DA test setup; adjust when running against a different deployment.
    cam = PcoEdge5M(
        "X02DA-CCDCAM2:",
        name="mcpcam",
        std_daq_ws="ws://129.129.95.111:8081",
        std_daq_rest="http://129.129.95.111:5010",
        std_daq_live="tcp://129.129.95.111:20010",
    )
    cam.wait_for_connection()
|
||||
0
tomcat_bec/devices/std_daq/__init__.py
Normal file
0
tomcat_bec/devices/std_daq/__init__.py
Normal file
499
tomcat_bec/devices/std_daq/std_daq_client.py
Normal file
499
tomcat_bec/devices/std_daq/std_daq_client.py
Normal file
@@ -0,0 +1,499 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import copy
|
||||
import enum
|
||||
import json
|
||||
import queue
|
||||
import threading
|
||||
import time
|
||||
import traceback
|
||||
from typing import TYPE_CHECKING, Callable, Literal
|
||||
|
||||
import requests
|
||||
from bec_lib.logger import bec_logger
|
||||
from ophyd import StatusBase
|
||||
from pydantic import BaseModel, ConfigDict, Field, model_validator
|
||||
from typeguard import typechecked
|
||||
from websockets import State
|
||||
from websockets.exceptions import WebSocketException
|
||||
from websockets.sync.client import ClientConnection, connect
|
||||
|
||||
if TYPE_CHECKING: # pragma: no cover
|
||||
from ophyd import Device
|
||||
|
||||
|
||||
logger = bec_logger.logger
|
||||
|
||||
|
||||
class StdDaqError(Exception):
    """Raised for errors originating from the std_daq client."""
|
||||
|
||||
|
||||
class StdDaqStatus(str, enum.Enum):
    """
    Status of the StdDAQ.
    Extracted from https://git.psi.ch/controls-ci/std_detector_buffer/-/blob/master/source/std-det-driver/src/driver_state.hpp

    The ``str`` mixin makes members compare equal to the raw status strings
    received from the backend.
    """

    CREATING_FILE = "creating_file"
    ERROR = "error"
    FILE_CREATED = "file_created"
    FILE_SAVED = "file_saved"
    IDLE = "idle"
    RECORDING = "recording"
    REJECTED = "rejected"
    SAVING_FILE = "saving_file"
    STARTED = "started"
    STOP = "stop"
    UNDEFINED = "undefined"
    WAITING_FOR_FIRST_IMAGE = "waiting_for_first_image"
|
||||
|
||||
|
||||
class StdDaqConfig(BaseModel):
    """
    Configuration for the StdDAQ.
    More information can be found here: https://controls-ci.gitpages.psi.ch/std_detector_buffer/docs/Interfaces/configfile
    """

    # Mandatory fields
    detector_name: str = Field(
        description="Name of deployment - used as identifier in logging, "
        "part of the name of zmq sockets and shared memory."
    )
    detector_type: Literal["gigafrost", "eiger", "pco", "jungfrau-raw", "jungfrau-converted"]
    image_pixel_height: int
    image_pixel_width: int
    bit_depth: int
    n_modules: int
    start_udp_port: int
    module_positions: dict
    number_of_writers: int

    # Optional fields
    max_number_of_forwarders_spawned: int | None = None
    use_all_forwarders: bool | None = None
    module_sync_queue_size: int | None = None
    ram_buffer_gb: float | None = None
    delay_filter_timeout: float | None = None
    writer_user_id: int | None = None
    live_stream_configs: dict[str, dict[Literal["type", "config"], str | list]]
    log_level: Literal["debug", "info", "warning", "error", "off"] | None = Field(
        default=None,
        description="Log level for the StdDAQ. Defaults to info. Sets the logging level for services - possible values: debug, info, warning, error, off.",
    )
    stats_collection_period: float | None = Field(
        default=None,
        description="Period in seconds for printing stats into journald that are shipped to elastic. Defaults to 10. Warning too high frequency will affect the performance of the system",
    )

    model_config = ConfigDict(extra="ignore")

    @model_validator(mode="before")
    @classmethod
    def resolve_aliases(cls, values):
        """Map the legacy ``roix``/``roiy`` aliases onto the canonical
        image-size fields (width/height), matching StdDaqConfigPartial.

        BUG FIX: the original checked for ``roix`` but popped ``roiy`` (and
        vice versa), which raised KeyError when only one alias was present
        and wrote the values into the wrong fields.
        """
        if "roiy" in values:
            values["image_pixel_height"] = values.pop("roiy")
        if "roix" in values:
            values["image_pixel_width"] = values.pop("roix")
        return values
|
||||
|
||||
|
||||
class StdDaqConfigPartial(BaseModel):
    """
    Partial configuration for the StdDAQ.

    Every field is optional so an instance can describe an incremental
    update; unknown keys are ignored (``extra="ignore"``). The legacy
    ``roix``/``roiy`` names are accepted as aliases for image width/height.
    """

    detector_name: str | None = None
    detector_type: str | None = None
    n_modules: int | None = None
    bit_depth: int | None = None
    # Populated via the legacy alias "roiy".
    image_pixel_height: int | None = Field(default=None, alias="roiy")
    # Populated via the legacy alias "roix".
    image_pixel_width: int | None = Field(default=None, alias="roix")
    start_udp_port: int | None = None
    writer_user_id: int | None = None
    max_number_of_forwarders_spawned: int | None = None
    use_all_forwarders: bool | None = None
    module_sync_queue_size: int | None = None
    number_of_writers: int | None = None
    module_positions: dict | None = None
    ram_buffer_gb: float | None = None
    delay_filter_timeout: float | None = None
    live_stream_configs: dict[str, dict[Literal["type", "config"], str | list]] | None = None

    model_config = ConfigDict(extra="ignore")
|
||||
|
||||
|
||||
class StdDaqWsResponse(BaseModel):
    """
    Response from the StdDAQ websocket

    Extra keys are kept (``extra="allow"``) so additional fields sent by the
    backend remain accessible on the model.
    """

    # Current driver state reported by the backend.
    status: StdDaqStatus
    # Optional human-readable explanation (e.g. for rejected/error states).
    reason: str | None = None
    # Number of images acquired so far, when reported.
    count: int | None = None

    model_config = ConfigDict(extra="allow")
|
||||
|
||||
|
||||
class StdDaqClient:
    """
    Client for the std_daq data acquisition backend.

    Two channels are used:

    * A websocket (``ws_url``): start/stop commands are pushed through a send
      queue and status/count updates are received. Both directions are
      serviced by a single background thread started via :meth:`connect`.
    * A REST API (``rest_url``): configuration get/set and creation of h5
      virtual datasets.
    """

    USER_ACCESS = ["status", "start", "stop", "get_config", "set_config", "reset"]

    def __init__(self, parent: Device, ws_url: str, rest_url: str):
        """
        Args:
            parent (Device): owning device; used for naming the websocket thread.
            ws_url (str): websocket URL of the StdDAQ.
            rest_url (str): REST base URL of the StdDAQ.
        """
        self.parent = parent
        self.ws_url = ws_url
        self.rest_url = rest_url
        self.ws_client: ClientConnection | None = None
        self._status: StdDaqStatus = StdDaqStatus.UNDEFINED
        self._ws_update_thread: threading.Thread | None = None
        self._shutdown_event = threading.Event()
        # Set by the ws loop while it is parked because the DAQ is restarting;
        # _pre_restart waits on it before closing the websocket.
        self._ws_idle_event = threading.Event()
        # Cleared during a config-induced backend restart to park the ws loop.
        self._daq_is_running = threading.Event()
        self._config: StdDaqConfig | None = None
        # Keyed by id() of the registered StatusBase object (int, not str as
        # the original annotation claimed).
        self._status_callbacks: dict[
            int, tuple[StatusBase, list[StdDaqStatus], list[StdDaqStatus]]
        ] = {}
        self._count_callbacks: dict[int, Callable[[int], None]] = {}
        self._send_queue = queue.Queue()
        self._daq_is_running.set()

    @property
    def status(self) -> StdDaqStatus:
        """
        Get the status of the StdDAQ.
        """
        return self._status

    def add_status_callback(
        self, status: StatusBase, success: list[StdDaqStatus], error: list[StdDaqStatus]
    ):
        """
        Add a StatusBase callback for the StdDAQ. The status will be updated when the StdDAQ status changes and
        set to finished when the status matches one of the specified success statuses and to exception when the status
        matches one of the specified error statuses.

        Args:
            status (StatusBase): StatusBase object
            success (list[StdDaqStatus]): list of statuses that indicate success
            error (list[StdDaqStatus]): list of statuses that indicate error
        """
        self._status_callbacks[id(status)] = (status, success, error)

    def add_count_callback(self, callback: Callable[[int], None]) -> int:
        """
        Add a callback for the count of images acquired by the StdDAQ. The callback will be called with the count
        whenever the StdDAQ status changes and the count is available.

        Args:
            callback (Callable[[int], None]): callback function that takes an integer as argument

        Returns:
            int: ID of the callback, which can be used to remove the callback later
        """
        if not callable(callback):
            raise TypeError("Callback must be a callable function")
        max_cb_id = max(self._count_callbacks.keys(), default=0)
        self._count_callbacks[max_cb_id + 1] = callback
        return max_cb_id + 1

    def remove_count_callback(self, cb_id: int):
        """
        Remove a count callback by its ID.

        Args:
            cb_id (int): ID of the callback to remove
        """
        if cb_id in self._count_callbacks:
            del self._count_callbacks[cb_id]
        else:
            logger.warning(f"Callback with ID {cb_id} not found in StdDAQ count callbacks.")

    @typechecked
    def start(
        self, file_path: str, file_prefix: str, num_images: int, timeout: float = 20, wait=True
    ) -> StatusBase:
        """
        Start acquisition on the StdDAQ.

        Args:
            file_path (str): path to save the files
            file_prefix (str): prefix of the files
            num_images (int): number of images to acquire
            timeout (float): timeout in seconds when waiting for the DAQ to
                report 'waiting_for_first_image' (only used if wait is True)
            wait (bool): block until the DAQ reports 'waiting_for_first_image'

        Returns:
            StatusBase: resolved once the DAQ reaches 'waiting_for_first_image'
        """
        logger.info(f"Starting StdDaq backend. Current status: {self.status}")
        status = StatusBase()
        # NOTE(review): the success state is passed as a plain string; this
        # relies on StdDaqStatus comparing equal to str (e.g. str-based enum) —
        # confirm against the StdDaqStatus definition.
        self.add_status_callback(status, success=["waiting_for_first_image"], error=[])
        message = {
            "command": "start",
            "path": file_path,
            "file_prefix": file_prefix,
            "n_image": num_images,
        }
        self._send_queue.put(message)
        if wait:
            status.wait(timeout=timeout)

        return status

    def stop(self):
        """
        Stop acquisition on the StdDAQ.

        The stop command is queued and sent asynchronously by the websocket
        thread; this method does not wait for the DAQ to actually stop.
        """
        message = {"command": "stop"}
        return self._send_queue.put(message)

    def get_config(self, cached=False, timeout: float = 2) -> dict:
        """
        Get the current configuration of the StdDAQ.

        Args:
            cached (bool): whether to use the cached configuration
            timeout (float): timeout for the request

        Returns:
            dict: configuration of the StdDAQ, dumped with exclude_defaults
        """
        if cached and self._config is not None:
            # BUG FIX: previously returned the raw StdDaqConfig object here,
            # while the fresh path below returns a dict. Dump it the same way
            # so both paths honor the declared `-> dict` return type.
            return self._config.model_dump(exclude_defaults=True)
        response = requests.get(
            self.rest_url + "/api/config/get", params={"user": "ioc"}, timeout=timeout
        )
        response.raise_for_status()
        self._config = StdDaqConfig(**response.json())
        return self._config.model_dump(exclude_defaults=True)

    def set_config(self, config: StdDaqConfig | dict, timeout: float = 2) -> None:
        """
        Set the configuration of the StdDAQ. This will overwrite the current configuration.

        Args:
            config (StdDaqConfig | dict): configuration to set
            timeout (float): timeout for the request
        """
        if not isinstance(config, StdDaqConfig):
            config = StdDaqConfig(**config)

        out = config.model_dump(exclude_defaults=True, exclude_none=True)
        if not out:
            logger.info(
                "The provided config does not contain relevant values for the StdDaq. Skipping set_config."
            )
            return

        # Park the websocket loop before the backend goes down.
        self._pre_restart()

        response = requests.post(
            self.rest_url + "/api/config/set", params={"user": "ioc"}, json=out, timeout=timeout
        )
        response.raise_for_status()

        # Setting a new config will reboot the backend; we therefore have to restart the websocket
        self._post_restart()

    def _pre_restart(self):
        # Ask the ws loop to go idle and wait until it confirms, then close
        # the websocket so the backend reboot does not race with recv/send.
        self._daq_is_running.clear()
        self._ws_idle_event.wait()
        if self.ws_client is not None:
            self.ws_client.close()

    def _post_restart(self):
        # Re-establish the websocket and release the parked ws loop.
        self.wait_for_connection()
        self._daq_is_running.set()

    def update_config(self, config: StdDaqConfigPartial | dict, timeout: float = 2) -> None:
        """
        Update the configuration of the StdDAQ. This will update the current configuration.

        Args:
            config (StdDaqConfigPartial | dict): configuration to update
            timeout (float): timeout for the request
        """
        if not isinstance(config, StdDaqConfigPartial):
            config = StdDaqConfigPartial(**config)

        patch_config_dict = config.model_dump(exclude_none=True)
        if not patch_config_dict:
            return

        current_config = copy.deepcopy(self.get_config())
        new_config = copy.deepcopy(current_config)
        new_config.update(patch_config_dict)
        # Skip the expensive set_config (it reboots the backend) if nothing changed.
        if current_config == new_config:
            return

        self.set_config(StdDaqConfig(**new_config), timeout=timeout)

    def reset(self, min_wait: float = 5) -> None:
        """
        Reset the StdDAQ by re-applying the current configuration (which
        reboots the backend).

        Args:
            min_wait (float): minimum wait time after reset
        """
        self.set_config(self.get_config())
        time.sleep(min_wait)

    def wait_for_connection(self, timeout: float = 20) -> None:
        """
        Wait for the connection to the StdDAQ to be established.

        Args:
            timeout (float): timeout for the request

        Raises:
            TimeoutError: if no connection could be made within the timeout
        """
        start_time = time.time()
        while True:
            if self.ws_client is not None and self.ws_client.state == State.OPEN:
                return
            try:
                self.ws_client = connect(self.ws_url)
                break
            except ConnectionRefusedError as exc:
                if time.time() - start_time > timeout:
                    raise TimeoutError("Timeout while waiting for connection to StdDAQ") from exc
                time.sleep(2)

    def create_virtual_datasets(self, file_path: str, file_prefix: str, timeout: float = 5) -> None:
        """
        Combine the stddaq written files in a given folder in an interleaved
        h5 virtual dataset.

        Args:
            file_path (str): path to the folder containing the files
            file_prefix (str): prefix of the files to combine
            timeout (float): timeout for the request
        """

        # TODO: Add wait for 'idle' state

        response = requests.post(
            self.rest_url + "/api/h5/create_interleaved_vds",
            params={"user": "ioc"},
            json={
                "base_path": file_path,
                "file_prefix": file_prefix,
                "output_file": file_prefix.rstrip("_") + ".h5",
            },
            timeout=timeout,
            headers={"Content-type": "application/json"},
        )
        response.raise_for_status()

    def connect(self):
        """
        Connect to the StdDAQ. This method should be called after the client is created. It will
        launch a background thread to exchange data with the StdDAQ.
        """
        if self._ws_update_thread is not None and self._ws_update_thread.is_alive():
            return
        self._ws_update_thread = threading.Thread(
            target=self._ws_update_loop, name=f"{self.parent.name}_stddaq_ws_loop", daemon=True
        )
        self._ws_update_thread.start()

    def shutdown(self):
        """
        Shutdown the StdDAQ client: stop the websocket loop and close the socket.
        """
        self._shutdown_event.set()
        if self._ws_update_thread is not None:
            self._ws_update_thread.join()
        if self.ws_client is not None:
            self.ws_client.close()
            self.ws_client = None

    def _wait_for_server_running(self):
        """
        Wait for the StdDAQ to be running. If the StdDaq is not running, the
        websocket loop will be set to idle.
        """
        while not self._shutdown_event.is_set():
            if self._daq_is_running.wait(0.1):
                self._ws_idle_event.clear()
                break
            # Signal _pre_restart that the loop is parked and the socket may
            # be closed safely.
            self._ws_idle_event.set()

    def _ws_send_and_receive(self):
        # One iteration of the websocket loop: flush at most one queued
        # command, then poll briefly for an incoming status message.
        if not self.ws_client:
            self.wait_for_connection()
        try:
            try:
                msg = self._send_queue.get(block=False)
                logger.trace(f"Sending to stddaq ws: {msg}")
                self.ws_client.send(json.dumps(msg))
                logger.info(f"Sent to stddaq ws: {msg}")
            except queue.Empty:
                pass
            try:
                recv_msgs = self.ws_client.recv(timeout=0.1)
            except TimeoutError:
                return
            logger.trace(f"Received from stddaq ws: {recv_msgs}")
            if recv_msgs is not None:
                self._on_received_ws_message(recv_msgs)
        except WebSocketException:
            # NOTE(review): if the send above raised, the dequeued command is
            # lost and not re-queued — confirm this is acceptable.
            content = traceback.format_exc()
            logger.warning(f"Websocket connection closed unexpectedly: {content}")
            self.wait_for_connection()

    def _ws_update_loop(self):
        """
        Loop to update the status property of the StdDAQ.
        """
        while not self._shutdown_event.is_set():
            self._wait_for_server_running()
            self._ws_send_and_receive()

    def _on_received_ws_message(self, msg: str):
        """
        Handle a message received from the StdDAQ.
        """
        try:
            data = StdDaqWsResponse(**json.loads(msg))
        except Exception:
            content = traceback.format_exc()
            logger.warning(f"Failed to decode websocket message: {content}")
            return
        if data.status != self._status:
            logger.info(f"std_daq_client status changed from [{self._status}] to [{data.status}]")
        self._status = data.status
        if data.count is not None:
            self._run_count_callbacks(data.count)
        self._run_status_callbacks()

    def _run_count_callbacks(self, count: int):
        """
        Run the count callbacks with the given count.
        The callbacks will be called with the count as argument.
        """
        for cb in self._count_callbacks.values():
            try:
                cb(count)
            except Exception as exc:
                logger.error(f"Error in StdDAQ count callback: {exc}")

    def _run_status_callbacks(self):
        """
        Update the StatusBase objects based on the current status of the StdDAQ.
        If the status matches one of the success or error statuses, the StatusBase object will be set to finished
        or exception, respectively and removed from the list of callbacks.
        """
        status = self._status
        completed_callbacks = []
        for dev_status, success, error in self._status_callbacks.values():
            if dev_status.done:
                logger.warning("Status object already resolved. Skipping StdDaq callback.")
                continue
            if status in success:
                dev_status.set_finished()
                logger.info(f"StdDaq status is {status}")
                completed_callbacks.append(dev_status)
            elif status in error:
                logger.warning(f"StdDaq status is {status}")
                dev_status.set_exception(StdDaqError(f"StdDaq status is {status}"))
                completed_callbacks.append(dev_status)

        for cb in completed_callbacks:
            self._status_callbacks.pop(id(cb))
|
||||
288
tomcat_bec/devices/std_daq/std_daq_live_processing.py
Normal file
288
tomcat_bec/devices/std_daq/std_daq_live_processing.py
Normal file
@@ -0,0 +1,288 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import pathlib
|
||||
import threading
|
||||
from typing import TYPE_CHECKING, Literal
|
||||
|
||||
import h5py
|
||||
import numpy as np
|
||||
from bec_lib import messages
|
||||
from bec_lib.endpoints import MessageEndpoints
|
||||
from bec_lib.logger import bec_logger
|
||||
from ophyd import StatusBase
|
||||
from typeguard import typechecked
|
||||
|
||||
if TYPE_CHECKING: # pragma: no cover
|
||||
from bec_lib.redis_connector import RedisConnector
|
||||
from ophyd import Signal
|
||||
from ophyd_devices.interfaces.base_classes.psi_device_base import PSIDeviceBase
|
||||
|
||||
|
||||
logger = bec_logger.logger
|
||||
|
||||
|
||||
class StdDaqLiveProcessing:
|
||||
USER_ACCESS = ["set_enabled", "set_mode", "get_mode"]
|
||||
|
||||
def __init__(self, parent: PSIDeviceBase, signal: Signal, signal2: Signal):
|
||||
self.parent = parent
|
||||
self.signal = signal
|
||||
self.signal2 = signal2
|
||||
self._enabled = False
|
||||
self._mode = "sum"
|
||||
self.connector: RedisConnector | None = (
|
||||
self.parent.device_manager.connector if self.parent.device_manager else None
|
||||
)
|
||||
self.references: dict[str, np.ndarray] = {}
|
||||
|
||||
def get_mode(self) -> str:
|
||||
"""
|
||||
Get the current processing mode.
|
||||
|
||||
Returns:
|
||||
str: Current processing mode, e.g., "sum".
|
||||
"""
|
||||
return self._mode
|
||||
|
||||
@typechecked
|
||||
def set_mode(self, mode: Literal["sum"]):
|
||||
"""
|
||||
Set the processing mode.
|
||||
Args:
|
||||
mode (str): Processing mode, currently only "sum" is supported.
|
||||
"""
|
||||
if mode not in ["sum"]:
|
||||
raise ValueError("Unsupported mode. Only 'sum' is currently supported.")
|
||||
self._mode = mode
|
||||
|
||||
def set_enabled(self, value: bool):
|
||||
"""
|
||||
Enable or disable live processing.
|
||||
Args:
|
||||
value (bool): True to enable, False to disable.
|
||||
"""
|
||||
if not isinstance(value, bool):
|
||||
raise ValueError("Enabled must be a boolean value.")
|
||||
self._enabled = value
|
||||
|
||||
#########################################
|
||||
## Live Data Processing #################
|
||||
#########################################
|
||||
|
||||
def on_new_data(self, data: np.ndarray):
|
||||
"""
|
||||
Process new data if live processing is enabled.
|
||||
Args:
|
||||
data (np.ndarray): New data to process.
|
||||
"""
|
||||
|
||||
if not self._enabled:
|
||||
logger.info("Skipping data processing")
|
||||
return
|
||||
|
||||
match self._mode:
|
||||
case "sum":
|
||||
self.process_sum(data)
|
||||
case _:
|
||||
raise ValueError(f"Unknown mode: {self._mode}")
|
||||
|
||||
def process_sum(self, data: np.ndarray):
|
||||
"""
|
||||
Process data by summing it.
|
||||
Args:
|
||||
data (np.ndarray): Data to sum.
|
||||
"""
|
||||
if not isinstance(data, np.ndarray):
|
||||
raise ValueError("Data must be a numpy array.")
|
||||
|
||||
summed_data = np.sum(np.sum(data))
|
||||
self.signal.put(summed_data)
|
||||
|
||||
########################################
|
||||
## Flat and Dark Field References ######
|
||||
########################################
|
||||
|
||||
def apply_flat_dark_correction(self, data: np.ndarray) -> np.ndarray:
|
||||
"""
|
||||
Apply flat and dark field correction to the data.
|
||||
Args:
|
||||
data (np.ndarray): Data to correct.
|
||||
Returns:
|
||||
np.ndarray: Corrected data.
|
||||
"""
|
||||
if not isinstance(data, np.ndarray):
|
||||
raise ValueError("Data must be a numpy array.")
|
||||
|
||||
flat = self.get_flat(data.shape) # type: ignore # ndarray.shape is of type _ShapeType, which is just a generic of Any
|
||||
dark = self.get_dark(data.shape) # type: ignore
|
||||
|
||||
# If flat is just ones, we simply subtract dark from data
|
||||
if np.all(flat == 1):
|
||||
corrected_data = data - dark
|
||||
return corrected_data
|
||||
|
||||
corrected_data = (data - dark) / (flat - dark)
|
||||
return corrected_data
|
||||
|
||||
@typechecked
|
||||
def _load_and_update_reference(
|
||||
self,
|
||||
ref_type: Literal["flat", "dark"],
|
||||
file_path: str | pathlib.PosixPath,
|
||||
entry: str,
|
||||
status: StatusBase | None = None,
|
||||
) -> None:
|
||||
"""
|
||||
Update the reference field with data from a file.
|
||||
Args:
|
||||
ref_type (str): Type of reference, either "flat" or "dark".
|
||||
file_path (str): Path to the file containing the reference data.
|
||||
entry (str): Entry name in the file to read the data from.
|
||||
status (StatusBase | None): Status object to report progress.
|
||||
Raises:
|
||||
ValueError: If the file path is not a string or if the entry is not found.
|
||||
Exception: If there is an error reading the file or processing the data.
|
||||
"""
|
||||
|
||||
try:
|
||||
with h5py.File(file_path, "r") as file:
|
||||
if entry not in file:
|
||||
raise ValueError(f"Entry '{entry}' not found in the file.")
|
||||
data = file[entry][:] # type: ignore
|
||||
if not isinstance(data, np.ndarray):
|
||||
raise ValueError("Data in the file must be a numpy array.")
|
||||
if data.ndim == 2:
|
||||
self.references[f"{ref_type}_{data.shape}"] = data # type: ignore
|
||||
elif data.ndim == 3:
|
||||
# For 3D data, we take the mean across the first axis
|
||||
data = np.mean(data, axis=0)
|
||||
self.references[f"{ref_type}_{data.shape}"] = data
|
||||
else:
|
||||
raise ValueError("Data must be 2D or 3D numpy array.")
|
||||
self._publish_to_redis(data, self._redis_endpoint_name(ref_type, data.shape)) # type: ignore
|
||||
if status is not None:
|
||||
status.set_finished()
|
||||
except Exception as exc:
|
||||
if status is not None:
|
||||
status.set_exception(exc)
|
||||
else:
|
||||
logger.error(f"Failed to update {ref_type} field reference from {file_path}: {exc}")
|
||||
raise
|
||||
|
||||
def update_reference_with_file(
|
||||
self,
|
||||
reference_type: Literal["dark", "flat"],
|
||||
file_path: str | pathlib.PosixPath,
|
||||
entry: str,
|
||||
wait=False,
|
||||
) -> StatusBase:
|
||||
"""
|
||||
Update the reference with a new file.
|
||||
Args:
|
||||
reference_type (Literal["dark", "flat"]): Type of reference to update.
|
||||
camera_name (str): Name of the camera.
|
||||
file_path (str): Path to the flat field file.
|
||||
entry (str): Entry name in the file to read the data from.
|
||||
wait (bool): Whether to wait for the update to complete.
|
||||
"""
|
||||
status = StatusBase()
|
||||
if not wait:
|
||||
# If not waiting, run the update in a separate thread
|
||||
threading.Thread(
|
||||
target=self._load_and_update_reference,
|
||||
args=(reference_type, file_path, entry, status),
|
||||
).start()
|
||||
return status
|
||||
|
||||
self._load_and_update_reference(reference_type, file_path, entry, status=status)
|
||||
status.wait()
|
||||
return status
|
||||
|
||||
def get_flat(self, shape: tuple[int, int]) -> np.ndarray:
|
||||
"""
|
||||
Get the flat field reference for a specific shape.
|
||||
Args:
|
||||
shape (tuple[int, int]): Shape of the flat field reference to retrieve.
|
||||
Returns:
|
||||
np.ndarray: Flat field reference for the specified shape.
|
||||
"""
|
||||
if not isinstance(shape, tuple) or len(shape) != 2:
|
||||
raise ValueError("Shape must be a tuple of two integers.")
|
||||
key = f"flat_{shape}"
|
||||
if key not in self.references:
|
||||
# if the reference is not found, check Redis for it
|
||||
redis_data = self._get_from_redis(self._redis_endpoint_name("flat", shape))
|
||||
if redis_data is not None:
|
||||
self.references[key] = redis_data
|
||||
else:
|
||||
# If not found in Redis, create a default flat field reference
|
||||
self.references[key] = np.ones(shape) # Default to ones if not found
|
||||
return self.references[key]
|
||||
|
||||
def get_dark(self, shape: tuple[int, int]) -> np.ndarray:
|
||||
"""
|
||||
Get the dark field reference for a specific shape.
|
||||
Args:
|
||||
shape (tuple[int, int]): Shape of the dark field reference to retrieve.
|
||||
Returns:
|
||||
np.ndarray: Dark field reference for the specified shape.
|
||||
"""
|
||||
if not isinstance(shape, tuple) or len(shape) != 2:
|
||||
raise ValueError("Shape must be a tuple of two integers.")
|
||||
key = f"dark_{shape}"
|
||||
if key not in self.references:
|
||||
redis_data = self._get_from_redis(self._redis_endpoint_name("dark", shape))
|
||||
if redis_data is not None:
|
||||
self.references[key] = redis_data
|
||||
else:
|
||||
self.references[key] = np.zeros(shape)
|
||||
return self.references[key]
|
||||
|
||||
def _redis_endpoint_name(self, ref_type: str, shape: tuple[int, int]) -> str:
|
||||
return f"{self.parent.name}_{ref_type}_{shape}"
|
||||
|
||||
def _publish_to_redis(self, data: np.ndarray, name: str) -> None:
|
||||
"""
|
||||
Publish processed data to Redis.
|
||||
Args:
|
||||
data (np.ndarray): Data to publish.
|
||||
name (str): Name of the data for Redis.
|
||||
"""
|
||||
if self.connector is None:
|
||||
logger.warning("Redis connector is not set. Cannot publish data.")
|
||||
return
|
||||
|
||||
msg = messages.ProcessedDataMessage(data={"data": data, "name": name, "shape": data.shape})
|
||||
self.connector.xadd(
|
||||
MessageEndpoints.processed_data(process_id=name), msg_dict={"data": msg}, max_size=1
|
||||
)
|
||||
|
||||
def _get_from_redis(self, name: str) -> np.ndarray | None:
|
||||
"""
|
||||
Retrieve data from Redis.
|
||||
Args:
|
||||
name (str): Name of the data to retrieve.
|
||||
Returns:
|
||||
np.ndarray: Retrieved data.
|
||||
"""
|
||||
if self.connector is None:
|
||||
logger.warning("Redis connector is not set. Cannot retrieve data.")
|
||||
return None
|
||||
|
||||
msg = self.connector.get_last(MessageEndpoints.processed_data(process_id=name))
|
||||
if not msg:
|
||||
return None
|
||||
|
||||
if isinstance(msg, dict):
|
||||
msg = msg.get("data")
|
||||
if not isinstance(msg, messages.ProcessedDataMessage):
|
||||
logger.error(f"Received unexpected message type: {type(msg)}")
|
||||
return None
|
||||
if isinstance(msg.data, list):
|
||||
data = msg.data[0].get("data")
|
||||
else:
|
||||
data = msg.data.get("data")
|
||||
if not isinstance(data, np.ndarray):
|
||||
logger.error("Data retrieved from Redis is not a numpy array.")
|
||||
return None
|
||||
return data
|
||||
108
tomcat_bec/devices/std_daq/std_daq_preview.py
Normal file
108
tomcat_bec/devices/std_daq/std_daq_preview.py
Normal file
@@ -0,0 +1,108 @@
|
||||
import json
|
||||
import threading
|
||||
import time
|
||||
from typing import Callable
|
||||
|
||||
import numpy as np
|
||||
import zmq
|
||||
from bec_lib.logger import bec_logger
|
||||
|
||||
logger = bec_logger.logger
|
||||
|
||||
ZMQ_TOPIC_FILTER = b""
|
||||
|
||||
|
||||
class StdDaqPreview:
    """
    Live preview client for the StdDAQ ZMQ PUB-SUB image stream.

    A background thread subscribes to the stream, decodes incoming Array V1
    messages (JSON metadata + raw uint16 buffer) and forwards each image to
    the provided callback.
    """

    USER_ACCESS = ["start", "stop"]

    def __init__(self, url: str, cb: Callable):
        """
        Args:
            url (str): ZMQ endpoint of the StdDAQ live stream.
            cb (Callable): called with each decoded image (np.ndarray).
        """
        self.url = url
        self._socket = None
        self._shutdown_event = threading.Event()
        self._zmq_thread = None
        self._on_update_callback = cb

    def connect(self):
        """Connect to the StdDAQ's PUB-SUB streaming interface.

        StdDAQ may reject connection for a few seconds when it restarts,
        so if it fails, wait a bit and try to connect again.
        """
        # pylint: disable=no-member

        context = zmq.Context()
        self._socket = context.socket(zmq.SUB)
        self._socket.setsockopt(zmq.SUBSCRIBE, ZMQ_TOPIC_FILTER)
        try:
            self._socket.connect(self.url)
        except ConnectionRefusedError:
            # Single retry after a short grace period for a restarting backend.
            time.sleep(1)
            self._socket.connect(self.url)

    def start(self):
        """Start the background thread that polls the live stream."""
        self._zmq_thread = threading.Thread(
            target=self._zmq_update_loop, daemon=True, name="StdDaq_live_preview"
        )
        self._zmq_thread.start()

    def stop(self):
        """Stop the background thread and wait for it to exit."""
        self._shutdown_event.set()
        if self._zmq_thread:
            self._zmq_thread.join()

    def _zmq_update_loop(self):
        # Main loop of the preview thread: (re)connect lazily and poll.
        while not self._shutdown_event.is_set():
            if self._socket is None:
                self.connect()
            try:
                self._poll()
            except ValueError:
                # Happens when ZMQ partially delivers the multipart message
                pass
            except zmq.error.Again:
                # Happens when receive queue is empty
                time.sleep(0.1)

    def _poll(self):
        """
        Poll the ZMQ socket for new data. It will throttle the data update and
        only subscribe to the topic for a single update. This is not very nice
        but it seems like there is currently no option to set the update rate on
        the backend.
        """

        # The wait doubles as the throttle interval and as a responsive
        # shutdown check.
        if self._shutdown_event.wait(0.2):
            return

        try:
            # subscribe to the topic
            self._socket.setsockopt(zmq.SUBSCRIBE, ZMQ_TOPIC_FILTER)

            # pylint: disable=no-member
            r = self._socket.recv_multipart(flags=zmq.NOBLOCK)
            self._parse_data(r)

        finally:
            # Unsubscribe from the topic
            self._socket.setsockopt(zmq.UNSUBSCRIBE, ZMQ_TOPIC_FILTER)

    def _parse_data(self, data):
        # Length and throttling checks: a valid Array V1 message has exactly
        # two parts (JSON metadata + raw pixel buffer).
        if len(data) != 2:
            logger.warning(f"Received malformed array of length {len(data)}")
            # BUG FIX: previously fell through and raised a spurious
            # ValueError on the unpack below; bail out instead.
            return

        # Unpack the Array V1 reply to metadata and array data
        meta, img_data = data

        # Update image and update subscribers
        header = json.loads(meta)
        if header["type"] == "uint16":
            image = np.frombuffer(img_data, dtype=np.uint16)
        else:
            raise ValueError(f"Unexpected type {header['type']}")
        if image.size != np.prod(header["shape"]):
            err = f"Unexpected array size of {image.size} for header: {header}"
            raise ValueError(err)
        image = image.reshape(header["shape"])
        logger.info(f"Live update: frame {header['frame']}")
        self._on_update_callback(image)
|
||||
@@ -1,3 +1,4 @@
|
||||
from .simple_scans import TomoFlyScan, TomoScan
|
||||
from .tomcat_scans import TomcatSimpleSequence, TomcatSnapNStep
|
||||
from .tutorial_fly_scan import (
|
||||
AcquireDark,
|
||||
|
||||
@@ -0,0 +1,325 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import time
|
||||
from typing import Literal
|
||||
|
||||
import numpy as np
|
||||
from bec_lib.device import DeviceBase
|
||||
from bec_lib.logger import bec_logger
|
||||
from bec_server.scan_server.scans import AsyncFlyScanBase, LineScan, ScanBase
|
||||
|
||||
logger = bec_logger.logger
|
||||
|
||||
|
||||
class TomoComponents:
    """
    Shared building blocks for tomography scans.

    Wraps the scan stubs to provide shutter control, camera reconfiguration,
    and dark/flat reference acquisition for all enabled camera devices.
    """

    def __init__(self, scan: ScanBase):
        """
        Args:
            scan (ScanBase): the owning scan; provides stubs and device manager.
        """
        self.scan = scan
        self.stubs = scan.stubs
        self.device_manager = scan.device_manager

        # Update the available cameras for the current scan
        self.cameras = self._get_cameras()

    def _get_cameras(self) -> list[str]:
        # Returns camera *names* (cam.name), not device objects; the original
        # list[DeviceBase] annotation was wrong.
        return [
            cam.name
            for cam in self.device_manager.devices.get_devices_with_tags("camera")
            if cam.enabled
        ]

    def open_shutter(self):
        """
        Open the shutter if it is closed.
        """
        logger.info("Opening shutter.")
        yield from self.stubs.set(device=["shutter"], value=[1])

    def close_shutter(self):
        """
        Close the shutter if it is open.
        """
        yield from self.stubs.set(device=["shutter"], value=[0])

    def restart_cameras(
        self,
        name: str,
        num_images: int,
        prefix: str = "",
        file_path: str = "",
        frames_per_trigger: int = 1,
    ):
        """
        Restart every enabled camera with a new acquisition configuration.

        Args:
            name (str): configuration name (used later by restore_config).
            num_images (int): number of images to acquire.
            prefix (str): file prefix; if empty, the restart is skipped entirely.
            file_path (str): destination path for the files.
            frames_per_trigger (int): frames acquired per trigger.
        """
        if not prefix:
            return
        for cam in self.cameras:
            yield from self.stubs.send_rpc_and_wait(
                device=cam,
                func_name="restart_with_new_config",
                name=name,
                file_prefix=prefix,
                file_path=file_path,
                num_images=num_images,
                frames_per_trigger=frames_per_trigger,
            )

    def complete_and_restore_configs(self, name: str):
        """
        Complete the current acquisition on every camera and restore the named
        configuration.

        Args:
            name (str): configuration name previously used in restart_cameras.
        """
        for cam in self.cameras:
            yield from self.stubs.send_rpc_and_wait(device=cam, func_name="on_complete")
            yield from self.stubs.send_rpc_and_wait(
                device=cam, func_name="restore_config", name=name
            )

    def update_live_processing_references(self, ref_type: Literal["dark", "flat"]):
        """
        Update the live processing references for dark or flat images.

        Args:
            ref_type (Literal["dark", "flat"]): Type of reference to update.
        """
        if ref_type not in ["dark", "flat"]:
            raise ValueError("ref_type must be either 'dark' or 'flat'.")

        logger.info(f"Updating live processing references for {ref_type} images.")
        for cam in self.cameras:
            yield from self.stubs.send_rpc_and_wait(
                device=cam, func_name="update_live_processing_reference", reference_type=ref_type
            )

    def acquire_dark(self, num_images: int, exposure_time: float, name="dark"):
        """
        Acquire dark images.

        Args:
            num_images (int): Number of dark images to acquire.
            exposure_time (float): Exposure time for each dark image in seconds.
            name (str): configuration / file prefix name for this acquisition.
        """
        if not num_images:
            return
        logger.info(f"Acquiring {num_images} dark images with exposure time {exposure_time}s.")

        yield from self.restart_cameras(
            name=name, prefix=name, num_images=num_images, frames_per_trigger=1
        )
        # yield from self.close_shutter()
        for i in range(num_images):
            logger.debug(f"Acquiring dark image {i+1}/{num_images}.")
            yield from self.stubs.trigger(min_wait=exposure_time)
            yield from self.stubs.read(group="monitored", point_id=self.scan.point_id)
            self.scan.point_id += 1
        yield from self.complete_and_restore_configs(name=name)
        yield from self.update_live_processing_references(ref_type="dark")

        # yield from self.open_shutter()
        logger.info("Dark image acquisition complete.")

    def acquire_flat(self, num_images: int, exposure_time: float, name="flat"):
        """
        Acquire flat images.

        Args:
            num_images (int): Number of flat images to acquire.
            exposure_time (float): Exposure time for each flat image in seconds.
            name (str): configuration / file prefix name for this acquisition.
        """
        if not num_images:
            return
        logger.info(f"Acquiring {num_images} flat images with exposure time {exposure_time}s.")

        yield from self.restart_cameras(
            name=name, prefix=name, num_images=num_images, frames_per_trigger=1
        )
        # yield from self.open_shutter()
        for i in range(num_images):
            logger.debug(f"Acquiring flat image {i+1}/{num_images}.")
            yield from self.stubs.trigger(min_wait=exposure_time)
            yield from self.stubs.read(group="monitored", point_id=self.scan.point_id)
            self.scan.point_id += 1
        yield from self.complete_and_restore_configs(name=name)
        # BUG FIX: this previously updated the "dark" references after a flat
        # acquisition (copy-paste from acquire_dark).
        yield from self.update_live_processing_references(ref_type="flat")
        logger.info("Flat image acquisition complete.")

    def acquire_references(self, num_darks: int, num_flats: int, exp_time: float, name: str):
        """
        Acquire both dark and flat reference images with a shared name/prefix.
        """
        yield from self.acquire_dark(num_darks, exposure_time=exp_time, name=name)
        yield from self.acquire_flat(num_flats, exposure_time=exp_time, name=name)
|
||||
|
||||
|
||||
class TomoScan(LineScan):
    scan_name = "tomo_line_scan"

    def __init__(
        self,
        *args,
        exp_time: float = 0,
        steps: int = None,
        relative: bool = False,
        burst_at_each_point: int = 1,
        num_darks: int = 0,
        num_flats: int = 0,
        **kwargs,
    ):
        """
        A line scan for one or more motors, with optional dark/flat reference acquisition
        before and after the scan.

        Args:
            *args (Device, float, float): pairs of device / start position / end position
            exp_time (float): exposure time in s. Default: 0
            steps (int): number of steps. Default: 10
            relative (bool): if True, the start and end positions are relative to the current position. Default: False
            burst_at_each_point (int): number of acquisition per point. Default: 1
            num_darks (int): number of dark images acquired before and after the scan. Default: 0
            num_flats (int): number of flat images acquired before and after the scan. Default: 0

        Returns:
            ScanReport

        Examples:
            >>> scans.tomo_line_scan(dev.motor1, -5, 5, dev.motor2, -5, 5, steps=10, exp_time=0.1, relative=True)

        """
        super().__init__(
            *args,
            exp_time=exp_time,
            steps=steps,
            relative=relative,
            burst_at_each_point=burst_at_each_point,
            **kwargs,
        )
        self.num_darks = num_darks
        self.num_flats = num_flats
        self.components = TomoComponents(self)

    def prepare_positions(self):
        yield from super().prepare_positions()
        # References are acquired both before (pre_scan) and after (finalize)
        # the scan, hence the factor of two.
        self.num_pos += 2 * (self.num_darks + self.num_flats)

    def pre_scan(self):
        yield from self.components.acquire_dark(self.num_darks, self.exp_time, name="pre_scan_dark")
        yield from self.components.acquire_flat(self.num_flats, self.exp_time, name="pre_scan_flat")
        yield from super().pre_scan()

    def finalize(self):
        yield from super().finalize()
        yield from self.components.acquire_dark(
            self.num_darks, self.exp_time, name="post_scan_dark"
        )
        yield from self.components.acquire_flat(
            self.num_flats, self.exp_time, name="post_scan_flat"
        )
|
||||
|
||||
class TomoFlyScan(AsyncFlyScanBase):
    scan_name = "tomo_fly_scan"
    gui_config = {
        "Motor": ["motor"],
        "Acquisition parameters": ["sample_in"],
        "Camera": ["exp_time"],
    }

    def __init__(
        self,
        motor: DeviceBase,
        start: float,
        stop: float,
        sample_in: float,
        sample_out: float,
        num_darks: int = 0,
        num_flats: int = 0,
        exp_time: float = 0,
        relative: bool = False,
        **kwargs,
    ):
        """
        A fly scan for a single motor.

        Args:
            motor (DeviceBase): The motor to scan.
            start (float): Start position.
            stop (float): Stop position.
            sample_in (float): Sample in position.
            sample_out (float): Sample out position.
            num_darks (int): Number of dark images to acquire. Default: 0
            num_flats (int): Number of flat images to acquire. Default: 0
            exp_time (float): Exposure time in seconds. Default: 0
            relative (bool): If True, the start and stop positions are relative to the current position. Default: False

        Returns:
            ScanReport

        Examples:
            >>> scans.tomo_fly_scan(dev.motor1, 0, 10, sample_in=5, sample_out=7, exp_time=0.1, num_darks=5, num_flats=5)

        """
        super().__init__(relative=relative, exp_time=exp_time, **kwargs)
        self.motor = motor
        self.start = start
        self.stop = stop
        self.sample_in = sample_in
        self.sample_out = sample_out
        # FIX: num_darks/num_flats were assigned twice in the original __init__;
        # a single assignment is sufficient.
        self.num_darks = num_darks
        self.num_flats = num_flats
        self.sample_stage = "samy"  # change to the correct sample stage device
        self.shutter = "hx"  # change to the correct shutter device

        self.components = TomoComponents(self)

    def scan_report_instructions(self):
        """
        Generate scan report instructions for the fly scan.

        This method provides the necessary instructions to listen to the camera progress during the scan.
        """

        # If no cameras are available, fall back to the default scan report instructions
        if not self.components.cameras:
            yield from super().scan_report_instructions()
            return

        # Use the first camera or "gfcam" if available for reporting
        report_camera = (
            "gfcam" if "gfcam" in self.components.cameras else self.components.cameras[0]
        )
        yield from self.stubs.scan_report_instruction({"device_progress": [report_camera]})

    def prepare_positions(self):
        # Only the motion endpoints are known for a fly scan; the number of
        # positions is determined at runtime (num_pos stays None).
        self.positions = np.array([[self.start], [self.stop]])
        self.num_pos = None
        yield from self._set_position_offset()

    def pre_scan(self):
        yield from self.components.acquire_dark(self.num_darks, self.exp_time, name="pre_scan_dark")
        yield from self.components.acquire_flat(self.num_flats, self.exp_time, name="pre_scan_flat")
        yield from super().pre_scan()

    def scan_core(self):
        """
        Core scanning logic for the fly scan.
        """

        # Open the shutter
        # yield from self.components.open_shutter()

        # Move the sample stage to the sample in position
        sample_in_status = yield from self.stubs.set(
            device=self.sample_stage, value=[self.sample_in], wait=False
        )

        # Move the rotation stage to the start position
        motor_start_status = yield from self.stubs.set(
            device=self.motor, value=[self.start], wait=False
        )

        # Wait for both movements to complete
        sample_in_status.wait()
        motor_start_status.wait()

        # Kickoff the rotation stage to start the fly scan
        flyer_status = yield from self.stubs.set(device=self.motor, value=[self.stop], wait=False)

        # Send a single trigger to kick off the camera acquisition
        yield from self.stubs.trigger()

        # Monitor the flyer status whilst reading out monitored devices (e.g. temperatures)
        while not flyer_status.done:
            yield from self.stubs.read(group="monitored", point_id=self.point_id)
            self.point_id += 1
            time.sleep(1)

        # Close the shutter after the scan is complete
        # yield from self.components.close_shutter()
||||
Reference in New Issue
Block a user