1 Commits

Author SHA1 Message Date
gac-x02da
7a2df919fb First config file for S-TOMCAT 2025-06-04 09:22:46 +02:00
45 changed files with 3486 additions and 4760 deletions

View File

@@ -2,8 +2,8 @@
# It is needed to track the repo template version, and editing may break things.
# This file will be overwritten by copier on template updates.
_commit: v1.2.8
_src_path: https://github.com/bec-project/plugin_copier_template.git
_commit: v1.0.0
_src_path: https://gitea.psi.ch/bec/bec_plugin_copier_template.git
make_commit: false
project_name: tomcat_bec
widget_plugins_input: []

View File

@@ -1,102 +0,0 @@
name: CI for tomcat_bec
on:
push:
pull_request:
workflow_dispatch:
inputs:
BEC_WIDGETS_BRANCH:
description: "Branch of BEC Widgets to install"
required: false
type: string
default: "main"
BEC_CORE_BRANCH:
description: "Branch of BEC Core to install"
required: false
type: string
default: "main"
OPHYD_DEVICES_BRANCH:
description: "Branch of Ophyd Devices to install"
required: false
type: string
default: "main"
BEC_PLUGIN_REPO_BRANCH:
description: "Branch of the BEC Plugin Repository to install"
required: false
type: string
default: "main"
PYTHON_VERSION:
description: "Python version to use"
required: false
type: string
default: "3.12"
permissions:
pull-requests: write
jobs:
test:
runs-on: ubuntu-latest
env:
QTWEBENGINE_DISABLE_SANDBOX: 1
QT_QPA_PLATFORM: "offscreen"
steps:
- name: Setup Python
uses: actions/setup-python@v5
with:
python-version: "${{ inputs.PYTHON_VERSION || '3.12' }}"
- name: Checkout BEC Plugin Repository
uses: actions/checkout@v4
with:
repository: bec/tomcat_bec
ref: "${{ inputs.BEC_PLUGIN_REPO_BRANCH || github.head_ref || github.sha }}"
path: ./tomcat_bec
- name: Lint for merge conflicts from template updates
shell: bash
# Find all Copier conflicts except this line
run: '! grep -r "<<<<<<< before updating" | grep -v "grep -r \"<<<<<<< before updating"'
- name: Checkout BEC Core
uses: actions/checkout@v4
with:
repository: bec/bec
ref: "${{ inputs.BEC_CORE_BRANCH || 'main' }}"
path: ./bec
- name: Checkout Ophyd Devices
uses: actions/checkout@v4
with:
repository: bec/ophyd_devices
ref: "${{ inputs.OPHYD_DEVICES_BRANCH || 'main' }}"
path: ./ophyd_devices
- name: Checkout BEC Widgets
uses: actions/checkout@v4
with:
repository: bec/bec_widgets
ref: "${{ inputs.BEC_WIDGETS_BRANCH || 'main' }}"
path: ./bec_widgets
- name: Install dependencies
shell: bash
run: |
sudo apt-get update
sudo apt-get install -y libgl1 libegl1 x11-utils libxkbcommon-x11-0 libdbus-1-3 xvfb
sudo apt-get -y install libnss3 libxdamage1 libasound2t64 libatomic1 libxcursor1
- name: Install Python dependencies
shell: bash
run: |
pip install uv
uv pip install --system -e ./ophyd_devices
uv pip install --system -e ./bec/bec_lib[dev]
uv pip install --system -e ./bec/bec_ipython_client
uv pip install --system -e ./bec/bec_server[dev]
uv pip install --system -e ./bec_widgets[dev,pyside6]
uv pip install --system -e ./tomcat_bec[dev]
- name: Run Pytest with Coverage
id: coverage
run: pytest --random-order --cov=./tomcat_bec --cov-config=./tomcat_bec/pyproject.toml --cov-branch --cov-report=xml --no-cov-on-fail ./tomcat_bec/tests/ || test $? -eq 5

View File

@@ -1,62 +0,0 @@
name: Create template upgrade PR for tomcat_bec
on:
workflow_dispatch:
permissions:
pull-requests: write
jobs:
create_update_branch_and_pr:
runs-on: ubuntu-latest
permissions:
contents: write
pull-requests: write
steps:
- name: Setup Python
uses: actions/setup-python@v5
with:
python-version: '3.12'
- name: Install tools
run: |
pip install copier PySide6
- name: Checkout
uses: actions/checkout@v4
- name: Perform update
run: |
git config --global user.email "bec_ci_staging@psi.ch"
git config --global user.name "BEC automated CI"
branch="chore/update-template-$(python -m uuid)"
echo "switching to branch $branch"
git checkout -b $branch
echo "Running copier update..."
output="$(copier update --trust --defaults --conflict inline 2>&1)"
echo "$output"
msg="$(printf '%s\n' "$output" | head -n 1)"
if ! grep -q "make_commit: true" .copier-answers.yml ; then
echo "Autocommit not made, committing..."
git add -A
git commit -a -m "$msg"
fi
if diff-index --quiet HEAD ; then
echo "No changes detected"
exit 0
fi
git push -u origin $branch
curl -X POST "https://gitea.psi.ch/api/v1/repos/${{ gitea.repository }}/pulls" \
-H "Authorization: token ${{ secrets.CI_REPO_WRITE }}" \
-H "Content-Type: application/json" \
-d "{
\"title\": \"Template: $(echo $msg)\",
\"body\": \"This PR was created by Gitea Actions\",
\"head\": \"$(echo $branch)\",
\"base\": \"main\"
}"

7
.gitlab-ci.yml Normal file
View File

@@ -0,0 +1,7 @@
include:
- file: /templates/plugin-repo-template.yml
inputs:
name: tomcat_bec
target: tomcat_bec
branch: $CHILD_PIPELINE_BRANCH
project: bec/awi_utils

View File

@@ -6,7 +6,7 @@ build-backend = "hatchling.build"
name = "tomcat_bec"
version = "0.0.0"
description = "The TOMCAT plugin repository for BEC"
requires-python = ">=3.11"
requires-python = ">=3.10"
classifiers = [
"Development Status :: 3 - Alpha",
"Programming Language :: Python :: 3",
@@ -33,7 +33,6 @@ dev = [
"ophyd_devices",
"bec_server",
"requests-mock",
"fakeredis",
]
[project.entry-points."bec"]

View File

@@ -1,337 +0,0 @@
from unittest import mock
import pytest
from tomcat_bec.devices.gigafrost.gigafrost_base import GigaFrostBase
from tomcat_bec.devices.gigafrost.gigafrostcamera import GigaFrostCamera, default_config
from tomcat_bec.devices.std_daq.std_daq_client import StdDaqClient
from tomcat_bec.devices.std_daq.std_daq_preview import StdDaqPreview
@pytest.fixture()
def gfcam_base():
gfcam = GigaFrostCamera(
"X02DA-CAM-GF2:",
name="gfcam",
std_daq_rest="http://example.com/rest",
std_daq_ws="ws://example.com/ws",
)
for component in gfcam.component_names:
type.__setattr__(GigaFrostCamera, component, mock.MagicMock())
yield gfcam
def test_gfcam_init_raises_without_rest_ws():
with pytest.raises(ValueError) as excinfo:
GigaFrostCamera("X02DA-CAM-GF2:", name="gfcam")
excinfo.match("std_daq_rest and std_daq_ws must be provided")
def test_gfcam_init():
gfcam = GigaFrostCamera(
"X02DA-CAM-GF2:",
name="gfcam",
std_daq_rest="http://example.com/rest",
std_daq_ws="ws://example.com/ws",
)
assert gfcam.name == "gfcam"
assert isinstance(gfcam.backend, StdDaqClient)
assert gfcam.live_preview is None
def test_gfcam_init_with_live_preview():
gfcam = GigaFrostCamera(
"X02DA-CAM-GF2:",
name="gfcam",
std_daq_rest="http://example.com/rest",
std_daq_ws="ws://example.com/ws",
std_daq_live="http://example.com/live_preview",
)
assert gfcam.live_preview is not None
assert isinstance(gfcam.live_preview, StdDaqPreview)
def test_gfcam_configure(gfcam_base):
with mock.patch.object(gfcam_base, "stop_camera") as stop_camera:
with mock.patch.object(gfcam_base.backend, "set_config") as set_config:
with mock.patch.object(GigaFrostBase, "configure") as base_configure:
gfcam_base.configure({})
stop_camera.assert_called_once()
stop_camera().wait.assert_called_once()
set_config.assert_not_called()
config = default_config()
base_configure.assert_called_once_with(config)
def test_gfcam_default_config_copies():
assert isinstance(default_config(), dict)
assert id(default_config()) != id(default_config())
def test_gfcam_configure_sets_exp_time_in_ms(gfcam_base):
with mock.patch.object(gfcam_base, "stop_camera") as stop_camera:
with mock.patch.object(gfcam_base.backend, "set_config") as set_config:
with mock.patch.object(GigaFrostBase, "configure") as base_configure:
gfcam_base.configure({"exp_time": 0.1})
stop_camera.assert_called_once()
stop_camera().wait.assert_called_once()
set_config.assert_not_called()
config = default_config()
config.update({"exposure": 100}) # in ms
base_configure.assert_called_once_with(config)
def test_gfcam_set_acquisition_mode_invalid(gfcam_base):
"""Test setting invalid acquisition mode"""
with pytest.raises(RuntimeError) as excinfo:
gfcam_base.set_acquisition_mode("invalid_mode")
excinfo.match("Unsupported acquisition mode: invalid_mode")
@pytest.mark.parametrize(
"mode_soft, mode_external, mode_always, expected_result",
[
(0, 0, 0, None), # No enable mode set
(1, 0, 0, "soft"), # Only soft mode enabled
(0, 1, 0, "external"), # Only external mode enabled
(1, 1, 0, "soft+ext"), # Both soft and external enabled
(0, 0, 1, "always"), # Always mode enabled
(1, 0, 1, "always"), # Always overrides soft
(0, 1, 1, "always"), # Always overrides external
(1, 1, 1, "always"), # Always overrides both soft and external
],
)
def test_gfcam_enable_mode_property(
gfcam_base, mode_soft, mode_external, mode_always, expected_result
):
"""Test that the enable_mode property returns the correct mode based on signal values"""
# Configure the mock return values for the mode signals
gfcam_base.mode_endbl_soft.get.return_value = mode_soft
gfcam_base.mode_enbl_ext.get.return_value = mode_external
gfcam_base.mode_enbl_auto.get.return_value = mode_always
# Check that the property returns the expected result
assert gfcam_base.enable_mode == expected_result
@pytest.mark.parametrize(
"mode,expected_settings",
[
("soft", {"mode_enbl_ext": 0, "mode_endbl_soft": 1, "mode_enbl_auto": 0}),
("external", {"mode_enbl_ext": 1, "mode_endbl_soft": 0, "mode_enbl_auto": 0}),
("soft+ext", {"mode_enbl_ext": 1, "mode_endbl_soft": 1, "mode_enbl_auto": 0}),
("always", {"mode_enbl_ext": 0, "mode_endbl_soft": 0, "mode_enbl_auto": 1}),
],
)
def test_gfcam_enable_mode_setter(gfcam_base, mode, expected_settings):
"""Test setting the enable mode of the GigaFRoST camera"""
# Mock the const.gf_valid_enable_modes to avoid importing the constants
with mock.patch(
"tomcat_bec.devices.gigafrost.gigafrostcamera.const.gf_valid_enable_modes",
["soft", "external", "soft+ext", "always"],
):
# Set the enable mode
gfcam_base.enable_mode = mode
# Verify the correct signals were set
gfcam_base.mode_enbl_ext.set.assert_called_once_with(expected_settings["mode_enbl_ext"])
gfcam_base.mode_endbl_soft.set.assert_called_once_with(expected_settings["mode_endbl_soft"])
gfcam_base.mode_enbl_auto.set.assert_called_once_with(expected_settings["mode_enbl_auto"])
# Verify wait was called on each set operation
gfcam_base.mode_enbl_ext.set().wait.assert_called_once()
gfcam_base.mode_endbl_soft.set().wait.assert_called_once()
gfcam_base.mode_enbl_auto.set().wait.assert_called_once()
# Verify parameters were committed
gfcam_base.set_param.set.assert_called_once_with(1)
gfcam_base.set_param.set().wait.assert_called_once()
def test_gfcam_enable_mode_setter_invalid(gfcam_base):
"""Test setting an invalid enable mode raises an error"""
# Mock the const.gf_valid_enable_modes to avoid importing the constants
with mock.patch(
"tomcat_bec.devices.gigafrost.gigafrostcamera.const.gf_valid_enable_modes",
["soft", "external", "soft+ext", "always"],
):
with pytest.raises(ValueError) as excinfo:
gfcam_base.enable_mode = "invalid_mode"
assert "Invalid enable mode invalid_mode!" in str(excinfo.value)
assert "Valid modes are:" in str(excinfo.value)
# Verify no signals were set
gfcam_base.mode_enbl_ext.set.assert_not_called()
gfcam_base.mode_endbl_soft.set.assert_not_called()
gfcam_base.mode_enbl_auto.set.assert_not_called()
gfcam_base.set_param.set.assert_not_called()
@pytest.mark.parametrize(
"mode_auto, mode_soft, mode_timer, mode_external, expected_result",
[
(0, 0, 0, 0, None), # No trigger mode set
(1, 0, 0, 0, "auto"), # Only auto mode enabled
(0, 1, 0, 0, "soft"), # Only soft mode enabled
(0, 0, 1, 0, "timer"), # Only timer mode enabled
(0, 0, 0, 1, "external"), # Only external mode enabled
(1, 1, 0, 0, "auto"), # Auto takes precedence over soft
(1, 0, 1, 0, "auto"), # Auto takes precedence over timer
(1, 0, 0, 1, "auto"), # Auto takes precedence over external
(0, 1, 1, 0, "soft"), # Soft takes precedence over timer
(0, 1, 0, 1, "soft"), # Soft takes precedence over external
(0, 0, 1, 1, "timer"), # Timer takes precedence over external
(1, 1, 1, 1, "auto"), # Auto takes precedence over all
],
)
def test_gfcam_trigger_mode_property(
gfcam_base, mode_auto, mode_soft, mode_timer, mode_external, expected_result
):
"""Test that the trigger_mode property returns the correct mode based on signal values"""
# Configure the mock return values for the mode signals
gfcam_base.mode_trig_auto.get.return_value = mode_auto
gfcam_base.mode_trig_soft.get.return_value = mode_soft
gfcam_base.mode_trig_timer.get.return_value = mode_timer
gfcam_base.mode_trig_ext.get.return_value = mode_external
# Check that the property returns the expected result
assert gfcam_base.trigger_mode == expected_result
@pytest.mark.parametrize(
"mode,expected_settings",
[
(
"auto",
{"mode_trig_auto": 1, "mode_trig_soft": 0, "mode_trig_timer": 0, "mode_trig_ext": 0},
),
(
"soft",
{"mode_trig_auto": 0, "mode_trig_soft": 1, "mode_trig_timer": 0, "mode_trig_ext": 0},
),
(
"timer",
{"mode_trig_auto": 0, "mode_trig_soft": 0, "mode_trig_timer": 1, "mode_trig_ext": 0},
),
(
"external",
{"mode_trig_auto": 0, "mode_trig_soft": 0, "mode_trig_timer": 0, "mode_trig_ext": 1},
),
],
)
def test_gfcam_trigger_mode_setter(gfcam_base, mode, expected_settings):
"""Test setting the trigger mode of the GigaFRoST camera"""
# Set the trigger mode
gfcam_base.trigger_mode = mode
# Verify the correct signals were set
gfcam_base.mode_trig_auto.set.assert_called_with(expected_settings["mode_trig_auto"])
gfcam_base.mode_trig_soft.set.assert_called_with(expected_settings["mode_trig_soft"])
gfcam_base.mode_trig_timer.set.assert_called_with(expected_settings["mode_trig_timer"])
gfcam_base.mode_trig_ext.set.assert_called_with(expected_settings["mode_trig_ext"])
# Verify wait was called on each set operation
gfcam_base.mode_trig_auto.set().wait.assert_called_once()
gfcam_base.mode_trig_soft.set().wait.assert_called_once()
gfcam_base.mode_trig_timer.set().wait.assert_called_once()
gfcam_base.mode_trig_ext.set().wait.assert_called_once()
# Verify parameters were committed
gfcam_base.set_param.set.assert_called_once_with(1)
gfcam_base.set_param.set().wait.assert_called_once()
def test_gfcam_trigger_mode_setter_invalid(gfcam_base):
"""Test setting an invalid trigger mode raises an error"""
with pytest.raises(ValueError) as excinfo:
gfcam_base.trigger_mode = "invalid_mode"
assert "Invalid trigger mode!" in str(excinfo.value)
assert "Valid modes are: ['auto', 'external', 'timer', 'soft']" in str(excinfo.value)
# Verify no signals were set
gfcam_base.mode_trig_auto.set.assert_not_called()
gfcam_base.mode_trig_soft.set.assert_not_called()
gfcam_base.mode_trig_timer.set.assert_not_called()
gfcam_base.mode_trig_ext.set.assert_not_called()
gfcam_base.set_param.set.assert_not_called()
@pytest.mark.parametrize(
"start_bit, end_bit, expected_result",
[
(0, 0, "off"), # Both bits off
(1, 0, "start"), # Only start bit on
(0, 1, "end"), # Only end bit on
(1, 1, "start+end"), # Both bits on
],
)
def test_gfcam_fix_nframes_mode_property(gfcam_base, start_bit, end_bit, expected_result):
"""Test that the fix_nframes_mode property returns the correct mode based on bit values"""
# Configure the mock return values for the bits
gfcam_base.cnt_startbit.get.return_value = start_bit
# Note: The original code has a bug here - it calls cnt_startbit.get() twice instead of cnt_endbit.get()
# For testing purposes, we'll mock both appropriately
gfcam_base.cnt_endbit.get.return_value = end_bit
# Check that the property returns the expected result
assert gfcam_base.fix_nframes_mode == expected_result
@pytest.mark.parametrize(
"mode, expected_settings",
[
("off", {"cnt_startbit": 0, "cnt_endbit": 0}),
("start", {"cnt_startbit": 1, "cnt_endbit": 0}),
("end", {"cnt_startbit": 0, "cnt_endbit": 1}),
("start+end", {"cnt_startbit": 1, "cnt_endbit": 1}),
],
)
def test_gfcam_fix_nframes_mode_setter(gfcam_base, mode, expected_settings):
"""Test setting the fixed number of frames mode of the GigaFRoST camera"""
# Mock the const.gf_valid_fix_nframe_modes to avoid importing the constants
with mock.patch(
"tomcat_bec.devices.gigafrost.gigafrostcamera.const.gf_valid_fix_nframe_modes",
["off", "start", "end", "start+end"],
):
# Set the mode
gfcam_base.fix_nframes_mode = mode
# Verify the class attribute was set
assert gfcam_base._fix_nframes_mode == mode
# Verify the correct signals were set
gfcam_base.cnt_startbit.set.assert_called_once_with(expected_settings["cnt_startbit"])
gfcam_base.cnt_endbit.set.assert_called_once_with(expected_settings["cnt_endbit"])
# Verify wait was called on each set operation
gfcam_base.cnt_startbit.set().wait.assert_called_once()
gfcam_base.cnt_endbit.set().wait.assert_called_once()
# Verify parameters were committed
gfcam_base.set_param.set.assert_called_once_with(1)
gfcam_base.set_param.set().wait.assert_called_once()
def test_gfcam_fix_nframes_mode_setter_invalid(gfcam_base):
"""Test setting an invalid fixed number of frames mode raises an error"""
# Mock the const.gf_valid_fix_nframe_modes to avoid importing the constants
with mock.patch(
"tomcat_bec.devices.gigafrost.gigafrostcamera.const.gf_valid_fix_nframe_modes",
["off", "start", "end", "start+end"],
):
with pytest.raises(ValueError) as excinfo:
gfcam_base.fix_nframes_mode = "invalid_mode"
assert "Invalid fixed frame number mode!" in str(excinfo.value)
assert "Valid modes are:" in str(excinfo.value)
# Verify no signals were set
gfcam_base.cnt_startbit.set.assert_not_called()
gfcam_base.cnt_endbit.set.assert_not_called()
gfcam_base.set_param.set.assert_not_called()

View File

@@ -1,25 +0,0 @@
from unittest import mock
import pytest
from tomcat_bec.devices.pco_edge.pcoedgecamera import PcoEdge5M
@pytest.fixture()
def pcocam_base():
gfcam = PcoEdge5M(
"X02DA-CAM-GF2:",
name="pco_edge_camera",
std_daq_rest="http://example.com/rest",
std_daq_ws="ws://example.com/ws",
)
for component in gfcam.component_names:
type.__setattr__(PcoEdge5M, component, mock.MagicMock())
yield gfcam
def test_pcocam_init_raises_without_rest_ws():
with pytest.raises(ValueError) as excinfo:
PcoEdge5M("X02DA-CAM-GF2:", name="pco_edge_camera")
excinfo.match("std_daq_rest and std_daq_ws must be provided")

View File

@@ -1,172 +0,0 @@
from unittest import mock
import fakeredis
import h5py
import numpy as np
import pytest
from bec_lib.redis_connector import RedisConnector
from ophyd_devices.interfaces.base_classes.psi_device_base import PSIDeviceBase
from typeguard import TypeCheckError
from tomcat_bec.devices.std_daq.std_daq_live_processing import StdDaqLiveProcessing
def fake_redis_server(host, port, **kwargs):
redis = fakeredis.FakeRedis()
return redis
@pytest.fixture
def connected_connector():
connector = RedisConnector("localhost:1", redis_cls=fake_redis_server) # type: ignore
connector._redis_conn.flushall()
try:
yield connector
finally:
connector.shutdown()
class MockPSIDeviceBase(PSIDeviceBase):
def __init__(self, *args, device_manager=None, **kwargs):
super().__init__(*args, **kwargs)
self.device_manager = device_manager
@pytest.fixture
def mock_device(connected_connector):
device_manager = mock.Mock()
device_manager.connector = connected_connector
device = MockPSIDeviceBase(name="mock_device", device_manager=device_manager)
yield device
@pytest.fixture
def std_daq_live_processing(mock_device):
signal = mock.Mock()
signal2 = mock.Mock()
live_processing = StdDaqLiveProcessing(mock_device, signal, signal2)
yield live_processing
def test_std_daq_live_processing_set_mode(std_daq_live_processing):
std_daq_live_processing.set_mode("sum")
assert std_daq_live_processing.get_mode() == "sum"
with pytest.raises(TypeCheckError):
std_daq_live_processing.set_mode("average")
with pytest.raises(TypeCheckError):
std_daq_live_processing.set_mode(123)
@pytest.fixture(params=["flat", "dark"])
def reference_type(request):
return request.param
def test_std_daq_live_processing_flat_default(std_daq_live_processing, reference_type):
with mock.patch.object(
std_daq_live_processing, "_get_from_redis", return_value=None
) as mock_get_from_redis:
get_method = (
std_daq_live_processing.get_flat
if reference_type == "flat"
else std_daq_live_processing.get_dark
)
out = get_method((100, 100))
mock_get_from_redis.assert_called_once_with(
std_daq_live_processing._redis_endpoint_name(ref_type=reference_type, shape=(100, 100))
)
assert isinstance(out, np.ndarray)
assert out.shape == (100, 100)
if reference_type == "flat":
assert np.all(out == 1), "Default should be all ones"
else:
assert np.all(out == 0), "Default should be all zeros"
@pytest.mark.parametrize("value", [np.random.rand(100, 100), np.random.rand(3, 100, 100)])
def test_std_daq_live_processing_fetch(tmp_path, std_daq_live_processing, value, reference_type):
with h5py.File(tmp_path / "test_data.h5", "w") as f:
f.create_dataset("tomcat-pco/data", data=value)
status = std_daq_live_processing.update_reference_with_file(
reference_type, tmp_path / "test_data.h5", "tomcat-pco/data"
)
status.wait()
get_method = (
std_daq_live_processing.get_flat
if reference_type == "flat"
else std_daq_live_processing.get_dark
)
out = get_method((100, 100))
assert isinstance(out, np.ndarray)
assert out.shape == (100, 100)
# Check that the data is cached locally
assert np.array_equal(
std_daq_live_processing.references[f"{reference_type}_(100, 100)"], out
), "Cached flat data should match fetched data"
redis_data = std_daq_live_processing._get_from_redis(
std_daq_live_processing._redis_endpoint_name(ref_type=reference_type, shape=(100, 100))
)
assert isinstance(redis_data, np.ndarray)
assert redis_data.shape == (100, 100)
assert np.array_equal(redis_data, out), "Redis data should match the locally cached data"
def test_std_daq_live_processing_apply_flat_dark_correction(std_daq_live_processing):
# Create a mock image
image = np.random.rand(100, 100)
# Set flat and dark references
std_daq_live_processing.references["flat_(100, 100)"] = np.ones((100, 100))
std_daq_live_processing.references["dark_(100, 100)"] = np.zeros((100, 100))
# Apply flat and dark correction
corrected_image = std_daq_live_processing.apply_flat_dark_correction(image)
assert isinstance(corrected_image, np.ndarray)
assert corrected_image.shape == (100, 100)
assert np.all(corrected_image >= 0), "Corrected image should not have negative values"
def test_std_daq_live_processing_apply_flat_dark_correction_with_dark(std_daq_live_processing):
# Create a mock image
image = np.random.rand(100, 100) * 1000 # Scale to simulate a realistic image
dark = np.random.rand(100, 100) * 100 # Simulate a dark reference
image += dark # Add dark to the image to simulate a realistic scenario
# Set flat and dark references
std_daq_live_processing.references["flat_(100, 100)"] = np.ones((100, 100))
std_daq_live_processing.references["dark_(100, 100)"] = dark
# Apply flat and dark correction
corrected_image = std_daq_live_processing.apply_flat_dark_correction(image)
assert isinstance(corrected_image, np.ndarray)
assert corrected_image.shape == (100, 100)
assert np.all(corrected_image >= 0), "Corrected image should not have negative values"
def test_std_daq_live_processing_apply_flat_correction_zero_division(std_daq_live_processing):
# Create a mock image
image = np.random.rand(100, 100) * 1000 + 10 # Scale to simulate a realistic image
# Set flat reference with epsilon values
flat = np.ones((100, 100)) * 2
std_daq_live_processing.references["flat_(100, 100)"] = flat
# Set dark reference to ones
dark = np.ones((100, 100)) * 2
std_daq_live_processing.references["dark_(100, 100)"] = dark
# Apply flat correction
corrected_image = std_daq_live_processing.apply_flat_dark_correction(image)
assert isinstance(corrected_image, np.ndarray)
assert corrected_image.shape == (100, 100)
assert np.all(corrected_image >= 0), "Corrected image should not have negative values"
assert np.any(corrected_image < np.inf), "Corrected image should not have infinite values"

View File

@@ -1,361 +1,353 @@
import json
from unittest import mock
import pytest
import requests
import requests_mock
import typeguard
from ophyd import StatusBase
from websockets import WebSocketException
from tomcat_bec.devices.std_daq.std_daq_client import (
StdDaqClient,
StdDaqConfig,
StdDaqError,
StdDaqStatus,
)
@pytest.fixture
def client():
parent_device = mock.MagicMock()
_client = StdDaqClient(
parent=parent_device, ws_url="http://localhost:5000", rest_url="http://localhost:5000"
)
yield _client
_client.shutdown()
@pytest.fixture
def full_config():
full_config = StdDaqConfig(
detector_name="tomcat-gf",
detector_type="gigafrost",
n_modules=8,
bit_depth=16,
image_pixel_height=2016,
image_pixel_width=2016,
start_udp_port=2000,
writer_user_id=18600,
max_number_of_forwarders_spawned=8,
use_all_forwarders=True,
module_sync_queue_size=4096,
number_of_writers=12,
module_positions={},
ram_buffer_gb=150,
delay_filter_timeout=10,
live_stream_configs={
"tcp://129.129.95.111:20000": {"type": "periodic", "config": [1, 5]},
"tcp://129.129.95.111:20001": {"type": "periodic", "config": [1, 5]},
"tcp://129.129.95.38:20000": {"type": "periodic", "config": [1, 1]},
},
)
return full_config
def test_stddaq_client(client):
assert client is not None
def test_stddaq_client_get_daq_config(client, full_config):
with requests_mock.Mocker() as m:
response = full_config
m.get(
"http://localhost:5000/api/config/get?user=ioc",
json=response.model_dump(exclude_defaults=True),
)
out = client.get_config()
# Check that the response is simply the json response
assert out == response.model_dump(exclude_defaults=True)
assert client._config == response
def test_stddaq_client_set_config_pydantic(client, full_config):
"""Test setting configurations through the StdDAQ client"""
with requests_mock.Mocker() as m:
m.post("http://localhost:5000/api/config/set?user=ioc")
# Test with StdDaqConfig object
config = full_config
with mock.patch.object(client, "_pre_restart"), mock.patch.object(client, "_post_restart"):
client.set_config(config)
# Verify the last request
assert m.last_request.json() == full_config.model_dump(exclude_defaults=True)
def test_std_daq_client_set_config_dict(client, full_config):
"""
Test setting configurations through the StdDAQ client with a dictionary input.
"""
with requests_mock.Mocker() as m:
m.post("http://localhost:5000/api/config/set?user=ioc")
# Test with dictionary input
config_dict = full_config.model_dump()
with mock.patch.object(client, "_pre_restart"), mock.patch.object(client, "_post_restart"):
client.set_config(config_dict)
assert m.last_request.json() == full_config.model_dump(exclude_defaults=True)
def test_stddaq_client_set_config_ignores_extra_keys(client, full_config):
"""
Test that the set_config method ignores extra keys in the input dictionary.
"""
with requests_mock.Mocker() as m:
m.post("http://localhost:5000/api/config/set?user=ioc")
# Test with dictionary input
config_dict = full_config.model_dump()
config_dict["extra_key"] = "extra_value"
with mock.patch.object(client, "_pre_restart"), mock.patch.object(client, "_post_restart"):
client.set_config(config_dict)
assert m.last_request.json() == full_config.model_dump(exclude_defaults=True)
def test_stddaq_client_set_config_error(client, full_config):
"""
Test error handling in the set_config method.
"""
with requests_mock.Mocker() as m:
config = full_config
m.post("http://localhost:5000/api/config/set?user=ioc", status_code=500)
with mock.patch.object(client, "_pre_restart"), mock.patch.object(client, "_post_restart"):
with pytest.raises(requests.exceptions.HTTPError):
client.set_config(config)
# import json
# from unittest import mock
# import pytest
# import requests
# import requests_mock
# import typeguard
# from ophyd import StatusBase
# from websockets import WebSocketException
# from tomcat_bec.devices.gigafrost.std_daq_client import StdDaqClient, StdDaqError, StdDaqStatus
# @pytest.fixture
# def client():
# parent_device = mock.MagicMock()
# _client = StdDaqClient(
# parent=parent_device, ws_url="ws://localhost:5001", rest_url="http://localhost:5000"
# )
# yield _client
# _client.shutdown()
# @pytest.fixture
# def full_config():
# full_config = dict(
# detector_name="tomcat-gf",
# detector_type="gigafrost",
# n_modules=8,
# bit_depth=16,
# image_pixel_height=2016,
# image_pixel_width=2016,
# start_udp_port=2000,
# writer_user_id=18600,
# max_number_of_forwarders_spawned=8,
# use_all_forwarders=True,
# module_sync_queue_size=4096,
# number_of_writers=12,
# module_positions={},
# ram_buffer_gb=150,
# delay_filter_timeout=10,
# live_stream_configs={
# "tcp://129.129.95.111:20000": {"type": "periodic", "config": [1, 5]},
# "tcp://129.129.95.111:20001": {"type": "periodic", "config": [1, 5]},
# "tcp://129.129.95.38:20000": {"type": "periodic", "config": [1, 1]},
# },
# )
# return full_config
# def test_stddaq_client(client):
# assert client is not None
# def test_stddaq_client_get_daq_config(client, full_config):
# with requests_mock.Mocker() as m:
# response = full_config
# m.get("http://localhost:5000/api/config/get?user=ioc", json=response.model_dump())
# out = client.get_config()
# # Check that the response is simply the json response
# assert out == response.model_dump()
# assert client._config == response
# def test_stddaq_client_set_config_pydantic(client, full_config):
# """Test setting configurations through the StdDAQ client"""
# with requests_mock.Mocker() as m:
# m.post("http://localhost:5000/api/config/set?user=ioc")
# # Test with StdDaqConfig object
# config = full_config
# with mock.patch.object(client, "_pre_restart"), mock.patch.object(client, "_post_restart"):
# client.set_config(config)
# # Verify the last request
# assert m.last_request.json() == full_config.model_dump()
# def test_std_daq_client_set_config_dict(client, full_config):
# """
# Test setting configurations through the StdDAQ client with a dictionary input.
# """
# with requests_mock.Mocker() as m:
# m.post("http://localhost:5000/api/config/set?user=ioc")
# # Test with dictionary input
# config_dict = full_config.model_dump()
# with mock.patch.object(client, "_pre_restart"), mock.patch.object(client, "_post_restart"):
# client.set_config(config_dict)
# assert m.last_request.json() == full_config.model_dump()
def test_stddaq_client_get_config_cached(client, full_config):
"""
Test that the client returns the cached configuration if it is available.
"""
# Set the cached configuration
config = full_config
client._config = config
# def test_stddaq_client_set_config_ignores_extra_keys(client, full_config):
# """
# Test that the set_config method ignores extra keys in the input dictionary.
# """
# with requests_mock.Mocker() as m:
# m.post("http://localhost:5000/api/config/set?user=ioc")
# Test that the client returns the cached configuration
assert client.get_config(cached=True) == config
# # Test with dictionary input
# config_dict = full_config.model_dump()
# config_dict["extra_key"] = "extra_value"
# with mock.patch.object(client, "_pre_restart"), mock.patch.object(client, "_post_restart"):
# client.set_config(config_dict)
# assert m.last_request.json() == full_config.model_dump()
def test_stddaq_client_status(client):
client._status = StdDaqStatus.FILE_CREATED
assert client.status == StdDaqStatus.FILE_CREATED
# def test_stddaq_client_set_config_error(client, full_config):
# """
# Test error handling in the set_config method.
# """
# with requests_mock.Mocker() as m:
# config = full_config
# m.post("http://localhost:5000/api/config/set?user=ioc", status_code=500)
# with mock.patch.object(client, "_pre_restart"), mock.patch.object(client, "_post_restart"):
# with pytest.raises(requests.exceptions.HTTPError):
# client.set_config(config)
def test_stddaq_client_start(client):
# def test_stddaq_client_get_config_cached(client, full_config):
# """
# Test that the client returns the cached configuration if it is available.
# """
with mock.patch("tomcat_bec.devices.std_daq.std_daq_client.StatusBase") as StatusBase:
client.start(file_path="test_file_path", file_prefix="test_file_prefix", num_images=10)
out = client._send_queue.get()
assert out == {
"command": "start",
"path": "test_file_path",
"file_prefix": "test_file_prefix",
"n_image": 10,
}
StatusBase().wait.assert_called_once()
# # Set the cached configuration
# config = full_config
# client._config = config
# # Test that the client returns the cached configuration
# assert client.get_config(cached=True) == config
def test_stddaq_client_start_type_error(client):
    """``start`` must reject a non-integer ``num_images`` via typeguard checking."""
    with pytest.raises(typeguard.TypeCheckError):
        client.start(file_path="test_file_path", file_prefix="test_file_prefix", num_images="10")
# def test_stddaq_client_status(client):
# client._status = StdDaqStatus.FILE_CREATED
# assert client.status == StdDaqStatus.FILE_CREATED
def test_stddaq_client_stop(client):
    """
    Check that the stop method puts the stop command in the send queue.
    """
    client.stop()
    # BUG FIX: the original line evaluated the comparison and discarded the
    # result, so the test could never fail; assert the queued command instead.
    assert client._send_queue.get() == {"command": "stop"}
# def test_stddaq_client_start(client):
def test_stddaq_client_update_config(client, full_config):
    """
    ``update_config`` must push a new configuration when a provided value
    differs from the one returned by the REST endpoint.
    """
    with requests_mock.Mocker() as m:
        m.get("http://localhost:5000/api/config/get?user=ioc", json=full_config.model_dump())
        with mock.patch.object(client, "set_config") as set_config:
            # "new_name" differs from the served config -> a write must happen
            client.update_config({"detector_name": "new_name"})
        set_config.assert_called_once()
# def test_stddaq_client_stop(client):
# """
# Check that the stop method puts the stop command in the send queue.
# """
# client.stop()
# client._send_queue.get() == {"command": "stop"}
def test_stddaq_client_updates_only_changed_configs(client, full_config):
    """
    ``update_config`` must not push a configuration when the provided value
    already matches the one served by the REST endpoint.
    """
    with requests_mock.Mocker() as m:
        m.get("http://localhost:5000/api/config/get?user=ioc", json=full_config.model_dump())
        with mock.patch.object(client, "set_config") as set_config:
            # value matches the served config -> no write must happen
            client.update_config({"detector_name": "tomcat-gf"})
        set_config.assert_not_called()
def test_stddaq_client_updates_only_changed_configs_empty(client, full_config):
    """
    ``update_config`` must be a no-op when the update dictionary is empty.
    """
    with requests_mock.Mocker() as m:
        m.get("http://localhost:5000/api/config/get?user=ioc", json=full_config.model_dump())
        with mock.patch.object(client, "set_config") as set_config:
            # nothing to change -> no write must happen
            client.update_config({})
        set_config.assert_not_called()
def test_stddaq_client_pre_restart(client):
    """
    Test that _pre_restart closes the websocket client once the websocket
    loop is idle.
    """
    # let's assume the websocket loop is already idle
    client._ws_idle_event.set()
    client.ws_client = mock.MagicMock()
    client._pre_restart()
    client.ws_client.close.assert_called_once()
def test_stddaq_client_post_restart(client):
    """
    Test that _post_restart waits for the connection and flags the DAQ as
    running again.
    """
    with mock.patch.object(client, "wait_for_connection") as wait_for_connection:
        client._post_restart()
        wait_for_connection.assert_called_once()
        assert client._daq_is_running.is_set()
def test_stddaq_client_reset(client):
    """
    A reset must fetch the current configuration and write it back, i.e.
    call both get_config and set_config exactly once.
    """
    with mock.patch.object(client, "get_config") as get_config:
        with mock.patch.object(client, "set_config") as set_config:
            client.reset()
    get_config.assert_called_once()
    set_config.assert_called_once()
def test_stddaq_client_run_status_callbacks(client):
    """
    A status listed as a success condition must finish the status object
    and remove its registered callback.
    """
    status = StatusBase()
    client.add_status_callback(status, success=[StdDaqStatus.FILE_CREATED], error=[])
    client._status = StdDaqStatus.FILE_CREATED
    client._run_status_callbacks()
    # returns immediately because the status finished successfully
    status.wait()
    assert len(status._callbacks) == 0
def test_stddaq_client_run_status_callbacks_error(client):
    """
    A status listed as an error condition must fail the status object with
    StdDaqError and remove its registered callback.
    """
    status = StatusBase()
    client.add_status_callback(status, success=[], error=[StdDaqStatus.FILE_CREATED])
    client._status = StdDaqStatus.FILE_CREATED
    client._run_status_callbacks()
    # waiting on a failed status re-raises the recorded exception
    with pytest.raises(StdDaqError):
        status.wait()
    assert len(status._callbacks) == 0
@pytest.mark.parametrize(
    "msg, updated",
    # first case: a raw dict (not a JSON string) must be ignored;
    # second case: a valid JSON status string must be applied
    [({"status": "IDLE"}, False), (json.dumps({"status": "waiting_for_first_image"}), True)],
)
def test_stddaq_client_on_received_ws_message(client, msg, updated):
    """
    Only valid JSON status messages should update the cached status and
    trigger the status callbacks; anything else leaves the state untouched.
    """
    client._status = None
    with mock.patch.object(client, "_run_status_callbacks") as run_status_callbacks:
        client._on_received_ws_message(msg)
        if updated:
            run_status_callbacks.assert_called_once()
            assert client._status == StdDaqStatus.WAITING_FOR_FIRST_IMAGE
        else:
            run_status_callbacks.assert_not_called()
            assert client._status is None
def test_stddaq_client_ws_send_and_receive(client):
    """
    _ws_send_and_receive must send a queued message when one is available and
    must always receive from the websocket, even with an empty queue.
    """
    client.ws_client = mock.MagicMock()
    client._send_queue.put({"command": "test"})
    client._ws_send_and_receive()
    # queue is not empty, so we should send the message
    client.ws_client.send.assert_called_once()
    client.ws_client.recv.assert_called_once()
    client.ws_client.reset_mock()
    client._ws_send_and_receive()
    # queue is empty, so we should not send the message
    client.ws_client.send.assert_not_called()
    client.ws_client.recv.assert_called_once()
def test_stddaq_client_ws_send_and_receive_websocket_error(client):
    """
    Test that the ws_send_and_receive method handles websocket errors by
    waiting for the connection to be re-established.
    """
    client.ws_client = mock.MagicMock()
    client.ws_client.send.side_effect = WebSocketException()
    client._send_queue.put({"command": "test"})
    with mock.patch.object(client, "wait_for_connection") as wait_for_connection:
        client._ws_send_and_receive()
        # a websocket error must trigger a reconnect attempt
        wait_for_connection.assert_called_once()
def test_stddaq_client_ws_send_and_receive_timeout_error(client):
    """
    Test that the ws_send_and_receive method treats a receive timeout as
    benign and does not trigger a reconnect.
    """
    client.ws_client = mock.MagicMock()
    client.ws_client.recv.side_effect = TimeoutError()
    client._send_queue.put({"command": "test"})
    with mock.patch.object(client, "wait_for_connection") as wait_for_connection:
        client._ws_send_and_receive()
        # a timeout while receiving is expected; no reconnect must happen
        wait_for_connection.assert_not_called()
def test_stddaq_client_ws_update_loop(client):
    """
    One iteration of the websocket update loop must wait for the server and
    perform a single send/receive cycle before the shutdown event stops it.
    """
    client._shutdown_event = mock.MagicMock()
    # first is_set() check lets one iteration run; the second stops the loop
    client._shutdown_event.is_set.side_effect = [False, True]
    with (
        mock.patch.object(client, "_ws_send_and_receive") as ws_send_and_receive,
        mock.patch.object(client, "_wait_for_server_running") as wait_for_server_running,
    ):
        client._ws_update_loop()
        ws_send_and_receive.assert_called_once()
        wait_for_server_running.assert_called_once()
# def test_stddaq_client_update_config(client, full_config):
# """
# Test that the update_config method updates the configuration with the provided dictionary.
# """
# config = full_config
# with requests_mock.Mocker() as m:
# m.get("http://localhost:5000/api/config/get?user=ioc", json=config.model_dump())
# # Update the configuration
# update_dict = {"detector_name": "new_name"}
# with mock.patch.object(client, "set_config") as set_config:
# client.update_config(update_dict)
# assert set_config.call_count == 1
# def test_stddaq_client_updates_only_changed_configs(client, full_config):
# """
# Test that the update_config method only updates the configuration if the config has changed.
# """
# config = full_config
# with requests_mock.Mocker() as m:
# m.get("http://localhost:5000/api/config/get?user=ioc", json=config.model_dump())
# # Update the configuration
# update_dict = {"detector_name": "tomcat-gf"}
# with mock.patch.object(client, "set_config") as set_config:
# client.update_config(update_dict)
# assert set_config.call_count == 0
# def test_stddaq_client_updates_only_changed_configs_empty(client, full_config):
# """
# Test that the update_config method only updates the configuration if the config has changed.
# """
# config = full_config
# with requests_mock.Mocker() as m:
# m.get("http://localhost:5000/api/config/get?user=ioc", json=config.model_dump())
# # Update the configuration
# update_dict = {}
# with mock.patch.object(client, "set_config") as set_config:
# client.update_config(update_dict)
# assert set_config.call_count == 0
# def test_stddaq_client_pre_restart(client):
# """
# Test that the pre_restart method sets the status to RESTARTING.
# """
# # let's assume the websocket loop is already idle
# client._ws_idle_event.set()
# client.ws_client = mock.MagicMock()
# client._pre_restart()
# client.ws_client.close.assert_called_once()
# def test_stddaq_client_post_restart(client):
# """
# Test that the post_restart method sets the status to IDLE.
# """
# with mock.patch.object(client, "wait_for_connection") as wait_for_connection:
# client._post_restart()
# wait_for_connection.assert_called_once()
# assert client._daq_is_running.is_set()
# def test_stddaq_client_reset(client):
# """
# Test that the reset method calls get_config and set_config.
# """
# with (
# mock.patch.object(client, "get_config") as get_config,
# mock.patch.object(client, "set_config") as set_config,
# ):
# client.reset()
# get_config.assert_called_once()
# set_config.assert_called_once()
# def test_stddaq_client_run_status_callbacks(client):
# """
# Test that the run_status_callback method runs the status callback.
# """
# status = StatusBase()
# client.add_status_callback(status, success=[StdDaqStatus.FILE_CREATED], error=[])
# client._status = StdDaqStatus.FILE_CREATED
# client._run_status_callbacks()
# status.wait()
# assert len(status._callbacks) == 0
# def test_stddaq_client_run_status_callbacks_error(client):
# """
# Test that the run_status_callback method runs the status callback.
# """
# status = StatusBase()
# client.add_status_callback(status, success=[], error=[StdDaqStatus.FILE_CREATED])
# client._status = StdDaqStatus.FILE_CREATED
# client._run_status_callbacks()
# with pytest.raises(StdDaqError):
# status.wait()
# assert len(status._callbacks) == 0
# @pytest.mark.parametrize(
# "msg, updated",
# [({"status": "IDLE"}, False), (json.dumps({"status": "waiting_for_first_image"}), True)],
# )
# def test_stddaq_client_on_received_ws_message(client, msg, updated):
# """
# Test that the on_received_ws_message method runs the status callback.
# """
# client._status = None
# with mock.patch.object(client, "_run_status_callbacks") as run_status_callbacks:
# client._on_received_ws_message(msg)
# if updated:
# run_status_callbacks.assert_called_once()
# assert client._status == StdDaqStatus.WAITING_FOR_FIRST_IMAGE
# else:
# run_status_callbacks.assert_not_called()
# assert client._status is None
# def test_stddaq_client_ws_send_and_receive(client):
# client.ws_client = mock.MagicMock()
# client._send_queue.put({"command": "test"})
# client._ws_send_and_receive()
# # queue is not empty, so we should send the message
# client.ws_client.send.assert_called_once()
# client.ws_client.recv.assert_called_once()
# client.ws_client.reset_mock()
# client._ws_send_and_receive()
# # queue is empty, so we should not send the message
# client.ws_client.send.assert_not_called()
# client.ws_client.recv.assert_called_once()
# def test_stddaq_client_ws_send_and_receive_websocket_error(client):
# """
# Test that the ws_send_and_receive method handles websocket errors.
# """
# client.ws_client = mock.MagicMock()
# client.ws_client.send.side_effect = WebSocketException()
# client._send_queue.put({"command": "test"})
# with mock.patch.object(client, "wait_for_connection") as wait_for_connection:
# client._ws_send_and_receive()
# wait_for_connection.assert_called_once()
# def test_stddaq_client_ws_send_and_receive_timeout_error(client):
# """
# Test that the ws_send_and_receive method handles timeout errors.
# """
# client.ws_client = mock.MagicMock()
# client.ws_client.recv.side_effect = TimeoutError()
# client._send_queue.put({"command": "test"})
# with mock.patch.object(client, "wait_for_connection") as wait_for_connection:
# client._ws_send_and_receive()
# wait_for_connection.assert_not_called()
# def test_stddaq_client_ws_update_loop(client):
# """
# Test that the ws_update_loop method runs the status callback.
# """
# client._shutdown_event = mock.MagicMock()
# client._shutdown_event.is_set.side_effect = [False, True]
# with (
# mock.patch.object(client, "_ws_send_and_receive") as ws_send_and_receive,
# mock.patch.object(client, "_wait_for_server_running") as wait_for_server_running,
# ):
# client._ws_update_loop()
# ws_send_and_receive.assert_called_once()
# wait_for_server_running.assert_called_once()

View File

@@ -34,6 +34,3 @@ to setup the prompts.
"""
# pylint: disable=invalid-name, unused-import, import-error, undefined-variable, unused-variable, unused-argument, no-name-in-module
bec._ip.prompts.session_name = "TOMCAT"
bec._ip.prompts.status = 1

View File

@@ -3,12 +3,8 @@ Pre-startup script for BEC client. This script is executed before the BEC client
is started. It can be used to add additional command line arguments.
"""
import os
from bec_lib.service_config import ServiceConfig
import tomcat_bec
def extend_command_line_args(parser):
"""
@@ -20,13 +16,8 @@ def extend_command_line_args(parser):
return parser
def get_config() -> ServiceConfig:
"""
Create and return the ServiceConfig for the plugin repository
"""
deployment_path = os.path.dirname(os.path.dirname(os.path.dirname(tomcat_bec.__file__)))
files = os.listdir(deployment_path)
if "bec_config.yaml" in files:
return ServiceConfig(config_path=os.path.join(deployment_path, "bec_config.yaml"))
else:
return ServiceConfig(redis={"host": "localhost", "port": 6379})
# def get_config() -> ServiceConfig:
# """
# Create and return the service configuration.
# """
# return ServiceConfig(redis={"host": "localhost", "port": 6379})

View File

@@ -1,23 +1,3 @@
gfcam:
description: GigaFrost camera client
deviceClass: tomcat_bec.devices.GigaFrostCamera
deviceConfig:
prefix: "X02DA-CAM-GF2:"
backend_url: "http://sls-daq-001:8080"
auto_soft_enable: true
std_daq_live: "tcp://129.129.95.111:20000"
std_daq_ws: "ws://129.129.95.111:8080"
std_daq_rest: "http://129.129.95.111:5000"
deviceTags:
- camera
- trigger
- gfcam
enabled: true
onFailure: buffer
readOnly: false
readoutPriority: monitored
softwareTrigger: true
fe_sldi_centerx_mm:
description: FE slit horizontal center position in mm
deviceClass: ophyd.EpicsMotor
@@ -162,47 +142,3 @@ fe_sldi_tryt:
readOnly: false
readoutPriority: baseline
softwareTrigger: false
op_fi1:
description: Optics Filter 1
deviceClass: ophyd.EpicsMotor
deviceConfig:
prefix: X02DA-OP-FI1:TRY
deviceTags:
- ??
enabled: true
onFailure: buffer
readOnly: false
readoutPriority: baseline
softwareTrigger: false
op_fi2:
description: Optics Filter 2
deviceClass: ophyd.EpicsMotor
deviceConfig:
prefix: X02DA-OP-FI2:TRY
deviceTags:
- ??
enabled: true
onFailure: buffer
readOnly: false
readoutPriority: baseline
softwareTrigger: false
# pcocam:
# description: PCO.edge camera client
# deviceClass: tomcat_bec.devices.PcoEdge5M
# deviceConfig:
# prefix: 'X02DA-CCDCAM2:'
# std_daq_live: 'tcp://129.129.95.111:20010'
# std_daq_ws: 'ws://129.129.95.111:8081'
# std_daq_rest: 'http://129.129.95.111:5010'
# deviceTags:
# - camera
# - trigger
# - pcocam
# enabled: true
# onFailure: buffer
# readOnly: false
# readoutPriority: monitored
# softwareTrigger: true

View File

@@ -40,7 +40,7 @@ femto_mean_curr:
es1_roty:
readoutPriority: monitored
description: "Test rotation stage"
description: 'Test rotation stage'
deviceClass: ophyd.EpicsMotor
deviceConfig:
prefix: X02DA-ES1-SMP1:ROTY
@@ -49,11 +49,11 @@ es1_roty:
onFailure: buffer
enabled: true
readOnly: false
softwareTrigger: false
softwareTrigger: false
es1_trx:
readoutPriority: monitored
description: "Test translation stage"
description: 'Test translation stage'
deviceClass: ophyd.EpicsMotor
deviceConfig:
prefix: X02DA-ES1-SMP1:TRX
@@ -62,15 +62,15 @@ es1_trx:
onFailure: buffer
enabled: true
readOnly: false
softwareTrigger: false
softwareTrigger: false
es1_ismc:
description: "Automation1 iSMC interface"
deviceClass: tomcat_bec.devices.aa1Controller
deviceConfig:
prefix: "X02DA-ES1-SMP1:CTRL:"
description: 'Automation1 iSMC interface'
deviceClass: tomcat_bec.devices.aa1Controller
deviceConfig:
prefix: 'X02DA-ES1-SMP1:CTRL:'
deviceTags:
- es1
- es1
enabled: true
onFailure: buffer
readOnly: false
@@ -78,23 +78,24 @@ es1_ismc:
softwareTrigger: false
es1_tasks:
description: "Automation1 task management interface"
description: 'Automation1 task management interface'
deviceClass: tomcat_bec.devices.aa1Tasks
deviceConfig:
prefix: "X02DA-ES1-SMP1:TASK:"
prefix: 'X02DA-ES1-SMP1:TASK:'
deviceTags:
- es1
- es1
enabled: false
onFailure: buffer
readOnly: false
readoutPriority: monitored
softwareTrigger: false
es1_psod:
description: "AA1 PSO output interface (trigger)"
deviceClass: tomcat_bec.devices.aa1AxisPsoDistance
description: 'AA1 PSO output interface (trigger)'
deviceClass: tomcat_bec.devices.aa1AxisPsoDistance
deviceConfig:
prefix: "X02DA-ES1-SMP1:ROTY:PSO:"
prefix: 'X02DA-ES1-SMP1:ROTY:PSO:'
deviceTags:
- es1
enabled: true
@@ -103,11 +104,12 @@ es1_psod:
readoutPriority: monitored
softwareTrigger: false
es1_ddaq:
description: "Automation1 position recording interface"
description: 'Automation1 position recording interface'
deviceClass: tomcat_bec.devices.aa1AxisDriveDataCollection
deviceConfig:
prefix: "X02DA-ES1-SMP1:ROTY:DDC:"
prefix: 'X02DA-ES1-SMP1:ROTY:DDC:'
deviceTags:
- es1
enabled: true
@@ -116,6 +118,7 @@ es1_ddaq:
readoutPriority: monitored
softwareTrigger: false
#camera:
# description: Grashopper Camera
# deviceClass: tomcat_bec.devices.GrashopperTOMCAT
@@ -129,6 +132,7 @@ es1_ddaq:
# readoutPriority: monitored
# softwareTrigger: true
# gfcam:
# description: GigaFrost camera client
# deviceClass: tomcat_bec.devices.GigaFrostCamera
@@ -146,25 +150,27 @@ es1_ddaq:
# readoutPriority: monitored
# softwareTrigger: true
gfcam:
description: GigaFrost camera client
deviceClass: tomcat_bec.devices.GigaFrostCamera
deviceConfig:
prefix: "X02DA-CAM-GF2:"
backend_url: "http://sls-daq-001:8080"
prefix: 'X02DA-CAM-GF2:'
backend_url: 'http://sls-daq-001:8080'
auto_soft_enable: true
std_daq_live: "tcp://129.129.95.111:20000"
std_daq_ws: "ws://129.129.95.111:8080"
std_daq_rest: "http://129.129.95.111:5000"
std_daq_live: 'tcp://129.129.95.111:20000'
std_daq_ws: 'ws://129.129.95.111:8080'
std_daq_rest: 'http://129.129.95.111:5000'
deviceTags:
- camera
- trigger
- gfcam
- camera
- trigger
- gfcam
enabled: true
onFailure: buffer
readOnly: false
readoutPriority: monitored
softwareTrigger: true
# gfdaq:
# description: GigaFrost stdDAQ client
# deviceClass: tomcat_bec.devices.StdDaqClient
@@ -195,6 +201,7 @@ gfcam:
# readoutPriority: monitored
# softwareTrigger: false
# pcocam:
# description: PCO.edge camera client
# deviceClass: tomcat_bec.devices.PcoEdge5M
@@ -210,23 +217,24 @@ gfcam:
# readoutPriority: monitored
# softwareTrigger: true
# pcocam:
# description: PCO.edge camera client
# deviceClass: tomcat_bec.devices.PcoEdge5M
# deviceConfig:
# prefix: 'X02DA-CCDCAM2:'
# std_daq_live: 'tcp://129.129.95.111:20010'
# std_daq_ws: 'ws://129.129.95.111:8081'
# std_daq_rest: 'http://129.129.95.111:5010'
# deviceTags:
# - camera
# - trigger
# - pcocam
# enabled: true
# onFailure: buffer
# readOnly: false
# readoutPriority: monitored
# softwareTrigger: true
pcocam:
description: PCO.edge camera client
deviceClass: tomcat_bec.devices.PcoEdge5M
deviceConfig:
prefix: 'X02DA-CCDCAM2:'
std_daq_live: 'tcp://129.129.95.111:20010'
std_daq_ws: 'ws://129.129.95.111:8081'
std_daq_rest: 'http://129.129.95.111:5010'
deviceTags:
- camera
- trigger
- pcocam
enabled: true
onFailure: buffer
readOnly: false
readoutPriority: monitored
softwareTrigger: true
# pcodaq:
# description: GigaFrost stdDAQ client

View File

@@ -21,7 +21,7 @@ eyex:
# onFailure: buffer
# enabled: true
# readOnly: false
# softwareTrigger: false
# softwareTrigger: false
# eyez:
# readoutPriority: baseline
# description: X-ray eye axis Z
@@ -50,7 +50,7 @@ femto_mean_curr:
es1_roty:
readoutPriority: baseline
description: "Test rotation stage"
description: 'Test rotation stage'
deviceClass: tomcat_bec.devices.psimotor.EpicsMotorMR
deviceConfig:
prefix: X02DA-ES1-SMP1:ROTY
@@ -59,15 +59,15 @@ es1_roty:
onFailure: buffer
enabled: false
readOnly: false
softwareTrigger: false
softwareTrigger: false
es1_ismc:
description: "Automation1 iSMC interface"
deviceClass: tomcat_bec.devices.aa1Controller
deviceConfig:
prefix: "X02DA-ES1-SMP1:CTRL:"
description: 'Automation1 iSMC interface'
deviceClass: tomcat_bec.devices.aa1Controller
deviceConfig:
prefix: 'X02DA-ES1-SMP1:CTRL:'
deviceTags:
- es1
- es1
enabled: true
onFailure: buffer
readOnly: false
@@ -75,44 +75,47 @@ es1_ismc:
softwareTrigger: false
es1_tasks:
description: "Automation1 task management interface"
deviceClass: tomcat_bec.devices.aa1Tasks
deviceConfig:
prefix: "X02DA-ES1-SMP1:TASK:"
description: 'Automation1 task management interface'
deviceClass: tomcat_bec.devices.aa1Tasks
deviceConfig:
prefix: 'X02DA-ES1-SMP1:TASK:'
deviceTags:
- es1
- es1
enabled: false
onFailure: buffer
readOnly: false
readoutPriority: monitored
softwareTrigger: false
es1_psod:
description: "AA1 PSO output interface (trigger)"
deviceClass: tomcat_bec.devices.aa1AxisPsoDistance
description: 'AA1 PSO output interface (trigger)'
deviceClass: tomcat_bec.devices.aa1AxisPsoDistance
deviceConfig:
prefix: "X02DA-ES1-SMP1:ROTY:PSO:"
prefix: 'X02DA-ES1-SMP1:ROTY:PSO:'
deviceTags:
- es1
- es1
enabled: true
onFailure: buffer
readOnly: false
readoutPriority: monitored
softwareTrigger: true
es1_ddaq:
description: "Automation1 position recording interface"
deviceClass: tomcat_bec.devices.aa1AxisDriveDataCollection
description: 'Automation1 position recording interface'
deviceClass: tomcat_bec.devices.aa1AxisDriveDataCollection
deviceConfig:
prefix: "X02DA-ES1-SMP1:ROTY:DDC:"
prefix: 'X02DA-ES1-SMP1:ROTY:DDC:'
deviceTags:
- es1
- es1
enabled: false
onFailure: buffer
readOnly: false
readoutPriority: monitored
softwareTrigger: false
#camera:
# description: Grashopper Camera
# deviceClass: tomcat_bec.devices.GrashopperTOMCAT
@@ -130,53 +133,54 @@ gfcam:
description: GigaFrost camera client
deviceClass: tomcat_bec.devices.GigaFrostCamera
deviceConfig:
prefix: "X02DA-CAM-GF2:"
backend_url: "http://sls-daq-001:8080"
prefix: 'X02DA-CAM-GF2:'
backend_url: 'http://sls-daq-001:8080'
auto_soft_enable: true
deviceTags:
- camera
- trigger
- camera
- trigger
enabled: true
onFailure: buffer
readOnly: false
readoutPriority: monitored
softwareTrigger: true
# gfdaq:
# description: GigaFrost stdDAQ client
# deviceClass: tomcat_bec.devices.StdDaqClient
# deviceConfig:
# ws_url: 'ws://129.129.95.111:8080'
# rest_url: 'http://129.129.95.111:5000'
# deviceTags:
# - std-daq
# enabled: true
# onFailure: buffer
# readOnly: false
# readoutPriority: monitored
# softwareTrigger: false
# daq_stream0:
# description: stdDAQ preview (2 every 555)
# deviceClass: tomcat_bec.devices.StdDaqPreviewDetector
# deviceConfig:
# url: 'tcp://129.129.95.111:20000'
# deviceTags:
# - std-daq
# enabled: true
# onFailure: buffer
# readOnly: false
# readoutPriority: monitored
# softwareTrigger: false
gfdaq:
description: GigaFrost stdDAQ client
deviceClass: tomcat_bec.devices.StdDaqClient
deviceConfig:
ws_url: 'ws://129.129.95.111:8080'
rest_url: 'http://129.129.95.111:5000'
deviceTags:
- std-daq
enabled: true
onFailure: buffer
readOnly: false
readoutPriority: monitored
softwareTrigger: false
# daq_stream1:
# description: stdDAQ preview (1 at 5 Hz)
# deviceClass: tomcat_bec.devices.StdDaqPreviewDetector
# deviceConfig:
# url: 'tcp://129.129.95.111:20001'
# deviceTags:
# - std-daq
# enabled: true
# onFailure: buffer
# readOnly: false
# readoutPriority: monitored
# softwareTrigger: false
daq_stream0:
description: stdDAQ preview (2 every 555)
deviceClass: tomcat_bec.devices.StdDaqPreviewDetector
deviceConfig:
url: 'tcp://129.129.95.111:20000'
deviceTags:
- std-daq
enabled: true
onFailure: buffer
readOnly: false
readoutPriority: monitored
softwareTrigger: false
daq_stream1:
description: stdDAQ preview (1 at 5 Hz)
deviceClass: tomcat_bec.devices.StdDaqPreviewDetector
deviceConfig:
url: 'tcp://129.129.95.111:20001'
deviceTags:
- std-daq
enabled: true
onFailure: buffer
readOnly: false
readoutPriority: monitored
softwareTrigger: false

View File

@@ -7,6 +7,8 @@ from .aerotech import (
aa1Tasks,
)
from .gigafrost.gigafrostcamera import GigaFrostCamera
from .gigafrost.pcoedgecamera import PcoEdge5M
from .gigafrost.stddaq_client import StdDaqClient
from .gigafrost.stddaq_preview import StdDaqPreviewDetector
from .grashopper_tomcat import GrashopperTOMCAT
from .pco_edge.pcoedgecamera import PcoEdge5M
from .psimotor import EpicsMotorEC, EpicsMotorMR

View File

@@ -35,22 +35,21 @@ class aa1Controller(Device):
"""Ophyd proxy class for the Aerotech Automation 1's core controller functionality"""
# ToDo: Add error subscription
controllername = Component(EpicsSignalRO, "NAME", string=True, kind=Kind.config)
serialnumber = Component(EpicsSignalRO, "SN", string=True, kind=Kind.config)
apiversion = Component(EpicsSignalRO, "API_VERSION", string=True, kind=Kind.config)
controllername = Component(EpicsSignalRO, "NAME", kind=Kind.config)
serialnumber = Component(EpicsSignalRO, "SN", kind=Kind.config)
apiversion = Component(EpicsSignalRO, "API_VERSION", kind=Kind.config)
axiscount = Component(EpicsSignalRO, "AXISCOUNT", kind=Kind.config)
taskcount = Component(EpicsSignalRO, "TASKCOUNT", kind=Kind.config)
fastpoll = Component(EpicsSignalRO, "POLLTIME", auto_monitor=True, kind=Kind.normal)
slowpoll = Component(EpicsSignalRO, "DRVPOLLTIME", auto_monitor=True, kind=Kind.normal)
errno = Component(EpicsSignalRO, "ERRNO", auto_monitor=True, kind=Kind.normal)
errnmsg = Component(EpicsSignalRO, "ERRMSG", string=True, auto_monitor=True, kind=Kind.normal)
errnmsg = Component(EpicsSignalRO, "ERRMSG", auto_monitor=True, kind=Kind.normal)
_set_ismc = Component(EpicsSignal, "SET", put_complete=True, kind=Kind.omitted)
USER_ACCESS = ["reset"]
def reset(self):
"""Resets the Automation1 iSMC reloading the default configuration. Note that this will
erase all settings that were set during startup and not saved to the MCD file."""
""" Resets the Automation1 iSMC reloading the default configuration"""
self._set_ismc.set(3).wait()

View File

@@ -152,8 +152,9 @@ class aa1AxisDriveDataCollection(PSIDeviceBase, Device):
self.configure(d=d)
# Stage the data collection if not in internally launced mode
# NOTE: Scripted scans start acquiring from the scrits at kickoff
self.arm()
# NOTE: Scripted scans start acquiring from the scrits
if "scan_type" in scan_args and scan_args["scan_type"] in ("scripted", "script"):
self.arm()
# Reset readback
self.reset()

View File

@@ -39,7 +39,6 @@ class AerotechPsoBase(PSIDeviceBase, Device):
output = Component(EpicsSignalRO, "OUTPUT-RBV", auto_monitor=True, kind=Kind.normal)
address = Component(EpicsSignalRO, "ARRAY-ADDR", kind=Kind.config)
_eventSingle = Component(EpicsSignal, "EVENT:SINGLE", put_complete=True, kind=Kind.omitted)
switch = Component(EpicsSignal, "SWITCH", put_complete=True, kind=Kind.omitted)
# ########################################################################
# PSO Distance event module
@@ -120,6 +119,7 @@ class AerotechPsoBase(PSIDeviceBase, Device):
def fire(self, settle_time=0.1) -> None | DeviceStatus:
"""Fire a single PSO event (i.e. manual software trigger)"""
# Only trigger if distance was set to invalid
logger.warning(f"[{self.name}] Triggerin...")
self._eventSingle.set(1, settle_time=settle_time).wait()
status = DeviceStatus(self)
status.set_finished()
@@ -139,8 +139,6 @@ class AerotechPsoBase(PSIDeviceBase, Device):
w_pulse = d.get("pso_w_pulse", 200)
n_pulse = d.get("pso_n_pulse", 1)
self.switch.set("Manual").wait()
# Configure the pulsed/toggled waveform
if wmode in ["toggle", "toggled"]:
# Switching to simple toggle mode
@@ -163,6 +161,12 @@ class AerotechPsoBase(PSIDeviceBase, Device):
else:
raise RuntimeError(f"Unsupported window mode: {wmode}")
# Set PSO output data source
# FIXME : This is essentially staging...
if wmode in ["toggle", "toggled", "pulse", "pulsed"]:
self.outSource.set("Waveform").wait()
elif wmode in ["output", "flag"]:
self.outSource.set("Window").wait()
class aa1AxisPsoDistance(AerotechPsoBase):
@@ -223,6 +227,10 @@ class aa1AxisPsoDistance(AerotechPsoBase):
raise RuntimeError(f"Unsupported distace triggering mode: {pso_wavemode}")
old = self.read_configuration()
# Disable everything
self.winEvents.set("Off").wait()
self.dstCounterEna.set("Off").wait()
self.dstEventsEna.set("Off").wait()
# Configure distance generator (also resets counter to 0)
self._distance_value = pso_distance
@@ -280,25 +288,33 @@ class aa1AxisPsoDistance(AerotechPsoBase):
"""Fire a single PSO event (i.e. manual software trigger)"""
# Only trigger if distance was set to invalid
# if self.dstDistanceVal.get() == 0:
# logger.warning(f"[{self.name}] Triggerin...")
logger.warning(f"[{self.name}] Triggerin...")
return self.fire(settle_time)
def arm(self) -> None:
"""Bluesky style stage
It re-arms the distance array and re-sets the distance counter at current position
"""
"""Bluesky style stage"""
# Stage the PSO distance module and zero counter
if isinstance(self._distance_value, (np.ndarray, list, tuple)):
self.dstArrayRearm.set(1).wait()
# Set distance and wait for polling
self.switch.set("Distance", settle_time=0.2).wait()
# Wait for polling
sleep(0.5)
# Start monitoring the counters if distance is valid
if self.dstDistanceVal.get() > 0:
self.dstEventsEna.set("On").wait()
self.dstCounterEna.set("On").wait()
def disarm(self):
"""Standard bluesky unstage"""
self.switch.set("Manual", settle_time=0.2).wait()
def launch(self):
"""Re-set the counters"""
if isinstance(self._distance_value, (np.ndarray, list, tuple)):
self.dstArrayRearm.set(1).wait()
# Ensure output is set to low
# if self.output.value:
# self.toggle()
# Turn off window mode
self.winOutput.set("Off").wait()
self.winEvents.set("Off").wait()
# Turn off distance mode
self.dstEventsEna.set("Off").wait()
self.dstCounterEna.set("Off").wait()
# Disable output
self.outSource.set("None").wait()
# Sleep for one poll period
sleep(0.2)

View File

@@ -33,7 +33,7 @@ program
//////////////////////////////////////////////////////////////////////////
// Internal parameters - dont use
var $axis as axis = {{ scan.rotaxis or 'ROTY' }}
var $axis as axis = ROTY
var $ii as integer
var $axisFaults as integer = 0
var $iDdcSafeSpace as integer = 4096

View File

@@ -20,7 +20,7 @@ program
// Internal
var $axis as axis = {{ scan.rotaxis or 'ROTY' }}
var $axis as axis = ROTY
var $ii as integer
var $axisFaults as integer = 0

View File

@@ -5,7 +5,6 @@ interface.
@author: mohacsi_i
"""
import time
from ophyd import Device, Component, EpicsSignal, EpicsSignalRO, Kind
from ophyd.status import SubscriptionStatus
@@ -107,8 +106,12 @@ class aa1Tasks(PSIDeviceBase, Device):
self.fileName.set(d["script_file"]).wait()
if "script_text" in d:
# Compile text for syntax checking
# NOTE: This will overwrite 'script_file'
# NOTE: This will load to 'script_file'
self._fileWrite.set(d["script_text"], settle_time=0.2).wait()
self.switch.set("Load").wait()
# Check the result of load
if self._failure.value:
raise RuntimeError("Failed to load script, perhaps a syntax error?")
new = self.read_configuration()
return (old, new)
@@ -166,22 +169,8 @@ class aa1Tasks(PSIDeviceBase, Device):
"""Bluesky style stage, prepare, but does not execute"""
if self.taskIndex.get() in (0, 1, 2):
logger.error(f"[{self.name}] Loading AeroScript on system task. Daring today are we?")
if len(self.fileName.get())==0:
self.fileName.set("bec.ascript", settle_time=0.1).wait()
self._failure.read()
ts_old = float(self._failure.timestamp)
def wait_until_new(*_, old_value, value, timestamp, **__):
nonlocal ts_old
result = bool(ts_old != timestamp) and (value!=-1)
# print(f"{old_value}\t{value}\t{ts_old}\t{timestamp}\t{result}")
return result
# Subscribe and wait for update
status = SubscriptionStatus(self._failure, wait_until_new, settle_time=0.1)
# Load and check success
self.switch.set("Load", settle_time=0.0).wait()
status = self.switch.set("Load", settle_time=0.2)
status.wait()
if self._failure.value:
raise RuntimeError("Failed to load task, please check the Aerotech IOC")
@@ -202,14 +191,11 @@ class aa1Tasks(PSIDeviceBase, Device):
def complete(self) -> SubscriptionStatus:
"""Wait for a RUNNING task"""
# Sleep for foll period
time.sleep(1)
timestamp_ = 0
task_idx = int(self.taskIndex.get())
def not_running(*, value, timestamp, **_):
nonlocal timestamp_
# print(f"State {value[task_idx]} in {value}")
result = value[task_idx] not in ["Running", 4]
timestamp_ = timestamp
return result

View File

@@ -7,7 +7,3 @@ from .AerotechAutomation1 import (
aa1GlobalVariableBindings,
aa1AxisIo,
)
from .AerotechAutomation1Enums import (
DriveDataCaptureInput,
DriveDataCaptureTrigger,
)

File diff suppressed because it is too large Load Diff

View File

@@ -1,18 +1,13 @@
// Test program for simple zig-zag line scanning with PSO window output
// "enable" signal and DDC synchronized to external trigger input.
// Test program for zig-zag line scanning with PSO window output "enable"
// signal and DDC synchronized to external trigger input.
// The file expects external parameter validation
// The PSO locations arrays are set externally from EPICS PV
//
#define DDC_ADDR 0x800000
#define PSO_ADDR 0x0
enum ScanType
POS = 0
NEG = 1
POSNEG = 2
NEGPOS = 3
Pos = 0
Neg = 1
PosNeg = 2
NegPos = 3
end
@@ -20,98 +15,74 @@ program
//////////////////////////////////////////////////////////////////////////
// External parameters - USE THEESE
var $fStartPosition as real = {{ scan.startpos }}
var $fScanRange as real = {{ scan.scanrange }}
var $fScanRange as real = $fStartPosition + {{ scan.scanrange }}
var $iNumRepeat as integer = {{ scan.nrepeat }}
var $eScanType as ScanType = ScanType.{{ scan.scandir or 'POS' }}
var $iNumDdcRead as integer = {{ scan.npoints }}
var $eScanType as ScanType = ScanType.{{ scan.scandir or 'Pos' }}
var $fVelJog as real = {{ scan.jogvel or 200 }}
var $fVelScan as real = {{ scan.scanvel }}
var $fAcceleration = {{ scan.scanacc }}
var $fAccDistance as real = {{ scan.accdist }}
var $eDdcTrigger as DriveDataCaptureTrigger = DriveDataCaptureTrigger.{{ scan.psotrigger }}
var $fAcceleration = {{ scan.scanacc or 500 }}
var $fSafeDist = 10.0
//////////////////////////////////////////////////////////////////////////
// Internal parameters - dont use
var $axis as axis = {{ scan.rotaxis or 'ROTY' }}
var $ii as integer
var $axisFaults as integer = 0
var $axis as axis = ROTY
var $ii as integer
var $iDdcSafeSpace as integer = 4096
// Set acceleration
SetupAxisRampType($axis, RampType.Linear)
SetupAxisRampValue($axis, 0, $fAcceleration)
SetupAxisRampValue($axis,0,$fAcceleration)
var $fAccDistance as real = 0.5 * $fVelScan * $fVelScan / $fAcceleration + $fSafeDist
// set the actual scan range
var $fPosStart as real
var $fPosEnd as real
if $eScanType == ScanType.POS
if $eScanType == ScanType.Pos
$fPosStart = $fStartPosition - $fAccDistance
$fPosEnd = $fStartPosition + $fScanRange + $fAccDistance
elseif $eScanType == ScanType.NEG
elseif $eScanType == ScanType.Neg
$fPosStart = $fStartPosition + $fAccDistance
$fPosEnd = $fStartPosition - $fScanRange - $fAccDistance
elseif $eScanType == ScanType.POSNEG
elseif $eScanType == ScanType.PosNeg
$fPosStart = $fStartPosition - $fAccDistance
$fPosEnd = $fStartPosition + $fScanRange + $fAccDistance
elseif $eScanType == ScanType.NEGPOS
elseif $eScanType == ScanType.NegPos
$fPosStart = $fStartPosition + $fAccDistance
$fPosEnd = $fStartPosition - $fScanRange - $fAccDistance
end
// Move to start position before the scan
// NOTE: Also wait for GigaFrost to start, otherwise early triggers might be missed
MoveAbsolute($axis, $fPosStart, $fVelJog)
WaitForInPosition($axis)
Dwell(2)
// Set globals for feedback
$rglobal[2] = $fPosStart
$rglobal[3] = $fPosEnd
// Configure PSO
// FIXME : When the controller is restarted
PsoDistanceConfigureInputs($axis, [PsoDistanceInput.XC4PrimaryFeedback])
PsoDistanceCounterOff($axis)
PsoDistanceEventsOff($axis)
PsoWindowConfigureEvents($axis, PsoWindowEventMode.None)
PsoWaveformOff($axis)
// Simple PSO trigger pattern
var $iPsoArrayPosAddr as integer = PSO_ADDR
var $iPsoArrayPos[] as real = [{% for psoDist in scan.psoBoundsPos[:-1] %} UnitsToCounts($axis, {{ psoDist }}), {% endfor %} UnitsToCounts($axis, {{ scan.psoBoundsPos[-1] }}) ]
DriveArrayWrite($axis, $iPsoArrayPos, $iPsoArrayPosAddr, length($iPsoArrayPos), DriveArrayType.PsoDistanceEventDistances)
{% if scan.psoBoundsNeg|length > 0 %}
var $iPsoArrayNegAddr as integer = ($iPsoArrayPosAddr + length($iPsoArrayPos)) * 4
var $iPsoArrayNeg[] as real = [{% for psoDist in scan.psoBoundsNeg[:-1] %} UnitsToCounts($axis, {{ psoDist }}), {% endfor %} UnitsToCounts($axis, {{ scan.psoBoundsNeg[-1] }}) ]
DriveArrayWrite($axis, $iPsoArrayNeg, $iPsoArrayNegAddr, length($iPsoArrayNeg), DriveArrayType.PsoDistanceEventDistances)
{% else %}
var $iPsoArrayNegAddr as integer = ($iPsoArrayPosAddr + length($iPsoArrayPos)) * 4
var $iPsoArrayNegSize as integer = 0
var $iPsoArrayNeg[] as real = []
// DriveArrayWrite($axis, $iPsoArrayNeg, $iPsoArrayNegAddr, length($iPsoArrayNeg), DriveArrayType.PsoDistanceEventDistances)
{% endif %}
PsoDistanceConfigureArrayDistances($axis, $iPsoArrayPosAddr, length($iPsoArrayPos), 0)
PsoDistanceCounterOn($axis)
PsoOutputConfigureSource($axis, PsoOutputSource.Waveform)
var $iPsoArrayAddr as integer = 0
var $iPsoArray[] as real = [UnitsToCounts($axis, $fAccDistance), UnitsToCounts($axis, $fScanRange)]
DriveArrayWrite($axis, $iPsoArray, $iPsoArrayAddr, length($iPsoArray), DriveArrayType.PsoDistanceEventDistances)
PsoDistanceConfigureArrayDistances($axis, $iPsoArrayAddr, 2, 0)
PsoDistanceEventsOn($axis)
PsoWaveformConfigureMode($axis, PsoWaveformMode.Toggle)
PsoWaveformOn($axis)
PsoOutputConfigureSource($axis, PsoOutputSource.Waveform)
PsoWaveformOn($axis)
// Configure Drive Data Collection
var $iDdcArraySize as integer = $iNumDdcRead
var $iDdcArrayAddr as integer = 8388608
var $iDdcArraySize as integer = 5000
DriveDataCaptureConfigureInput($axis, 0, DriveDataCaptureInput.PrimaryFeedback);
DriveDataCaptureConfigureInput($axis, 1, DriveDataCaptureInput.AnalogInput0 );
DriveDataCaptureConfigureTrigger($axis, 0, $eDdcTrigger );
DriveDataCaptureConfigureTrigger($axis, 1, $eDdcTrigger );
DriveDataCaptureConfigureTrigger($axis, 0, DriveDataCaptureTrigger.PsoOutput );
DriveDataCaptureConfigureTrigger($axis, 1, DriveDataCaptureTrigger.PsoOutput );
DriveDataCaptureConfigureArray($axis, 0, DDC_ADDR, $iDdcArraySize);
DriveDataCaptureConfigureArray($axis, 1, DDC_ADDR + $iDdcSafeSpace + 8 * $iDdcArraySize, $iDdcArraySize);
DriveDataCaptureConfigureArray($axis, 0, $iDdcArrayAddr, $iDdcArraySize);
DriveDataCaptureConfigureArray($axis, 1, $iDdcArrayAddr + $iDdcSafeSpace + 8 * $iDdcArraySize, $iDdcArraySize);
// Directly before scan
PsoDistanceCounterOn($axis)
@@ -121,44 +92,41 @@ program
///////////////////////////////////////////////////////////
// Start the actual scanning
///////////////////////////////////////////////////////////
for $ii = 0 to ($iNumRepeat-1)
PsoDistanceConfigureArrayDistances($axis, $iPsoArrayAddr, 2, 0)
if $eScanType == ScanType.POS || $eScanType == ScanType.NEG
PsoDistanceConfigureArrayDistances($axis, $iPsoArrayPosAddr, length($iPsoArrayPos), 0)
MoveAbsolute($axis, $fPosEnd, $fVelScan)
WaitForMotionDone($axis)
$axisFaults = StatusGetAxisItem($axis, AxisDataSignal.AxisFault)
if $axisFaults
TaskSetError(TaskGetIndex(), "AxisFault on axis ROTY")
end
elseif $eScanType == ScanType.POSNEG || $eScanType == ScanType.NEGPOS
for $ii = 0 to ($iNumRepeat-1)
// Feedback on progress
$rglobal[4] = $ii
if $eScanType == ScanType.Pos
MoveAbsolute($axis, $fPosEnd, $fVelScan)
WaitForInPosition($axis)
MoveAbsolute($axis, $fPosStart, $fVelScan)
WaitForInPosition($axis)
elseif $eScanType == ScanType.Neg
MoveAbsolute($axis, $fPosEnd, $fVelScan)
WaitForInPosition($axis)
MoveAbsolute($axis, $fPosStart, $fVelScan)
WaitForInPosition($axis)
elseif $eScanType == ScanType.PosNeg
if ($ii % 2) == 0
PsoDistanceConfigureArrayDistances($axis, $iPsoArrayPosAddr, length($iPsoArrayPos), 0)
MoveAbsolute($axis, $fPosEnd, $fVelScan)
WaitForInPosition($axis)
elseif ($ii % 2) == 1
PsoDistanceConfigureArrayDistances($axis, $iPsoArrayNegAddr, length($iPsoArrayNeg), 0)
MoveAbsolute($axis, $fPosStart, $fVelScan)
WaitForInPosition($axis)
end
WaitForMotionDone($axis)
$axisFaults = StatusGetAxisItem($axis, AxisDataSignal.AxisFault)
if $axisFaults
TaskSetError(TaskGetIndex(), "AxisFault on axis ROTY")
elseif $eScanType == ScanType.NegPos
if ($ii % 2) == 0
MoveAbsolute($axis, $fPosEnd, $fVelScan)
WaitForInPosition($axis)
elseif ($ii % 2) == 1
MoveAbsolute($axis, $fPosStart, $fVelScan)
WaitForInPosition($axis)
end
Dwell(0.2)
end
Dwell(0.2)
end
// Directly after scan
PsoDistanceCounterOff($axis)
DriveDataCaptureOff($axis, 0)
DriveDataCaptureOff($axis, 1)
// move back to start position
MoveAbsolute($axis, $fPosStart, $fVelJog)
WaitForInPosition($axis)
Dwell(2)
end

View File

@@ -1,5 +1,5 @@
"""
This module contains the PV definitions for the Gigafrost camera at Tomcat. It
This module contains the PV definitions for the Gigafrost camera at Tomcat. It
does not contain any logic to control the camera.
"""
@@ -7,57 +7,7 @@ from ophyd import Component as Cpt
from ophyd import Device, DynamicDeviceComponent, EpicsSignal, EpicsSignalRO, Kind, Signal
import tomcat_bec.devices.gigafrost.gfconstants as const
class GigaFrostSignalWithValidation(EpicsSignal):
"""
Custom EpicsSignal class that validates the value with the specified validator
before setting the value.
"""
def __init__(
self,
read_pv,
write_pv=None,
*,
put_complete=False,
string=False,
limits=False,
name=None,
validator=None,
**kwargs,
):
self._validator = validator
super().__init__(
read_pv,
write_pv,
put_complete=put_complete,
string=string,
limits=limits,
name=name,
**kwargs,
)
def check_value(self, value):
if self._validator is not None:
self._validator(value)
return super().check_value(value)
def check_image_width(value):
"""
The Gigafrost camera requires the image width to be a multiple of 48.
"""
if value % 48 != 0:
raise ValueError("Image width must be a multiple of 48")
def check_image_height(value):
"""
The Gigafrost camera requires the image height to be a multiple of 16.
"""
if value % 16 != 0:
raise ValueError("Image height must be a multiple of 16")
from tomcat_bec.devices.gigafrost.gfutils import extend_header_table
class GigaFrostBase(Device):
@@ -86,62 +36,51 @@ class GigaFrostBase(Device):
# pylint: disable=too-many-instance-attributes
busy_stat = Cpt(EpicsSignalRO, "BUSY_STAT", auto_monitor=True)
sync_flag = Cpt(EpicsSignalRO, "SYNC_FLAG", auto_monitor=True)
sync_swhw = Cpt(EpicsSignal, "SYNC_SWHW.PROC", put_complete=True, kind=Kind.omitted)
start_cam = Cpt(EpicsSignal, "START_CAM", put_complete=True, kind=Kind.omitted)
set_param = Cpt(EpicsSignal, "SET_PARAM.PROC", put_complete=True, kind=Kind.omitted)
acqmode = Cpt(EpicsSignal, "ACQMODE", auto_monitor=True, put_complete=True, kind=Kind.config)
# Standard camera configs
acquire = Cpt(EpicsSignal, "START_CAM", put_complete=True, kind=Kind.omitted)
acquire_time = Cpt(
EpicsSignal, "EXPOSURE", put_complete=True, auto_monitor=True, kind=Kind.config
)
acquire_period = Cpt(
EpicsSignal, "FRAMERATE", put_complete=True, auto_monitor=True, kind=Kind.config
)
num_exposures = Cpt(
EpicsSignal, "CNT_NUM", put_complete=True, auto_monitor=True, kind=Kind.config
)
array_size = DynamicDeviceComponent(
{
"array_size_x": (EpicsSignalRO, "ROIX", {"auto_monitor": True}),
"array_size_y": (EpicsSignalRO, "ROIY", {"auto_monitor": True}),
"array_size_x": (EpicsSignal, "ROIX", {"auto_monitor": True, "put_complete": True}),
"array_size_y": (EpicsSignal, "ROIY", {"auto_monitor": True, "put_complete": True}),
},
doc="Size of the array in the XY dimensions",
)
# UDP header
ports = Cpt(EpicsSignal, "PORTS", auto_monitor=True, put_complete=True, kind=Kind.config)
framenum = Cpt(EpicsSignal, "FRAMENUM", auto_monitor=True, put_complete=True, kind=Kind.config)
ht_offset = Cpt(
EpicsSignal, "HT_OFFSET", auto_monitor=True, put_complete=True, kind=Kind.config
)
write_srv = Cpt(
EpicsSignal, "WRITE_SRV.PROC", auto_monitor=True, put_complete=True, kind=Kind.omitted
)
# DAQ parameters
file_path = Cpt(Signal, kind=Kind.config, value="/gpfs/test/test-beamline")
file_prefix = Cpt(Signal, kind=Kind.config, value="scan_")
num_images = Cpt(Signal, kind=Kind.config, value=1000)
num_images_counter = Cpt(Signal, kind=Kind.hinted, value=0)
# Standard camera configs
exposure = Cpt(EpicsSignal, "EXPOSURE", put_complete=True, auto_monitor=True, kind=Kind.config)
framerate = Cpt(
EpicsSignal, "FRAMERATE", put_complete=True, auto_monitor=True, kind=Kind.config
)
roix = Cpt(
GigaFrostSignalWithValidation,
"ROIX",
put_complete=True,
auto_monitor=True,
kind=Kind.config,
validator=check_image_width,
)
roiy = Cpt(
GigaFrostSignalWithValidation,
"ROIY",
put_complete=True,
auto_monitor=True,
kind=Kind.config,
validator=check_image_height,
)
# GF specific interface
acquire_block = Cpt(Signal, kind=Kind.config, value=0)
busy_stat = Cpt(EpicsSignalRO, "BUSY_STAT", auto_monitor=True)
sync_flag = Cpt(EpicsSignalRO, "SYNC_FLAG", auto_monitor=True)
sync_swhw = Cpt(EpicsSignal, "SYNC_SWHW.PROC", put_complete=True, kind=Kind.omitted)
set_param = Cpt(EpicsSignal, "SET_PARAM.PROC", put_complete=True, kind=Kind.omitted)
acqmode = Cpt(EpicsSignal, "ACQMODE", put_complete=True, kind=Kind.config)
scan_id = Cpt(EpicsSignal, "SCAN_ID", put_complete=True, auto_monitor=True, kind=Kind.config)
cnt_num = Cpt(EpicsSignal, "CNT_NUM", put_complete=True, auto_monitor=True, kind=Kind.config)
corr_mode = Cpt(
EpicsSignal, "CORR_MODE", put_complete=True, auto_monitor=True, kind=Kind.config
)
# Software signals
soft_enable = Cpt(EpicsSignal, "SOFT_ENABLE", put_complete=True, auto_monitor=True)
soft_enable = Cpt(EpicsSignal, "SOFT_ENABLE", put_complete=True)
soft_trig = Cpt(EpicsSignal, "SOFT_TRIG.PROC", put_complete=True, kind=Kind.omitted)
soft_exp = Cpt(EpicsSignal, "SOFT_EXP", put_complete=True, auto_monitor=True)
soft_exp = Cpt(EpicsSignal, "SOFT_EXP", put_complete=True)
###############################################################################################
# Automatically set modes on camera init
auto_soft_enable = Cpt(Signal, kind=Kind.config, metadata={"write_access": False})
###############################################################################################
# Enable schemes
@@ -150,7 +89,6 @@ class GigaFrostBase(Device):
EpicsSignal,
"MODE_ENBL_EXP_RBV",
write_pv="MODE_ENBL_EXP",
auto_monitor=True,
put_complete=True,
kind=Kind.config,
)
@@ -159,7 +97,6 @@ class GigaFrostBase(Device):
EpicsSignal,
"MODE_ENBL_EXT_RBV",
write_pv="MODE_ENBL_EXT",
auto_monitor=True,
put_complete=True,
kind=Kind.config,
)
@@ -167,7 +104,6 @@ class GigaFrostBase(Device):
EpicsSignal,
"MODE_ENBL_SOFT_RBV",
write_pv="MODE_ENBL_SOFT",
auto_monitor=True,
put_complete=True,
kind=Kind.config,
)
@@ -175,7 +111,6 @@ class GigaFrostBase(Device):
EpicsSignal,
"MODE_ENBL_AUTO_RBV",
write_pv="MODE_ENBL_AUTO",
auto_monitor=True,
put_complete=True,
kind=Kind.config,
)
@@ -186,7 +121,6 @@ class GigaFrostBase(Device):
EpicsSignal,
"MODE_TRIG_EXT_RBV",
write_pv="MODE_TRIG_EXT",
auto_monitor=True,
put_complete=True,
kind=Kind.config,
)
@@ -194,7 +128,6 @@ class GigaFrostBase(Device):
EpicsSignal,
"MODE_TRIG_SOFT_RBV",
write_pv="MODE_TRIG_SOFT",
auto_monitor=True,
put_complete=True,
kind=Kind.config,
)
@@ -202,7 +135,6 @@ class GigaFrostBase(Device):
EpicsSignal,
"MODE_TRIG_TIMER_RBV",
write_pv="MODE_TRIG_TIMER",
auto_monitor=True,
put_complete=True,
kind=Kind.config,
)
@@ -210,7 +142,6 @@ class GigaFrostBase(Device):
EpicsSignal,
"MODE_TRIG_AUTO_RBV",
write_pv="MODE_TRIG_AUTO",
auto_monitor=True,
put_complete=True,
kind=Kind.config,
)
@@ -222,7 +153,6 @@ class GigaFrostBase(Device):
EpicsSignal,
"MODE_EXP_EXT_RBV",
write_pv="MODE_EXP_EXT",
auto_monitor=True,
put_complete=True,
kind=Kind.config,
)
@@ -230,7 +160,6 @@ class GigaFrostBase(Device):
EpicsSignal,
"MODE_EXP_SOFT_RBV",
write_pv="MODE_EXP_SOFT",
auto_monitor=True,
put_complete=True,
kind=Kind.config,
)
@@ -238,7 +167,6 @@ class GigaFrostBase(Device):
EpicsSignal,
"MODE_EXP_TIMER_RBV",
write_pv="MODE_EXP_TIMER",
auto_monitor=True,
put_complete=True,
kind=Kind.config,
)
@@ -250,31 +178,11 @@ class GigaFrostBase(Device):
EpicsSignal,
"CNT_STARTBIT_RBV",
write_pv="CNT_STARTBIT",
auto_monitor=True,
put_complete=True,
kind=Kind.config,
)
cnt_endbit = Cpt(
EpicsSignal,
"CNT_ENDBIT_RBV",
write_pv="CNT_ENDBIT",
auto_monitor=True,
put_complete=True,
kind=Kind.config,
)
# Line swap selection
ls_sw = Cpt(EpicsSignal, "LS_SW", put_complete=True, auto_monitor=True, kind=Kind.config)
ls_nw = Cpt(EpicsSignal, "LS_NW", put_complete=True, auto_monitor=True, kind=Kind.config)
ls_se = Cpt(EpicsSignal, "LS_SE", put_complete=True, auto_monitor=True, kind=Kind.config)
ls_ne = Cpt(EpicsSignal, "LS_NE", put_complete=True, auto_monitor=True, kind=Kind.config)
conn_parm = Cpt(
EpicsSignal,
"CONN_PARM",
string=True,
auto_monitor=True,
put_complete=True,
kind=Kind.config,
EpicsSignal, "CNT_ENDBIT_RBV", write_pv="CNT_ENDBIT", put_complete=True, kind=Kind.config
)
# HW settings as read only
@@ -289,32 +197,121 @@ class GigaFrostBase(Device):
bnc5_rbv = Cpt(EpicsSignalRO, "BNC5_RBV", auto_monitor=True, kind=Kind.config)
t_board = Cpt(EpicsSignalRO, "T_BOARD", auto_monitor=True)
auto_soft_enable = Cpt(Signal, kind=Kind.config)
backend_url = Cpt(Signal, kind=Kind.config)
### HW configuration parameters
# TODO: Only used at INIT, signals not needed
# UDP header configuration parameters
mac_north = Cpt(Signal, kind=Kind.config)
mac_south = Cpt(Signal, kind=Kind.config)
ip_north = Cpt(Signal, kind=Kind.config)
ip_south = Cpt(Signal, kind=Kind.config)
udp_backend_url = Cpt(Signal, kind=Kind.config, metadata={"write_access": False})
udp_ports = Cpt(EpicsSignal, "PORTS", put_complete=True, kind=Kind.config)
udp_framenum = Cpt(EpicsSignal, "FRAMENUM", put_complete=True, kind=Kind.config)
udp_ht_offset = Cpt(EpicsSignal, "HT_OFFSET", put_complete=True, kind=Kind.config)
udp_write_srv = Cpt(EpicsSignal, "WRITE_SRV.PROC", put_complete=True, kind=Kind.omitted)
conn_parm = Cpt(EpicsSignal, "CONN_PARM", string=True, put_complete=True, kind=Kind.config)
file_path = Cpt(Signal, kind=Kind.config, value="")
file_prefix = Cpt(Signal, kind=Kind.config, value="")
num_images = Cpt(Signal, kind=Kind.config, value=1)
# Line swap selection
ls_sw = Cpt(EpicsSignal, "LS_SW", put_complete=True, kind=Kind.config)
ls_nw = Cpt(EpicsSignal, "LS_NW", put_complete=True, kind=Kind.config)
ls_se = Cpt(EpicsSignal, "LS_SE", put_complete=True, kind=Kind.config)
ls_ne = Cpt(EpicsSignal, "LS_NE", put_complete=True, kind=Kind.config)
# pylint: disable=protected-access
def _define_backend_ip(self):
"""Select backend IP address for UDP stream"""
if self.backend_url.get() == const.BE3_DAFL_CLIENT: # xbl-daq-33
if self.udp_backend_url.get() == const.BE3_DAFL_CLIENT: # xbl-daq-33
return const.BE3_NORTH_IP, const.BE3_SOUTH_IP
if self.backend_url.get() == const.BE999_DAFL_CLIENT:
if self.udp_backend_url.get() == const.BE999_DAFL_CLIENT:
return const.BE999_NORTH_IP, const.BE999_SOUTH_IP
raise RuntimeError(f"Backend {self.backend_url.get()} not recognized.")
raise RuntimeError(f"Backend {self.udp_backend_url.get()} not recognized.")
def _define_backend_mac(self):
"""Select backend MAC address for UDP stream"""
if self.backend_url.get() == const.BE3_DAFL_CLIENT: # xbl-daq-33
if self.udp_backend_url.get() == const.BE3_DAFL_CLIENT: # xbl-daq-33
return const.BE3_NORTH_MAC, const.BE3_SOUTH_MAC
if self.backend_url.get() == const.BE999_DAFL_CLIENT:
if self.udp_backend_url.get() == const.BE999_DAFL_CLIENT:
return const.BE999_NORTH_MAC, const.BE999_SOUTH_MAC
raise RuntimeError(f"Backend {self.backend_url.get()} not recognized.")
raise RuntimeError(f"Backend {self.udp_backend_url.get()} not recognized.")
def _build_udp_header_table(self):
"""Build the header table for the UDP communication"""
udp_header_table = []
for i in range(0, 64, 1):
for j in range(0, 8, 1):
dest_port = 2000 + 8 * i + j
source_port = 3000 + j
if j < 4:
extend_header_table(
udp_header_table,
self.mac_south.get(),
self.ip_south.get(),
dest_port,
source_port,
)
else:
extend_header_table(
udp_header_table,
self.mac_north.get(),
self.ip_north.get(),
dest_port,
source_port,
)
return udp_header_table
def initialize_gigafrost(self) -> None:
"""Initialize the camera, set channel values"""
# Stop acquisition
self.acquire.set(0).wait()
# set entry to UDP table
# number of UDP ports to use
self.udp_ports.set(2).wait()
# number of images to send to each UDP port before switching to next
self.udp_framenum.set(5).wait()
# offset in UDP table - where to find the first entry
self.udp_ht_offset.set(0).wait()
# activate changes
self.udp_write_srv.set(1).wait()
# Configure triggering if needed
if self.auto_soft_enable.get():
# Set modes
# self.fix_nframes_mode = "start"
self.cnt_startbit.set(1).wait()
self.cnt_endbit.set(0).wait()
# self.enable_mode = "soft"
self.mode_enbl_ext.set(0).wait()
self.mode_endbl_soft.set(1).wait()
self.mode_enbl_auto.set(0).wait()
# self.trigger_mode = "auto"
self.mode_trig_auto.set(1).wait()
self.mode_trig_soft.set(0).wait()
self.mode_trig_timer.set(0).wait()
self.mode_trig_ext.set(0).wait()
# self.exposure_mode = "timer"
self.mode_exp_ext.set(0).wait()
self.mode_exp_soft.set(0).wait()
self.mode_exp_timer.set(1).wait()
# line swap - on for west, off for east
self.ls_sw.set(1).wait()
self.ls_nw.set(1).wait()
self.ls_se.set(0).wait()
self.ls_ne.set(0).wait()
# Commit parameters
self.set_param.set(1).wait()
# Initialize data backend
n, s = self._define_backend_ip()
self.ip_north.put(n, force=True)
self.ip_south.put(s, force=True)
n, s = self._define_backend_mac()
self.mac_north.put(n, force=True)
self.mac_south.put(s, force=True)
# Set udp header table (data communication parameters)
self.conn_parm.set(self._build_udp_header_table()).wait()

View File

@@ -1,36 +1,34 @@
from __future__ import annotations
# -*- coding: utf-8 -*-
"""
GigaFrost camera class module
import os
from typing import TYPE_CHECKING, Literal, cast
Created on Thu Jun 27 17:28:43 2024
@author: mohacsi_i
"""
from time import sleep, time
import numpy as np
from bec_lib.logger import bec_logger
from ophyd import Component as Cpt
from ophyd import DeviceStatus, Kind, Signal, StatusBase
from ophyd import DeviceStatus
from ophyd_devices.interfaces.base_classes.psi_device_base import PSIDeviceBase
from ophyd_devices.utils.bec_signals import PreviewSignal, ProgressSignal
import tomcat_bec.devices.gigafrost.gfconstants as const
from tomcat_bec.devices.gigafrost.gfutils import extend_header_table
from tomcat_bec.devices.gigafrost.gigafrost_base import GigaFrostBase
from tomcat_bec.devices.std_daq.std_daq_client import (
StdDaqClient,
StdDaqConfigPartial,
StdDaqStatus,
)
from tomcat_bec.devices.std_daq.std_daq_live_processing import StdDaqLiveProcessing
from tomcat_bec.devices.std_daq.std_daq_preview import StdDaqPreview
from tomcat_bec.devices.gigafrost.std_daq_preview import StdDaqPreview
from tomcat_bec.devices.gigafrost.std_daq_client import StdDaqClient, StdDaqStatus
if TYPE_CHECKING:
from bec_lib.devicemanager import DeviceManagerBase
logger = bec_logger.logger
def default_config() -> dict:
"""
Minimal configuration for the GigaFrost camera.
"""
return {"corr_mode": 5, "scan_id": 0} # default correction mode # default scan id
"""
TBD:
- Why is mode_enbl_exp not set during the enable_mode setter, only in the set_acquisition_mode?
- Why is set_acquisition_mode a method and not a property?
- When is a call to set_param necessary?
- Which access pattern is more common, setting the signal directly or using the method / property?
If the latter, we may want to change the inheritance structure to 'hide' the signals in a sub-component.
"""
class GigaFrostCamera(PSIDeviceBase, GigaFrostBase):
@@ -59,6 +57,7 @@ class GigaFrostCamera(PSIDeviceBase, GigaFrostBase):
# pylint: disable=too-many-instance-attributes
USER_ACCESS = [
"complete",
"exposure_mode",
"fix_nframes_mode",
"trigger_mode",
@@ -66,14 +65,13 @@ class GigaFrostCamera(PSIDeviceBase, GigaFrostBase):
"backend",
"acq_done",
"live_preview",
"live_processing",
"arm",
"disarm",
]
_initialized = False
analysis_signal = Cpt(Signal, name="analysis_signal", kind=Kind.hinted, doc="Analysis Signal")
analysis_signal2 = Cpt(Signal, name="analysis_signal2", kind=Kind.hinted, doc="Analysis Signal")
preview = Cpt(PreviewSignal, name="preview", ndim=2, doc="Preview signal of the gfcam")
progress = Cpt(ProgressSignal, name="progress")
# Placeholders for stdDAQ and livestream clients
backend = None
live_preview = None
def __init__(
self,
@@ -90,14 +88,8 @@ class GigaFrostCamera(PSIDeviceBase, GigaFrostBase):
std_daq_rest: str | None = None,
std_daq_ws: str | None = None,
std_daq_live: str | None = None,
device_manager: DeviceManagerBase | None = None,
**kwargs,
):
self.device_manager = device_manager
self._signals_to_be_set = {}
self._signals_to_be_set["auto_soft_enable"] = auto_soft_enable
self._signals_to_be_set["backend_url"] = backend_url
# super() will call the mixin class
super().__init__(
prefix=prefix,
@@ -109,83 +101,95 @@ class GigaFrostCamera(PSIDeviceBase, GigaFrostBase):
scan_info=scan_info,
**kwargs,
)
# Configure the stdDAQ client
if std_daq_rest is None or std_daq_ws is None:
raise ValueError("Both std_daq_rest and std_daq_ws must be provided")
self.live_processing = StdDaqLiveProcessing(
parent=self, signal=self.analysis_signal, signal2=self.analysis_signal2
)
self.backend = StdDaqClient(parent=self, ws_url=std_daq_ws, rest_url=std_daq_rest)
self.backend.add_count_callback(self._on_count_update)
self.live_preview = None
self.acq_configs = {}
# raise ValueError("Both std_daq_rest and std_daq_ws must be provided")
logger.error("No stdDAQ address provided, launching without data backend!")
else:
self.backend = StdDaqClient(parent=self, ws_url=std_daq_ws, rest_url=std_daq_rest)
# Configure image preview
if std_daq_live is not None:
self.live_preview = StdDaqPreview(url=std_daq_live, cb=self._on_preview_update)
else:
logger.error("No stdDAQ stream address provided, launching without preview!")
# Configure camera backend
self.auto_soft_enable.put(auto_soft_enable, force=True)
self.udp_backend_url.put(backend_url, force=True)
def configure(self, d: dict | None = None):
def configure(self, d: dict = None):
"""Configure the next scan with the GigaFRoST camera
Parameters as 'd' dictionary
----------------------------
num_images : int, optional
Number of images to be taken during each scan. Set to -1 for an
unlimited number of images (limited by the ringbuffer size and
backend speed). (default = 10)
num_exposures : int, optional
Number of images to be taken during each scan. Set to -1 for unlimited
number of images (limited by the ringbuffer size and backend speed).
exposure_time_ms : float, optional
Exposure time [ms]. (default = 0.2)
Exposure time [ms].
exposure_period_ms : float, optional
Exposure period [ms], ignored in soft trigger mode. (default = 1.0)
Exposure period [ms], ignored in soft trigger mode.
image_width : int, optional
ROI size in the x-direction [pixels] (default = 2016)
ROI size in the x-direction [pixels] (max. 2016)
image_height : int, optional
ROI size in the y-direction [pixels] (default = 2016)
ROI size in the y-direction [pixels] (max. 2016)
scanid : int, optional
Scan identification number to be associated with the scan data
(default = 0)
correction_mode : int, optional
The correction to be applied to the imaging data. The following
modes are available (default = 5):
* 0: Bypass. No corrections are applied to the data.
* 1: Send correction factor A instead of pixel values
* 2: Send correction factor B instead of pixel values
* 3: Send correction factor C instead of pixel values
* 4: Invert pixel values, but do not apply any linearity correction
* 5: Apply the full linearity correction
acq_mode : str, optional
Select one of the pre-configured trigger behavior
"""
if d is None:
return
# Stop acquisition
self.stop_camera().wait(timeout=10)
self.disarm()
backend_config = StdDaqConfigPartial(**d)
self.backend.update_config(backend_config)
# If Bluesky style configure
if d:
# Commonly changed settings
if "exposure_num_burst" in d:
self.num_exposures.set(d["exposure_num_burst"]).wait()
if "num_exposures" in d:
self.num_exposures.set(d["num_exposures"]).wait()
if "exposure_time_ms" in d:
self.acquire_time.set(d["exposure_time_ms"]).wait()
if "exposure_period_ms" in d:
self.acquire_period.set(d["exposure_period_ms"]).wait()
if "image_width" in d:
if d["image_width"] % 48 != 0:
raise RuntimeError(f"[{self.name}] image_width must be divisible by 48")
self.array_size.array_size_x.set(d["image_width"]).wait()
if "image_height" in d:
if d["image_height"] % 16 != 0:
raise RuntimeError(f"[{self.name}] image_height must be divisible by 16")
self.array_size.array_size_y.set(d["image_height"]).wait()
# Update all specified ophyd signals
config = default_config()
self.corr_mode.set(d.get("corr_mode", 5)).wait()
self.scan_id.set(d.get("scan_id", 0)).wait()
for key in self.component_names:
val = d.get(key)
if val is not None:
config[key] = val
# If a pre-configured acquisition mode is specified, set it
if "acq_mode" in d:
self.set_acquisition_mode(d["acq_mode"])
if d.get("exp_time", 0) > 0:
config["exposure"] = d["exp_time"] * 1000 # exposure time in ms
super().configure(config)
# If the acquisition mode is specified, set it
if "acq_mode" in d:
self.set_acquisition_mode(config["acq_mode"])
# Commit parameters
# Commit parameters to GigaFrost
self.set_param.set(1).wait()
def set_acquisition_mode(
self, acq_mode: Literal["default", "ext_enable", "soft", "ext", "external"]
):
# Backend stdDAQ configuration
if d and self.backend is not None:
daq_update = {}
if "image_height" in d:
daq_update["image_pixel_height"] = d["image_height"]
if "image_width" in d:
daq_update["image_pixel_width"] = d["image_width"]
if "bit_depth" in d:
daq_update["bit_depth"] = d["bit_depth"]
if "number_of_writers" in d:
daq_update["number_of_writers"] = d["number_of_writers"]
if daq_update:
self.backend.set_config(daq_update, force=False)
def set_acquisition_mode(self, acq_mode):
"""Set acquisition mode
Utility function to quickly select between pre-configured and tested
@@ -195,11 +199,10 @@ class GigaFrostCamera(PSIDeviceBase, GigaFrostBase):
supplied signal. Use external enable instead, that works!
"""
if acq_mode == "default":
# NOTE: Trigger using software events via softEnable (actually works)
if acq_mode in ["default", "step"]:
# NOTE: Software trigger via softEnable (actually works)
# Trigger parameters
self.fix_nframes_mode = "start"
# Switch to physical enable signal
self.mode_enbl_exp.set(0).wait()
@@ -267,35 +270,34 @@ class GigaFrostCamera(PSIDeviceBase, GigaFrostBase):
return None
@exposure_mode.setter
def exposure_mode(self, exp_mode: Literal["external", "timer", "soft"]):
def exposure_mode(self, mode):
"""Apply the exposure mode for the GigaFRoST camera.
Parameters
----------
exp_mode : {'external', 'timer', 'soft'}
mode : {'external', 'timer', 'soft'}
The exposure mode to be set.
"""
modes = {
"external": self.mode_exp_ext,
"timer": self.mode_exp_timer,
"soft": self.mode_exp_soft,
}
if exp_mode not in const.gf_valid_exposure_modes:
raise ValueError(
f"Invalid exposure mode! Valid modes are:\n{const.gf_valid_exposure_modes}"
)
for key, attr in modes.items():
# set the desired mode to 1, all others to 0
attr.set(int(key == exp_mode)).wait()
if mode == "external":
self.mode_exp_ext.set(1).wait()
self.mode_exp_soft.set(0).wait()
self.mode_exp_timer.set(0).wait()
elif mode == "timer":
self.mode_exp_ext.set(0).wait()
self.mode_exp_soft.set(0).wait()
self.mode_exp_timer.set(1).wait()
elif mode == "soft":
self.mode_exp_ext.set(0).wait()
self.mode_exp_soft.set(1).wait()
self.mode_exp_timer.set(0).wait()
else:
raise ValueError(f"Invalid exposure mode: {mode}!")
# Commit parameters
self.set_param.set(1).wait()
@property
def fix_nframes_mode(self) -> Literal["off", "start", "end", "start+end"] | None:
def fix_nframes_mode(self) -> str | None:
"""Return the current fixed number of frames mode of the GigaFRoST camera.
Returns
@@ -304,7 +306,7 @@ class GigaFrostCamera(PSIDeviceBase, GigaFrostBase):
The camera's active fixed number of frames mode.
"""
start_bit = self.cnt_startbit.get()
end_bit = self.cnt_endbit.get()
end_bit = self.cnt_startbit.get()
if not start_bit and not end_bit:
return "off"
@@ -318,7 +320,7 @@ class GigaFrostCamera(PSIDeviceBase, GigaFrostBase):
return None
@fix_nframes_mode.setter
def fix_nframes_mode(self, mode: Literal["off", "start", "end", "start+end"]):
def fix_nframes_mode(self, mode: str):
"""Apply the fixed number of frames settings to the GigaFRoST camera.
Parameters
@@ -326,29 +328,26 @@ class GigaFrostCamera(PSIDeviceBase, GigaFrostBase):
mode : {'off', 'start', 'end', 'start+end'}
The fixed number of frames mode to be applied.
"""
self._fix_nframes_mode = mode
if self._fix_nframes_mode == "off":
if mode == "off":
self.cnt_startbit.set(0).wait()
self.cnt_endbit.set(0).wait()
elif self._fix_nframes_mode == "start":
elif mode == "start":
self.cnt_startbit.set(1).wait()
self.cnt_endbit.set(0).wait()
elif self._fix_nframes_mode == "end":
elif mode == "end":
self.cnt_startbit.set(0).wait()
self.cnt_endbit.set(1).wait()
elif self._fix_nframes_mode == "start+end":
elif mode == "start+end":
self.cnt_startbit.set(1).wait()
self.cnt_endbit.set(1).wait()
else:
raise ValueError(
f"Invalid fixed frame number mode! Valid modes are: {const.gf_valid_fix_nframe_modes}"
)
raise ValueError(f"Invalid fixed frame number mode: {mode}!")
# Commit parameters
self.set_param.set(1).wait()
@property
def trigger_mode(self) -> Literal["auto", "external", "timer", "soft"] | None:
def trigger_mode(self) -> str | None:
"""Method to detect the current trigger mode set in the GigaFRost camera.
Returns
@@ -373,34 +372,43 @@ class GigaFrostCamera(PSIDeviceBase, GigaFrostBase):
return None
@trigger_mode.setter
def trigger_mode(self, mode: Literal["auto", "external", "timer", "soft"]):
def trigger_mode(self, mode: str):
"""
Set the trigger mode for the GigaFRoST camera.
Args:
mode(str): The trigger mode to be set. Valid arguments are: ['auto', 'external', 'timer', 'soft']
Parameters
----------
mode : {'auto', 'external', 'timer', 'soft'}
The trigger mode to be set.
"""
modes = {
"auto": self.mode_trig_auto,
"soft": self.mode_trig_soft,
"timer": self.mode_trig_timer,
"external": self.mode_trig_ext,
}
if mode not in modes:
raise ValueError(
"Invalid trigger mode! Valid modes are: ['auto', 'external', 'timer', 'soft']"
)
for key, attr in modes.items():
# set the desired mode to 1, all others to 0
attr.set(int(key == mode)).wait()
if mode == "auto":
self.mode_trig_auto.set(1).wait()
self.mode_trig_soft.set(0).wait()
self.mode_trig_timer.set(0).wait()
self.mode_trig_ext.set(0).wait()
elif mode == "soft":
self.mode_trig_auto.set(0).wait()
self.mode_trig_soft.set(1).wait()
self.mode_trig_timer.set(0).wait()
self.mode_trig_ext.set(0).wait()
elif mode == "timer":
self.mode_trig_auto.set(0).wait()
self.mode_trig_soft.set(0).wait()
self.mode_trig_timer.set(1).wait()
self.mode_trig_ext.set(0).wait()
elif mode == "external":
self.mode_trig_auto.set(0).wait()
self.mode_trig_soft.set(0).wait()
self.mode_trig_timer.set(0).wait()
self.mode_trig_ext.set(1).wait()
else:
raise ValueError(f"Invalid trigger mode: {mode}!")
# Commit parameters
self.set_param.set(1).wait()
@property
def enable_mode(self) -> Literal["soft", "external", "soft+ext", "always"] | None:
def enable_mode(self) -> str | None:
"""Return the enable mode set in the GigaFRoST camera.
Returns
@@ -422,7 +430,7 @@ class GigaFrostCamera(PSIDeviceBase, GigaFrostBase):
return None
@enable_mode.setter
def enable_mode(self, mode: Literal["soft", "external", "soft+ext", "always"]):
def enable_mode(self, mode: str):
"""
Set the enable mode for the GigaFRoST camera.
@@ -434,27 +442,17 @@ class GigaFrostCamera(PSIDeviceBase, GigaFrostBase):
The GigaFRoST enable mode. Valid arguments are:
* 'soft':
The GigaFRoST enable signal is supplied through a software
signal
The GigaFRoST enable signal is supplied through a software signal
* 'external':
The GigaFRoST enable signal is supplied through an external TTL
gating signal from the rotaiton stage or some other control
unit
The GigaFRoST enable signal is supplied through an external TTL gating
signal from the rotaiton stage or some other control unit
* 'soft+ext':
The GigaFRoST enable signal can be supplied either via the
software signal or externally. The two signals are combined
with a logical OR gate.
The GigaFRoST enable signal can be supplied either via the software signal
or externally. The two signals are combined with a logical OR gate.
* 'always':
The GigaFRoST is always enabled.
CAUTION: This mode is not compatible with the fixed number of
frames modes!
CAUTION: This mode is not compatible with the fixed number of frames modes!
"""
if mode not in const.gf_valid_enable_modes:
raise ValueError(
f"Invalid enable mode {mode}! Valid modes are:\n{const.gf_valid_enable_modes}"
)
if mode == "soft":
self.mode_enbl_ext.set(0).wait()
self.mode_endbl_soft.set(1).wait()
@@ -471,115 +469,30 @@ class GigaFrostCamera(PSIDeviceBase, GigaFrostBase):
self.mode_enbl_ext.set(0).wait()
self.mode_endbl_soft.set(0).wait()
self.mode_enbl_auto.set(1).wait()
else:
raise ValueError(f"Invalid enable mode {mode}!")
# Commit parameters
self.set_param.set(1).wait()
def set_idle(self):
def arm(self) -> None:
"""Prepare the camera to accept triggers"""
self.acquire.set(1).wait()
def disarm(self):
"""Set the camera to idle state"""
self.start_cam.set(0).wait()
self.acquire.set(0).wait()
if self.auto_soft_enable.get():
self.soft_enable.set(0).wait()
def initialize_gigafrost(self) -> None:
"""Initialize the camera, set channel values"""
# Stop acquisition
self.start_cam.set(0).wait()
# set entry to UDP table
# number of UDP ports to use
self.ports.set(2).wait()
# number of images to send to each UDP port before switching to next
self.framenum.set(5).wait()
# offset in UDP table - where to find the first entry
self.ht_offset.set(0).wait()
# activate changes
self.write_srv.set(1).wait()
# Configure software triggering if needed
if self.auto_soft_enable.get():
# trigger modes
self.cnt_startbit.set(1).wait()
self.cnt_endbit.set(0).wait()
# set modes
self.enable_mode = "soft"
self.trigger_mode = "auto"
self.exposure_mode = "timer"
# line swap - on for west, off for east
self.ls_sw.set(1).wait()
self.ls_nw.set(1).wait()
self.ls_se.set(0).wait()
self.ls_ne.set(0).wait()
# Commit parameters
self.set_param.set(1).wait()
# Initialize data backend
n, s = self._define_backend_ip()
self.ip_north.put(n, force=True)
self.ip_south.put(s, force=True)
n, s = self._define_backend_mac()
self.mac_north.put(n, force=True)
self.mac_south.put(s, force=True)
# Set udp header table
self.set_udp_header_table()
def set_udp_header_table(self):
"""Set the communication parameters for the camera module"""
self.conn_parm.set(self._build_udp_header_table()).wait()
def destroy(self):
self.backend.shutdown()
if self.live_preview:
self.live_preview.stop()
if self.backend is not None:
self.backend.shutdown()
super().destroy()
def _build_udp_header_table(self):
"""Build the header table for the UDP communication"""
udp_header_table = []
for i in range(0, 64, 1):
for j in range(0, 8, 1):
dest_port = 2000 + 8 * i + j
source_port = 3000 + j
if j < 4:
extend_header_table(
udp_header_table,
self.mac_south.get(),
self.ip_south.get(),
dest_port,
source_port,
)
else:
extend_header_table(
udp_header_table,
self.mac_north.get(),
self.ip_north.get(),
dest_port,
source_port,
)
return udp_header_table
def _on_preview_update(self, img: np.ndarray):
corrected_img = self.live_processing.apply_flat_dark_correction(img)
self.live_processing.on_new_data(corrected_img)
self.preview.put(corrected_img)
self._run_subs(sub_type=self.SUB_DEVICE_MONITOR_2D, obj=self, value=corrected_img)
def _on_count_update(self, count: int):
"""
Callback for the count update from the backend.
Updates the progress signal.
Args:
count (int): The current count of images acquired by the camera.
"""
expected_counts = cast(int, self.num_images.get())
self.progress.put(
value=count, max_value=expected_counts, done=bool(count == expected_counts)
)
def _on_preview_update(self, img: np.ndarray, header: dict):
"""Send preview stream and update frame index counter"""
self.num_images_counter.put(header["frame"], force=True)
self._run_subs(sub_type=self.SUB_DEVICE_MONITOR_2D, obj=self, value=img)
def acq_done(self) -> DeviceStatus:
"""
@@ -591,151 +504,16 @@ class GigaFrostCamera(PSIDeviceBase, GigaFrostBase):
DeviceStatus: The status of the acquisition
"""
status = DeviceStatus(self)
self.backend.add_status_callback(
status,
success=[StdDaqStatus.IDLE, StdDaqStatus.FILE_SAVED],
error=[StdDaqStatus.REJECTED, StdDaqStatus.ERROR],
)
return status
def restart_with_new_config(
self,
name: str,
file_path: str = "",
file_prefix: str = "",
num_images: int | None = None,
frames_per_trigger: int | None = None,
) -> DeviceStatus:
"""
Restart the camera with a new configuration.
This method allows to change the file path, file prefix, and number of images.
Args:
name (str): Name of the configuration to be saved.
file_path (str): New file path for the acquisition. If empty, the current file path is used.
file_prefix (str): New file prefix for the acquisition. If empty, the current file prefix is used.
num_images (int | None): New number of images to acquire. If None, the current number of images is used.
frames_per_trigger (int | None): New number of frames per trigger. If None, the current value is used.
Returns:
DeviceStatus: The status of the restart operation. It resolves when the camera is ready to receive the first image.
"""
self.acq_configs[name] = {}
conf = {}
if file_path:
self.acq_configs[name]["file_path"] = self.file_path.get()
conf["file_path"] = file_path
if file_prefix:
self.acq_configs[name]["file_prefix"] = self.file_prefix.get()
conf["file_prefix"] = file_prefix
if num_images is not None:
self.acq_configs[name]["num_images"] = self.num_images.get()
conf["num_images"] = num_images
if frames_per_trigger is not None:
self.acq_configs[name]["cnt_num"] = self.cnt_num.get()
conf["cnt_num"] = frames_per_trigger
# Stop the camera and wait for it to become idle
status = self.stop_camera()
status.wait(timeout=10)
# update the configuration
self.configure(conf)
# Restart the camera with the new configuration
return self.start_camera()
def restore_config(self, name: str) -> None:
"""
Restore a previously saved configuration and restart the camera.
Args:
name (str): Name of the configuration to restore.
"""
status = self.stop_camera()
status.wait(timeout=10)
config = self.acq_configs.pop(name, {})
self.configure(config)
def update_live_processing_reference(
self, reference_type: Literal["dark", "flat"]
) -> StatusBase:
"""
Update the flat or dark reference for the live processing.
Args:
reference_type (Literal["dark", "flat"]): Type of the reference to update.
If 'dark', the dark reference will be updated, if 'flat', the flat reference will be updated.
Returns:
StatusBase: The status of the update operation.
"""
if reference_type not in ["dark", "flat"]:
raise ValueError("Invalid reference type! Must be 'dark' or 'flat'.")
# Use the current acquisition to update the reference
if self.live_processing is None:
raise RuntimeError("Live processing is not available. Cannot update reference.")
status = self.live_processing.update_reference_with_file(
reference_type=reference_type,
file_path=self.target_file,
entry="tomcat-gf/data", # type: ignore
wait=False, # Do not wait for the update to finish
)
return status
def start_camera(self) -> DeviceStatus:
"""
Start the camera and the backend.
Returns:
DeviceStatus: The status of the startup. It resolves when the backend is ready to receive the first image.
"""
status = DeviceStatus(self)
self.backend.add_status_callback(
status,
success=[StdDaqStatus.WAITING_FOR_FIRST_IMAGE],
error=[StdDaqStatus.REJECTED, StdDaqStatus.ERROR],
)
self.backend.start(
file_path=self.file_path.get(), # type: ignore
file_prefix=self.file_prefix.get(), # type: ignore
num_images=self.num_images.get(), # type: ignore
)
self.start_cam.set(1).wait()
def _emit_file_event(_status: DeviceStatus):
"""
Emit a file event when the camera is ready.
"""
self._run_subs(
sub_type=self.SUB_FILE_EVENT,
file_path=self.target_file,
done=False,
successful=False,
hinted_h5_entries={"data": "data"},
if self.backend is not None:
self.backend.add_status_callback(
status,
success=[StdDaqStatus.IDLE, StdDaqStatus.FILE_SAVED],
error=[StdDaqStatus.REJECTED, StdDaqStatus.ERROR],
)
status.add_callback(_emit_file_event)
else:
status.set_finished()
return status
def stop_camera(self) -> DeviceStatus:
"""Stop the camera acquisition and set it to idle state."""
self.set_idle()
status = DeviceStatus(self)
self.backend.add_status_callback(
status, success=[StdDaqStatus.IDLE], error=[StdDaqStatus.REJECTED, StdDaqStatus.ERROR]
)
self.backend.stop()
return status
@property
def target_file(self) -> str:
"""Return the target file path for the current acquisition."""
file_path = cast(str, self.file_path.get())
file_prefix = cast(str, self.file_prefix.get())
return os.path.join(file_path, f"{file_prefix.removesuffix('_')}.h5")
########################################
# Beamline Specific Implementations #
########################################
@@ -753,87 +531,98 @@ class GigaFrostCamera(PSIDeviceBase, GigaFrostBase):
Called after the device is connected and its signals are connected.
Default values for signals should be set here.
"""
# # TODO: check if this can be moved to the config file
# # pylint: disable=protected-access
# self.auto_soft_enable._metadata["write_access"] = False
# self.backend_url._metadata["write_access"] = False
self.auto_soft_enable.put(self._signals_to_be_set["auto_soft_enable"], force=True)
self.backend_url.put(self._signals_to_be_set["backend_url"], force=True)
# Perform a full initialization of the GigaFrost
self.initialize_gigafrost()
self.backend.connect()
if self.live_preview:
self.live_preview.start()
def on_stage(self) -> DeviceStatus | None:
"""
Called while staging the device.
Information about the upcoming scan can be accessed from the scan_info object.
"""
# If the camera is busy, stop it first
# Gigafrost can finish a run without explicit unstaging
if self.busy_stat.value:
self.stop_camera()
scan_msg = self.scan_info.msg
if scan_msg is None or scan_msg.request_inputs is None or scan_msg.scan_parameters is None:
# I don't think this can happen outside of tests, but just in case
logger.warning(
f"[{self.name}] Scan message is not available or incomplete. "
"Cannot configure the GigaFrost camera."
)
self.acq_configs = {}
return
logger.warning("Camera is already running, unstaging it first!")
self.unstage()
sleep(0.5)
# FIXME: I don't care about how we fish out config parameters from scan info
scan_args = {
**scan_msg.request_inputs.get("inputs", {}),
**scan_msg.request_inputs.get("kwargs", {}),
**scan_msg.scan_parameters,
**self.scan_info.msg.request_inputs["inputs"],
**self.scan_info.msg.request_inputs["kwargs"],
**self.scan_info.msg.scan_parameters,
}
if "file_path" not in scan_args:
scan_args["file_path"] = (
"/gpfs/test/test-beamline" # FIXME: This should be from the scan message
)
if "file_prefix" not in scan_args:
scan_args["file_prefix"] = scan_msg.info["file_components"][0].split("/")[-1] + "_"
self.configure(scan_args)
d = {}
if "image_width" in scan_args and scan_args["image_width"] is not None:
d["image_width"] = scan_args["image_width"]
if "image_height" in scan_args and scan_args["image_height"] is not None:
d["image_height"] = scan_args["image_height"]
if "exp_time" in scan_args and scan_args["exp_time"] is not None:
d["exposure_time_ms"] = scan_args["exp_time"]
if "exp_period" in scan_args and scan_args["exp_period"] is not None:
d["exposure_period_ms"] = scan_args["exp_period"]
if "acq_time" in scan_args and scan_args["acq_time"] is not None:
d["exposure_time_ms"] = scan_args["acq_time"]
if "acq_period" in scan_args and scan_args["acq_period"] is not None:
d["exposure_period_ms"] = scan_args["acq_period"]
if "exp_burst" in scan_args and scan_args["exp_burst"] is not None:
d["exposure_num_burst"] = scan_args["exp_burst"]
if "acq_mode" in scan_args and scan_args["acq_mode"] is not None:
d["acq_mode"] = scan_args["acq_mode"]
if d:
self.configure(d)
# Sync if out of sync
if self.sync_flag.value == 0:
self.sync_swhw.set(1).wait()
# stdDAQ backend parameters
num_points = (
1
* scan_args.get("steps", 1)
* scan_args.get("exp_burst", 1)
* scan_args.get("repeats", 1)
* scan_args.get("burst_at_each_point", 1)
)
self.num_images.set(num_points).wait()
# reset the acquisition configs
self.acq_configs = {}
if "daq_file_path" in scan_args and scan_args["daq_file_path"] is not None:
self.file_path.set(scan_args["daq_file_path"]).wait()
if "daq_file_prefix" in scan_args and scan_args["daq_file_prefix"] is not None:
self.file_prefix.set(scan_args["daq_file_prefix"]).wait()
if "daq_num_images" in scan_args and scan_args["daq_num_images"] is not None:
self.num_images.set(scan_args["daq_num_images"]).wait()
# Start stdDAQ preview
if self.live_preview is not None:
self.live_preview.start()
def on_unstage(self) -> DeviceStatus | None:
"""Called while unstaging the device."""
# Switch to idle
logger.info(f"StdDaq status on unstage: {self.backend.status}")
return self.stop_camera()
self.disarm()
if self.backend is not None:
logger.info(f"StdDaq status before unstage: {self.backend.status}")
self.backend.stop()
def on_pre_scan(self) -> DeviceStatus | None:
"""Called right before the scan starts on all devices automatically."""
# Switch to acquiring
return self.start_camera()
# First start the stdDAQ
if self.backend is not None:
self.backend.start(
file_path=self.file_path.get(),
file_prefix=self.file_prefix.get(),
num_images=self.num_images.get(),
)
# Then start the camera
self.arm()
def on_trigger(self) -> DeviceStatus | StatusBase | None:
def on_trigger(self) -> DeviceStatus | None:
"""Called when the device is triggered."""
if self.busy_stat.get() in (0, "IDLE"):
raise RuntimeError("GigaFrost must be running before triggering")
logger.info(f"[{self.name}] SW triggering gigafrost")
logger.warning(f"[{self.name}] SW triggering gigafrost")
# Soft triggering based on operation mode
if (
@@ -843,39 +632,38 @@ class GigaFrostCamera(PSIDeviceBase, GigaFrostBase):
):
# BEC teststand operation mode: posedge of SoftEnable if Started
self.soft_enable.set(0).wait()
return self.soft_enable.set(1)
self.soft_enable.set(1).wait()
return self.soft_trig.set(1)
if self.acquire_block.get() or self.backend is None:
wait_time = 0.2 + 0.001 * self.num_exposures.value * max(
self.acquire_time.value, self.acquire_period.value
)
logger.info(f"[{self.name}] Triggering set to block for {wait_time} seconds")
return DeviceStatus(self, done=True, success=True, settle_time=wait_time)
else:
self.soft_trig.set(1).wait()
def on_complete(self) -> DeviceStatus | None:
"""Called to inquire if a device has completed a scans."""
def _create_dataset(_status: DeviceStatus):
self.backend.create_virtual_datasets(
self.file_path.get(), file_prefix=self.file_prefix.get() # type: ignore
)
self._run_subs(
sub_type=self.SUB_FILE_EVENT,
file_path=self.target_file,
done=True,
successful=True,
hinted_location={"data": "data"},
)
status = self.acq_done()
status.add_callback(_create_dataset)
return status
return self.acq_done()
def on_kickoff(self) -> DeviceStatus | None:
"""Called to kickoff a device for a fly scan. Has to be called explicitly."""
def on_stop(self) -> DeviceStatus:
def on_stop(self) -> None:
"""Called when the device is stopped."""
return self.stop_camera()
return self.on_unstage()
if __name__ == "__main__": # pragma: no cover
# Automatically connect to MicroSAXS testbench if directly invoked
if __name__ == "__main__":
gf = GigaFrostCamera(
"X02DA-CAM-GF2:", name="gf2", backend_url="http://xbl-daq-28:8080", auto_soft_enable=True
"X02DA-CAM-GF2:",
name="gf2",
backend_url="http://xbl-daq-28:8080",
auto_soft_enable=True,
std_daq_ws="ws://129.129.95.111:8080",
std_daq_rest="http://129.129.95.111:5000",
std_daq_live="tcp://129.129.95.111:20000",
)
gf.wait_for_connection()

View File

@@ -0,0 +1,158 @@
# -*- coding: utf-8 -*-
"""
Standard DAQ preview image stream module
Created on Thu Jun 27 17:28:43 2024
@author: mohacsi_i
"""
from time import sleep, time
import threading
import zmq
import json
ZMQ_TOPIC_FILTER = b""
class PcoTestConsumer:
"""Detector wrapper class around the StdDaq preview image stream.
This was meant to provide live image stream directly from the StdDAQ.
Note that the preview stream must be already throtled in order to cope
with the incoming data and the python class might throttle it further.
You can add a preview widget to the dock by:
cam_widget = gui.add_dock('cam_dock1').add_widget('BECFigure').image('daq_stream1')
"""
# Subscriptions for plotting image
_shutdown_event = threading.Event()
_monitor_mutex = threading.Lock()
_monitor_thread = None
# Status attributes
_url = None
_image = None
_frame = None
_socket = None
def __init__(self, url: str = "tcp://129.129.95.38:20000") -> None:
super().__init__()
self._url = url
def connect(self):
"""Connect to te StDAQs PUB-SUB streaming interface"""
# Socket to talk to server
context = zmq.Context()
self._socket = context.socket(zmq.PULL)
try:
self._socket.connect(self.url)
except ConnectionRefusedError:
sleep(1)
self._socket.connect(self.url)
def disconnect(self):
"""Disconnect"""
try:
if self._socket is not None:
self._socket.disconnect(self.url)
except zmq.ZMQError:
pass
finally:
self._socket = None
@property
def url(self):
return self._url
@property
def image(self):
return self._image
@property
def frame(self):
return self._frame
# pylint: disable=protected-access
def start(self):
"""Start listening for preview data stream"""
if self._monitor_mutex.locked():
raise RuntimeError("Only one consumer permitted")
self.connect()
self._mon = threading.Thread(target=self.poll, daemon=True)
self._mon.start()
def stop(self):
"""Stop a running preview"""
self._shutdown_event.set()
if self._mon is not None:
self._stop_polling = True
# Might hang on recv_multipart
self._mon.join(timeout=1)
# So also disconnect the socket
self.disconnect()
self._shutdown_event.clear()
def poll(self):
"""Collect streamed updates"""
try:
t_last = time()
print("Starting monitor")
with self._monitor_mutex:
while not self._shutdown_event.is_set():
try:
# pylint: disable=no-member
r = self._socket.recv_multipart(flags=zmq.NOBLOCK)
# Length and throtling checks
t_curr = time()
t_elapsed = t_curr - t_last
if t_elapsed < self.parent.throttle.get():
continue
# # Unpack the Array V1 reply to metadata and array data
meta, data = r
# Update image and update subscribers
header = json.loads(meta)
self.header = header
# if header["type"] == "uint16":
# image = np.frombuffer(data, dtype=np.uint16)
# if image.size != np.prod(header['shape']):
# err = f"Unexpected array size of {image.size} for header: {header}"
# raise ValueError(err)
# image = image.reshape(header['shape'])
# # Update image and update subscribers
# self._frame = header['frame']
# self._image = image
t_last = t_curr
# print(
# f"[{self.name}] Updated frame {header['frame']}\t"
# f"Shape: {header['shape']}\tMean: {np.mean(image):.3f}"
# )
except ValueError:
# Happens when ZMQ partially delivers the multipart message
pass
except zmq.error.Again:
# Happens when receive queue is empty
sleep(0.1)
except Exception as ex:
print(f"{str(ex)}")
raise
finally:
try:
self._socket.disconnect(self.url)
except RuntimeError:
pass
self._monitor_thread = None
print(f"Detaching monitor")
# Automatically connect to MicroSAXS testbench if directly invoked
if __name__ == "__main__":
daq = PcoTestConsumer(url="tcp://10.4.0.82:8080")
daq.start()
sleep(500)
daq.stop()

View File

@@ -0,0 +1,192 @@
# -*- coding: utf-8 -*-
"""
Standard DAQ preview image stream module
Created on Thu Jun 27 17:28:43 2024
@author: mohacsi_i
"""
from time import sleep, time
from threading import Thread
import zmq
import json
from ophyd import Device, Signal, Component, Kind
from ophyd_devices.interfaces.base_classes.psi_detector_base import (
CustomDetectorMixin,
PSIDetectorBase,
)
from bec_lib import bec_logger
logger = bec_logger.logger
ZMQ_TOPIC_FILTER = b""
class PcoTestConsumerMixin(CustomDetectorMixin):
"""Setup class for the standard DAQ preview stream
Parent class: CustomDetectorMixin
"""
# pylint: disable=protected-access
def on_stage(self):
"""Start listening for preview data stream"""
if self.parent._mon is not None:
self.parent.unstage()
sleep(0.5)
self.parent.connect()
self._stop_polling = False
self.parent._mon = Thread(target=self.poll, daemon=True)
self.parent._mon.start()
def on_unstage(self):
"""Stop a running preview"""
if self.parent._mon is not None:
self._stop_polling = True
# Might hang on recv_multipart
self.parent._mon.join(timeout=1)
# So also disconnect the socket
self.parent.disconnect()
def on_stop(self):
"""Stop a running preview"""
self.parent.disconnect()
def poll(self):
"""Collect streamed updates"""
try:
t_last = time()
print("Starting monitor")
while True:
try:
# Exit loop and finish monitoring
if self._stop_polling:
logger.info(f"[{self.parent.name}]\tDetaching monitor")
break
# pylint: disable=no-member
r = self.parent._socket.recv_multipart(flags=zmq.NOBLOCK)
# Length and throtling checks
t_curr = time()
t_elapsed = t_curr - t_last
if t_elapsed < self.parent.throttle.get():
continue
# # Unpack the Array V1 reply to metadata and array data
meta, data = r
# Update image and update subscribers
header = json.loads(meta)
self.parent.header = header
# if header["type"] == "uint16":
# image = np.frombuffer(data, dtype=np.uint16)
# if image.size != np.prod(header['shape']):
# err = f"Unexpected array size of {image.size} for header: {header}"
# raise ValueError(err)
# image = image.reshape(header['shape'])
# # Update image and update subscribers
# self.parent.frame.put(header['frame'], force=True)
# self.parent.image_shape.put(header['shape'], force=True)
# self.parent.image.put(image, force=True)
# self.parent._last_image = image
# self.parent._run_subs(sub_type=self.parent.SUB_MONITOR, value=image)
t_last = t_curr
# logger.info(
# f"[{self.parent.name}] Updated frame {header['frame']}\t"
# f"Shape: {header['shape']}\tMean: {np.mean(image):.3f}"
# )
print(f"[{self.parent.name}] Updated frame {header['frame']}\t")
except ValueError:
# Happens when ZMQ partially delivers the multipart message
pass
except zmq.error.Again:
# Happens when receive queue is empty
sleep(0.1)
except Exception as ex:
logger.info(f"[{self.parent.name}]\t{str(ex)}")
raise
finally:
try:
self.parent._socket.disconnect(self.parent.url.get())
except RuntimeError:
pass
self.parent._mon = None
logger.info(f"[{self.parent.name}]\tDetaching monitor")
class PcoTestConsumer(PSIDetectorBase):
"""Detector wrapper class around the StdDaq preview image stream.
This was meant to provide live image stream directly from the StdDAQ.
Note that the preview stream must be already throtled in order to cope
with the incoming data and the python class might throttle it further.
You can add a preview widget to the dock by:
cam_widget = gui.add_dock('cam_dock1').add_widget('BECFigure').image('daq_stream1')
"""
# Subscriptions for plotting image
USER_ACCESS = ["get_last_image"]
SUB_MONITOR = "device_monitor_2d"
_default_sub = SUB_MONITOR
header = None
custom_prepare_cls = PcoTestConsumerMixin
# Status attributes
url = Component(Signal, kind=Kind.config)
throttle = Component(Signal, value=0.25, kind=Kind.config)
frame = Component(Signal, kind=Kind.hinted)
image_shape = Component(Signal, kind=Kind.normal)
# FIXME: The BEC client caches the read()s from the last 50 scans
image = Component(Signal, kind=Kind.omitted)
_last_image = None
_mon = None
_socket = None
def __init__(
self, *args, url: str = "tcp://129.129.95.38:20000", parent: Device = None, **kwargs
) -> None:
super().__init__(*args, parent=parent, **kwargs)
self.url._metadata["write_access"] = False
self.image._metadata["write_access"] = False
self.frame._metadata["write_access"] = False
self.image_shape._metadata["write_access"] = False
self.url.set(url, force=True).wait()
def connect(self):
"""Connect to te StDAQs PUB-SUB streaming interface
StdDAQ may reject connection for a few seconds when it restarts,
so if it fails, wait a bit and try to connect again.
"""
# pylint: disable=no-member
# Socket to talk to server
context = zmq.Context()
self._socket = context.socket(zmq.PULL)
try:
self._socket.connect(self.url.get())
except ConnectionRefusedError:
sleep(1)
self._socket.connect(self.url.get())
def disconnect(self):
"""Disconnect"""
try:
if self._socket is not None:
self._socket.disconnect(self.url.get())
except zmq.ZMQError:
pass
finally:
self._socket = None
def get_image(self):
return self._last_image
# Automatically connect to MicroSAXS testbench if directly invoked
if __name__ == "__main__":
daq = PcoTestConsumerMixin(url="tcp://129.129.106.124:8080", name="preview")
daq.wait_for_connection()

View File

@@ -4,45 +4,10 @@ Created on Wed Dec 6 11:33:54 2023
@author: mohacsi_i
"""
import enum
from ophyd import Component as Cpt
from ophyd import Device, DynamicDeviceComponent, EpicsSignal, EpicsSignalRO, Kind, Signal
class TriggerMode(str, enum.Enum):
AUTO_TRIGGER = "auto trigger"
SOFT_TRIGGER = "soft trigger"
EXTERNAL_EXP_TRIGGER = "ext.exp sfttrg"
EXTERNAL_EXP_CONTR = "ext.exp contr"
class CameraStatus(str, enum.Enum):
OFFLINE = "Offline"
IDLE = "Idle"
RUNNING = "Running"
class RecMode(str, enum.Enum):
SEQUENCE = "Sequence"
RING_BUFFER = "Ring buffer"
class StoreMode(str, enum.Enum):
RECORDER = "Recorder"
FIFO_BUFFER = "FIFO buffer"
class CameraInitStatus(str, enum.Enum):
OFFLINE = "OFFLINE"
INIT = "INIT"
class CameraStatusCode(enum.IntEnum):
IDLE = 2
RUNNING = 6
class PcoEdgeBase(Device):
"""Ophyd baseclass for Helge camera IOCs
@@ -81,48 +46,24 @@ class PcoEdgeBase(Device):
# ########################################################################
# General hardware info (in AD nomenclature)
query = Cpt(EpicsSignalRO, "QUERY", kind=Kind.config, doc="Camera manufacturer info")
board = Cpt(EpicsSignalRO, "BOARD", kind=Kind.omitted, doc="Camera board info")
manufacturer = Cpt(EpicsSignalRO, "QUERY", kind=Kind.config, doc="Camera manufacturer info")
model = Cpt(EpicsSignalRO, "BOARD", kind=Kind.omitted, doc="Camera board info")
# ########################################################################
# Acquisition configuration (in AD nomenclature)
camera_status = Cpt(
EpicsSignal,
"CAMERASTATUS",
put_complete=True,
kind=Kind.omitted,
string=True,
doc="Camera acquisition status, either 'Offline', 'Idle' or 'Running'",
acquire = Cpt(EpicsSignal, "CAMERASTATUS", put_complete=True, kind=Kind.omitted)
acquire_time = Cpt(
EpicsSignal, "EXPOSURE", put_complete=True, auto_monitor=True, kind=Kind.config
)
exposure = Cpt(
EpicsSignal,
"EXPOSURE",
put_complete=True,
auto_monitor=True,
kind=Kind.config,
doc="Exposure time in milliseconds.",
acquire_delay = Cpt(
EpicsSignal, "DELAY", put_complete=True, auto_monitor=True, kind=Kind.config
)
delay = Cpt(
EpicsSignal,
"DELAY",
put_complete=True,
auto_monitor=True,
kind=Kind.config,
doc="Delay time in milliseconds.",
)
# trigger_mode cannot be called 'trigger' as it is a reserved method in ophyd.Device
# and it would override the Device.trigger() method
trigger_mode = Cpt(
EpicsSignal,
"TRIGGER",
put_complete=True,
auto_monitor=True,
kind=Kind.config,
string=True,
doc="Trigger mode. Must be either 'auto trigger', 'soft trigger', "
"'ext.exp sfttrg' or 'ext.exp contr'",
EpicsSignal, "TRIGGER", put_complete=True, auto_monitor=True, kind=Kind.config
)
# num_exposures = Cpt(
# EpicsSignal, "CNT_NUM", put_complete=True, auto_monitor=True, kind=Kind.config
# )
array_size = DynamicDeviceComponent(
{
@@ -132,6 +73,15 @@ class PcoEdgeBase(Device):
doc="Size of the array in the XY dimensions",
)
# DAQ parameters
file_path = Cpt(Signal, kind=Kind.config, value="/gpfs/test/test-beamline")
file_prefix = Cpt(Signal, kind=Kind.config, value="scan_")
num_images = Cpt(Signal, kind=Kind.config, value=1000)
num_images_counter = Cpt(Signal, kind=Kind.hinted, value=0)
# GF specific interface
acquire_block = Cpt(Signal, kind=Kind.config, value=0)
# ########################################################################
# Image size configuration (in AD nomenclature)
bin_x = Cpt(EpicsSignal, "BINX", put_complete=True, auto_monitor=True, kind=Kind.config)
@@ -140,8 +90,8 @@ class PcoEdgeBase(Device):
# ########################################################################
# Additional status info
busy = Cpt(EpicsSignalRO, "BUSY", auto_monitor=True, kind=Kind.config)
ss_camera = Cpt(EpicsSignalRO, "SS_CAMERA", auto_monitor=True, kind=Kind.config)
cam_progress = Cpt(EpicsSignalRO, "CAMPROGRESS", auto_monitor=True, kind=Kind.config)
camState = Cpt(EpicsSignalRO, "SS_CAMERA", auto_monitor=True, kind=Kind.config)
camProgress = Cpt(EpicsSignalRO, "CAMPROGRESS", auto_monitor=True, kind=Kind.config)
# ########################################################################
# Configuration state maschine with separate transition states
@@ -154,50 +104,36 @@ class PcoEdgeBase(Device):
kind=Kind.config,
)
statuscode = Cpt(EpicsSignalRO, "STATUSCODE", auto_monitor=True, kind=Kind.config)
init = Cpt(
EpicsSignalRO,
"INIT",
auto_monitor=True,
kind=Kind.config,
string=True,
doc="Camera initialization status, either 'OFFLINE' or 'INIT'.",
)
camera_statuscode = Cpt(EpicsSignalRO, "STATUSCODE", auto_monitor=True, kind=Kind.config)
camera_init = Cpt(EpicsSignalRO, "INIT", auto_monitor=True, kind=Kind.config)
camera_init_busy = Cpt(EpicsSignalRO, "BUSY_INIT", auto_monitor=True, kind=Kind.config)
# camCamera = Cpt(EpicsSignalRO, "CAMERA", auto_monitor=True, kind=Kind.config)
# camCameraBusy = Component(EpicsSignalRO, "BUSY_CAMERA", auto_monitor=True, kind=Kind.config)
# ########################################################################
# Acquisition configuration
acqmode = Cpt(EpicsSignalRO, "ACQMODE", auto_monitor=True, kind=Kind.config)
acquire_mode = Cpt(EpicsSignalRO, "ACQMODE", auto_monitor=True, kind=Kind.config)
acquire_trigger = Cpt(EpicsSignalRO, "TRIGGER", auto_monitor=True, kind=Kind.config)
# acqTriggerSource = Component(
# EpicsSignalRO, "TRIGGERSOURCE", auto_monitor=True, kind=Kind.config)
# acqTriggerEdge = Component(EpicsSignalRO, "TRIGGEREDGE", auto_monitor=True, kind=Kind.config)
# ########################################################################
# Buffer configuration
rec_mode = Cpt(
EpicsSignalRO,
"RECMODE",
auto_monitor=True,
kind=Kind.config,
string=True,
doc="Recording mode of the camera, either 'Sequence' or 'Ring buffer'",
)
store_mode = Cpt(
EpicsSignal,
"STOREMODE",
auto_monitor=True,
kind=Kind.config,
string=True,
doc="Store mode of the camera, either 'Recorder' or 'FIFO buffer'",
)
bufferRecMode = Cpt(EpicsSignalRO, "RECMODE", auto_monitor=True, kind=Kind.config)
bufferStoreMode = Cpt(EpicsSignal, "STOREMODE", auto_monitor=True, kind=Kind.config)
fileRecMode = Cpt(EpicsSignalRO, "RECMODE", auto_monitor=True, kind=Kind.config)
pic_buffer = Cpt(EpicsSignalRO, "PIC_BUFFER", auto_monitor=True, kind=Kind.normal)
pic_max = Cpt(EpicsSignalRO, "PIC_MAX", auto_monitor=True, kind=Kind.normal)
clear_mem = Cpt(EpicsSignal, "CLEARMEM", put_complete=True, kind=Kind.omitted)
buffer_used = Cpt(EpicsSignalRO, "PIC_BUFFER", auto_monitor=True, kind=Kind.normal)
buffer_size = Cpt(EpicsSignalRO, "PIC_MAX", auto_monitor=True, kind=Kind.normal)
buffer_clear = Cpt(EpicsSignal, "CLEARMEM", put_complete=True, kind=Kind.omitted)
# ########################################################################
# File saving/streaming interface
cam_rate = Cpt(EpicsSignalRO, "CAMRATE", auto_monitor=True, kind=Kind.normal)
file_rate = Cpt(EpicsSignalRO, "FILERATE", auto_monitor=True, kind=Kind.normal)
save_start = Cpt(EpicsSignal, "SAVESTART", put_complete=True, kind=Kind.config)
save_stop = Cpt(EpicsSignal, "SAVESTOP", put_complete=True, kind=Kind.config)
cam_data_rate = Cpt(EpicsSignalRO, "CAMRATE", auto_monitor=True, kind=Kind.normal)
file_data_rate = Cpt(EpicsSignalRO, "FILERATE", auto_monitor=True, kind=Kind.normal)
file_savestart = Cpt(EpicsSignal, "SAVESTART", put_complete=True, kind=Kind.config)
file_savestop = Cpt(EpicsSignal, "SAVESTOP", put_complete=True, kind=Kind.config)
file_format = Cpt(EpicsSignal, "FILEFORMAT", put_complete=True, kind=Kind.config)
file_transfer = Cpt(EpicsSignal, "FTRANSFER", put_complete=True, kind=Kind.config)
file_savebusy = Cpt(EpicsSignalRO, "FILESAVEBUSY", auto_monitor=True, kind=Kind.normal)
@@ -211,24 +147,17 @@ class PcoEdgeBase(Device):
camError = Cpt(EpicsSignalRO, "ERRCODE", auto_monitor=True, kind=Kind.config)
camWarning = Cpt(EpicsSignalRO, "WARNCODE", auto_monitor=True, kind=Kind.config)
# DAQ parameters
file_path = Cpt(Signal, kind=Kind.config, value="/gpfs/test/test-beamline")
file_prefix = Cpt(Signal, kind=Kind.config, value="scan_")
num_images = Cpt(Signal, kind=Kind.config, value=1000)
frames_per_trigger = Cpt(Signal, kind=Kind.config, value=1)
num_images_counter = Cpt(Signal, kind=Kind.hinted, value=0)
@property
def state(self) -> str:
"""Single word camera state"""
if self.set_param.value:
return "BUSY"
if self.statuscode.value == 2 and self.init.value == 1:
if self.camera_statuscode.value == 2 and self.camera_init.value == 1:
return "IDLE"
if self.statuscode.value == 6 and self.init.value == 1:
if self.camera_statuscode.value == 6 and self.camera_init.value == 1:
return "RUNNING"
# if self.camRemoval.value==0 and self.camInit.value==0:
if self.init.value == 0:
if self.camera_init.value == 0:
return "OFFLINE"
# if self.camRemoval.value:
# return "REMOVED"

View File

@@ -0,0 +1,415 @@
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 6 11:33:54 2023
@author: mohacsi_i
"""
import time
import numpy as np
from ophyd.status import SubscriptionStatus, DeviceStatus
from ophyd_devices.interfaces.base_classes.psi_device_base import PSIDeviceBase
from bec_lib.logger import bec_logger
from tomcat_bec.devices.gigafrost.pcoedge_base import PcoEdgeBase
from tomcat_bec.devices.gigafrost.std_daq_preview import StdDaqPreview
from tomcat_bec.devices.gigafrost.std_daq_client import StdDaqClient, StdDaqStatus
logger = bec_logger.logger
# pylint: disable=too-many-instance-attributes
class PcoEdge5M(PSIDeviceBase, PcoEdgeBase):
    """Ophyd device class for the PCO.Edge camera behind Helge's camera IOCs.

    This class provides wrappers for Helge's camera IOCs around SwissFEL and
    for high performance SLS 2.0 cameras. The IOC's operation is a bit arcane
    and there are different versions and cameras all around. So this device
    only covers the absolute basics.

    Probably the most important part is the configuration state machine. As
    the SET_PARAMS takes care of buffer allocations it might take some time,
    as well as a full re-configuration is required every time we change the
    binning, roi, etc... This is automatically performed upon starting an
    exposure (if it hasn't been done before).

    The status flag state machine during re-configuration is:
    BUSY low, SET low -> BUSY high, SET low -> BUSY low, SET high -> BUSY low, SET low

    UPDATE: Data sending operation modes
    - Switch to ZMQ streaming by setting FILEFORMAT to ZEROMQ
    - Set SAVESTART and SAVESTOP to select a ROI of image indices
    - Start file transfer with FTRANSFER.
    The ZMQ connection operates in PUSH-PULL mode, i.e. it needs incoming connection.

    STOREMODE sets the acquisition mode:
    if STOREMODE == Recorder
        Fills up the buffer with images. Here SAVESTART and SAVESTOP selects a ROI
        of image indices to be streamed out (i.e. maximum buffer_size number of images)
    if STOREMODE == FIFO buffer
        Continuously streams out data using the buffer as a FIFO queue.
        Here SAVESTART and SAVESTOP selects a ROI of image indices to be streamed
        continuously (i.e. a large SAVESTOP streams indefinitely). Note that in FIFO
        mode buffer reads are destructive. To prevent this, we don't have EPICS preview.
    """

    USER_ACCESS = ["complete", "backend", "live_preview", "arm", "disarm"]
    # Placeholders for the stdDAQ writer client and the ZMQ livestream client;
    # they stay None when no backend/stream address was provided.
    backend = None
    live_preview = None

    # pylint: disable=too-many-arguments
    def __init__(
        self,
        prefix="",
        *,
        name,
        kind=None,
        read_attrs=None,
        configuration_attrs=None,
        parent=None,
        scan_info=None,
        std_daq_rest: str | None = None,
        std_daq_ws: str | None = None,
        std_daq_live: str | None = None,
        **kwargs,
    ):
        """Create the camera device and (optionally) its stdDAQ clients.

        Args:
            prefix: EPICS PV prefix of the camera IOC.
            std_daq_rest: REST endpoint of the stdDAQ writer (config interface).
            std_daq_ws: Websocket endpoint of the stdDAQ writer (status/control).
            std_daq_live: ZMQ PUB endpoint of the stdDAQ live preview stream.
        """
        # super() will call the mixin class
        super().__init__(
            prefix=prefix,
            name=name,
            kind=kind,
            read_attrs=read_attrs,
            configuration_attrs=configuration_attrs,
            parent=parent,
            scan_info=scan_info,
            **kwargs,
        )
        # Configure the stdDAQ client; both URLs are required for the backend
        if std_daq_rest is None or std_daq_ws is None:
            # raise ValueError("Both std_daq_rest and std_daq_ws must be provided")
            logger.error("No stdDAQ address provided, launching without data backend!")
        else:
            self.backend = StdDaqClient(parent=self, ws_url=std_daq_ws, rest_url=std_daq_rest)
        # Configure image preview
        if std_daq_live is not None:
            self.live_preview = StdDaqPreview(url=std_daq_live, cb=self._on_preview_update)
        else:
            logger.error("No stdDAQ stream address provided, launching without preview!")

    def configure(self, d: dict = None) -> tuple:
        """Configure the base Helge camera device.

        Writes the requested settings to the IOC and then performs the
        SET_PARAM handshake (which re-allocates buffers on the IOC side).

        Parameters as 'd' dictionary
        ----------------------------
        num_images : int
            Number of images to be taken during each scan. Meaning depends on
            store mode.
        exposure_time_ms : float
            Exposure time [ms], usually gets set back to 20 ms
        exposure_period_ms : float
            Exposure period [ms], up to 200 ms.
        store_mode : str
            Buffer operation mode
            *'Recorder' to record in buffer
            *'FIFO buffer' for continous streaming
        data_format : str
            Usually set to 'ZEROMQ'
        acq_mode : str
            Store mode and data format according to preconfigured settings

        NOTE(review): despite the ``-> tuple`` annotation this method returns
        None, as did the original implementation — kept for compatibility.
        """
        # FIX: was `not in ("IDLE")`, i.e. a substring test against the string
        # "IDLE"; a one-element tuple is required for membership semantics.
        if self.state not in ("IDLE",):
            raise RuntimeError(f"Can't change configuration from state {self.state}")
        # If Bluesky style configure
        if d is not None:
            # Commonly changed settings
            if "exposure_num_burst" in d:
                self.file_savestop.set(d["exposure_num_burst"]).wait()
            if "exposure_time_ms" in d:
                self.acquire_time.set(d["exposure_time_ms"]).wait()
            if "exposure_period_ms" in d:
                self.acquire_delay.set(d["exposure_period_ms"]).wait()
            if "image_width" in d:
                self.array_size.array_size_x.set(d["image_width"]).wait()
            if "image_height" in d:
                self.array_size.array_size_y.set(d["image_height"]).wait()
            if "store_mode" in d:
                self.bufferStoreMode.set(d["store_mode"]).wait()
            if "data_format" in d:
                self.file_format.set(d["data_format"]).wait()
            # If a pre-configured acquisition mode is specified, set it
            if "acq_mode" in d:
                self.set_acquisition_mode(d["acq_mode"])
        # State machine
        # Initial: BUSY and SET both low
        # 0. Write 1 to SET_PARAM
        # 1. BUSY goes high, SET stays low
        # 2. BUSY goes low, SET goes high
        # 3. BUSY stays low, SET goes low
        # So we need a 'negedge' on SET_PARAM
        def negedge(*, old_value, value, timestamp, **_):
            return bool(old_value and not value)

        # Subscribe and wait for update
        status = SubscriptionStatus(self.set_param, negedge, timeout=5, settle_time=0.5)
        self.set_param.set(1).wait()
        status.wait()

    def set_acquisition_mode(self, acq_mode):
        """Set acquisition mode.

        Utility function to quickly select between pre-configured and tested
        acquisition modes.

        Raises:
            RuntimeError: if ``acq_mode`` is not one of the supported modes.
        """
        # FIX: these branches were two separate `if` statements, so the else
        # clause raised RuntimeError even for the valid 'default'/'step' modes.
        if acq_mode in ("default", "step"):
            # NOTE: Trigger duration requires a consumer
            self.bufferStoreMode.set("FIFO Buffer").wait()
        elif acq_mode == "stream":
            # NOTE: Trigger duration requires a consumer
            self.bufferStoreMode.set("FIFO Buffer").wait()
        else:
            raise RuntimeError(f"Unsupported acquisition mode: {acq_mode}")

    def arm(self):
        """Bluesky style stage: arm the detector."""
        logger.warning("Staging PCO")
        # Acquisition is only allowed when the IOC is not busy
        if self.state in ("OFFLINE", "BUSY", "REMOVED", "RUNNING"):
            raise RuntimeError(f"Camera in in state: {self.state}")
        # In Recorder mode at most buffer_size images exist; asking for more
        # via SAVESTOP would stream empty frames.
        if (
            self.bufferStoreMode.get() in ("Recorder", 0)
            and self.file_savestop.get() > self.buffer_size.get()
        ):
            logger.warning(
                f"You'll send empty images, {self.file_savestop.get()} is above buffer size"
            )
        # Start the acquisition (this sets parameters and starts acquisition)
        self.acquire.set("Running").wait()

        # Subscribe and wait until STATUSCODE reports running (6)
        def is_running(*, value, timestamp, **_):
            return bool(value == 6)

        status = SubscriptionStatus(self.camera_statuscode, is_running, timeout=5, settle_time=0.2)
        status.wait()

    def disarm(self):
        """Bluesky style unstage: stop the detector."""
        self.acquire.set("Idle").wait()
        # Data streaming is stopped by setting the max index to 0
        # FIXME: This will interrupt data transfer
        self.file_savestop.set(0).wait()

    def destroy(self):
        """Shut down the stdDAQ backend before destroying the ophyd device."""
        logger.warning("Destroy called")
        if self.backend is not None:
            self.backend.shutdown()
        super().destroy()

    def _on_preview_update(self, img: np.ndarray, header: dict):
        """Send preview stream and update frame index counter."""
        # FIXME: There's also a recorded images counter provided by the stdDAQ writer
        self.num_images_counter.put(header["frame"], force=True)
        self._run_subs(sub_type=self.SUB_DEVICE_MONITOR_2D, obj=self, value=img)

    def acq_done(self) -> DeviceStatus:
        """
        Check if the acquisition is done. For the GigaFrost camera, this is
        done by checking the status of the backend as the camera does not
        provide any feedback about its internal state.

        Returns:
            DeviceStatus: The status of the acquisition. Note that without a
            backend the returned status is never finished by this method.
        """
        status = DeviceStatus(self)
        if self.backend is not None:
            self.backend.add_status_callback(
                status,
                success=[StdDaqStatus.IDLE, StdDaqStatus.FILE_SAVED],
                error=[StdDaqStatus.REJECTED, StdDaqStatus.ERROR],
            )
        return status

    ########################################
    #  Beamline Specific Implementations   #
    ########################################
    # pylint: disable=protected-access
    def on_stage(self) -> None:
        """Configure and arm PCO.Edge camera for acquisition."""
        # PCO can finish a run without explicit unstaging
        # FIX: was `not in ("IDLE")` (string containment), now a tuple test.
        if self.state not in ("IDLE",):
            logger.warning(
                f"Trying to stage the camera from state {self.state}, unstaging it first!"
            )
            self.unstage()
            time.sleep(0.5)
        # Fish out our configuration from scaninfo (via explicit or generic addressing)
        scan_args = {
            **self.scan_info.msg.request_inputs["inputs"],
            **self.scan_info.msg.request_inputs["kwargs"],
            **self.scan_info.msg.scan_parameters,
        }
        d = {}
        if "image_width" in scan_args and scan_args["image_width"] is not None:
            d["image_width"] = scan_args["image_width"]
        if "image_height" in scan_args and scan_args["image_height"] is not None:
            d["image_height"] = scan_args["image_height"]
        # Both 'exp_*' and 'acq_*' spellings are accepted; 'acq_*' wins when
        # both are present (it is checked last).
        if "exp_time" in scan_args and scan_args["exp_time"] is not None:
            d["exposure_time_ms"] = scan_args["exp_time"]
        if "exp_period" in scan_args and scan_args["exp_period"] is not None:
            d["exposure_period_ms"] = scan_args["exp_period"]
        if "exp_burst" in scan_args and scan_args["exp_burst"] is not None:
            d["exposure_num_burst"] = scan_args["exp_burst"]
        if "acq_time" in scan_args and scan_args["acq_time"] is not None:
            d["exposure_time_ms"] = scan_args["acq_time"]
        if "acq_period" in scan_args and scan_args["acq_period"] is not None:
            d["exposure_period_ms"] = scan_args["acq_period"]
        if "acq_burst" in scan_args and scan_args["acq_burst"] is not None:
            d["exposure_num_burst"] = scan_args["acq_burst"]
        if "acq_mode" in scan_args and scan_args["acq_mode"] is not None:
            d["acq_mode"] = scan_args["acq_mode"]
        # elif self.scaninfo.scan_type == "step":
        #    d['acq_mode'] = "default"
        if "pco_store_mode" in scan_args and scan_args["pco_store_mode"] is not None:
            d["store_mode"] = scan_args["pco_store_mode"]
        if "pco_data_format" in scan_args and scan_args["pco_data_format"] is not None:
            d["data_format"] = scan_args["pco_data_format"]
        # Perform bluesky-style configuration
        if d:
            logger.warning(f"[{self.name}] Configuring with:\n{d}")
            self.configure(d=d)
        # stdDAQ backend parameters: total expected frames over the scan
        num_points = (
            1
            * scan_args.get("steps", 1)
            * scan_args.get("exp_burst", 1)
            * scan_args.get("repeats", 1)
            * scan_args.get("burst_at_each_point", 1)
        )
        self.num_images.set(num_points).wait()
        if "daq_file_path" in scan_args and scan_args["daq_file_path"] is not None:
            self.file_path.set(scan_args["daq_file_path"]).wait()
        if "daq_file_prefix" in scan_args and scan_args["daq_file_prefix"] is not None:
            self.file_prefix.set(scan_args["daq_file_prefix"]).wait()
        if "daq_num_images" in scan_args and scan_args["daq_num_images"] is not None:
            self.num_images.set(scan_args["daq_num_images"]).wait()
        # Start stdDAQ preview
        if self.live_preview is not None:
            self.live_preview.start()

    def on_unstage(self) -> None:
        """Disarm the PCO.Edge camera and stop the stdDAQ writer."""
        self.disarm()
        if self.backend is not None:
            logger.info(f"StdDaq status before unstage: {self.backend.status}")
            self.backend.stop()

    def on_pre_scan(self) -> DeviceStatus | None:
        """Called right before the scan starts on all devices automatically."""
        logger.warning("Called op_prescan on PCO camera")
        # First start the stdDAQ so the writer is ready before frames arrive
        if self.backend is not None:
            self.backend.start(
                file_path=self.file_path.get(),
                file_prefix=self.file_prefix.get(),
                num_images=self.num_images.get(),
            )
        # Then start the camera
        self.arm()

    def on_trigger(self) -> None | DeviceStatus:
        """Trigger mode operation.

        Use it to repeatedly record a fixed number of frames and send it to stdDAQ. The method waits
        for the acquisition and data transfer to complete.
        NOTE: Maciej confirmed that sparse data is no problem to the stdDAQ.
        TODO: Optimize data transfer to launch at end and check completion at the beginning.
        """
        # Ensure that previous data transfer finished
        # def sentIt(*args, value, timestamp, **kwargs):
        #    return value==0
        # status = SubscriptionStatus(self.file_savebusy, sentIt, timeout=120)
        # status.wait()
        # Not sure if it always sends the first batch of images or the newest
        self.buffer_clear.set(1, settle_time=0.1).wait()
        # Wait until the buffer fills up with enough images; t_expected is the
        # nominal acquisition time for the requested number of frames.
        t_expected = (self.acquire_time.get() + self.acquire_delay.get()) * self.file_savestop.get()

        def wait_acquisition(*, value, timestamp, **_):
            num_target = self.file_savestop.get()
            # logger.warning(f"{value} of {num_target}")
            return bool(value >= num_target)

        max_wait = max(5, 5 * t_expected)
        status = SubscriptionStatus(
            self.buffer_used, wait_acquisition, timeout=max_wait, settle_time=0.2
        )
        status.wait()
        # Then start file transfer (need to get the save busy flag update)
        # self.file_transfer.set(1, settle_time=0.2).wait()
        self.file_transfer.set(1).wait()
        # And wait until the images have been sent
        # NOTE: this does not wait for new value, the first check will be
        # against values from the previous cycle, i.e. pass automatically.
        # NOTE(review): t_elapsed mixes local time.time() with the EPICS
        # timestamp; assumes both clocks are roughly synchronized — confirm.
        t_start = time.time()

        def wait_sending(*, old_value, value, timestamp, **_):
            t_elapsed = timestamp - t_start
            # logger.warning(f"{old_value}\t{value}\t{t_elapsed}")
            return old_value == 1 and value == 0 and t_elapsed > 0

        status = SubscriptionStatus(self.file_savebusy, wait_sending, timeout=120, settle_time=0.2)
        status.wait()

    def on_complete(self) -> DeviceStatus | None:
        """Called to inquire if a device has completed a scans."""
        return self.acq_done()

    def on_kickoff(self) -> DeviceStatus | None:
        """Start data transfer.

        TODO: Need to revisit this once triggering is complete
        """
        self.file_transfer.set(1).wait()

    def on_stop(self) -> None:
        """Called when the device is stopped."""
        return self.on_unstage()
# Automatically connect to test camera if directly invoked
if __name__ == "__main__":
    # Drive data collection
    # Test camera IOC prefix at X02DA plus the stdDAQ websocket (control),
    # REST (config) and ZMQ (live preview) endpoints of the writer host.
    cam = PcoEdge5M(
        "X02DA-CCDCAM2:",
        name="mcpcam",
        std_daq_ws="ws://129.129.95.111:8081",
        std_daq_rest="http://129.129.95.111:5010",
        std_daq_live="tcp://129.129.95.111:20010",
    )
    # Block until all EPICS PVs are connected
    cam.wait_for_connection()

View File

@@ -0,0 +1,395 @@
from __future__ import annotations
import copy
import enum
import json
import threading
import time
import traceback
from typing import TYPE_CHECKING
import requests
from bec_lib.logger import bec_logger
from ophyd import StatusBase
from typeguard import typechecked
from websockets import State
from websockets.exceptions import WebSocketException
import websockets.sync.client as ws
if TYPE_CHECKING: # pragma: no cover
from ophyd import Device, DeviceStatus
logger = bec_logger.logger
class StdDaqError(Exception): ...
class StdDaqStatus(str, enum.Enum):
    """
    Status of the StdDAQ.
    Extracted from https://git.psi.ch/controls-ci/std_detector_buffer/-/blob/master/source/std-det-driver/src/driver_state.hpp
    """

    # The str mixin lets members compare equal to the raw status strings
    # received over the websocket (e.g. StdDaqStatus.IDLE == "idle").
    CREATING_FILE = "creating_file"
    ERROR = "error"
    FILE_CREATED = "file_created"
    FILE_SAVED = "file_saved"
    IDLE = "idle"
    RECORDING = "recording"
    REJECTED = "rejected"
    SAVING_FILE = "saving_file"
    STARTED = "started"
    STOP = "stop"
    UNDEFINED = "undefined"
    WAITING_FOR_FIRST_IMAGE = "waiting_for_first_image"
# pylint: disable=too-many-instance-attributes
class StdDaqClient:
    """Standalone stdDAQ client class.

    Talks to the stdDAQ writer over two channels:
    - a websocket (``ws_url``) that streams status updates and accepts
      start/stop commands,
    - a REST interface (``rest_url``) for configuration management.

    A daemon thread monitors the websocket, keeps ``status``/``count`` up to
    date and dispatches registered DeviceStatus callbacks on state changes.
    """

    USER_ACCESS = ["status", "count", "start", "stop", "get_config", "set_config", "reset"]
    _ws_client: ws.ClientConnection | None = None
    _count: int = 0
    _status: str = "undefined"
    _status_timestamp: float | None = None
    _ws_monitor_thread: threading.Thread | None = None
    _config: dict | None = None
    # FIX: annotation only, no class-level default. The original `= {}` was a
    # mutable class attribute mutated in place via self._status_callbacks[...],
    # i.e. silently shared between all client instances. The dict is now
    # created per instance in __init__. Keys are id(status) ints.
    _status_callbacks: dict[int, tuple[DeviceStatus, list[StdDaqStatus], list[StdDaqStatus]]]

    def __init__(self, parent: Device, ws_url: str, rest_url: str):
        """Connect to the stdDAQ and start the websocket monitor thread.

        Args:
            parent: owning ophyd device (used for naming); may be None.
            ws_url: websocket endpoint of the stdDAQ writer.
            rest_url: REST endpoint of the stdDAQ writer.
        """
        self.parent = parent
        self.ws_url = ws_url
        self.rest_url = rest_url
        self.name = self.parent.name if self.parent is not None else "None"
        # Must be here otherwise they're static (shared between class instances)
        self._status_callbacks = {}
        self._ws_recv_mutex = threading.Lock()
        self._shutdown_event = threading.Event()
        self._ws_idle_event = threading.Event()
        self._daq_is_running = threading.Event()
        # Connect to WS interface and start status monitoring
        self.wait_for_connection()
        self._daq_is_running.set()
        self._ws_monitor_thread = threading.Thread(
            target=self._ws_monitor_loop, name=f"{self.name}_ws_monitor", daemon=True
        )
        self._ws_monitor_thread.start()

    @property
    def status(self) -> str:
        """
        Get the status of the StdDAQ.

        Returns the raw status string received over the websocket; compares
        equal to the corresponding StdDaqStatus member (str-mixin enum).
        """
        return self._status

    @property
    def count(self) -> int:
        """Get the recorded frame count."""
        return self._count

    def add_status_callback(
        self, status: DeviceStatus, success: list[StdDaqStatus], error: list[StdDaqStatus]
    ):
        """
        Add a DeviceStatus callback for the StdDAQ. The status will be updated
        when the StdDAQ status changes and set to finished when the status
        matches one of the specified success statuses and to exception when the
        status matches one of the specified error statuses.

        Args:
            status (DeviceStatus): DeviceStatus object
            success (list[StdDaqStatus]): list of statuses that indicate success
            error (list[StdDaqStatus]): list of statuses that indicate error
        """
        self._status_callbacks[id(status)] = (status, success, error)

    @typechecked
    def start(
        self,
        file_path: str,
        file_prefix: str,
        num_images: int,
        timeout: float = 20,
        wait: bool = True,
    ) -> StatusBase | None:
        """Start acquisition on the StdDAQ.

        Args:
            file_path (str): path to save the files
            file_prefix (str): prefix of the files
            num_images (int): number of images to acquire
            timeout (float): timeout for the request
            wait (bool): block until the writer is waiting for images

        Returns:
            status (StatusBase): Ophyd status object with attached monitor,
            or None when ``wait`` is True.
        """
        # Ensure connection
        self.wait_for_connection()
        status = StatusBase()
        # NOTE: CREATING_FILE --> IDLE is a known error, the exact cause is unknown,
        # Might be botched overwrite protection (solved by changing file_prefix)
        # In previous versions there was also a mutex ownership problem
        self.add_status_callback(
            status, success=["waiting_for_first_image"], error=["rejected", "idle"]
        )
        message = {
            "command": "start",
            "path": file_path,
            "file_prefix": file_prefix,
            "n_image": num_images,
        }
        logger.info(f"Starting StdDaq backend. Current status: {self.status}. Message: {message}")
        self._ws_client.send(json.dumps(message))
        if wait:
            status.wait(timeout=timeout)
            return None
        return status

    @typechecked
    def stop(self, timeout: float = 5, wait=True, stop_cmd="stop") -> StatusBase | None:
        """Stop acquisition on the StdDAQ.

        Args:
            timeout (float): timeout for the request
            wait (bool): block until the writer reports 'idle'
            stop_cmd (str): command keyword sent over the websocket

        Returns:
            status (StatusBase): Ophyd status object with attached monitor,
            or None when ``wait`` is True.
        """
        # Ensure connection
        self.wait_for_connection()
        logger.info(f"Stopping StdDaq backend. Current status: {self.status}")
        status = StatusBase()
        self.add_status_callback(status, success=["idle"], error=["error"])
        message = {"command": stop_cmd}
        self._ws_client.send(json.dumps(message))
        if wait:
            status.wait(timeout=timeout)
            return None
        return status

    def get_config(self, timeout: float = 2, cached=False) -> dict:
        """Get the current configuration of the StdDAQ.

        Args:
            timeout (float): timeout for the request
            cached (bool): return the last fetched config without a REST call
                (may be None if no config was fetched yet)

        Returns:
            config (dict): configuration of the StdDAQ
        """
        if cached:
            return self._config
        response = requests.get(
            self.rest_url + "/api/config/get", params={"user": "ioc"}, timeout=timeout
        )
        response.raise_for_status()
        self._config = response.json()
        return self._config

    def set_config(
        self, config: dict, timeout: float = 2, update: bool = True, force: bool = True
    ) -> None:
        """
        Set the configuration of the StdDAQ. This will overwrite the current configuration.

        Args:
            config (dict): configuration to set
            timeout (float): timeout for the request
            update (bool): merge ``config`` into the current one instead of replacing it
            force (bool): push the config even when identical to the current one
        """
        old_config = self.get_config()
        if update:
            # Shallow merge of the new keys onto a copy of the current config
            cfg = copy.deepcopy(self._config)
            cfg.update(config)
            new_config = cfg
        else:
            new_config = config
        # Escape unnecesary restarts
        if not force and new_config == old_config:
            return
        if not new_config:
            return
        self._pre_restart()
        # new_jason = json.dumps(new_config)
        logger.warning(new_config)
        response = requests.post(
            self.rest_url + "/api/config/set",
            params={"user": "ioc"},
            json=new_config,
            timeout=timeout,
        )
        response.raise_for_status()
        # Setting a new config will reboot the backend; we therefore have to restart the websocket
        self._post_restart()

    def _pre_restart(self):
        """Stop monitor before restart."""
        self._daq_is_running.clear()
        # Wait for the monitor loop to park itself before closing the socket
        self._ws_idle_event.wait()
        if self._ws_client is not None:
            self._ws_client.close()

    def _post_restart(self):
        """Start monitor after a restart."""
        # Give the backend time to come back up before reconnecting
        time.sleep(2)
        self.wait_for_connection()
        self._daq_is_running.set()

    def reset(self, min_wait: float = 5) -> None:
        """
        Reset the StdDAQ by re-pushing its current configuration.

        Args:
            min_wait (float): minimum wait time after reset
        """
        self.set_config(self.get_config())
        time.sleep(min_wait)

    def wait_for_connection(self, timeout: float = 20) -> None:
        """
        Wait for the connection to the StdDAQ to be established.

        Args:
            timeout (float): timeout for the request

        Raises:
            TimeoutError: if no connection could be made within ``timeout``.
        """
        start_time = time.time()
        while True:
            if self._ws_client is not None and self._ws_client.state == State.OPEN:
                return
            try:
                self._ws_client = ws.connect(self.ws_url)
                break
            except ConnectionRefusedError as exc:
                if time.time() - start_time > timeout:
                    raise TimeoutError("Timeout while waiting for connection to StdDAQ") from exc
                time.sleep(2)

    def create_virtual_datasets(self, file_path: str, file_prefix: str, timeout: float = 5) -> None:
        """
        Combine the stddaq written files in a given folder in an interleaved
        h5 virtual dataset.

        Args:
            file_path (str): path to the folder containing the files
            file_prefix (str): prefix of the files to combine
            timeout (float): timeout for the request
        """
        # TODO: Add wait for 'idle' state
        response = requests.post(
            self.rest_url + "/api/h5/create_interleaved_vds",
            params={"user": "ioc"},
            json={
                "base_path": file_path,
                "file_prefix": file_prefix,
                "output_file": file_prefix.rstrip("_") + ".h5",
            },
            timeout=timeout,
            headers={"Content-type": "application/json"},
        )
        response.raise_for_status()

    def shutdown(self):
        """
        Shutdown the StdDAQ client: stop the monitor thread and close the socket.
        """
        logger.warning("Shutting down sdtDAQ monitor")
        self._shutdown_event.set()
        if self._ws_monitor_thread is not None:
            self._ws_monitor_thread.join()
            logger.warning("Thread joined")
        if self._ws_client is not None:
            self._ws_client.close()
            self._ws_client = None
        logger.warning("Shutdown complete")

    def _wait_for_server_running(self):
        """
        Wait for the StdDAQ to be running. If the StdDaq is not running, the
        websocket loop will be set to idle.
        """
        while not self._shutdown_event.is_set():
            if self._daq_is_running.wait(0.1):
                self._ws_idle_event.clear()
                break
            # Signal _pre_restart() that the loop is parked
            self._ws_idle_event.set()

    def _ws_monitor_loop(self):
        """Loop to update the status property of the StdDAQ.

        This is a persistent monitor that updates the status and calls attached
        callbacks. It also handles stdDAQ restarts and reconnection by itself.
        """
        if self._ws_recv_mutex.locked():
            logger.warning(f"[{self.name}] stdDAQ WS monitor loop already locked")
            return
        with self._ws_recv_mutex:
            while not self._shutdown_event.is_set():
                self._wait_for_server_running()
                try:
                    msg = self._ws_client.recv(timeout=0.1)
                    msg_timestamp = time.time()
                except TimeoutError:
                    continue
                except WebSocketException:
                    content = traceback.format_exc()
                    # TODO: this is expected to happen on every reconfiguration
                    logger.warning(f"Websocket connection closed unexpectedly: {content}")
                    self.wait_for_connection()
                    continue
                msg = json.loads(msg)
                if self._status != msg["status"]:
                    logger.warning(
                        f"[{self.name}] stdDAQ state transition: {self._status} --> {msg['status']}"
                    )
                if msg["status"] == "recording":
                    self._count = msg.get("count", 0)
                # Update status and run callbacks
                self._status = msg["status"]
                self._status_timestamp = msg_timestamp
                self._run_status_callbacks()

    def _run_status_callbacks(self):
        """
        Update the DeviceStatus objects based on the current status of the StdDAQ.
        If the status matches one of the success or error statuses, the DeviceStatus
        object will be set to finished or exception, respectively and removed from
        the list of callbacks.
        """
        status = self._status
        completed_callbacks = []
        for dev_status, success, error in self._status_callbacks.values():
            if status in success:
                dev_status.set_finished()
                logger.info(f"StdDaq status is {status}")
                completed_callbacks.append(dev_status)
            elif status in error:
                logger.warning(f"StdDaq status is {status}")
                dev_status.set_exception(StdDaqError(f"StdDaq status is {status}"))
                completed_callbacks.append(dev_status)
        for cb in completed_callbacks:
            self._status_callbacks.pop(id(cb))
# Automatically connect to microXAS testbench if directly invoked
if __name__ == "__main__":
    # pylint: disable=disallowed-name,too-few-public-methods
    class foo:
        """Dummy"""

        # StdDaqClient only reads parent.name, so a stub is sufficient
        name = "bar"

    # Connects to the testbench writer host (websocket + REST endpoints)
    daq = StdDaqClient(
        parent=foo(), ws_url="ws://129.129.95.111:8080", rest_url="http://129.129.95.111:5000"
    )

View File

@@ -0,0 +1,133 @@
import json
import threading
import time
from typing import Callable
import traceback
import numpy as np
import zmq
from bec_lib.logger import bec_logger
logger = bec_logger.logger
ZMQ_TOPIC_FILTER = b""
class StdDaqPreview:
    """Standalone stdDAQ live-preview client.

    Subscribes to the stdDAQ ZMQ PUB stream in a daemon thread, decodes the
    two-part "Array V1" messages (JSON header + raw pixel buffer) and forwards
    throttled updates to the supplied callback.
    """

    USER_ACCESS = ["start", "stop", "image", "frameno"]
    _socket = None
    _zmq_thread = None
    # Minimum seconds between processed frames (parsing + callback)
    _throttle = 0.2
    # Last decoded image and its frame index (None until first update)
    image = None
    frameno = None

    def __init__(self, url: str, cb: Callable):
        """
        Args:
            url: ZMQ PUB endpoint of the stdDAQ live stream.
            cb: callback invoked as cb(image, header) for each kept frame.
        """
        self.url = url
        self._on_update_callback = cb
        # Must be here otherwise they're static (shared between class instances)
        self._monitor_mutex = threading.Lock()
        self._shutdown_event = threading.Event()

    def connect(self):
        """Connect to the stdDAQ's PUB-SUB streaming interface.

        StdDAQ may reject connection for a few seconds when it restarts,
        so if it fails, wait a bit and try to connect again (single retry).
        """
        # pylint: disable=no-member
        context = zmq.Context()
        self._socket = context.socket(zmq.SUB)
        self._socket.setsockopt(zmq.SUBSCRIBE, ZMQ_TOPIC_FILTER)
        try:
            self._socket.connect(self.url)
        except ConnectionRefusedError:
            time.sleep(1)
            self._socket.connect(self.url)

    def start(self):
        """Start the preview thread."""
        # Only one consumer thread
        if self._zmq_thread:
            self.stop()
        self._shutdown_event.clear()
        self._zmq_thread = threading.Thread(
            target=self._zmq_monitor, daemon=True, name="StdDaq_live_preview"
        )
        self._zmq_thread.start()

    def stop(self):
        """Stop the preview and disconnect from ZMQ stream."""
        self._shutdown_event.set()
        if self._zmq_thread:
            self._zmq_thread.join()
            self._zmq_thread = None

    def _zmq_monitor(self):
        """ZMQ stream monitor loop (runs in the preview thread)."""
        # Exit if another monitor is running
        if self._monitor_mutex.locked():
            return
        with self._monitor_mutex:
            # Open a new connection
            self.connect()
            try:
                # Run the monitor loop
                t_last = time.time()
                while not self._shutdown_event.is_set():
                    try:
                        # pylint: disable=no-member
                        r = self._socket.recv_multipart(flags=zmq.NOBLOCK)
                        # Throttle parsing and callbacks
                        t_curr = time.time()
                        if t_curr - t_last > self._throttle:
                            self._parse_data(r)
                            t_last = t_curr
                    except ValueError:
                        # Happens when ZMQ partially delivers the multipart message
                        content = traceback.format_exc()
                        logger.warning(f"Websocket connection closed unexpectedly: {content}")
                        continue
                    except zmq.error.Again:
                        # Happens when receive queue is empty
                        time.sleep(0.1)
            finally:
                # Stop receiving incoming data
                self._socket.close()
                logger.warning("Detached live_preview monitoring")

    def _parse_data(self, data):
        """Decode one two-part ZMQ message and run the update callback.

        Raises:
            ValueError: on unknown pixel type or header/payload size mismatch.
        """
        # FIX: previously only warned on incomplete messages and then unpacked
        # anyway, raising ValueError; now bail out early.
        if len(data) != 2:
            logger.warning(f"Received incomplete ZMQ message of length {len(data)}")
            return
        # Unpack the Array V1 reply to metadata and array data
        meta, img_data = data
        # Update image and update subscribers
        header = json.loads(meta)
        if header["type"] == "uint16":
            image = np.frombuffer(img_data, dtype=np.uint16)
        elif header["type"] == "uint8":
            image = np.frombuffer(img_data, dtype=np.uint8)
        else:
            raise ValueError(f"Unexpected type {header['type']}")
        if image.size != np.prod(header["shape"]):
            err = f"Unexpected array size of {image.size} for header: {header}"
            raise ValueError(err)
        image = image.reshape(header["shape"])
        # Print diagnostics and run callback
        logger.info(
            f"Live update: frame {header['frame']}\tShape: {header['shape']}\t"
            f"Mean: {np.mean(image):.3f}"
        )
        self.image = image
        self.frameno = header["frame"]
        self._on_update_callback(image, header)

View File

@@ -0,0 +1,512 @@
# -*- coding: utf-8 -*-
"""
Standard DAQ control interface module through the websocket API
Created on Thu Jun 27 17:28:43 2024
@author: mohacsi_i
"""
import json
from time import sleep
from threading import Thread
import requests
import os
from ophyd import Signal, Component, Kind
from ophyd.status import SubscriptionStatus
from websockets.sync.client import connect, ClientConnection
from websockets.exceptions import ConnectionClosedOK, ConnectionClosedError
from ophyd_devices.interfaces.base_classes.psi_detector_base import PSIDetectorBase as PSIDeviceBase
from ophyd_devices.interfaces.base_classes.psi_detector_base import (
CustomDetectorMixin as CustomDeviceMixin,
)
from bec_lib import bec_logger
logger = bec_logger.logger
class StdDaqMixin(CustomDeviceMixin):
    """Stage/unstage hooks that connect the stdDAQ backend to BEC scans."""

    # pylint: disable=protected-access
    _mon = None  # status-monitor thread, alive only while an acquisition runs

    def on_stage(self) -> None:
        """Configuration and staging.

        In the BEC model ophyd devices must fish out their own configuration
        from the 'scaninfo', i.e. they need to know which parameters are
        relevant for them at each scan.
        NOTE: Tomcat might use multiple cameras with their own separate DAQ
        instances.
        """
        # Fish out our configuration from scaninfo (via explicit or generic
        # addressing). NOTE: scans don't have to fully configure the device.
        d = {}
        if "kwargs" in self.parent.scaninfo.scan_msg.info:
            scanargs = self.parent.scaninfo.scan_msg.info["kwargs"]
            if "image_width" in scanargs and scanargs["image_width"] is not None:
                d["image_width"] = scanargs["image_width"]
            if "image_height" in scanargs and scanargs["image_height"] is not None:
                d["image_height"] = scanargs["image_height"]
            if "nr_writers" in scanargs and scanargs["nr_writers"] is not None:
                d["nr_writers"] = scanargs["nr_writers"]
            if "file_path" in scanargs and scanargs["file_path"] is not None:
                # The DAQ writes through the GPFS mount point
                self.parent.file_path.set(scanargs["file_path"].replace("data", "gpfs")).wait()
                logger.info(f"[{self.parent.name}] file_path: {scanargs['file_path']}")
                if not os.path.isdir(scanargs["file_path"]):
                    logger.info(f"[{self.parent.name}] Creating {scanargs['file_path']}")
                    try:
                        os.makedirs(scanargs["file_path"], 0o777)
                        # NOTE(review): chmods scanargs["base_path"], not the
                        # directory just created — confirm this is intended.
                        # Shell command built from scan input; best-effort only.
                        os.system("chmod -R 777 " + scanargs["base_path"])
                    except (OSError, KeyError) as ex:
                        # BUGFIX: was a bare except with a print; keep the
                        # best-effort semantics but log the actual error
                        logger.warning(f"[{self.parent.name}] Problem with creating folder: {ex}")
            if "file_prefix" in scanargs and scanargs["file_prefix"] is not None:
                logger.info(f"[{self.parent.name}] file_prefix: {scanargs['file_prefix']}")
                self.parent.file_prefix.set(scanargs["file_prefix"]).wait()
            if "daq_num_points" in scanargs:
                d["num_points_total"] = scanargs["daq_num_points"]
            else:
                # Try to derive the number of points from generic scan args
                num_points = 1
                points_valid = False
                if "steps" in scanargs and scanargs["steps"] is not None:
                    num_points *= scanargs["steps"]
                    points_valid = True
                if "exp_burst" in scanargs and scanargs["exp_burst"] is not None:
                    num_points *= scanargs["exp_burst"]
                    points_valid = True
                if "repeats" in scanargs and scanargs["repeats"] is not None:
                    num_points *= scanargs["repeats"]
                    points_valid = True
                if points_valid:
                    d["num_points_total"] = num_points
        # Perform bluesky-style configuration
        if len(d) > 0:
            # Configure new run (will restart the stdDAQ)
            logger.warning(f"[{self.parent.name}] stdDAQ needs reconfiguring with:\n{d}")
            self.parent.configure(d=d)
            # Wait for the REST API to kill the DAQ
            sleep(0.5)
        # Try to start a new run (reconnects)
        self.parent.bluestage()
        # And start status monitoring
        self._mon = Thread(target=self.monitor, daemon=True)
        self._mon.start()

    def on_unstage(self):
        """Finalize the run: build the virtual dataset and stop the DAQ."""
        logger.info(f"[{self.parent.name}] Creating virtual dataset")
        self.parent.create_virtual_dataset()
        self.parent.blueunstage()

    def on_stop(self):
        """Stop a running acquisition and close the connection."""
        self.parent.blueunstage()

    def monitor(self) -> None:
        """Monitor status messages while the connection is open.

        This blocks the reply monitoring, so calling unstage() might throw.
        Status updates are sent every second, but finishing an acquisition
        means StdDAQ will close the connection, so there's no idle-state
        polling.
        """
        try:
            sleep(0.2)
            for msg in self.parent._wsclient:
                message = json.loads(msg)
                self.parent.runstatus.put(message["status"], force=True)
        except (ConnectionClosedError, ConnectionClosedOK, AssertionError):
            # The websockets library throws these after the connection closes
            return
        except Exception as ex:
            logger.warning(f"[{self.parent.name}] Exception in polling: {ex}")
            return
        finally:
            self._mon = None
class StdDaqClient(PSIDeviceBase):
    """StdDaq control API.

    Combines the stdDAQ's websocket and REST interfaces: the websocket
    interface starts and stops the acquisition and provides status, while
    the REST interface can read and write the JSON configuration file.
    The stdDAQ needs to restart all services to reconfigure with a new
    config, which might corrupt the currently written files (fix is
    underway).

    Example:
    ```
    daq = StdDaqClient(name="daq", ws_url="ws://xbl-daq-29:8080", rest_url="http://xbl-daq-29:5000")
    ```
    """

    # pylint: disable=too-many-instance-attributes
    custom_prepare_cls = StdDaqMixin

    USER_ACCESS = [
        "set_daq_config",
        "get_daq_config",
        "nuke",
        "connect",
        "message",
        "state",
        "bluestage",
        "blueunstage",
    ]

    _wsclient = None  # websocket connection, (re)opened on demand

    # Status attributes
    ws_url = Component(Signal, kind=Kind.config, metadata={"write_access": False})
    runstatus = Component(
        Signal, value="unknown", kind=Kind.normal, metadata={"write_access": False}
    )
    num_images = Component(Signal, value=10000, kind=Kind.config)
    file_path = Component(Signal, value="/gpfs/test/test-beamline", kind=Kind.config)
    file_prefix = Component(Signal, value="file", kind=Kind.config)

    # Configuration attributes (local mirror of the stdDAQ JSON config)
    rest_url = Component(Signal, kind=Kind.config, metadata={"write_access": False})
    cfg_detector_name = Component(Signal, kind=Kind.config)
    cfg_detector_type = Component(Signal, kind=Kind.config)
    cfg_bit_depth = Component(Signal, kind=Kind.config)
    cfg_pixel_height = Component(Signal, kind=Kind.config)
    cfg_pixel_width = Component(Signal, kind=Kind.config)
    cfg_nr_writers = Component(Signal, kind=Kind.config)

    def __init__(
        self,
        prefix="",
        *,
        name,
        kind=None,
        read_attrs=None,
        configuration_attrs=None,
        parent=None,
        device_manager=None,
        ws_url: str = "ws://localhost:8080",
        rest_url: str = "http://localhost:5000",
        data_source_name=None,
        **kwargs,
    ) -> None:
        """
        Args:
            ws_url: Websocket endpoint of the stdDAQ control interface.
            rest_url: REST endpoint of the stdDAQ configuration interface.
            data_source_name: Optional device name of the camera providing
                the image stream; used to cross-check the image geometry.
        """
        super().__init__(
            prefix=prefix,
            name=name,
            kind=kind,
            read_attrs=read_attrs,
            configuration_attrs=configuration_attrs,
            parent=parent,
            device_manager=device_manager,
            **kwargs,
        )
        self.ws_url.set(ws_url, force=True).wait()
        self.rest_url.set(rest_url, force=True).wait()
        self.data_source_name = data_source_name
        # Connect to the DAQ and initialize the config mirror signals
        try:
            self.get_daq_config(update=True)
        except Exception as ex:
            logger.error(f"Failed to connect to the stdDAQ REST API\n{ex}")

    def connect(self) -> ClientConnection:
        """Connect to the StdDAQ's websockets interface.

        StdDAQ may reject connection for a few seconds after a restart, so
        on failure wait a bit and retry (up to 5 times).

        Raises:
            ConnectionRefusedError: If all retries are exhausted.
        """
        num_retry = 0
        while num_retry < 5:
            try:
                logger.debug(f"[{self.name}] Connecting to stdDAQ at {self.ws_url.get()}")
                connection = connect(self.ws_url.get())
                logger.debug(f"[{self.name}] Connected to stdDAQ after {num_retry} tries")
                return connection
            except ConnectionRefusedError:
                num_retry += 1
                sleep(2)
        raise ConnectionRefusedError("The stdDAQ websocket interface refused connection 5 times.")

    def message(self, message: dict, timeout=1, wait_reply=True, client=None) -> None | str:
        """Send a message to the StdDAQ and optionally receive a reply.

        Note: finishing an acquisition means StdDAQ will close the
        connection, so there's no idle state polling.

        Args:
            message: Message payload; dicts are JSON-encoded, anything else
                is sent as ``str(message)``.
            timeout: Reply timeout in seconds.
            wait_reply: If False, return None right after sending.
            client: Unused; kept for interface compatibility.

        Returns:
            The raw reply string, or None when not waiting or on failure.
        """
        # Prepare the message (single encoding; the original did this twice)
        msg = json.dumps(message) if isinstance(message, dict) else str(message)
        # Connect if the client was destroyed
        if self._wsclient is None:
            self._wsclient = self.connect()
        # Send the message (reopen the connection if needed)
        try:
            self._wsclient.send(msg)
        except (ConnectionClosedError, ConnectionClosedOK, AttributeError):
            # Re-connect if the connection was closed
            self._wsclient = self.connect()
            self._wsclient.send(msg)
        # Wait for the reply
        reply = None
        if wait_reply:
            try:
                reply = self._wsclient.recv(timeout)
                return reply
            except (ConnectionClosedError, ConnectionClosedOK) as ex:
                self._wsclient = None
                logger.error(f"[{self.name}] WS connection was closed before reply: {ex}")
            except (TimeoutError, RuntimeError) as ex:
                logger.error(f"[{self.name}] Error in receiving ws reply: {ex}")
        return reply

    def configure(self, d: dict = None):
        """Configure the next scan with the stdDAQ.

        Parameters as 'd' dictionary, the default is unchanged.
        ----------------------------
        num_points_total : int, optional
            Number of images to be taken during each scan. Set to -1 for an
            unlimited number of images (limited by the ringbuffer size and
            backend speed).
        file_path: str, optional
            File path to save the data, usually GPFS.
        image_width : int, optional
            ROI size in the x-direction [pixels].
        image_height : int, optional
            ROI size in the y-direction [pixels].
        bit_depth: int, optional
            Image bit depth for cameras that can change it [int].
        nr_writers: int, optional
            Number of writers [int].
        """
        # BUGFIX: configure(None) used to raise TypeError on membership tests
        if d is None:
            d = {}
        # Configuration parameters
        if "image_width" in d and d["image_width"] is not None:
            self.cfg_pixel_width.set(d["image_width"]).wait()
        if "image_height" in d and d["image_height"] is not None:
            self.cfg_pixel_height.set(d["image_height"]).wait()
        if "bit_depth" in d:
            self.cfg_bit_depth.set(d["bit_depth"]).wait()
        if "nr_writers" in d and d["nr_writers"] is not None:
            self.cfg_nr_writers.set(d["nr_writers"]).wait()
        # Run parameters
        if "num_points_total" in d:
            self.num_images.set(d["num_points_total"]).wait()
        # Restart the DAQ if the image geometry / writer count changed
        cfg = self.get_daq_config()
        if (
            cfg["image_pixel_height"] != self.cfg_pixel_height.get()
            or cfg["image_pixel_width"] != self.cfg_pixel_width.get()
            or cfg["bit_depth"] != self.cfg_bit_depth.get()
            or cfg["number_of_writers"] != self.cfg_nr_writers.get()
        ):
            # Warn if the current status is not idle: a restart may corrupt files
            if self.state() != "idle":
                logger.warning(f"[{self.name}] stdDAQ reconfiguration might corrupt files")
            # Push the updated config back to the DAQ (triggers a restart)
            cfg["image_pixel_height"] = int(self.cfg_pixel_height.get())
            cfg["image_pixel_width"] = int(self.cfg_pixel_width.get())
            cfg["bit_depth"] = int(self.cfg_bit_depth.get())
            cfg["number_of_writers"] = int(self.cfg_nr_writers.get())
            self.set_daq_config(cfg)
            sleep(1)
            self.get_daq_config(update=True)

    def bluestage(self):
        """Stage the stdDAQ.

        Opens a new connection to the stdDAQ, sends the start command with
        the current configuration, waits for the first reply and checks it
        for obvious failures.

        Raises:
            RuntimeError: If the DAQ is not idle, the image geometry does
                not match the camera, or the start command failed.
        """
        # Can't stage into a running exposure
        if self.state() != "idle":
            raise RuntimeError(f"[{self.name}] stdDAQ can't stage from state: {self.state()}")
        # Make sure that the image size matches the data source
        if self.data_source_name is not None:
            cam_img_w = self.device_manager.devices[self.data_source_name].cfgRoiX.get()
            cam_img_h = self.device_manager.devices[self.data_source_name].cfgRoiY.get()
            daq_img_w = self.cfg_pixel_width.get()
            daq_img_h = self.cfg_pixel_height.get()
            if not (daq_img_w == cam_img_w and daq_img_h == cam_img_h):
                raise RuntimeError(
                    f"[{self.name}] stdDAQ image resolution ({daq_img_w} , {daq_img_h}) does not match camera with ({cam_img_w} , {cam_img_h})"
                )
            logger.warning(
                f"[{self.name}] stdDAQ image resolution ({daq_img_w} , {daq_img_h}) matches camera with ({cam_img_w} , {cam_img_h})"
            )
        file_path = self.file_path.get()
        num_images = self.num_images.get()
        file_prefix = self.file_prefix.get()
        logger.debug(f"[{self.name}] file_prefix: {file_prefix}")
        # New connection for the start command
        self._wsclient = self.connect()
        message = {
            "command": "start",
            "path": file_path,
            "file_prefix": file_prefix,
            "n_image": num_images,
        }
        reply = self.message(message)
        if reply is not None:
            reply = json.loads(reply)
            self.runstatus.set(reply["status"], force=True).wait()
            logger.info(f"[{self.name}] Start DAQ reply: {reply}")
            # BUGFIX: exact comparison — the original `in ("rejected")` was a
            # substring test against the *string* "rejected", not a tuple
            if reply["status"] == "rejected":
                # FIXME: running exposure is a nogo
                if reply["reason"] == "driver is busy!":
                    raise RuntimeError(
                        f"[{self.name}] Start stdDAQ command rejected: already running"
                    )
                # Give it more time to consolidate
                sleep(1)
            else:
                # Success!!!
                logger.info(f"[{self.name}] Started stdDAQ in: {reply['status']}")
                return
            raise RuntimeError(
                f"[{self.name}] Failed to start the stdDAQ in 1 tries, reason: {reply['reason']}"
            )
        # BUGFIX: the original subscripted a None reply here
        raise RuntimeError(f"[{self.name}] Failed to start the stdDAQ: no reply received")

    def blueunstage(self):
        """Unstage the stdDAQ.

        Opens a new connection to the stdDAQ, sends the stop command and
        polls until the DAQ reports a terminal ('idle' or 'error') state.

        Raises:
            RuntimeError: If the DAQ fails to stop in time.
        """
        ii = 0
        while ii < 10:
            # Stop the DAQ (will close connection) - reply is always "success"
            self._wsclient = self.connect()
            self.message({"command": "stop_all"}, wait_reply=False)
            # Let it consolidate
            sleep(0.2)
            # Check the final status (from a new connection)
            self._wsclient = self.connect()
            reply = self.message({"command": "status"})
            if reply is not None:
                logger.info(f"[{self.name}] DAQ status reply: {reply}")
                reply = json.loads(reply)
                if reply["status"] in ("idle", "error"):
                    # Terminal state reached
                    logger.info(f"[{self.name}] DAQ stopped on try {ii}")
                    return
                # BUGFIX: exact comparison — `in ("stop")` was a substring test
                if reply["status"] == "stop":
                    # Give it more time to stop
                    sleep(0.5)
                elif ii >= 6:
                    raise RuntimeError(f"Failed to stop StdDAQ: {reply}")
            ii += 1
        raise RuntimeError("Failed to stop StdDAQ in time")

    ##########################################################################
    # Bluesky flyer interface
    def complete(self) -> SubscriptionStatus:
        """Wait for the current run to reach a terminal runstatus
        ('idle', 'file_saved' or 'error')."""

        def is_running(*args, value, timestamp, **kwargs):
            # Subscription callback: True once a terminal state is reported
            return value in ["idle", "file_saved", "error"]

        status = SubscriptionStatus(self.runstatus, is_running, settle_time=0.5)
        return status

    def get_daq_config(self, update=False) -> dict:
        """Read the current configuration from the DAQ.

        Args:
            update: If True, also refresh the local cfg_* mirror signals.

        Raises:
            ConnectionError: If the REST call does not return HTTP 200.
        """
        r = requests.get(self.rest_url.get() + "/api/config/get", params={"user": "ioc"}, timeout=2)
        if r.status_code != 200:
            raise ConnectionError(f"[{self.name}] Error {r.status_code}:\t{r.text}")
        cfg = r.json()
        if update:
            self.cfg_detector_name.set(cfg["detector_name"]).wait()
            self.cfg_detector_type.set(cfg["detector_type"]).wait()
            self.cfg_bit_depth.set(cfg["bit_depth"]).wait()
            self.cfg_pixel_height.set(cfg["image_pixel_height"]).wait()
            self.cfg_pixel_width.set(cfg["image_pixel_width"]).wait()
            self.cfg_nr_writers.set(cfg["number_of_writers"]).wait()
        return cfg

    def set_daq_config(self, config, settle_time=1):
        """Write a full configuration to the DAQ (restarts its services).

        Raises:
            ConnectionError: If the REST call does not return HTTP 200.
        """
        url = self.rest_url.get() + "/api/config/set"
        r = requests.post(
            url,
            params={"user": "ioc"},
            json=config,
            timeout=2,
            headers={"Content-Type": "application/json"},
        )
        if r.status_code != 200:
            raise ConnectionError(f"[{self.name}] Error {r.status_code}:\t{r.text}")
        # Wait for the service to restart (and connect to make sure)
        # sleep(settle_time)
        self.connect()
        return r.json()

    def create_virtual_dataset(self):
        """Combine the stdDAQ-written files in the current folder into an
        interleaved HDF5 virtual dataset.

        Raises:
            ConnectionError: If the REST call does not return HTTP 200.
        """
        url = self.rest_url.get() + "/api/h5/create_interleaved_vds"
        file_path = self.file_path.get()
        file_prefix = self.file_prefix.get()
        r = requests.post(
            url,
            params={"user": "ioc"},
            json={
                "base_path": file_path,
                "file_prefix": file_prefix,
                "output_file": file_prefix.rstrip("_") + ".h5",
            },
            timeout=2,
            headers={"Content-type": "application/json"},
        )
        # BUGFIX: the response used to be silently ignored
        if r.status_code != 200:
            raise ConnectionError(f"[{self.name}] Error {r.status_code}:\t{r.text}")

    def nuke(self, restarttime=5):
        """Reconfigure the stdDAQ to restart the services.

        This causes systemd to kill the current DAQ service and restart it
        with the same configuration — which might corrupt the currently
        written file...
        """
        cfg = self.get_daq_config()
        self.set_daq_config(cfg)
        sleep(restarttime)

    def state(self) -> str | None:
        """Query the current system status via a throwaway websocket connection."""
        wsclient = self.connect()
        wsclient.send(json.dumps({"command": "status"}))
        r = wsclient.recv(timeout=1)
        r = json.loads(r)
        return r["status"]
# Automatically connect to microXAS testbench if directly invoked
if __name__ == "__main__":
    # Smoke test: connect to the testbench DAQ endpoints and block until
    # all signals report connected.
    daq = StdDaqClient(
        name="daq", ws_url="ws://sls-daq-001:8080", rest_url="http://sls-daq-001:5000"
    )
    daq.wait_for_connection()

View File

@@ -0,0 +1,196 @@
# -*- coding: utf-8 -*-
"""
Standard DAQ preview image stream module
Created on Thu Jun 27 17:28:43 2024
@author: mohacsi_i
"""
import json
import enum
from time import sleep, time
from threading import Thread
import zmq
import numpy as np
from ophyd import Device, Signal, Component, Kind, DeviceStatus
from ophyd_devices.interfaces.base_classes.psi_detector_base import (
CustomDetectorMixin,
PSIDetectorBase,
)
from bec_lib import bec_logger
logger = bec_logger.logger
ZMQ_TOPIC_FILTER = b''
class StdDaqPreviewState(enum.IntEnum):
    """States of the StdDaq preview ophyd device's monitor thread."""

    UNKNOWN = 0     # initial state, monitor never started
    DETACHED = 1    # monitor thread exited / not listening
    MONITORING = 2  # monitor thread attached and consuming the stream
class StdDaqPreviewMixin(CustomDetectorMixin):
    """Setup class for the standard DAQ preview stream.

    Parent class: CustomDetectorMixin
    """

    _mon = None  # polling thread, alive only while staged

    def on_stage(self):
        """Start listening for the preview data stream."""
        if self._mon is not None:
            self.parent.unstage()
            sleep(0.5)
        self.parent.connect()
        self._stop_polling = False
        self._mon = Thread(target=self.poll, daemon=True)
        self._mon.start()

    def on_unstage(self):
        """Stop a running preview."""
        if self._mon is not None:
            self._stop_polling = True
            # Might hang on recv_multipart...
            self._mon.join(timeout=1)
            # ...so also disconnect the socket
            self.parent._socket.disconnect(self.parent.url.get())

    def on_stop(self):
        """Stop a running preview."""
        self.on_unstage()

    def poll(self):
        """Collect streamed updates until asked to stop (thread body)."""
        self.parent.status.set(StdDaqPreviewState.MONITORING, force=True)
        try:
            t_last = time()
            while True:
                try:
                    # Exit the loop and finish monitoring
                    if self._stop_polling:
                        logger.info(f"[{self.parent.name}]\tDetaching monitor")
                        break
                    # pylint: disable=no-member
                    r = self.parent._socket.recv_multipart(flags=zmq.NOBLOCK)
                    # Length check: Array V1 messages are [metadata, pixel data]
                    if len(r) != 2:
                        logger.warning(
                            f"[{self.parent.name}] Received malformed array of length {len(r)}")
                        # BUGFIX: skip the malformed message instead of
                        # attempting to unpack it below
                        continue
                    # Throttling: drop frames arriving faster than configured
                    t_curr = time()
                    t_elapsed = t_curr - t_last
                    if t_elapsed < self.parent.throttle.get():
                        sleep(0.1)
                        continue
                    # Unpack the Array V1 reply to metadata and array data
                    meta, data = r
                    header = json.loads(meta)
                    if header["type"] == "uint16":
                        image = np.frombuffer(data, dtype=np.uint16)
                    elif header["type"] == "uint8":
                        # BUGFIX: uint8 streams used to hit an undefined
                        # 'image' (NameError) and kill the monitor thread
                        image = np.frombuffer(data, dtype=np.uint8)
                    else:
                        logger.warning(
                            f"[{self.parent.name}] Unexpected pixel type {header['type']}")
                        continue
                    if image.size != np.prod(header['shape']):
                        err = f"Unexpected array size of {image.size} for header: {header}"
                        raise ValueError(err)
                    image = image.reshape(header['shape'])
                    # Update the image signals and notify subscribers
                    self.parent.frame.put(header['frame'], force=True)
                    self.parent.image_shape.put(header['shape'], force=True)
                    self.parent.image.put(image, force=True)
                    self.parent._last_image = image
                    self.parent._run_subs(sub_type=self.parent.SUB_MONITOR, value=image)
                    t_last = t_curr
                    logger.info(
                        f"[{self.parent.name}] Updated frame {header['frame']}\t"
                        f"Shape: {header['shape']}\tMean: {np.mean(image):.3f}"
                    )
                except ValueError:
                    # Happens when ZMQ partially delivers the multipart message
                    pass
                except zmq.error.Again:
                    # Happens when the receive queue is empty
                    sleep(0.1)
        except Exception as ex:
            logger.info(f"[{self.parent.name}]\t{str(ex)}")
            raise
        finally:
            self._mon = None
            self.parent.status.set(StdDaqPreviewState.DETACHED, force=True)
            logger.info(f"[{self.parent.name}]\tDetaching monitor")
class StdDaqPreviewDetector(PSIDetectorBase):
    """Detector wrapper class around the StdDaq preview image stream.

    This was meant to provide a live image stream directly from the StdDAQ.
    Note that the preview stream must already be throttled in order to cope
    with the incoming data, and this Python class might throttle it further.

    You can add a preview widget to the dock by:
    cam_widget = gui.add_dock('cam_dock1').add_widget('BECFigure').image('daq_stream1')
    """

    # Methods exposed to BEC users.
    # BUGFIX: was "get_last_image", which does not exist on this class.
    USER_ACCESS = ["kickoff", "get_image"]
    # Subscription used for plotting the image
    SUB_MONITOR = "device_monitor_2d"
    _default_sub = SUB_MONITOR
    custom_prepare_cls = StdDaqPreviewMixin

    # Status attributes
    url = Component(Signal, kind=Kind.config)
    throttle = Component(Signal, value=0.25, kind=Kind.config)
    status = Component(Signal, value=StdDaqPreviewState.UNKNOWN, kind=Kind.omitted)
    frame = Component(Signal, kind=Kind.hinted)
    image_shape = Component(Signal, kind=Kind.normal)
    # FIXME: The BEC client caches the read()s from the last 50 scans
    image = Component(Signal, kind=Kind.omitted)
    _last_image = None

    def __init__(
        self, *args, url: str = "tcp://129.129.95.38:20000", parent: Device = None, **kwargs
    ) -> None:
        """
        Args:
            url: ZMQ endpoint of the stdDAQ preview stream.
            parent: Optional parent ophyd device.
        """
        super().__init__(*args, parent=parent, **kwargs)
        # Make the streamed signals read-only for users
        self.url._metadata["write_access"] = False
        self.status._metadata["write_access"] = False
        self.image._metadata["write_access"] = False
        self.frame._metadata["write_access"] = False
        self.image_shape._metadata["write_access"] = False
        self.url.set(url, force=True).wait()
        # Connect to the DAQ
        self.connect()

    def connect(self):
        """Connect to the StdDAQ's PUB-SUB streaming interface.

        StdDAQ may reject connections for a few seconds when it restarts,
        so if the first attempt fails, wait a bit and try once more.
        """
        # pylint: disable=no-member
        # Socket to talk to the server
        context = zmq.Context()
        self._socket = context.socket(zmq.SUB)
        self._socket.setsockopt(zmq.SUBSCRIBE, ZMQ_TOPIC_FILTER)
        try:
            self._socket.connect(self.url.get())
        except ConnectionRefusedError:
            sleep(1)
            self._socket.connect(self.url.get())

    def get_image(self):
        """Return the last decoded preview frame (None before the first frame)."""
        return self._last_image

    def kickoff(self) -> DeviceStatus:
        """The DAQ was not meant to be toggled; report immediate success."""
        return DeviceStatus(self, done=True, success=True, settle_time=0.1)
# Automatically connect to MicroSAXS testbench if directly invoked
if __name__ == "__main__":
    # Smoke test: subscribe to the testbench preview stream and block until
    # all signals report connected.
    daq = StdDaqPreviewDetector(url="tcp://129.129.95.111:20000", name="preview")
    daq.wait_for_connection()

View File

@@ -1,589 +0,0 @@
from __future__ import annotations
import os
from collections import deque
from typing import TYPE_CHECKING, Literal, cast
import numpy as np
from bec_lib.logger import bec_logger
from ophyd import Component as Cpt
from ophyd import Kind, Signal
from ophyd.status import AndStatus, DeviceStatus, SubscriptionStatus
from ophyd_devices.interfaces.base_classes.psi_device_base import PSIDeviceBase
from ophyd_devices.utils.bec_signals import PreviewSignal, ProgressSignal
from tomcat_bec.devices.pco_edge.pcoedge_base import CameraStatus, CameraStatusCode, PcoEdgeBase
from tomcat_bec.devices.std_daq.std_daq_client import (
StdDaqClient,
StdDaqConfigPartial,
StdDaqStatus,
)
from tomcat_bec.devices.std_daq.std_daq_live_processing import StdDaqLiveProcessing
from tomcat_bec.devices.std_daq.std_daq_preview import StdDaqPreview
logger = bec_logger.logger
if TYPE_CHECKING: # pragma: no cover
from bec_lib.devicemanager import DeviceManagerBase
from ophyd import StatusBase
# pylint: disable=too-many-instance-attributes
class PcoEdge5M(PSIDeviceBase, PcoEdgeBase):
"""Ophyd baseclass for Helge camera IOCs
This class provides wrappers for Helge's camera IOCs around SwissFEL and
for high performance SLS 2.0 cameras. The IOC's operation is a bit arcane
and there are different versions and cameras all around. So this device
only covers the absolute basics.
Probably the most important part is the configuration state machine. As
the SET_PARAMS takes care of buffer allocations it might take some time,
as well as a full re-configuration is required every time we change the
binning, roi, etc... This is automatically performed upon starting an
exposure (if it heven't been done before).
The status flag state machine during re-configuration is:
BUSY low, SET low -> BUSY high, SET low -> BUSY low, SET high -> BUSY low, SET low
UPDATE: Data sending operation modes
- Switch to ZMQ streaming by setting FILEFORMAT to ZEROMQ
- Set SAVESTART and SAVESTOP to select a ROI of image indices
- Start file transfer with FTRANSFER.
The ZMQ connection operates in PUSH-PULL mode, i.e. it needs incoming connection.
STOREMODE sets the acquisition mode:
if STOREMODE == Recorder
Fills up the buffer with images. Here SAVESTART and SAVESTOP selects a ROI
of image indices to be streamed out (i.e. maximum buffer_size number of images)
if STOREMODE == FIFO buffer
Continously streams out data using the buffer as a FIFO queue.
Here SAVESTART and SAVESTOP selects a ROI of image indices to be streamed continously
(i.e. a large SAVESTOP streams indefinitely). Note that in FIFO mode buffer reads are
destructive. to prevent this, we don't have EPICS preview
"""
USER_ACCESS = ["complete", "backend", "live_preview", "arm", "disarm"]
analysis_signal = Cpt(Signal, name="analysis_signal", kind=Kind.hinted, doc="Analysis Signal")
analysis_signal2 = Cpt(Signal, name="analysis_signal2", kind=Kind.hinted, doc="Analysis Signal")
preview = Cpt(PreviewSignal, ndim=2, name="preview", doc="Camera raw data preview signal", num_rotation_90=1, transpose=False)
preview_corrected = Cpt(
PreviewSignal,
ndim=2,
name="preview_corrected",
doc="Camera preview signal with flat and dark correction",
num_rotation_90=1, transpose=False
)
progress = Cpt(
ProgressSignal,
name="progress",
doc="Camera progress signal, used to monitor the acquisition progress",
)
# pylint: disable=too-many-arguments
def __init__(
    self,
    prefix="",
    *,
    name,
    kind=None,
    read_attrs=None,
    configuration_attrs=None,
    parent=None,
    scan_info=None,
    std_daq_rest: str | None = None,
    std_daq_ws: str | None = None,
    std_daq_live: str | None = None,
    device_manager: DeviceManagerBase | None = None,
    **kwargs,
):
    """
    Args:
        std_daq_rest: REST endpoint of the stdDAQ backend (required).
        std_daq_ws: Websocket endpoint of the stdDAQ backend (required).
        std_daq_live: Optional ZMQ endpoint of the stdDAQ preview stream;
            when given, a live preview consumer is attached.
        device_manager: BEC device manager, used for the redis connector.

    Raises:
        ValueError: If std_daq_rest or std_daq_ws is missing.
    """
    self.device_manager = device_manager
    self.connector = device_manager.connector if device_manager else None
    super().__init__(
        prefix=prefix,
        name=name,
        kind=kind,
        read_attrs=read_attrs,
        configuration_attrs=configuration_attrs,
        parent=parent,
        scan_info=scan_info,
        **kwargs,
    )
    # Configure the stdDAQ client
    if std_daq_rest is None or std_daq_ws is None:
        raise ValueError("Both std_daq_rest and std_daq_ws must be provided")
    self.live_processing = StdDaqLiveProcessing(
        parent=self, signal=self.analysis_signal, signal2=self.analysis_signal2
    )
    self.backend = StdDaqClient(parent=self, ws_url=std_daq_ws, rest_url=std_daq_rest)
    # Progress updates are driven by the backend's image counter
    self.backend.add_count_callback(self._on_count_update)
    self.live_preview = None
    self.converted_files = deque(maxlen=100)  # Store the last converted files
    self.target_files = deque(maxlen=100)  # Store the last target files
    # Saved snapshots of previous configurations, keyed by name
    self.acq_configs = {}
    if std_daq_live is not None:
        self.live_preview = StdDaqPreview(url=std_daq_live, cb=self._on_preview_update)
def configure(self, d: dict | None = None):
    """Configure the base Helge camera device
    Parameters as 'd' dictionary
    ----------------------------
    num_images : int
        Number of images to be taken during each scan. Meaning depends on
        store mode.
    exposure_time_ms : float
        Exposure time [ms], usually gets set back to 20 ms
    exposure_period_ms : float
        Exposure period [ms], up to 200 ms.
    store_mode : str
        Buffer operation mode
        *'Recorder' to record in buffer
        *'FIFO buffer' for continous streaming
    data_format : str
        Usually set to 'ZEROMQ'
    acq_mode : str
        Store mode and data format according to preconfigured settings
    """
    if d is None:
        return
    # Stop acquisition before touching the configuration
    self.stop_camera().wait(timeout=10)
    # Forward the backend-relevant subset of keys to the stdDAQ
    backend_config = StdDaqConfigPartial(**d)
    self.backend.update_config(backend_config)
    # Apply any keys that map directly onto camera components
    config = {}
    for key in self.component_names:
        val = d.get(key)
        if val is not None:
            config[key] = val
    if d.get("exp_time", 0) > 0:
        config["exposure"] = d["exp_time"] * 1000  # exposure time in ms
    super().configure(config)
    # If a pre-configured acquisition mode is specified, set it
    if "acq_mode" in d:
        self.set_acquisition_mode(d["acq_mode"])
    # State machine
    # Initial: BUSY and SET both low
    # 0. Write 1 to SET_PARAM
    # 1. BUSY goes high, SET stays low
    # 2. BUSY goes low, SET goes high
    # 3. BUSY stays low, SET goes low
    # So we need a 'negedge' on SET_PARAM
    def negedge(*, old_value, value, timestamp, **_):
        # Subscription callback: True on a high-to-low transition
        return bool(old_value and not value)

    # Subscribe and wait for the IOC to finish applying the parameters
    status = SubscriptionStatus(self.set_param, negedge, timeout=5, settle_time=0.5)
    self.set_param.set(1).wait()
    status.wait()
def set_acquisition_mode(self, acq_mode):
    """Set acquisition mode.

    Utility function to quickly select between pre-configured and tested
    acquisition modes.

    Args:
        acq_mode: One of "default", "step" or "stream".

    Raises:
        RuntimeError: If acq_mode is not a supported acquisition mode.
    """
    if acq_mode in ["default", "step"]:
        # NOTE: Trigger duration requires a consumer
        self.store_mode.set("FIFO Buffer").wait()
    # BUGFIX: was a second independent `if`, so "default"/"step" fell into
    # the `else` branch and raised after already configuring the camera
    elif acq_mode in ["stream"]:
        # NOTE: Trigger duration requires a consumer
        self.store_mode.set("FIFO Buffer").wait()
    else:
        raise RuntimeError(f"Unsupported acquisition mode: {acq_mode}")
def destroy(self):
    """Shut down the stdDAQ backend and live preview before tearing down the device."""
    self.backend.shutdown()
    if self.live_preview:
        self.live_preview.stop()
    super().destroy()
def _on_preview_update(self, img: np.ndarray):
    """Preview-stream callback: correct, analyze and publish a frame.

    Args:
        img: Raw preview frame from the stdDAQ stream.
    """
    # Flat/dark correction feeds both live analysis and the corrected preview
    corrected_img = self.live_processing.apply_flat_dark_correction(img)
    self.live_processing.on_new_data(corrected_img)
    self.preview.put(img)
    self.preview_corrected.put(corrected_img)
def _on_count_update(self, count: int):
    """Backend count-update callback; pushes the acquisition progress signal.

    Args:
        count (int): The current count of images acquired by the camera.
    """
    total = cast(int, self.num_images.get())
    finished = bool(count == total)
    self.progress.put(value=count, max_value=total, done=finished)
def acq_done(self) -> DeviceStatus:
    """
    Check if the acquisition is done. For the GigaFrost camera, this is
    done by checking the status of the backend as the camera does not
    provide any feedback about its internal state.

    Returns:
        DeviceStatus: The status of the acquisition
    """
    status = DeviceStatus(self)
    if self.backend is not None:
        # Resolve on terminal backend states; fail on rejection/error.
        # NOTE(review): if backend is None the status never resolves —
        # callers presumably rely on their own timeout; confirm.
        self.backend.add_status_callback(
            status,
            success=[StdDaqStatus.IDLE, StdDaqStatus.FILE_SAVED],
            error=[StdDaqStatus.REJECTED, StdDaqStatus.ERROR],
        )
    return status
def restart_with_new_config(
    self,
    name: str,
    file_path: str = "",
    file_name: str | None = None,
    file_suffix: str = "",
    num_images: int | None = None,
    frames_per_trigger: int | None = None,
) -> StatusBase:
    """
    Restart the camera with a new configuration.

    The values being overridden are saved under `self.acq_configs[name]` so
    they can be rolled back later with `restore_config(name)`.

    Args:
        name (str): Name of the configuration to be saved (used as the key for restore_config).
        file_path (str): New file path for the acquisition. If empty, the current file path is used.
        file_name (str | None): Full replacement for the file prefix. Mutually exclusive with file_suffix.
        file_suffix (str): Suffix appended (with "_") to the current file prefix. Mutually exclusive with file_name.
        num_images (int | None): New number of images to acquire. If None, the current number of images is used.
        frames_per_trigger (int | None): New number of frames per trigger. If None, the current value is used.

    Returns:
        StatusBase: The status of the restart operation. It resolves when the camera is ready to receive the first image.

    Raises:
        ValueError: If both file_name and file_suffix are given.
    """
    if file_name is not None and file_suffix:
        raise ValueError("Both file_name and file_suffix are specified. Please choose one.")
    # Snapshot the *current* values of every field we override, keyed by name.
    self.acq_configs[name] = {}
    conf = {}
    if file_path:
        self.acq_configs[name]["file_path"] = self.file_path.get()
        conf["file_path"] = file_path
    if file_suffix:
        self.acq_configs[name]["file_prefix"] = self.file_prefix.get()
        conf["file_prefix"] = "_".join([self.file_prefix.get(), file_suffix])
    if file_name:
        self.acq_configs[name]["file_prefix"] = self.file_prefix.get()
        conf["file_prefix"] = file_name
    if num_images is not None:
        self.acq_configs[name]["num_images"] = self.num_images.get()
        conf["num_images"] = num_images
    if frames_per_trigger is not None:
        self.acq_configs[name]["frames_per_trigger"] = self.frames_per_trigger.get()
        conf["frames_per_trigger"] = frames_per_trigger
    # Stop the camera and wait for it to become idle
    status = self.stop_camera()
    status.wait(timeout=10)
    # update the configuration
    self.configure(conf)
    # Restart the camera with the new configuration
    return self.start_camera()
def restore_config(self, name: str) -> None:
    """
    Restore a previously saved configuration (saved by restart_with_new_config).

    NOTE(review): the camera is stopped and reconfigured here but NOT
    restarted — the caller must call start_camera() afterwards. Confirm
    whether a restart was intended.

    Args:
        name (str): Name of the configuration to restore. Unknown names are
            a no-op reconfiguration (empty dict).
    """
    status = self.stop_camera()
    status.wait(timeout=10)
    # pop: a configuration can only be restored once
    config = self.acq_configs.pop(name, {})
    self.configure(config)
def update_live_processing_reference(
    self, reference_type: Literal["dark", "flat"]
) -> StatusBase:
    """
    Update the flat or dark reference for the live processing, using the
    file written by the current acquisition (self.target_file).

    Args:
        reference_type (Literal["dark", "flat"]): Type of the reference to update.
            If 'dark', the dark reference will be updated, if 'flat', the flat reference will be updated.

    Returns:
        StatusBase: The status of the update operation (runs in the background).

    Raises:
        ValueError: If reference_type is not 'dark' or 'flat'.
        RuntimeError: If live processing is not available.
    """
    if reference_type not in ["dark", "flat"]:
        raise ValueError("Invalid reference type! Must be 'dark' or 'flat'.")
    # Use the current acquisition to update the reference
    if self.live_processing is None:
        raise RuntimeError("Live processing is not available. Cannot update reference.")
    status = self.live_processing.update_reference_with_file(
        reference_type=reference_type,
        file_path=self.target_file,
        entry="tomcat-pco/data",  # type: ignore
        wait=False,  # Do not wait for the update to finish
    )
    return status
def start_camera(self) -> StatusBase:
    """
    Start the camera and the backend.

    The returned status combines two conditions: the backend reaching the
    WAITING_FOR_FIRST_IMAGE state and the camera status PV reporting RUNNING.

    Returns:
        StatusBase: The status of the startup. It resolves when the backend is ready to receive the first image.
    """
    status = DeviceStatus(self)
    # Register the backend-side condition BEFORE starting, so the state
    # transition cannot be missed.
    self.backend.add_status_callback(
        status,
        success=[StdDaqStatus.WAITING_FOR_FIRST_IMAGE],
        error=[StdDaqStatus.REJECTED, StdDaqStatus.ERROR],
    )
    self.backend.start(
        file_path=self.file_path.get(),  # type: ignore
        file_prefix=self.file_prefix.get(),  # type: ignore
        num_images=self.num_images.get(),  # type: ignore
    )
    self.camera_status.set(CameraStatus.RUNNING).wait()
    # Remember which file this acquisition will produce (used by on_complete)
    self.target_files.append(self.target_file)
    def is_running(*, value, timestamp, **_):
        # camera-side condition: status code PV reports RUNNING
        return bool(value == CameraStatusCode.RUNNING)
    camera_running_status = SubscriptionStatus(
        self.statuscode, is_running, timeout=5, settle_time=0.2
    )
    # make sure a device stop() aborts the wait instead of timing out
    self.cancel_on_stop(camera_running_status)
    return AndStatus(status, camera_running_status)
def set_idle(self) -> AndStatus:
    """Put the camera into the idle state and reset the buffer-save stop index.

    Returns:
        AndStatus: resolves when both signal writes complete.
    """
    idle_status = self.camera_status.set(CameraStatus.IDLE)
    stop_status = self.save_stop.set(0)
    return AndStatus(idle_status, stop_status)
def stop_camera(self) -> DeviceStatus:
    """Stop the camera acquisition and set it to idle state.

    Returns:
        DeviceStatus: resolves when the backend has returned to IDLE
        (immediately, if it already is idle).
    """
    self.set_idle().wait()
    status = DeviceStatus(self)
    if self.backend.status != StdDaqStatus.IDLE:
        # register the callback before issuing stop so the IDLE transition
        # cannot be missed
        self.backend.add_status_callback(
            status,
            success=[StdDaqStatus.IDLE],
            error=[StdDaqStatus.REJECTED, StdDaqStatus.ERROR],
        )
        self.backend.stop()
    else:
        # backend already idle — nothing to wait for
        status.set_finished()
    return status
@property
def target_file(self) -> str:
    """Target HDF5 file path for the current acquisition.

    Built from the file_path and file_prefix signals; a trailing underscore
    on the prefix is dropped before the ".h5" extension is appended.
    """
    prefix = cast(str, self.file_prefix.get()).removesuffix("_")
    directory = cast(str, self.file_path.get())
    return os.path.join(directory, prefix + ".h5")
########################################
# Beamline Specific Implementations #
########################################
def on_init(self) -> None:
    """
    Called when the device is initialized.

    No signals are connected at this point, thus defaults should not be set
    here but in on_connected instead. Intentionally a no-op.
    """
def on_connected(self) -> None:
    """
    Called after the device is connected and its signals are connected.

    Brings up the std_daq backend connection and, when a live preview is
    configured, starts the preview stream. Default signal values belong here.
    """
    self.backend.connect()
    preview = self.live_preview
    if preview:
        preview.start()
# pylint: disable=protected-access
def on_stage(self) -> None:
    """Configure and arm PCO.Edge camera for acquisition.

    Derives file path/prefix and image counts from the scan message and
    applies them via configure(). Bails out (with a warning) when the scan
    message is incomplete.
    """
    # If the camera is busy, stop it first
    if self.statuscode.get() != CameraStatusCode.IDLE:
        self.stop_camera()
    scan_msg = self.scan_info.msg
    if scan_msg is None or scan_msg.request_inputs is None or scan_msg.scan_parameters is None:
        # I don't think this can happen outside of tests, but just in case
        logger.warning(
            f"[{self.name}] Scan message is not available or incomplete. "
            "Cannot configure the GigaFrost camera."
        )
        self.acq_configs = {}
        return
    # merge positional inputs, kwargs and scan parameters into one dict;
    # later keys override earlier ones
    scan_args = {
        **scan_msg.request_inputs.get("inputs", {}),
        **scan_msg.request_inputs.get("kwargs", {}),
        **scan_msg.scan_parameters,
    }
    if "file_path" not in scan_args:
        scan_args["file_path"] = (
            "/gpfs/test/test-beamline"  # FIXME: This should be from the scan message
        )
    if "file_prefix" not in scan_args:
        # default prefix: <scan file basename>_<device name>[_<suffix>]
        file_base = scan_msg.info["file_components"][0].split("/")[-1]
        file_suffix = scan_msg.info.get("file_suffix") or ""
        comps = [file_base, self.name]
        if file_suffix:
            comps.append(file_suffix)
        scan_args["file_prefix"] = "_".join(comps)
    self.configure(scan_args)
    if scan_msg.scan_type == "step":
        # step scans: one burst of frames_per_trigger per scan point
        num_points = self.frames_per_trigger.get() * max(scan_msg.num_points, 1)  # type: ignore
    else:
        num_points = self.frames_per_trigger.get()
    self.num_images.set(num_points).wait()
    # reset the acquisition configs
    self.acq_configs = {}
def on_unstage(self) -> DeviceStatus | None:
"""Called while unstaging the device."""
return self.stop_camera()
def on_pre_scan(self) -> StatusBase:
"""Called right before the scan starts on all devices automatically."""
return self.start_camera()
def on_trigger(self) -> None | DeviceStatus:
    """Trigger mode operation

    Use it to repeatedly record a fixed number of frames and send it to stdDAQ. The method waits
    for the acquisition and data transfer to complete.

    NOTE: Maciej confirmed that sparse data is no problem to the stdDAQ.
    TODO: Optimize data transfer to launch at end and check completion at the beginning.

    Raises:
        RuntimeError: For non-step scans (fly-scan triggering not implemented).
    """
    # Ensure that previous data transfer finished
    # def sentIt(*args, value, timestamp, **kwargs):
    #     return value==0
    # status = SubscriptionStatus(self.file_savebusy, sentIt, timeout=120)
    # status.wait()
    # NOTE(review): scan_msg is used without a None check here, unlike
    # on_stage — confirm msg is always present during a trigger.
    scan_msg = self.scan_info.msg
    if scan_msg.scan_type == "step":
        # The PCO Edge does not support software triggering. As a result, we have to 'simulate'
        # the software triggering mechanism by leveraging the PCO's readout buffer: We limit the buffer
        # readout size (save_start/save_stop) to the number of frames we want per trigger, clear the
        # buffer and then wait for the buffer to fill up again before transfering the files to the
        # file writer (std_daq).
        # Set the readout per step scan point to the requested frames per trigger
        self.save_stop.set(self.frames_per_trigger.get()).wait()
        # Reset the buffer
        self.clear_mem.set(1, settle_time=0.1).wait()
        # Wait until the buffer fills up with enough images
        t_expected = (self.exposure.get() + self.delay.get()) * self.save_stop.get()
        def wait_acquisition(*, value, timestamp, **_):
            # buffer counter reached the requested number of frames
            num_target = self.save_stop.get()
            # logger.warning(f"{value} of {num_target}")
            return bool(value >= num_target)
        # generous timeout: at least 5 s, otherwise 5x the expected fill time
        max_wait = max(5, 5 * t_expected)
        buffer_filled_status = SubscriptionStatus(
            self.pic_buffer, wait_acquisition, timeout=max_wait, settle_time=0.2
        )
        self.cancel_on_stop(buffer_filled_status)
        buffer_filled_status.wait()
        logger.info(f"file savebusy before: {self.file_savebusy.get()}")
        def wait_sending(*, old_value, value, timestamp, **_):
            # falling edge of the save-busy PV marks the end of the transfer
            logger.info(f"old_value {old_value}, new value: {value}")
            return old_value == 1 and value == 0
        savebusy_status = SubscriptionStatus(
            self.file_savebusy, wait_sending, timeout=120, settle_time=0.2
        )
        self.cancel_on_stop(savebusy_status)
        # kick off the transfer, then block until the falling edge is seen
        self.file_transfer.set(1).wait()
        savebusy_status.wait()
    else:
        raise RuntimeError("Triggering for fly scans is not yet implemented.")
def on_complete(self) -> DeviceStatus | None:
    """Called to inquire if a device has completed a scans.

    Returns the acquisition-done status; once it resolves, a callback
    builds the interleaved virtual dataset and emits a file event.
    """
    def _create_dataset(_status: DeviceStatus):
        # idempotence guard: skip files already converted or not produced
        # by this device in the current session
        if (
            self.target_file in self.converted_files
            or self.target_file not in self.target_files
        ):
            logger.info(f"File {self.target_file} already processed or not in target files.")
            return
        self.backend.create_virtual_datasets(
            self.file_path.get(), file_prefix=self.file_prefix.get()  # type: ignore
        )
        # announce the finished file to BEC subscribers
        self._run_subs(
            sub_type=self.SUB_FILE_EVENT,
            file_path=self.target_file,
            done=True,
            successful=True,
            hinted_location={"data": "tomcat-pco/data"},
        )
        self.converted_files.append(self.target_file)
        logger.info(f"Finished writing to {self.target_file}")
    status = self.acq_done()
    status.add_callback(_create_dataset)
    return status
def on_kickoff(self) -> DeviceStatus | None:
    """Called to kickoff a device for a fly scan. Has to be called explicitly.

    Intentionally a no-op (returns None): fly-scan kickoff is not implemented.
    """
def on_stop(self) -> DeviceStatus:
"""Called when the device is stopped."""
return self.stop_camera()
# Automatically connect to test camera if directly invoked
if __name__ == "__main__":
    # Drive data collection: instantiate the device against the test-network
    # std_daq endpoints (websocket control, REST config, ZMQ live stream)
    # and block until all EPICS signals are connected.
    cam = PcoEdge5M(
        "X02DA-CCDCAM2:",
        name="mcpcam",
        std_daq_ws="ws://129.129.95.111:8081",
        std_daq_rest="http://129.129.95.111:5010",
        std_daq_live="tcp://129.129.95.111:20010",
    )
    cam.wait_for_connection()

View File

@@ -1,509 +0,0 @@
from __future__ import annotations
import copy
import enum
import json
import queue
import threading
import time
import traceback
from typing import TYPE_CHECKING, Callable, Literal
import requests
from bec_lib.logger import bec_logger
from ophyd import StatusBase
from pydantic import BaseModel, ConfigDict, Field, model_validator
from typeguard import typechecked
from websockets import State
from websockets.exceptions import WebSocketException
from websockets.sync.client import ClientConnection, connect
if TYPE_CHECKING: # pragma: no cover
from ophyd import Device
logger = bec_logger.logger
class StdDaqError(Exception):
    """Raised when the StdDAQ backend reports a failure state."""
class StdDaqStatus(str, enum.Enum):
    """
    Status of the StdDAQ.

    String-valued enum, so members compare equal to their raw string values.
    Extracted from https://git.psi.ch/controls-ci/std_detector_buffer/-/blob/master/source/std-det-driver/src/driver_state.hpp
    """

    CREATING_FILE = "creating_file"
    ERROR = "error"
    FILE_CREATED = "file_created"
    FILE_SAVED = "file_saved"
    IDLE = "idle"
    RECORDING = "recording"
    REJECTED = "rejected"
    SAVING_FILE = "saving_file"
    STARTED = "started"
    STOP = "stop"
    UNDEFINED = "undefined"
    WAITING_FOR_FIRST_IMAGE = "waiting_for_first_image"
class StdDaqConfig(BaseModel):
    """
    Configuration for the StdDAQ.

    More information can be found here: https://controls-ci.gitpages.psi.ch/std_detector_buffer/docs/Interfaces/configfile
    """

    # Mandatory fields
    detector_name: str = Field(
        description="Name of deployment - used as identifier in logging, "
        "part of the name of zmq sockets and shared memory."
    )
    detector_type: Literal["gigafrost", "eiger", "pco", "jungfrau-raw", "jungfrau-converted"]
    image_pixel_height: int
    image_pixel_width: int
    bit_depth: int
    n_modules: int
    start_udp_port: int
    module_positions: dict
    number_of_writers: int
    # Optional fields
    max_number_of_forwarders_spawned: int | None = None
    use_all_forwarders: bool | None = None
    module_sync_queue_size: int | None = None
    ram_buffer_gb: float | None = None
    delay_filter_timeout: float | None = None
    writer_user_id: int | None = None
    live_stream_configs: dict[str, dict[Literal["type", "config"], str | list]]
    log_level: Literal["debug", "info", "warning", "error", "off"] | None = Field(
        default=None,
        description="Log level for the StdDAQ. Defaults to info. Sets the logging level for services - possible values: debug, info, warning, error, off.",
    )
    stats_collection_period: float | None = Field(
        default=None,
        description="Period in seconds for printing stats into journald that are shipped to elastic. Defaults to 10. Warning too high frequency will affect the performance of the system",
    )
    model_config = ConfigDict(extra="ignore")

    @model_validator(mode="before")
    @classmethod
    def resolve_aliases(cls, values):
        """
        Map the EPICS-style ROI keys onto the std_daq image dimensions:
        'roiy' is the image height, 'roix' the image width (matching the
        aliases declared on StdDaqConfigPartial).

        BUGFIX: the original checked one key but popped the other, which
        raised KeyError when only 'roix' was present and silently dropped
        the width when both were present.
        """
        if "roiy" in values:
            values["image_pixel_height"] = values.pop("roiy")
        if "roix" in values:
            values["image_pixel_width"] = values.pop("roix")
        return values
class StdDaqConfigPartial(BaseModel):
    """
    Partial configuration for the StdDAQ.

    Every field is optional (None means "leave unchanged"); used by
    StdDaqClient.update_config to patch the active configuration.
    The 'roiy'/'roix' aliases map the EPICS ROI keys onto height/width.
    """

    detector_name: str | None = None
    detector_type: str | None = None
    n_modules: int | None = None
    bit_depth: int | None = None
    image_pixel_height: int | None = Field(default=None, alias="roiy")
    image_pixel_width: int | None = Field(default=None, alias="roix")
    start_udp_port: int | None = None
    writer_user_id: int | None = None
    max_number_of_forwarders_spawned: int | None = None
    use_all_forwarders: bool | None = None
    module_sync_queue_size: int | None = None
    number_of_writers: int | None = None
    module_positions: dict | None = None
    ram_buffer_gb: float | None = None
    delay_filter_timeout: float | None = None
    live_stream_configs: dict[str, dict[Literal["type", "config"], str | list]] | None = None
    model_config = ConfigDict(extra="ignore")
class StdDaqWsResponse(BaseModel):
    """
    Response from the StdDAQ websocket.

    Extra keys are preserved (extra="allow") since the driver may send
    additional diagnostic fields.
    """

    status: StdDaqStatus
    # human-readable explanation, typically set for error/rejected states
    reason: str | None = None
    # number of images acquired so far, when available
    count: int | None = None
    model_config = ConfigDict(extra="allow")
class StdDaqClient:
    """
    Client for the PSI std_daq detector backend.

    Commands (start/stop) are exchanged over a websocket that is serviced by a
    background thread, while configuration management and virtual-dataset
    creation go through the REST API. Status changes received on the websocket
    drive registered StatusBase and count callbacks.
    """

    USER_ACCESS = ["status", "start", "stop", "get_config", "set_config", "reset"]

    def __init__(self, parent: Device, ws_url: str, rest_url: str):
        """
        Args:
            parent (Device): owning ophyd device (used for thread naming)
            ws_url (str): websocket URL of the std_daq driver
            rest_url (str): base REST URL of the std_daq driver
        """
        self.parent = parent
        self.ws_url = ws_url
        self.rest_url = rest_url
        self.ws_client: ClientConnection | None = None
        self._status: StdDaqStatus = StdDaqStatus.UNDEFINED
        self._ws_update_thread: threading.Thread | None = None
        self._shutdown_event = threading.Event()
        # set while the websocket loop is parked (e.g. during a config restart)
        self._ws_idle_event = threading.Event()
        # cleared while the backend is rebooting; gates the websocket loop
        self._daq_is_running = threading.Event()
        self._config: StdDaqConfig | None = None
        # keyed by id(status) -> (status, success states, error states)
        self._status_callbacks: dict[
            int, tuple[StatusBase, list[StdDaqStatus], list[StdDaqStatus]]
        ] = {}
        self._count_callbacks: dict[int, Callable[[int], None]] = {}
        self._send_queue = queue.Queue()
        self._daq_is_running.set()

    @property
    def status(self) -> StdDaqStatus:
        """
        Get the current status of the StdDAQ (as reported on the websocket).
        """
        return self._status

    def add_status_callback(
        self, status: StatusBase, success: list[StdDaqStatus], error: list[StdDaqStatus]
    ):
        """
        Add a StatusBase callback for the StdDAQ. The status will be updated when the StdDAQ status changes and
        set to finished when the status matches one of the specified success statuses and to exception when the status
        matches one of the specified error statuses.

        Args:
            status (StatusBase): StatusBase object
            success (list[StdDaqStatus]): list of statuses that indicate success
            error (list[StdDaqStatus]): list of statuses that indicate error
        """
        self._status_callbacks[id(status)] = (status, success, error)

    def add_count_callback(self, callback: Callable[[int], None]) -> int:
        """
        Add a callback for the count of images acquired by the StdDAQ. The callback will be called with the count
        whenever the StdDAQ status changes and the count is available.

        Args:
            callback (Callable[[int], None]): callback function that takes an integer as argument

        Returns:
            int: ID of the callback, which can be used to remove the callback later
        """
        if not callable(callback):
            raise TypeError("Callback must be a callable function")
        max_cb_id = max(self._count_callbacks.keys(), default=0)
        self._count_callbacks[max_cb_id + 1] = callback
        return max_cb_id + 1

    def remove_count_callback(self, cb_id: int):
        """
        Remove a count callback by its ID.

        Args:
            cb_id (int): ID of the callback to remove
        """
        if cb_id in self._count_callbacks:
            del self._count_callbacks[cb_id]
        else:
            logger.warning(f"Callback with ID {cb_id} not found in StdDAQ count callbacks.")

    @typechecked
    def start(
        self, file_path: str, file_prefix: str, num_images: int, timeout: float = 20, wait=True
    ) -> StatusBase:
        """
        Start acquisition on the StdDAQ.

        Args:
            file_path (str): path to save the files
            file_prefix (str): prefix of the files
            num_images (int): number of images to acquire
            timeout (float): timeout for waiting (only used if wait=True)
            wait (bool): block until the backend is waiting for the first image

        Returns:
            StatusBase: resolves when the backend reaches WAITING_FOR_FIRST_IMAGE
        """
        logger.info(f"Starting StdDaq backend. Current status: {self.status}")
        status = StatusBase()
        # use the enum member (consistent with add_status_callback's signature);
        # equality with the raw string is preserved since StdDaqStatus is a str enum
        self.add_status_callback(status, success=[StdDaqStatus.WAITING_FOR_FIRST_IMAGE], error=[])
        message = {
            "command": "start",
            "path": file_path,
            "file_prefix": file_prefix,
            "n_image": num_images,
        }
        self._send_queue.put(message)
        if wait:
            status.wait(timeout=timeout)
        return status

    def stop(self):
        """
        Stop acquisition on the StdDAQ (asynchronously, via the send queue).
        """
        message = {"command": "stop"}
        return self._send_queue.put(message)

    def get_config(self, cached=False, timeout: float = 2) -> dict:
        """
        Get the current configuration of the StdDAQ.

        Args:
            cached (bool): whether to use the cached configuration
            timeout (float): timeout for the request

        Returns:
            dict: configuration of the StdDAQ (non-default fields only)
        """
        if cached and self._config is not None:
            # BUGFIX: return a dict here as well — the original returned the
            # pydantic model, breaking update_config's dict-based merge.
            return self._config.model_dump(exclude_defaults=True)
        response = requests.get(
            self.rest_url + "/api/config/get", params={"user": "ioc"}, timeout=timeout
        )
        response.raise_for_status()
        self._config = StdDaqConfig(**response.json())
        return self._config.model_dump(exclude_defaults=True)

    def set_config(self, config: StdDaqConfig | dict, timeout: float = 2) -> None:
        """
        Set the configuration of the StdDAQ. This will overwrite the current configuration.

        Args:
            config (StdDaqConfig | dict): configuration to set
            timeout (float): timeout for the request
        """
        if not isinstance(config, StdDaqConfig):
            config = StdDaqConfig(**config)
        out = config.model_dump(exclude_defaults=True, exclude_none=True)
        if not out:
            logger.info(
                "The provided config does not contain relevant values for the StdDaq. Skipping set_config."
            )
            return
        self._pre_restart()
        response = requests.post(
            self.rest_url + "/api/config/set", params={"user": "ioc"}, json=out, timeout=timeout
        )
        response.raise_for_status()
        # Setting a new config will reboot the backend; we therefore have to restart the websocket
        self._post_restart()

    def _pre_restart(self):
        # park the websocket loop and close the socket before the backend reboots
        self._daq_is_running.clear()
        self._ws_idle_event.wait()
        if self.ws_client is not None:
            self.ws_client.close()

    def _post_restart(self):
        # reconnect the websocket and resume the background loop
        self.wait_for_connection()
        self._daq_is_running.set()

    def update_config(self, config: StdDaqConfigPartial | dict, timeout: float = 2) -> None:
        """
        Update the configuration of the StdDAQ. This will update the current configuration.

        Args:
            config (StdDaqConfigPartial | dict): configuration to update
            timeout (float): timeout for the request
        """
        if not isinstance(config, StdDaqConfigPartial):
            config = StdDaqConfigPartial(**config)
        patch_config_dict = config.model_dump(exclude_none=True)
        if not patch_config_dict:
            return
        current_config = copy.deepcopy(self.get_config())
        new_config = copy.deepcopy(current_config)
        new_config.update(patch_config_dict)
        if current_config == new_config:
            # nothing changed — avoid an unnecessary backend reboot
            return
        self.set_config(StdDaqConfig(**new_config), timeout=timeout)

    def reset(self, min_wait: float = 5) -> None:
        """
        Reset the StdDAQ by re-applying its current configuration (forces a
        backend reboot).

        Args:
            min_wait (float): minimum wait time after reset
        """
        self.set_config(self.get_config())
        time.sleep(min_wait)

    def wait_for_connection(self, timeout: float = 20) -> None:
        """
        Wait for the websocket connection to the StdDAQ to be established,
        retrying every 2 s until the timeout elapses.

        Args:
            timeout (float): overall timeout

        Raises:
            TimeoutError: if no connection could be established within timeout
        """
        start_time = time.time()
        while True:
            if self.ws_client is not None and self.ws_client.state == State.OPEN:
                return
            try:
                self.ws_client = connect(self.ws_url)
                break
            except ConnectionRefusedError as exc:
                if time.time() - start_time > timeout:
                    raise TimeoutError("Timeout while waiting for connection to StdDAQ") from exc
                time.sleep(2)

    def create_virtual_datasets(self, file_path: str, file_prefix: str, timeout: float = 5) -> None:
        """
        Combine the stddaq written files in a given folder in an interleaved
        h5 virtual dataset.

        Args:
            file_path (str): path to the folder containing the files
            file_prefix (str): prefix of the files to combine
            timeout (float): timeout for the request
        """
        # TODO: Add wait for 'idle' state
        response = requests.post(
            self.rest_url + "/api/h5/create_interleaved_vds",
            params={"user": "ioc"},
            json={
                "base_path": file_path,
                "file_prefix": file_prefix,
                "output_file": file_prefix.rstrip("_") + ".h5",
            },
            timeout=timeout,
            headers={"Content-type": "application/json"},
        )
        response.raise_for_status()

    def connect(self):
        """
        Connect to the StdDAQ. This method should be called after the client is created. It will
        launch a background thread to exchange data with the StdDAQ. Idempotent
        while the thread is alive.
        """
        if self._ws_update_thread is not None and self._ws_update_thread.is_alive():
            return
        self._ws_update_thread = threading.Thread(
            target=self._ws_update_loop, name=f"{self.parent.name}_stddaq_ws_loop", daemon=True
        )
        self._ws_update_thread.start()

    def shutdown(self):
        """
        Shutdown the StdDAQ client: stop the background thread and close the
        websocket.
        """
        self._shutdown_event.set()
        if self._ws_update_thread is not None:
            self._ws_update_thread.join()
        if self.ws_client is not None:
            self.ws_client.close()
            self.ws_client = None

    def _wait_for_server_running(self):
        """
        Wait for the StdDAQ to be running. If the StdDaq is not running, the
        websocket loop will be set to idle (signalled via _ws_idle_event so
        _pre_restart can proceed).
        """
        while not self._shutdown_event.is_set():
            if self._daq_is_running.wait(0.1):
                self._ws_idle_event.clear()
                break
            self._ws_idle_event.set()

    def _ws_send_and_receive(self):
        # single iteration of the websocket loop: flush one queued command,
        # then poll for an incoming status message
        if not self.ws_client:
            self.wait_for_connection()
        try:
            try:
                msg = self._send_queue.get(block=False)
                logger.trace(f"Sending to stddaq ws: {msg}")
                self.ws_client.send(json.dumps(msg))
                logger.info(f"Sent to stddaq ws: {msg}")
            except queue.Empty:
                pass
            try:
                recv_msgs = self.ws_client.recv(timeout=0.1)
            except TimeoutError:
                # no message within the poll window — not an error
                return
            logger.trace(f"Received from stddaq ws: {recv_msgs}")
            if recv_msgs is not None:
                self._on_received_ws_message(recv_msgs)
        except WebSocketException:
            content = traceback.format_exc()
            logger.warning(f"Websocket connection closed unexpectedly: {content}")
            self.wait_for_connection()

    def _ws_update_loop(self):
        """
        Loop to update the status property of the StdDAQ. Runs in a daemon
        thread until shutdown() is called.
        """
        while not self._shutdown_event.is_set():
            self._wait_for_server_running()
            self._ws_send_and_receive()

    def _on_received_ws_message(self, msg: str):
        """
        Handle a message received from the StdDAQ: decode it, update the
        cached status and fire the registered callbacks.
        """
        try:
            data = StdDaqWsResponse(**json.loads(msg))
        except Exception:
            content = traceback.format_exc()
            logger.warning(f"Failed to decode websocket message: {content}")
            return
        if data.status != self._status:
            logger.info(f"std_daq_client status changed from [{self._status}] to [{data.status}]")
        self._status = data.status
        if data.count is not None:
            self._run_count_callbacks(data.count)
        self._run_status_callbacks()

    def _run_count_callbacks(self, count: int):
        """
        Run the count callbacks with the given count.
        The callbacks will be called with the count as argument; exceptions
        are logged, never propagated into the websocket loop.
        """
        callbacks_ids = list(self._count_callbacks.keys())
        for cb_id in callbacks_ids:
            # a callback may have been removed while we iterate
            if cb_id not in self._count_callbacks:
                continue
            cb = self._count_callbacks[cb_id]
            try:
                cb(count)
            except Exception as exc:
                logger.error(f"Error in StdDAQ count callback with ID {cb_id}: {exc}")

    def _run_status_callbacks(self):
        """
        Update the StatusBase objects based on the current status of the StdDAQ.
        If the status matches one of the success or error statuses, the StatusBase object will be set to finished
        or exception, respectively and removed from the list of callbacks.
        """
        status = self._status
        status_callbacks_ids = list(self._status_callbacks.keys())
        for status_id in status_callbacks_ids:
            dev_status, success, error = self._status_callbacks.get(status_id, (None, [], []))
            if dev_status is None:
                continue
            if dev_status.done:
                # resolved elsewhere (e.g. timed out) — just drop it
                logger.info("Status object already resolved. Skipping StdDaq callback.")
                self._status_callbacks.pop(status_id)
                continue
            if status in success:
                dev_status.set_finished()
                logger.info(f"StdDaq status is {status}")
                self._status_callbacks.pop(status_id)
                continue
            if status in error:
                logger.warning(f"StdDaq status is {status}")
                dev_status.set_exception(StdDaqError(f"StdDaq status is {status}"))
                self._status_callbacks.pop(status_id)

View File

@@ -1,301 +0,0 @@
from __future__ import annotations
import pathlib
import threading
from typing import TYPE_CHECKING, Literal
import h5py
import numpy as np
from bec_lib import messages
from bec_lib.endpoints import MessageEndpoints
from bec_lib.logger import bec_logger
from ophyd import StatusBase
from typeguard import typechecked
if TYPE_CHECKING: # pragma: no cover
from bec_lib.redis_connector import RedisConnector
from ophyd import Signal
from ophyd_devices.interfaces.base_classes.psi_device_base import PSIDeviceBase
logger = bec_logger.logger
class StdDaqLiveProcessing:
USER_ACCESS = ["set_enabled", "set_mode", "get_mode"]
def __init__(self, parent: PSIDeviceBase, signal: Signal, signal2: Signal):
    """
    Args:
        parent (PSIDeviceBase): device owning this processing helper
        signal (Signal): output signal receiving the processed (summed) value
        signal2 (Signal): secondary output signal (not used in the visible code)
    """
    self.parent = parent
    self.signal = signal
    self.signal2 = signal2
    # processing disabled until explicitly enabled via set_enabled
    self._enabled = False
    self._mode = "sum"
    # Redis connector from the parent's device manager, when available
    self.connector: RedisConnector | None = (
        self.parent.device_manager.connector if self.parent.device_manager else None
    )
    # cached flat/dark references keyed by f"{ref_type}_{shape}"
    self.references: dict[str, np.ndarray] = {}
def get_mode(self) -> str:
    """
    Return the currently selected processing mode.

    Returns:
        str: Current processing mode, e.g., "sum".
    """
    current_mode = self._mode
    return current_mode
@typechecked
def set_mode(self, mode: Literal["sum"]):
    """
    Select the live-processing mode.

    Args:
        mode (str): Processing mode, currently only "sum" is supported.

    Raises:
        ValueError: If the mode is not supported.
    """
    supported_modes = ("sum",)
    if mode not in supported_modes:
        raise ValueError("Unsupported mode. Only 'sum' is currently supported.")
    self._mode = mode
def set_enabled(self, value: bool):
    """
    Enable or disable live processing.

    Args:
        value (bool): True to enable, False to disable.

    Raises:
        ValueError: If value is not a strict bool.
    """
    if isinstance(value, bool):
        self._enabled = value
    else:
        raise ValueError("Enabled must be a boolean value.")
#########################################
## Live Data Processing #################
#########################################
def on_new_data(self, data: np.ndarray):
    """
    Dispatch newly received data to the active processing mode, if live
    processing is enabled.

    Args:
        data (np.ndarray): New data to process.

    Raises:
        ValueError: If the configured mode is unknown.
    """
    if not self._enabled:
        logger.info("Skipping data processing")
        return
    if self._mode == "sum":
        self.process_sum(data)
    else:
        raise ValueError(f"Unknown mode: {self._mode}")
def process_sum(self, data: np.ndarray):
    """
    Sum all elements of the data and publish the result on the output signal.

    Args:
        data (np.ndarray): Data to sum.

    Raises:
        ValueError: If data is not a numpy array.
    """
    if not isinstance(data, np.ndarray):
        raise ValueError("Data must be a numpy array.")
    # np.sum already reduces over all axes — the original's double
    # np.sum(np.sum(data)) was redundant.
    self.signal.put(np.sum(data))
########################################
## Flat and Dark Field References ######
########################################
def apply_flat_dark_correction(self, data: np.ndarray) -> np.ndarray:
    """
    Apply flat and dark field correction to the data:
    (data - dark) / |flat - dark|, clipped to non-negative values.

    Args:
        data (np.ndarray): Data to correct.

    Returns:
        np.ndarray: Corrected data.

    Raises:
        ValueError: If data is not a numpy array.
    """
    if not isinstance(data, np.ndarray):
        raise ValueError("Data must be a numpy array.")
    flat = self.get_flat(data.shape)  # type: ignore # ndarray.shape is of type _ShapeType, which is just a generic of Any
    dark = self.get_dark(data.shape)  # type: ignore
    # If flat is just ones, we simply subtract dark from data
    # (i.e. no flat reference was ever loaded — dark-only correction)
    if np.all(flat == 1):
        corrected_data = data - dark
        return corrected_data
    # Ensure that the division does not lead to division by zero
    flat_corr = np.abs(flat-dark)
    # where flat_corr == 0 the output stays 0 (from the preallocated array)
    corrected_data = np.divide(
        data - dark, flat_corr, out=np.zeros_like(data, dtype=np.float32), where=flat_corr != 0
    )
    return np.clip(corrected_data, a_min=0, a_max=None)
@typechecked
def _load_and_update_reference(
    self,
    ref_type: Literal["flat", "dark"],
    file_path: str | pathlib.PosixPath,
    entry: str,
    status: StatusBase | None = None,
) -> None:
    """
    Update the reference field with data from a file.

    Loads a 2D (or 3D, mean-reduced) dataset from an HDF5 file, caches it
    under f"{ref_type}_{shape}" and publishes it to Redis. Success/failure is
    reported on the given status object when provided.

    Args:
        ref_type (str): Type of reference, either "flat" or "dark".
        file_path (str): Path to the file containing the reference data.
        entry (str): Entry name in the file to read the data from.
        status (StatusBase | None): Status object to report progress.

    Raises:
        ValueError: If the file path is not a string or if the entry is not found.
        Exception: If there is an error reading the file or processing the data
            (re-raised when no status object was given).
    """
    try:
        ########################################################
        # Remove these lines once the mount is fixed
        if not isinstance(file_path, str):
            file_path = str(file_path)
        if file_path.startswith("/gpfs/test"):
            file_path = file_path.replace("/gpfs/test", "/data/test")
        ########################################################
        with h5py.File(file_path, "r") as file:
            if entry not in file:
                raise ValueError(f"Entry '{entry}' not found in the file.")
            data = file[entry][:]  # type: ignore
            if not isinstance(data, np.ndarray):
                raise ValueError("Data in the file must be a numpy array.")
            if data.ndim == 2:
                self.references[f"{ref_type}_{data.shape}"] = data  # type: ignore
            elif data.ndim == 3:
                # For 3D data, we take the mean across the first axis
                data = np.mean(data, axis=0)
                self.references[f"{ref_type}_{data.shape}"] = data
            else:
                raise ValueError("Data must be 2D or 3D numpy array.")
        self._publish_to_redis(data, self._redis_endpoint_name(ref_type, data.shape))  # type: ignore
        if status is not None:
            status.set_finished()
    except Exception as exc:
        if status is not None:
            # asynchronous caller: report the failure through the status object
            status.set_exception(exc)
        else:
            logger.error(f"Failed to update {ref_type} field reference from {file_path}: {exc}")
            raise
def update_reference_with_file(
    self,
    reference_type: Literal["dark", "flat"],
    file_path: str | pathlib.PosixPath,
    entry: str,
    wait=False,
) -> StatusBase:
    """
    Update the reference with a new file.

    Args:
        reference_type (Literal["dark", "flat"]): Type of reference to update.
        file_path (str): Path to the reference file.
        entry (str): Entry name in the file to read the data from.
        wait (bool): Whether to wait for the update to complete. When False,
            the load runs in a background thread.

    Returns:
        StatusBase: resolves (or errors) when the reference has been loaded.
    """
    status = StatusBase()
    if not wait:
        # If not waiting, run the update in a separate thread
        threading.Thread(
            target=self._load_and_update_reference,
            args=(reference_type, file_path, entry, status),
        ).start()
        return status
    self._load_and_update_reference(reference_type, file_path, entry, status=status)
    status.wait()
    return status
def get_flat(self, shape: tuple[int, int]) -> np.ndarray:
    """
    Return the flat-field reference for a specific shape.

    Lookup order: in-memory cache, then Redis, then a default array of ones.

    Args:
        shape (tuple[int, int]): Shape of the flat field reference to retrieve.

    Returns:
        np.ndarray: Flat field reference for the specified shape.

    Raises:
        ValueError: If shape is not a 2-tuple.
    """
    if not (isinstance(shape, tuple) and len(shape) == 2):
        raise ValueError("Shape must be a tuple of two integers.")
    cache_key = f"flat_{shape}"
    if cache_key not in self.references:
        cached = self._get_from_redis(self._redis_endpoint_name("flat", shape))
        # fall back to ones (i.e. "no flat correction") when nothing is stored
        self.references[cache_key] = cached if cached is not None else np.ones(shape)
    return self.references[cache_key]
def get_dark(self, shape: tuple[int, int]) -> np.ndarray:
    """
    Return the dark-field reference for a given image shape.

    Lookup order: in-memory cache, then Redis, then a default
    all-zeros array (which is also cached).

    Args:
        shape (tuple[int, int]): Shape of the dark field reference to retrieve.

    Returns:
        np.ndarray: Dark field reference for the specified shape.
    """
    if not (isinstance(shape, tuple) and len(shape) == 2):
        raise ValueError("Shape must be a tuple of two integers.")
    cache_key = f"dark_{shape}"
    if cache_key not in self.references:
        # Not cached locally — try Redis before falling back to a default.
        cached = self._get_from_redis(self._redis_endpoint_name("dark", shape))
        self.references[cache_key] = cached if cached is not None else np.zeros(shape)
    return self.references[cache_key]
def _redis_endpoint_name(self, ref_type: str, shape: tuple[int, int]) -> str:
return f"{self.parent.name}_{ref_type}_{shape}"
def _publish_to_redis(self, data: np.ndarray, name: str) -> None:
    """
    Publish a processed reference array to Redis.

    Args:
        data (np.ndarray): Data to publish.
        name (str): Name used as the Redis process id for the data.
    """
    if self.connector is None:
        # Without a connector there is nowhere to publish — warn and bail out.
        logger.warning("Redis connector is not set. Cannot publish data.")
        return
    payload = messages.ProcessedDataMessage(
        data={"data": data, "name": name, "shape": data.shape}
    )
    self.connector.xadd(
        MessageEndpoints.processed_data(process_id=name),
        msg_dict={"data": payload},
        max_size=1,
    )
def _get_from_redis(self, name: str) -> np.ndarray | None:
    """
    Retrieve a previously published reference array from Redis.

    Args:
        name (str): Name (Redis process id) of the data to retrieve.

    Returns:
        np.ndarray | None: The retrieved array, or None when no connector is
            set, nothing was published, or the payload is not a valid
            ProcessedDataMessage carrying a numpy array.
    """
    if self.connector is None:
        logger.warning("Redis connector is not set. Cannot retrieve data.")
        return None
    msg = self.connector.get_last(MessageEndpoints.processed_data(process_id=name))
    if not msg:
        return None
    # get_last may wrap the message in a dict under the "data" key.
    if isinstance(msg, dict):
        msg = msg.get("data")
    if not isinstance(msg, messages.ProcessedDataMessage):
        logger.error(f"Received unexpected message type: {type(msg)}")
        return None
    # msg.data may be a single payload dict or a list of them; use the first.
    entry = msg.data[0] if isinstance(msg.data, list) else msg.data
    data = entry.get("data")
    if not isinstance(data, np.ndarray):
        logger.error("Data retrieved from Redis is not a numpy array.")
        return None
    return data

View File

@@ -1,108 +0,0 @@
import json
import threading
import time
from typing import Callable
import numpy as np
import zmq
from bec_lib.logger import bec_logger
logger = bec_logger.logger
ZMQ_TOPIC_FILTER = b""
class StdDaqPreview:
    """
    Live preview client for the StdDAQ ZMQ PUB-SUB streaming interface.

    Runs a background thread that polls the stream at a throttled rate and
    forwards each decoded image to the provided callback.
    """

    USER_ACCESS = ["start", "stop"]

    def __init__(self, url: str, cb: Callable):
        """
        Args:
            url (str): ZMQ endpoint of the StdDAQ PUB socket.
            cb (Callable): Callback invoked with each decoded image (np.ndarray).
        """
        self.url = url
        self._socket = None
        self._shutdown_event = threading.Event()
        self._zmq_thread = None
        self._on_update_callback = cb

    def connect(self):
        """Connect to the StdDAQ PUB-SUB streaming interface.

        StdDAQ may reject connections for a few seconds when it restarts,
        so if the first attempt fails, wait a bit and try once more.
        """
        # pylint: disable=no-member
        context = zmq.Context()
        self._socket = context.socket(zmq.SUB)
        self._socket.setsockopt(zmq.SUBSCRIBE, ZMQ_TOPIC_FILTER)
        try:
            self._socket.connect(self.url)
        except ConnectionRefusedError:
            time.sleep(1)
            self._socket.connect(self.url)

    def start(self):
        """Start the background polling thread."""
        self._zmq_thread = threading.Thread(
            target=self._zmq_update_loop, daemon=True, name="StdDaq_live_preview"
        )
        self._zmq_thread.start()

    def stop(self):
        """Signal the polling thread to shut down and wait for it to exit."""
        self._shutdown_event.set()
        if self._zmq_thread:
            self._zmq_thread.join()
        # Release the ZMQ socket only after the polling thread is gone,
        # so nothing can still be receiving on it.
        if self._socket is not None:
            self._socket.close()
            self._socket = None

    def _zmq_update_loop(self):
        """Polling loop; runs until stop() sets the shutdown event."""
        while not self._shutdown_event.is_set():
            if self._socket is None:
                self.connect()
            try:
                self._poll()
            except ValueError:
                # Happens when ZMQ partially delivers the multipart message
                pass
            except zmq.error.Again:
                # Happens when receive queue is empty
                time.sleep(0.1)

    def _poll(self):
        """
        Poll the ZMQ socket for new data. It will throttle the data update and
        only subscribe to the topic for a single update. This is not very nice
        but it seems like there is currently no option to set the update rate on
        the backend.
        """
        if self._shutdown_event.wait(0.2):
            return
        try:
            # subscribe to the topic
            self._socket.setsockopt(zmq.SUBSCRIBE, ZMQ_TOPIC_FILTER)
            # pylint: disable=no-member
            r = self._socket.recv_multipart(flags=zmq.NOBLOCK)
            self._parse_data(r)
        finally:
            # Unsubscribe from the topic
            self._socket.setsockopt(zmq.UNSUBSCRIBE, ZMQ_TOPIC_FILTER)

    def _parse_data(self, data):
        """Decode one Array V1 multipart message and invoke the update callback."""
        # A valid message is exactly [metadata, image-bytes]. Previously a
        # malformed message was logged but then unpacked anyway, raising an
        # uninformative ValueError — return early instead.
        if len(data) != 2:
            logger.warning(f"Received malformed array of length {len(data)}")
            return
        # Unpack the Array V1 reply to metadata and array data
        meta, img_data = data
        # Update image and update subscribers
        header = json.loads(meta)
        if header["type"] == "uint16":
            image = np.frombuffer(img_data, dtype=np.uint16)
        else:
            raise ValueError(f"Unexpected type {header['type']}")
        if image.size != np.prod(header["shape"]):
            err = f"Unexpected array size of {image.size} for header: {header}"
            raise ValueError(err)
        image = image.reshape(header["shape"])
        logger.info(f"Live update: frame {header['frame']}")
        self._on_update_callback(image)

View File

@@ -1,6 +0,0 @@
# Macros
This directory is intended to store macros which will be loaded automatically when starting BEC.
Macros are small functions to make repetitive tasks easier. Functions defined in python files in this directory will be accessible from the BEC console.
Please do not put any code outside of function definitions here. If you wish for code to be automatically run when starting BEC, see the startup script at tomcat_bec/bec_ipython_client/startup/post_startup.py
For a guide on writing macros, please see: https://bec.readthedocs.io/en/latest/user/command_line_interface.html#how-to-write-a-macro

View File

@@ -1,10 +1,8 @@
from .simple_scans import AcquireDark, AcquireFlat, AcquireReferences, TomoFlyScan, TomoScan
from .tomcat_scans import TomcatSimpleSequence, TomcatSnapNStep
# from .tutorial_fly_scan import (
# # AcquireDark,
# AcquireProjections,
# AcquireRefs,
# AcquireWhite,
# TutorialFlyScanContLine,
# )
from .tutorial_fly_scan import (
AcquireDark,
AcquireProjections,
AcquireRefs,
AcquireWhite,
TutorialFlyScanContLine,
)

View File

@@ -1,448 +0,0 @@
from __future__ import annotations
import time
from typing import Literal
import numpy as np
from bec_lib.device import DeviceBase
from bec_lib.logger import bec_logger
from bec_server.scan_server.scans import AsyncFlyScanBase, LineScan, ScanBase
logger = bec_logger.logger
class TomoComponents:
    """
    Shared building blocks for the tomography scans.

    Wraps the camera restart/trigger/complete choreography and the dark/flat
    reference acquisition so the individual scan classes can reuse it. All
    public methods are generators intended to be consumed with ``yield from``
    inside a scan's instruction generators.
    """

    def __init__(self, scan: ScanBase):
        """
        Args:
            scan (ScanBase): Scan instance providing stubs, device manager and connector.
        """
        self.scan = scan
        self.stubs = scan.stubs
        self.device_manager = scan.device_manager
        self.connector = scan.device_manager.connector
        # Update the available cameras for the current scan
        self.cameras = self._get_cameras()

    def _get_cameras(self) -> list[str]:
        """Return the *names* of all enabled devices tagged as 'camera'."""
        # NOTE: the comprehension yields cam.name strings, so the return type
        # is list[str] (the previous list[DeviceBase] annotation was wrong).
        return [
            cam.name
            for cam in self.device_manager.devices.get_devices_with_tags("camera")
            if cam.enabled
        ]

    def open_shutter(self):
        """
        Open the shutter if it is closed.
        """
        logger.info("Opening shutter.")
        yield from self.stubs.set(device=["shutter"], value=[1])

    def close_shutter(self):
        """
        Close the shutter if it is open.
        """
        yield from self.stubs.set(device=["shutter"], value=[0])

    def restart_cameras(
        self,
        name: str,
        num_images: int,
        file_suffix: str = "",
        file_path: str = "",
        frames_per_trigger: int = 1,
    ):
        """
        Restart the cameras with a new configuration.

        This is typically used to reset the cameras during another scan,
        e.g. before acquiring dark or flat images.

        Args:
            name (str): Name of the configuration to restart with.
            num_images (int): Number of images to acquire.
            file_suffix (str): Suffix for the file names.
            file_path (str): Path where the files will be saved.
            frames_per_trigger (int): Number of frames to acquire per trigger.
        """
        for cam in self.cameras:
            yield from self.stubs.send_rpc_and_wait(
                device=cam,
                func_name="restart_with_new_config",
                name=name,
                file_suffix=file_suffix,
                file_path=file_path,
                num_images=num_images,
                frames_per_trigger=frames_per_trigger,
            )

    def scan_report_instructions(self):
        """
        Generate scan report instructions for the acquisition.

        This method provides the necessary instructions to listen to the
        camera progress during the scan. Yields nothing when no cameras
        are available.
        """
        if not self.cameras:
            return
        # Use the first camera or "gfcam" if available for reporting
        report_camera = "gfcam" if "gfcam" in self.cameras else self.cameras[0]
        yield from self.stubs.scan_report_instruction({"device_progress": [report_camera]})

    def complete(self):
        """
        Complete the acquisition by sending an RPC to each camera.

        This method is typically called after the acquisition is done to
        finalize the process and start writing the virtual dataset.
        """
        for cam in self.cameras:
            yield from self.stubs.send_rpc_and_wait(device=cam, func_name="on_complete")

    def restore_configs(self, name: str):
        """
        Restore the camera configurations after an acquisition.

        Args:
            name (str): Name of the configuration to restore.
        """
        for cam in self.cameras:
            yield from self.stubs.send_rpc_and_wait(
                device=cam, func_name="restore_config", name=name
            )

    def update_live_processing_references(self, ref_type: Literal["dark", "flat"]):
        """
        Update the live processing references for dark or flat images.

        Args:
            ref_type (Literal["dark", "flat"]): Type of reference to update.

        Raises:
            ValueError: If ref_type is not 'dark' or 'flat'.
        """
        if ref_type not in ["dark", "flat"]:
            raise ValueError("ref_type must be either 'dark' or 'flat'.")
        logger.info(f"Updating live processing references for {ref_type} images.")
        for cam in self.cameras:
            yield from self.stubs.send_rpc_and_wait(
                device=cam, func_name="update_live_processing_reference", reference_type=ref_type
            )

    def acquire_dark(
        self, num_images: int, exposure_time: float, name="dark", restart=True, restore=True
    ):
        """
        Acquire dark images and push them to the live processing pipeline.

        Args:
            num_images (int): Number of dark images to acquire (0 skips the acquisition).
            exposure_time (float): Exposure time for each dark image in seconds.
            name (str): Config name used when restarting/restoring the cameras.
            restart (bool): Restart the cameras with a dedicated config first.
            restore (bool): Restore the previous camera config afterwards.
        """
        if not num_images:
            return
        logger.info(f"Acquiring {num_images} dark images with exposure time {exposure_time}s.")
        self.connector.send_client_info(f"Acquiring {num_images} dark images.")
        if restart:
            yield from self.restart_cameras(
                name=name, file_suffix=name, num_images=num_images, frames_per_trigger=num_images
            )
        # yield from self.close_shutter()
        yield from self.stubs.trigger(min_wait=exposure_time * num_images)
        yield from self.complete()
        yield from self.update_live_processing_references(ref_type="dark")
        if restore:
            yield from self.restore_configs(name=name)
        # yield from self.open_shutter()
        self.connector.send_client_info("")
        logger.info("Dark image acquisition complete.")

    def acquire_flat(
        self, num_images: int, exposure_time: float, name="flat", restart=True, restore=True
    ):
        """
        Acquire flat images and push them to the live processing pipeline.

        Args:
            num_images (int): Number of flat images to acquire (0 skips the acquisition).
            exposure_time (float): Exposure time for each flat image in seconds.
            name (str): Config name used when restarting/restoring the cameras.
            restart (bool): Restart the cameras with a dedicated config first.
            restore (bool): Restore the previous camera config afterwards.
        """
        if not num_images:
            return
        logger.info(f"Acquiring {num_images} flat images with exposure time {exposure_time}s.")
        self.connector.send_client_info(f"Acquiring {num_images} flat images.")
        if restart:
            yield from self.restart_cameras(
                name=name, file_suffix=name, num_images=num_images, frames_per_trigger=num_images
            )
        # yield from self.open_shutter()
        yield from self.stubs.trigger(min_wait=exposure_time * num_images)
        yield from self.complete()
        yield from self.update_live_processing_references(ref_type="flat")
        if restore:
            yield from self.restore_configs(name=name)
        logger.info("Flat image acquisition complete.")
        self.connector.send_client_info("")

    def acquire_references(
        self, num_darks: int, num_flats: int, exp_time: float, restart=True, restore=True
    ):
        """
        Acquire dark and flat reference images back to back.

        Args:
            num_darks (int): Number of dark images to acquire (0 skips darks).
            num_flats (int): Number of flat images to acquire (0 skips flats).
            exp_time (float): Exposure time per image in seconds.
            restart (bool): Restart the cameras with a dedicated config first.
            restore (bool): Restore the previous camera config afterwards.
        """
        yield from self.acquire_dark(
            num_darks, exposure_time=exp_time, restart=restart, restore=restore
        )
        yield from self.acquire_flat(
            num_flats, exposure_time=exp_time, restart=restart, restore=restore
        )
class AcquireDark(ScanBase):
    """Standalone scan that acquires a set of dark images."""

    scan_name = "acquire_dark"
    gui_config = {"Acquisition Parameters": ["num_images", "exp_time"]}

    def __init__(self, num_images: int, exp_time: float, **kwargs):
        """
        Acquire dark images.

        Args:
            num_images (int): Number of dark images to acquire.
            exp_time (float): Exposure time for each dark image in seconds.

        Returns:
            ScanReport
        """
        # Acquire all frames in a single trigger; at least one frame per trigger.
        super().__init__(
            frames_per_trigger=num_images if num_images > 0 else 1,
            exp_time=exp_time,
            **kwargs,
        )
        self.components = TomoComponents(self)

    def scan_report_instructions(self):
        # Delegate progress reporting to the shared tomo components.
        yield from self.components.scan_report_instructions()

    def scan_core(self):
        # Cameras were already configured by the scan setup; skip the restart.
        yield from self.components.acquire_dark(
            self.frames_per_trigger, self.exp_time, restart=False
        )
class AcquireFlat(ScanBase):
    """Standalone scan that acquires a set of flat-field images."""

    scan_name = "acquire_flat"
    gui_config = {"Acquisition Parameters": ["num_images", "exp_time"]}

    def __init__(self, num_images: int, exp_time: float, **kwargs):
        """
        Acquire flat images.

        Args:
            num_images (int): Number of flat images to acquire.
            exp_time (float): Exposure time for each flat image in seconds.

        Returns:
            ScanReport
        """
        # Acquire all frames in a single trigger; at least one frame per trigger.
        super().__init__(
            frames_per_trigger=num_images if num_images > 0 else 1,
            exp_time=exp_time,
            **kwargs,
        )
        self.components = TomoComponents(self)

    def scan_report_instructions(self):
        # Delegate progress reporting to the shared tomo components.
        yield from self.components.scan_report_instructions()

    def scan_core(self):
        # Cameras were already configured by the scan setup; skip the restart.
        yield from self.components.acquire_flat(
            self.frames_per_trigger, self.exp_time, restart=False
        )
class AcquireReferences(ScanBase):
    """Scan that acquires both dark and flat reference images."""

    scan_name = "acquire_refs"
    gui_config = {"Acquisition Parameters": ["num_darks", "num_flats", "exp_time"]}

    def __init__(self, num_darks: int, num_flats: int, exp_time: float, **kwargs):
        """
        Acquire flats and darks.

        Args:
            num_darks (int): Number of dark images to acquire.
            num_flats (int): Number of flat images to acquire.
            exp_time (float): Exposure time for each image in seconds.

        Returns:
            ScanReport
        """
        super().__init__(exp_time=exp_time, **kwargs)
        self.num_darks = num_darks
        self.num_flats = num_flats
        self.components = TomoComponents(self)

    def scan_report_instructions(self):
        yield from self.components.scan_report_instructions()

    def pre_scan(self):
        # The references are taken entirely in pre_scan.
        yield from self.components.acquire_references(
            self.num_darks, self.num_flats, self.exp_time
        )

    def scan_core(self):
        # Nothing to do here — pre_scan already acquired everything.
        yield None
class TomoScan(LineScan):
    """Line scan variant that acquires dark/flat references before scanning."""

    scan_name = "tomo_line_scan"

    def __init__(
        self,
        *args,
        exp_time: float = 0,
        steps: int = None,
        relative: bool = False,
        burst_at_each_point: int = 1,
        num_darks: int = 0,
        num_flats: int = 0,
        **kwargs,
    ):
        """
        A line scan for one or more motors.

        Args:
            *args (Device, float, float): pairs of device / start position / end position
            exp_time (float): exposure time in s. Default: 0
            steps (int): number of steps. Default: 10
            relative (bool): if True, the start and end positions are relative to the current position. Default: False
            burst_at_each_point (int): number of acquisition per point. Default: 1
            num_darks (int): number of dark images acquired in pre_scan. Default: 0
            num_flats (int): number of flat images acquired in pre_scan. Default: 0

        Returns:
            ScanReport

        Examples:
            >>> scans.line_scan(dev.motor1, -5, 5, dev.motor2, -5, 5, steps=10, exp_time=0.1, relative=True)
        """
        super().__init__(
            *args,
            exp_time=exp_time,
            steps=steps,
            relative=relative,
            burst_at_each_point=burst_at_each_point,
            **kwargs,
        )
        self.num_flats = num_flats
        self.num_darks = num_darks
        self.components = TomoComponents(self)

    def pre_scan(self):
        # Take the references before handing over to the regular line scan.
        yield from self.components.acquire_dark(self.num_darks, self.exp_time, name="pre_scan_dark")
        yield from self.components.acquire_flat(self.num_flats, self.exp_time, name="pre_scan_flat")
        yield from super().pre_scan()

    # def finalize(self):
    #     yield from super().finalize()
    #     yield from self.components.acquire_dark(
    #         self.num_darks, self.exp_time, name="post_scan_dark"
    #     )
    #     yield from self.components.acquire_flat(
    #         self.num_flats, self.exp_time, name="post_scan_flat"
    #     )
class TomoFlyScan(AsyncFlyScanBase):
    """Fly scan that rotates a motor continuously while the cameras acquire."""

    scan_name = "tomo_fly_scan"
    gui_config = {
        "Motor": ["motor"],
        "Acquisition parameters": ["sample_in"],
        "Camera": ["exp_time"],
    }

    def __init__(
        self,
        motor: DeviceBase,
        start: float,
        stop: float,
        sample_in: float,
        sample_out: float,
        num_darks: int = 0,
        num_flats: int = 0,
        exp_time: float = 0,
        relative: bool = False,
        **kwargs,
    ):
        """
        A fly scan for a single motor.

        Args:
            motor (DeviceBase): The motor to scan.
            start (float): Start position.
            stop (float): Stop position.
            sample_in (float): Sample in position.
            sample_out (float): Sample out position.
            num_darks (int): Number of dark images to acquire. Default: 0
            num_flats (int): Number of flat images to acquire. Default: 0
            exp_time (float): Exposure time in seconds. Default: 0
            relative (bool): If True, the start and stop positions are relative to the current position. Default: False

        Returns:
            ScanReport

        Examples:
            >>> scans.tomo_fly_scan(dev.motor1, 0, 10, sample_in=5, sample_out=7, exp_time=0.1, num_darks=5, num_flats=5)
        """
        super().__init__(relative=relative, exp_time=exp_time, **kwargs)
        self.motor = motor
        self.start = start
        self.stop = stop
        self.sample_in = sample_in
        # NOTE(review): sample_out is stored but never used in this scan —
        # confirm whether moving the sample out (e.g. for flats) is still planned.
        self.sample_out = sample_out
        # num_darks/num_flats were previously assigned twice; once is enough.
        self.num_darks = num_darks
        self.num_flats = num_flats
        self.sample_stage = "samy"  # change to the correct sample stage device
        self.shutter = "hx"  # change to the correct shutter device
        self.components = TomoComponents(self)

    def scan_report_instructions(self):
        """
        Generate scan report instructions for the fly scan.

        This method provides the necessary instructions to listen to the camera
        progress during the scan.
        """
        # If no cameras are available, fall back to the default scan report instructions
        if not self.components.cameras:
            yield from super().scan_report_instructions()
            return
        # Use the first camera or "gfcam" if available for reporting
        report_camera = (
            "gfcam" if "gfcam" in self.components.cameras else self.components.cameras[0]
        )
        yield from self.stubs.scan_report_instruction({"device_progress": [report_camera]})

    def prepare_positions(self):
        """Set the flyer start/stop positions; the number of points is unknown up front."""
        self.positions = np.array([[self.start], [self.stop]])
        self.num_pos = None
        yield from self._set_position_offset()

    def pre_scan(self):
        """Acquire dark/flat references before the fly motion starts."""
        yield from self.components.acquire_dark(self.num_darks, self.exp_time, name="pre_scan_dark")
        yield from self.components.acquire_flat(self.num_flats, self.exp_time, name="pre_scan_flat")
        yield from super().pre_scan()

    def scan_core(self):
        """
        Core scanning logic for the fly scan.
        """
        # Open the shutter
        # yield from self.components.open_shutter()
        # Move the sample stage to the sample in position
        sample_in_status = yield from self.stubs.set(
            device=self.sample_stage, value=[self.sample_in], wait=False
        )
        # Move the rotation stage to the start position
        motor_start_status = yield from self.stubs.set(
            device=self.motor, value=[self.start], wait=False
        )
        # Wait for both movements to complete
        sample_in_status.wait()
        motor_start_status.wait()
        # Kickoff the rotation stage to start the fly scan
        flyer_status = yield from self.stubs.set(device=self.motor, value=[self.stop], wait=False)
        # Send a single trigger to kick off the camera acquisition
        yield from self.stubs.trigger()
        # Monitor the flyer status whilst reading out monitored devices (e.g. temperatures)
        while not flyer_status.done:
            yield from self.stubs.read(group="monitored", point_id=self.point_id)
            self.point_id += 1
            time.sleep(1)
        # Close the shutter after the scan is complete
        # yield from self.components.close_shutter()

View File

@@ -1,158 +0,0 @@
// Test program for simple zig-zag line scanning with PSO window output
// "enable" signal and DDC synchronized to external trigger input.
// The file expects external parameter validation.
// The PSO location arrays are set externally from EPICS PVs.
//
// Fixed controller memory layout: DDC capture buffers start at DDC_ADDR,
// PSO distance arrays start at PSO_ADDR.
#define DDC_ADDR 0x800000
#define PSO_ADDR 0x0
// Scan direction / repeat mode, selected by the caller.
enum ScanType
POS = 0
NEG = 1
POSNEG = 2
NEGPOS = 3
end
program
//////////////////////////////////////////////////////////////////////////
// External parameters - USE THESE
var $fStartPosition as real = 42
var $fScanRange as real = 180
var $iNumRepeat as integer = 13
var $eScanType as ScanType = ScanType.NEGPOS
var $iNumDdcRead as integer = 1300
var $fVelJog as real = 200
var $fVelScan as real = 150
// NOTE(review): $fAcceleration has no "as real" type unlike the others — confirm implicit typing is intended
var $fAcceleration = 500
var $fAccDistance as real = 22.5
var $eDdcTrigger as DriveDataCaptureTrigger = DriveDataCaptureTrigger.PsoEvent
//////////////////////////////////////////////////////////////////////////
// Internal parameters - do not use
var $axis as axis = ROTY
var $ii as integer
var $axisFaults as integer = 0
// Gap kept between the two DDC capture arrays in controller memory
var $iDdcSafeSpace as integer = 4096
// Set acceleration
SetupAxisRampType($axis, RampType.Linear)
SetupAxisRampValue($axis, 0, $fAcceleration)
// Set the actual scan range: pad both ends with the acceleration distance
// so the axis is at constant velocity inside the triggered region
var $fPosStart as real
var $fPosEnd as real
if $eScanType == ScanType.POS
$fPosStart = $fStartPosition - $fAccDistance
$fPosEnd = $fStartPosition + $fScanRange + $fAccDistance
elseif $eScanType == ScanType.NEG
$fPosStart = $fStartPosition + $fAccDistance
$fPosEnd = $fStartPosition - $fScanRange - $fAccDistance
elseif $eScanType == ScanType.POSNEG
$fPosStart = $fStartPosition - $fAccDistance
$fPosEnd = $fStartPosition + $fScanRange + $fAccDistance
elseif $eScanType == ScanType.NEGPOS
$fPosStart = $fStartPosition + $fAccDistance
$fPosEnd = $fStartPosition - $fScanRange - $fAccDistance
end
// Move to start position before the scan
// NOTE: Also wait for GigaFrost to start, otherwise early triggers might be missed
MoveAbsolute($axis, $fPosStart, $fVelJog)
WaitForInPosition($axis)
Dwell(2)
// Set globals for feedback (read back by the host side)
$rglobal[2] = $fPosStart
$rglobal[3] = $fPosEnd
// Configure PSO
// FIXME : When the controller is restarted
// Reset any stale PSO state before applying the new configuration
PsoDistanceConfigureInputs($axis, [PsoDistanceInput.XC4PrimaryFeedback])
PsoDistanceCounterOff($axis)
PsoDistanceEventsOff($axis)
PsoWindowConfigureEvents($axis, PsoWindowEventMode.None)
PsoWaveformOff($axis)
// Simple PSO trigger pattern: first distance skips the acceleration ramp,
// second spans the scan range (hard-coded here; presumably overwritten from
// the external EPICS PV arrays — TODO confirm)
var $iPsoArrayPosAddr as integer = PSO_ADDR
var $iPsoArrayPos[] as real = [ UnitsToCounts($axis, 22.5), UnitsToCounts($axis, 180) ]
DriveArrayWrite($axis, $iPsoArrayPos, $iPsoArrayPosAddr, length($iPsoArrayPos), DriveArrayType.PsoDistanceEventDistances)
// NOTE(review): the negative-direction array address is (base + length) * 4 —
// verify this matches the element size expected by DriveArrayWrite
var $iPsoArrayNegAddr as integer = ($iPsoArrayPosAddr + length($iPsoArrayPos)) * 4
var $iPsoArrayNeg[] as real = [ UnitsToCounts($axis, 22.5), UnitsToCounts($axis, 180) ]
DriveArrayWrite($axis, $iPsoArrayNeg, $iPsoArrayNegAddr, length($iPsoArrayNeg), DriveArrayType.PsoDistanceEventDistances)
PsoDistanceConfigureArrayDistances($axis, $iPsoArrayPosAddr, length($iPsoArrayPos), 0)
PsoDistanceCounterOn($axis)
PsoDistanceEventsOn($axis)
// Toggle mode: each PSO distance event flips the output, producing the window "enable" signal
PsoWaveformConfigureMode($axis, PsoWaveformMode.Toggle)
PsoWaveformOn($axis)
PsoOutputConfigureSource($axis, PsoOutputSource.Waveform)
// Configure Drive Data Collection: capture primary feedback (channel 0) and
// analog input 0 (channel 1) on each DDC trigger event
var $iDdcArraySize as integer = $iNumDdcRead
DriveDataCaptureConfigureInput($axis, 0, DriveDataCaptureInput.PrimaryFeedback);
DriveDataCaptureConfigureInput($axis, 1, DriveDataCaptureInput.AnalogInput0 );
DriveDataCaptureConfigureTrigger($axis, 0, $eDdcTrigger );
DriveDataCaptureConfigureTrigger($axis, 1, $eDdcTrigger );
DriveDataCaptureConfigureArray($axis, 0, DDC_ADDR, $iDdcArraySize);
// Second buffer placed after the first (8 bytes per sample) plus a safety gap
DriveDataCaptureConfigureArray($axis, 1, DDC_ADDR + $iDdcSafeSpace + 8 * $iDdcArraySize, $iDdcArraySize);
// Directly before scan
PsoDistanceCounterOn($axis)
DriveDataCaptureOn($axis, 0)
DriveDataCaptureOn($axis, 1)
///////////////////////////////////////////////////////////
// Start the actual scanning
///////////////////////////////////////////////////////////
if $eScanType == ScanType.POS || $eScanType == ScanType.NEG
PsoDistanceConfigureArrayDistances($axis, $iPsoArrayPosAddr, length($iPsoArrayPos), 0)
MoveAbsolute($axis, $fPosEnd, $fVelScan)
WaitForMotionDone($axis)
$axisFaults = StatusGetAxisItem($axis, AxisDataSignal.AxisFault)
if $axisFaults
TaskSetError(TaskGetIndex(), "AxisFault on axis ROTY")
end
elseif $eScanType == ScanType.POSNEG || $eScanType == ScanType.NEGPOS
// Bidirectional: alternate the direction each repeat, swapping PSO arrays
for $ii = 0 to ($iNumRepeat-1)
// Feedback on progress
$rglobal[4] = $ii
if ($ii % 2) == 0
PsoDistanceConfigureArrayDistances($axis, $iPsoArrayPosAddr, length($iPsoArrayPos), 0)
MoveAbsolute($axis, $fPosEnd, $fVelScan)
elseif ($ii % 2) == 1
PsoDistanceConfigureArrayDistances($axis, $iPsoArrayNegAddr, length($iPsoArrayNeg), 0)
MoveAbsolute($axis, $fPosStart, $fVelScan)
end
WaitForMotionDone($axis)
$axisFaults = StatusGetAxisItem($axis, AxisDataSignal.AxisFault)
if $axisFaults
TaskSetError(TaskGetIndex(), "AxisFault on axis ROTY")
end
Dwell(0.2)
end
end
// Directly after scan
PsoDistanceCounterOff($axis)
DriveDataCaptureOff($axis, 0)
DriveDataCaptureOff($axis, 1)
// move back to start position
MoveAbsolute($axis, $fPosStart, $fVelJog)
WaitForInPosition($axis)
Dwell(2)
end