Merge branch 'main' into 'feature/sprint_scan'

# Conflicts:
#   tomcat_bec/scans/__init__.py
This commit is contained in:
2024-10-23 11:40:04 +02:00
18 changed files with 2789 additions and 24 deletions

1
.gitignore vendored
View File

@@ -40,6 +40,7 @@ share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
*.bak
MANIFEST
# PyInstaller

28
LICENSE Normal file
View File

@@ -0,0 +1,28 @@
BSD 3-Clause License
Copyright (c) 2024, Paul Scherrer Institute
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -12,7 +12,7 @@ classifiers = [
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering",
]
dependencies = ["ophyd_devices", "bec_lib"]
dependencies = ["ophyd_devices", "bec_lib", "websockets", "pyzmq"]
[project.optional-dependencies]
dev = ["black", "isort", "coverage", "pylint", "pytest", "pytest-random-order", "ophyd_devices", "bec_server"]

View File

@@ -1,7 +1,7 @@
eyex:
readoutPriority: baseline
description: X-ray eye translation x
deviceClass: ophyd.EpicsMotor
description: X-ray eye axis X
deviceClass: tomcat_bec.devices.psimotor.EpicsMotorEC
deviceConfig:
prefix: MTEST-X05LA-ES2-XRAYEYE:M1
deviceTags:
@@ -12,8 +12,8 @@ eyex:
softwareTrigger: false
eyey:
readoutPriority: baseline
description: X-ray eye translation y
deviceClass: ophyd.EpicsMotor
description: X-ray eye axis Y
deviceClass: tomcat_bec.devices.psimotor.EpicsMotorEC
deviceConfig:
prefix: MTEST-X05LA-ES2-XRAYEYE:M2
deviceTags:
@@ -24,8 +24,8 @@ eyey:
softwareTrigger: false
eyez:
readoutPriority: baseline
description: X-ray eye translation z
deviceClass: ophyd.EpicsMotor
description: X-ray eye axis Z
deviceClass: tomcat_bec.devices.psimotor.EpicsMotorEC
deviceConfig:
prefix: MTEST-X05LA-ES2-XRAYEYE:M3
deviceTags:
@@ -47,13 +47,13 @@ femto_mean_curr:
enabled: true
readOnly: true
softwareTrigger: false
es1_roty:
description: 'Test rotation stage'
deviceClass: tomcat_bec.devices.EpicsMotorX
deviceConfig: {prefix: 'X02DA-ES1-SMP1:ROTY'}
onFailure: buffer
enabled: true
readoutPriority: monitored
#es1_roty:
# description: 'Test rotation stage'
# deviceClass: tomcat_bec.devices.EpicsMotorX
# deviceConfig: {prefix: 'X02DA-ES1-SMP1:ROTY'}
# onFailure: buffer
# enabled: false
# readoutPriority: monitored
# es1_tasks:
# description: 'AA1 task management interface'
@@ -79,11 +79,28 @@ es1_roty:
# enabled: true
# readoutPriority: monitored
camera:
description: Grashopper Camera
deviceClass: tomcat_bec.devices.GrashopperTOMCAT
#camera:
# description: Grashopper Camera
# deviceClass: tomcat_bec.devices.GrashopperTOMCAT
# deviceConfig:
# prefix: 'X02DA-PG-USB:'
# deviceTags:
# - camera
# enabled: true
# onFailure: buffer
# readOnly: false
# readoutPriority: monitored
# softwareTrigger: true
gfclient:
description: GigaFrost camera client
deviceClass: tomcat_bec.devices.GigaFrostClient
deviceConfig:
prefix: 'X02DA-PG-USB:'
prefix: 'X02DA-CAM-GF2:'
backend_url: 'http://xbl-daq-28:8080'
auto_soft_enable: true
daq_ws_url: 'ws://xbl-daq-29:8080'
daq_rest_url: 'http://xbl-daq-29:5000'
deviceTags:
- camera
enabled: true
@@ -92,3 +109,28 @@ camera:
readoutPriority: monitored
softwareTrigger: true
daq_stream0:
description: stdDAQ preview (2 every 555)
deviceClass: tomcat_bec.devices.StdDaqPreviewDetector
deviceConfig:
url: 'tcp://129.129.95.38:20002'
deviceTags:
- std-daq
enabled: true
onFailure: buffer
readOnly: false
readoutPriority: monitored
softwareTrigger: false
daq_stream1:
description: stdDAQ preview (4 at 10 Hz)
deviceClass: tomcat_bec.devices.StdDaqPreviewDetector
deviceConfig:
url: 'tcp://129.129.95.38:20001'
deviceTags:
- std-daq
enabled: true
onFailure: buffer
readOnly: false
readoutPriority: monitored
softwareTrigger: false

View File

@@ -8,3 +8,7 @@ from .aerotech.AerotechAutomation1 import (
aa1Tasks,
)
from .grashopper_tomcat import GrashopperTOMCAT
from .psimotor import EpicsMotorMR, EpicsMotorEC
from .gigafrost.gigafrostclient import GigaFrostClient
from .gigafrost.stddaq_preview import StdDaqPreviewDetector

View File

@@ -3,11 +3,6 @@
### tomcat_bec
| Device | Documentation | Module |
| :----- | :------------- | :------ |
| GrashopperTOMCAT | <br> Grashopper detector for TOMCAT<br><br> Parent class: PSIDetectorBase<br><br> class attributes:<br> custom_prepare_cls (GrashopperTOMCATSetup) : Custom detector setup class for TOMCAT,<br> inherits from CustomDetectorMixin<br> cam (SLSDetectorCam) : Detector camera<br> image (SLSImagePlugin) : Image plugin for detector<br> | [tomcat_bec.devices.grashopper_tomcat](https://gitlab.psi.ch/bec/tomcat_bec/-/blob/main/tomcat_bec/devices/grashopper_tomcat.py) |
| SLSDetectorCam | <br> SLS Detector Camera - Grashopper<br><br> Base class to map EPICS PVs to ophyd signals.<br> | [tomcat_bec.devices.grashopper_tomcat](https://gitlab.psi.ch/bec/tomcat_bec/-/blob/main/tomcat_bec/devices/grashopper_tomcat.py) |
| SLSImagePlugin | SLS Image Plugin<br><br> Image plugin for SLS detector imitating the behaviour of ImagePlugin from<br> ophyd's areadetector plugins.<br> | [tomcat_bec.devices.grashopper_tomcat](https://gitlab.psi.ch/bec/tomcat_bec/-/blob/main/tomcat_bec/devices/grashopper_tomcat.py) |
| TomcatAerotechRotation | Special motor class that provides flyer interface and progress bar. | [tomcat_bec.devices.tomcat_rotation_motors](https://gitlab.psi.ch/bec/tomcat_bec/-/blob/main/tomcat_bec/devices/tomcat_rotation_motors.py) |
| EpicsMotorX | Special motor class that provides flyer interface and progress bar. | [tomcat_bec.devices.aerotech.AerotechAutomation1](https://gitlab.psi.ch/bec/tomcat_bec/-/blob/main/tomcat_bec/devices/aerotech/AerotechAutomation1.py) |
| aa1AxisDriveDataCollection | Axis data collection<br><br> This class provides convenience wrappers around the Aerotech API's axis<br> specific data collection functionality. This module allows to record<br> hardware synchronized signals with up to 200 kHz.<br><br> The default configuration is using a fixed memory mapping allowing up to<br> 1 million recorded data points on an XC4e (this depends on controller).<br><br> Usage:<br> # Configure the DDC with default internal triggers<br> ddc = aa1AxisPsoDistance(AA1_IOC_NAME+":ROTY:DDC:", name="ddc")<br> ddc.wait_for_connection()<br> ddc.configure(d={'npoints': 5000})<br> ddc.kickoff().wait()<br> ...<br> ret = yield from ddc.collect()<br> | [tomcat_bec.devices.aerotech.AerotechAutomation1](https://gitlab.psi.ch/bec/tomcat_bec/-/blob/main/tomcat_bec/devices/aerotech/AerotechAutomation1.py) |
| aa1AxisIo | Analog / digital Input-Output<br><br> This class provides convenience wrappers around the Aerotech API's axis<br> specific IO functionality. Note that this is a low-speed API, actual work<br> should be done in AeroScript. Only one pin can be writen directly but<br> several can be polled!<br> | [tomcat_bec.devices.aerotech.AerotechAutomation1](https://gitlab.psi.ch/bec/tomcat_bec/-/blob/main/tomcat_bec/devices/aerotech/AerotechAutomation1.py) |
| aa1AxisPsoBase | Position Sensitive Output - Base class<br><br> This class provides convenience wrappers around the Aerotech IOC's PSO<br> functionality. As a base class, it's just a collection of PVs without<br> significant logic (that should be implemented in the child classes).<br> It uses event-waveform concept to produce signals on the configured<br> output pin: a specified position based event will trigger the generation<br> of a waveform on the oputput that can be either used as exposure enable,<br> as individual trigger or as a series of triggers per each event.<br> As a first approach, the module follows a simple pipeline structure:<br> Genrator --> Event --> Waveform --> Output<br><br> Specific operation modes should be implemented in child classes.<br> | [tomcat_bec.devices.aerotech.AerotechAutomation1](https://gitlab.psi.ch/bec/tomcat_bec/-/blob/main/tomcat_bec/devices/aerotech/AerotechAutomation1.py) |
@@ -17,5 +12,17 @@
| aa1DataAcquisition | Controller Data Acquisition - DONT USE at Tomcat<br><br> This class implements the controller data collection feature of the<br> Automation1 controller. This feature logs various inputs at a<br> **fixed frequency** from 1 kHz up to 200 kHz.<br> Usage:<br> 1. Start a new configuration with "startConfig"<br> 2. Add your signals with "addXxxSignal"<br> 3. Start your data collection<br> 4. Read back the recorded data with "readback"<br> | [tomcat_bec.devices.aerotech.AerotechAutomation1](https://gitlab.psi.ch/bec/tomcat_bec/-/blob/main/tomcat_bec/devices/aerotech/AerotechAutomation1.py) |
| aa1GlobalVariableBindings | Polled global variables<br><br> This class provides an interface to read/write the first few global variables<br> on the Automation1 controller. These variables are continuously polled<br> and are thus a convenient way to interface scripts with the outside word.<br> | [tomcat_bec.devices.aerotech.AerotechAutomation1](https://gitlab.psi.ch/bec/tomcat_bec/-/blob/main/tomcat_bec/devices/aerotech/AerotechAutomation1.py) |
| aa1GlobalVariables | Global variables<br><br> This class provides an interface to directly read/write global variables<br> on the Automation1 controller. These variables are accesible from script<br> files and are thus a convenient way to interface with the outside word.<br><br> Read operations take as input the memory address and the size<br> Write operations work with the memory address and value<br><br> Usage:<br> var = aa1Tasks(AA1_IOC_NAME+":VAR:", name="var")<br> var.wait_for_connection()<br> ret = var.readInt(42)<br> var.writeFloat(1000, np.random.random(1024))<br> ret_arr = var.readFloat(1000, 1024)<br><br> | [tomcat_bec.devices.aerotech.AerotechAutomation1](https://gitlab.psi.ch/bec/tomcat_bec/-/blob/main/tomcat_bec/devices/aerotech/AerotechAutomation1.py) |
| aa1TaskState | Task state monitoring API<br><br> This is the task state monitoring interface for Automation1 tasks. It<br> does not launch execution, but can wait for the execution to complete.<br> | [tomcat_bec.devices.aerotech.AerotechAutomation1](https://gitlab.psi.ch/bec/tomcat_bec/-/blob/main/tomcat_bec/devices/aerotech/AerotechAutomation1.py) |
| aa1Tasks | Task management API<br><br> The place to manage tasks and AeroScript user files on the controller.<br> You can read/write/compile/execute AeroScript files and also retrieve<br> saved data files from the controller. It will also work around an ophyd<br> bug that swallows failures.<br><br> Execution does not require to store the script in a file, it will compile<br> it and run it directly on a certain thread. But there's no way to<br> retrieve the source code.<br><br> Write a text into a file on the aerotech controller and execute it with kickoff.<br> '''<br> script="var $axis as axis = ROTY\nMoveAbsolute($axis, 42, 90)"<br> tsk = aa1Tasks(AA1_IOC_NAME+":TASK:", name="tsk")<br> tsk.wait_for_connection()<br> tsk.configure({'text': script, 'filename': "foobar.ascript", 'taskIndex': 4})<br> tsk.kickoff().wait()<br> '''<br><br> Just execute an ascript file already on the aerotech controller.<br> '''<br> tsk = aa1Tasks(AA1_IOC_NAME+":TASK:", name="tsk")<br> tsk.wait_for_connection()<br> tsk.configure({'filename': "foobar.ascript", 'taskIndex': 4})<br> tsk.kickoff().wait()<br> '''<br><br> | [tomcat_bec.devices.aerotech.AerotechAutomation1](https://gitlab.psi.ch/bec/tomcat_bec/-/blob/main/tomcat_bec/devices/aerotech/AerotechAutomation1.py) |
| aa1TaskState | Task state monitoring API<br><br> This is the task state monitoring interface for Automation1 tasks. It<br> does not launch execution, but can wait for the execution to complete.<br> | [tomcat_bec.devices.aerotech.AerotechAutomation1](https://gitlab.psi.ch/bec/tomcat_bec/-/blob/main/tomcat_bec/devices/aerotech/AerotechAutomation1.py) |
| EpicsMotorEC | Detailed ECMC EPICS motor class<br><br> Special motor class to provide additional functionality for ECMC based motors. <br> It exposes additional diagnostic fields and includes basic error management.<br> | [tomcat_bec.devices.psimotor](https://gitlab.psi.ch/bec/tomcat_bec/-/blob/main/tomcat_bec/devices/psimotor.py) |
| EpicsMotorMR | Extended EPICS Motor class<br><br> Special motor class that exposes additional motor record functionality.<br> It extends EpicsMotor base class to provide some simple status checks <br> before movement.<br> | [tomcat_bec.devices.psimotor](https://gitlab.psi.ch/bec/tomcat_bec/-/blob/main/tomcat_bec/devices/psimotor.py) |
| EpicsMotorX | Special motor class that provides flyer interface and progress bar. | [tomcat_bec.devices.aerotech.AerotechAutomation1](https://gitlab.psi.ch/bec/tomcat_bec/-/blob/main/tomcat_bec/devices/aerotech/AerotechAutomation1.py) |
| GigaFrostCamera | Ophyd device class to control Gigafrost cameras at Tomcat<br><br> The actual hardware is implemented by an IOC based on an old fork of Helge's<br> cameras. This means that the camera behaves differently than the SF cameras<br> in particular it provides even less feedback about it's internal progress.<br> Helge will update the GigaFrost IOC after working beamline.<br> The ophyd class is based on the 'gfclient' package and has a lot of Tomcat<br> specific additions. It does behave differently though, as ophyd swallows the<br> errors from failed PV writes.<br><br> Parameters<br> ----------<br> use_soft_enable : bool<br> Flag to use the camera's soft enable (default: False)<br> backend_url : str<br> Backend url address necessary to set up the camera's udp header.<br> (default: http://xbl-daq-23:8080)<br><br> Bugs:<br> ----------<br> FRAMERATE : Ignored in soft trigger mode, period becomes 2xExposure time<br> | [tomcat_bec.devices.gigafrost.gigafrostcamera](https://gitlab.psi.ch/bec/tomcat_bec/-/blob/main/tomcat_bec/devices/gigafrost/gigafrostcamera.py) |
| GigaFrostClient | Ophyd device class to control Gigafrost cameras at Tomcat<br><br> The actual hardware is implemented by an IOC based on an old fork of Helge's<br> cameras. This means that the camera behaves differently than the SF cameras<br> in particular it provides even less feedback about it's internal progress.<br> Helge will update the GigaFrost IOC after working beamline.<br> The ophyd class is based on the 'gfclient' package and has a lot of Tomcat<br> specific additions. It does behave differently though, as ophyd swallows the<br> errors from failed PV writes.<br><br> Parameters<br> ----------<br> use_soft_enable : bool<br> Flag to use the camera's soft enable (default: False)<br> backend_url : str<br> Backend url address necessary to set up the camera's udp header.<br> (default: http://xbl-daq-23:8080)<br><br> Usage:<br> ----------<br> gf = GigaFrostClient(<br> "X02DA-CAM-GF2:", name="gf2", backend_url="http://xbl-daq-28:8080", auto_soft_enable=True,<br> daq_ws_url="ws://xbl-daq-29:8080", daq_rest_url="http://xbl-daq-29:5000"<br> )<br><br> Bugs:<br> ----------<br> FRAMERATE : Ignored in soft trigger mode, period becomes 2xexposure time<br> | [tomcat_bec.devices.gigafrost.gigafrostclient](https://gitlab.psi.ch/bec/tomcat_bec/-/blob/main/tomcat_bec/devices/gigafrost/gigafrostclient.py) |
| GrashopperTOMCAT | <br> Grashopper detector for TOMCAT<br><br> Parent class: PSIDetectorBase<br><br> class attributes:<br> custom_prepare_cls (GrashopperTOMCATSetup) : Custom detector setup class for TOMCAT,<br> inherits from CustomDetectorMixin<br> cam (SLSDetectorCam) : Detector camera<br> image (SLSImagePlugin) : Image plugin for detector<br> | [tomcat_bec.devices.grashopper_tomcat](https://gitlab.psi.ch/bec/tomcat_bec/-/blob/main/tomcat_bec/devices/grashopper_tomcat.py) |
| SLSDetectorCam | <br> SLS Detector Camera - Grashopper<br><br> Base class to map EPICS PVs to ophyd signals.<br> | [tomcat_bec.devices.grashopper_tomcat](https://gitlab.psi.ch/bec/tomcat_bec/-/blob/main/tomcat_bec/devices/grashopper_tomcat.py) |
| SLSImagePlugin | SLS Image Plugin<br><br> Image plugin for SLS detector imitating the behaviour of ImagePlugin from<br> ophyd's areadetector plugins.<br> | [tomcat_bec.devices.grashopper_tomcat](https://gitlab.psi.ch/bec/tomcat_bec/-/blob/main/tomcat_bec/devices/grashopper_tomcat.py) |
| StdDaqClient | StdDaq API<br><br> This class combines the new websocket and REST interfaces of the stdDAQ that <br> were meant to replace the documented python client. The websocket interface<br> starts and stops the acquisition and provides status, while the REST<br> interface can read and write the configuration. The DAQ needs to restart<br> all services to reconfigure with a new config.<br><br> The websocket provides status updates about a running acquisition but the <br> interface breaks connection at the end of the run.<br><br> The standard DAQ configuration is a single JSON file locally autodeployed<br> to the DAQ servers (as root!!!). It can only be written through a REST API <br> that is semi-supported. The DAQ might be distributed across several servers, <br> we'll only interface with the primary REST interface will synchronize with <br> all secondary REST servers. In the past this was a source of problems.<br><br> Example:<br> '''<br> daq = StdDaqClient(name="daq", ws_url="ws://xbl-daq-29:8080", rest_url="http://xbl-daq-29:5000")<br> '''<br> | [tomcat_bec.devices.gigafrost.stddaq_client](https://gitlab.psi.ch/bec/tomcat_bec/-/blob/main/tomcat_bec/devices/gigafrost/stddaq_client.py) |
| StdDaqPreviewDetector | Detector wrapper class around the StdDaq preview image stream.<br><br> This was meant to provide live image stream directly from the StdDAQ.<br> Note that the preview stream must be already throtled in order to cope<br> with the incoming data and the python class might throttle it further.<br><br> You can add a preview widget to the dock by:<br> cam_widget = gui.add_dock('cam_dock1').add_widget('BECFigure').image('daq_stream1')<br> | [tomcat_bec.devices.gigafrost.stddaq_preview](https://gitlab.psi.ch/bec/tomcat_bec/-/blob/main/tomcat_bec/devices/gigafrost/stddaq_preview.py) |
| StdDaqRestClient | Wrapper class around the new StdDaq REST interface.<br><br> This was meant to extend the websocket inteface that replaced the documented<br> python client. It is used as a part of the StdDaqClient aggregator class. <br> Good to know that the stdDAQ restarts all services after reconfiguration.<br><br> The standard DAQ configuration is a single JSON file locally autodeployed<br> to the DAQ servers (as root!!!). It can only be written through the REST API <br> via standard HTTP requests. The DAQ might be distributed across several servers, <br> we'll only interface with the primary REST interface will synchronize with <br> all secondary REST servers. In the past this was a source of problems.<br><br> Example:<br> '''<br> daqcfg = StdDaqRestClient(name="daqcfg", rest_url="http://xbl-daq-29:5000")<br> '''<br> | [tomcat_bec.devices.gigafrost.stddaq_rest](https://gitlab.psi.ch/bec/tomcat_bec/-/blob/main/tomcat_bec/devices/gigafrost/stddaq_rest.py) |
| TomcatAerotechRotation | Special motor class that provides flyer interface and progress bar. | [tomcat_bec.devices.tomcat_rotation_motors](https://gitlab.psi.ch/bec/tomcat_bec/-/blob/main/tomcat_bec/devices/tomcat_rotation_motors.py) |

View File

@@ -0,0 +1,56 @@
# -*- coding: utf-8 -*-
"""
Copied utility class from gfclient
Created on Thu Jun 27 17:28:43 2024
@author: mohacsi_i
"""
from enum import IntEnum
# Allowed string values for the GigaFrost operation-mode signals
# (enable / exposure / trigger / fixed-number-of-frames), as accepted
# by the camera configuration interface.
gf_valid_enable_modes = ("soft", "external", "soft+ext", "always")
gf_valid_exposure_modes = ("external", "timer", "soft")
gf_valid_trigger_modes = ("auto", "external", "timer", "soft")
gf_valid_fix_nframe_modes = ("off", "start", "end", "start+end")
# STATUS
# Operation states, built via the functional IntEnum API; member names and
# values are identical to the original class-based definition.
GfStatus = IntEnum(
    "GfStatus",
    [
        ("NEW", 1),
        ("INITIALIZED", 2),
        ("ACQUIRING", 3),
        ("CONFIGURED", 4),
        ("STOPPED", 5),
        ("INIT", 6),
    ],
)
GfStatus.__doc__ = """Operation states for GigaFrost Ophyd device"""
# BACKEND ADDRESSES
# Per-backend network parameters for the GigaFrost UDP image stream:
# *_DAFL_CLIENT is the backend HTTP endpoint, *_MAC / *_IP describe the
# north/south interfaces (MACs as byte lists, IPs as octet lists), used
# to build the camera's UDP header table.
BE1_DAFL_CLIENT = "http://xbl-daq-33:8080"
BE1_NORTH_MAC = [0x94, 0x40, 0xC9, 0xB4, 0xB8, 0x00]
BE1_SOUTH_MAC = [0x94, 0x40, 0xC9, 0xB4, 0xA8, 0xD8]
BE1_NORTH_IP = [10, 4, 0, 102]
BE1_SOUTH_IP = [10, 0, 0, 102]
BE2_DAFL_CLIENT = "http://xbl-daq-23:8080"
BE2_NORTH_MAC = [0x24, 0xBE, 0x05, 0xAC, 0x03, 0x62]
BE2_SOUTH_MAC = [0x24, 0xBE, 0x05, 0xAC, 0x03, 0x72]
BE2_NORTH_IP = [10, 4, 0, 100]
BE2_SOUTH_IP = [10, 0, 0, 100]
BE3_DAFL_CLIENT = "http://xbl-daq-26:8080"
BE3_NORTH_MAC = [0x50, 0x65, 0xF3, 0x81, 0x66, 0x51]
BE3_SOUTH_MAC = [0x50, 0x65, 0xF3, 0x81, 0xD5, 0x31]  # ens4
BE3_NORTH_IP = [10, 4, 0, 101]
BE3_SOUTH_IP = [10, 0, 0, 101]
# Backend for MicroXAS test-stand
# NOTE(review): BE999 reuses the BE3 IP octets — confirm that is intended.
BE999_DAFL_CLIENT = "http://xbl-daq-28:8080"
BE999_SOUTH_MAC = [0x9C, 0xDC, 0x71, 0x47, 0xE5, 0xD1]  # 9c:dc:71:47:e5:d1
BE999_NORTH_MAC = [0x9C, 0xDC, 0x71, 0x47, 0xE5, 0xDD]  # 9c:dc:71:47:e5:dd
BE999_NORTH_IP = [10, 4, 0, 101]
BE999_SOUTH_IP = [10, 0, 0, 101]
# GF Names
# Short identifiers for the three GigaFrost cameras.
GF1 = "gf1"
GF2 = "gf2"
GF3 = "gf3"

View File

@@ -0,0 +1,236 @@
# -*- coding: utf-8 -*-
"""
Copied utility class from gfclient
Created on Thu Jun 27 17:28:43 2024
@author: mohacsi_i
"""
# Not installed on BEC server
#import jsonschema
# GigaFrost exposure-time limits in milliseconds.
MIN_EXPOSURE_MS = 0.002
MAX_EXPOSURE_MS = 40
# ROI limits and granularity in pixels: x comes in 48 px steps,
# y in 4 px steps.
MIN_ROIX = 48
MAX_ROIX = 2016
STEP_ROIX = 48
MIN_ROIY = 4
MAX_ROIY = 2016
STEP_ROIY = 4
# All ROI sizes accepted by the camera (inclusive of the maximum).
valid_roix = range(MIN_ROIX, MAX_ROIX + 1, STEP_ROIX)
valid_roiy = range(MIN_ROIY, MAX_ROIY + 1, STEP_ROIY)
def is_valid_url(url):
    """Very small sanity check: GigaFrost backend URLs are plain http."""
    # FIXME: do more checks?
    prefix = "http://"
    return url[: len(prefix)] == prefix
def is_valid_exposure_ms(exp_t):
    """Return True if the exposure time is valid for the GigaFrost.

    exp_t: exposure time in milliseconds (inclusive bounds
    MIN_EXPOSURE_MS .. MAX_EXPOSURE_MS).
    """
    if exp_t < MIN_EXPOSURE_MS:
        return False
    return exp_t <= MAX_EXPOSURE_MS
def port2byte(port):
    """Split a 16-bit port number into its [high, low] bytes (big endian)."""
    high_byte = (port >> 8) & 0xFF
    low_byte = port & 0xFF
    return [high_byte, low_byte]
def extend_header_table(table, mac, destination_ip, destination_port, source_port):
    """Append one UDP header entry to the table (in place).

    Each entry is the concatenation of the MAC bytes, the destination IP
    octets, and the big-endian byte pairs of the destination and source
    ports.

    Parameters
    ----------
    table :
        The table to be extended (mutated in place and also returned)
    mac :
        The mac address for the new entry
    destination_ip :
        The destination IP address for the new entry
    destination_port :
        The destination port for the new entry
    source_port :
        The source port for the new entry
    """
    segments = (
        mac,
        destination_ip,
        port2byte(destination_port),
        port2byte(source_port),
    )
    for segment in segments:
        table.extend(segment)
    return table
def is_valid_roi(roiy, roix):
    """Validate a (roiy, roix) ROI against the GigaFrost size/step grid."""
    if roiy not in valid_roiy:
        return False
    return roix in valid_roix
def _print_max_framerate(exposure, roix, roiy):
    """Print one line with the maximum GigaFrost framerate for a given ROI."""
    rate = max_framerate_hz(exposure, roix=roix, roiy=roiy)
    print("roiy=%4i roix=%4i exposure=%6.3fms: %8.1fHz" % (roiy, roix, exposure, rate))
def print_max_framerate(exposure_ms=MIN_EXPOSURE_MS, shape="square"):
    """Print the maximum GigaFrost framerate over the valid ROI grid.

    One line per ROI size is printed for the given exposure time; `shape`
    selects which family of ROIs is swept.
    """
    valid_shapes = ["square", "landscape", "portrait"]
    if shape not in valid_shapes:
        raise ValueError("shape must be one of %s" % valid_shapes)
    if shape == "square":
        for size in valid_roix:
            _print_max_framerate(exposure_ms, size, size)
    elif shape == "portrait":
        for width in valid_roix:
            _print_max_framerate(exposure_ms, roix=width, roiy=MAX_ROIY)
    else:
        # "landscape": valid_roix is a subset of valid_roiy; sweeping the
        # smaller set keeps the number of printed lines manageable.
        for height in valid_roix:
            _print_max_framerate(exposure_ms, roix=MAX_ROIX, roiy=height)
def max_framerate_hz(exposure_ms=0.002, roix=2016, roiy=2016, clk_mhz=62.5):
    """Return the maximum achievable frame rate in auto trigger mode, in Hz.

    Parameters
    ----------
    exposure_ms : float
        Exposure time in milliseconds, within [0.002, 40]. Default is the
        camera minimum (MIN_EXPOSURE_MS).
    roix, roiy : int
        Requested ROI size in pixels; rounded up internally to the camera
        granularity of 48 px in x and 4 px in y. Defaults are the full
        frame (MAX_ROIX x MAX_ROIY = 2016 x 2016).
    clk_mhz : float
        Pixel clock in MHz, one of 62.5 | 55.0 | 52.5 | 50.0.

    Raises
    ------
    ValueError
        If the exposure time or the clock rate is out of range.

    Notes
    -----
    Formula provided by Gerd Theidel; in the other trigger modes one should
    stay somewhat below this value, as described in the PCO manual.
    Summarized with the current constants:
        t_readout = ( ((roi_x / 24) + 14) * (roi_y / 4) + 405) / (1e6 * clk_mhz)
        t_exp_sys = (exposure_ms / 1000.0) + (261 / (1e6 * clk_mhz))
        framerate = 1.0 / max(t_readout, t_exp_sys)
    """
    # pylint: disable=invalid-name
    # pylint: disable=too-many-locals
    if exposure_ms < 0.002 or exposure_ms > 40:
        raise ValueError("exposure_ms not in interval [0.002, 40.]")
    valid_clock_values = [62.5, 55.0, 52.5, 50.0]
    if clk_mhz not in valid_clock_values:
        raise ValueError("clock rate not in %s" % valid_clock_values)
    # Constants
    PLB_IMG_SENS_COUNT_X_OFFS = 2
    PLB_IMG_SENS_COUNT_Y_OFFS = 1
    # Constant Clock Cycles
    CC_T2 = 88  # 99
    CC_T3 = 125
    CC_T4 = 1
    CC_T10 = 2
    CC_T11 = 4
    CC_T5_MINUS_T11 = 20
    CC_T15_MINUS_T10 = 3
    CC_TR3 = 1
    CC_T13_MINUS_TR3 = 2
    CC_150NS = 7  # additional delay through states
    CC_DELAY_BEFORE_RESET = 4  # at least 50 ns
    CC_ADDITIONAL_TSYS = 10
    CC_PIX_RESET_LENGTH = 40  # at least 40 CLK Cycles at 62.5 MHz
    CC_COUNT_X_MAX = 84
    CC_ROW_OVERHEAD_TIME = 11 + CC_TR3 + CC_T13_MINUS_TR3
    CC_FRAME_OVERHEAD_TIME = (
        8 + CC_T15_MINUS_T10 + CC_T10 + CC_T2 + CC_T3 + CC_T4 + CC_T5_MINUS_T11 + CC_T11
    )
    CC_INTEGRATION_START_DELAY = (
        CC_COUNT_X_MAX
        + CC_ROW_OVERHEAD_TIME
        + CC_DELAY_BEFORE_RESET
        + CC_PIX_RESET_LENGTH
        + CC_150NS
        + 5
    )
    CC_TIME_TSYS = CC_FRAME_OVERHEAD_TIME + CC_ADDITIONAL_TSYS
    # Round the ROI up to the sensor granularity: 12 pixel blocks per quarter
    # in x (48 px total), 2 line blocks per quarter in y (4 px total).
    # BUGFIX: floor division is required here - with Python 3 true division,
    # ((roix + 47) / 48) * 48 just reproduces roix + 47 as a float instead of
    # rounding up to the next multiple of 48 (likely a Python 2 leftover).
    roix = max(((roix + 47) // 48) * 48, PLB_IMG_SENS_COUNT_X_OFFS * 24)
    roiy = max(((roiy + 3) // 4) * 4, PLB_IMG_SENS_COUNT_Y_OFFS * 4)
    t_readout = (
        CC_INTEGRATION_START_DELAY
        + CC_FRAME_OVERHEAD_TIME
        + ((roix / 24) + CC_ROW_OVERHEAD_TIME) * (roiy / 4)
    ) / (1e6 * clk_mhz)
    t_exp_sys = (exposure_ms / 1000.0) + (CC_TIME_TSYS / (1e6 * clk_mhz))
    # with constants as of time of writing:
    # t_readout = ( ((roix / 24) + 14) * (roiy / 4) + 405) / (1e6 * clk_mhz)
    # t_exp_sys = (exposure_ms / 1000.0) + (261 / (1e6 * clk_mhz))
    framerate = 1.0 / max(t_readout, t_exp_sys)
    return framerate
# JSON schema (draft-04) for the std-daq deployment layout file: a mapping
# of layout names (alphanumerics, '_', '.', '-') to entries that must carry
# exactly the six string fields listed under "required".
layoutSchema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "patternProperties": {
        "^[a-zA-Z0-9_.-]*$": {
            "type": "object",
            "required": [
                "writer",
                "DaflClient",
                "zmq_stream",
                "live_preview",
                "ioc_name",
                "description",
            ],
            "properties": {
                "writer": {"type": "string"},
                "DaflClient": {"type": "string"},
                "zmq_stream": {"type": "string"},
                "live_preview": {"type": "string"},
                "ioc_name": {"type": "string"},
                "description": {"type": "string"},
            },
        }
    },
    # Top-level keys that don't match the name pattern are rejected.
    "additionalProperties": False,
}
def validate_json(json_data):
    """Silently validate JSON data against the predefined layout schema.

    NOTE: jsonschema is not installed on the BEC server, so the actual
    validation below is disabled and the function always reports success.
    """
    # Re-enable once jsonschema is available on the BEC server:
    # try:
    #     jsonschema.validate(instance=json_data, schema=layoutSchema)
    # except jsonschema.exceptions.ValidationError:
    #     return False
    return True

View File

@@ -0,0 +1,737 @@
# -*- coding: utf-8 -*-
"""
GigaFrost camera class module
Created on Thu Jun 27 17:28:43 2024
@author: mohacsi_i
"""
from time import sleep
from ophyd import Signal, Component, EpicsSignal, EpicsSignalRO, Kind, DeviceStatus
from ophyd.device import Staged
from ophyd_devices.interfaces.base_classes.psi_detector_base import (
CustomDetectorMixin,
PSIDetectorBase,
)
try:
import gfconstants as const
except ModuleNotFoundError:
import tomcat_bec.devices.gigafrost.gfconstants as const
try:
from gfutils import extend_header_table
except ModuleNotFoundError:
from tomcat_bec.devices.gigafrost.gfutils import extend_header_table
try:
from bec_lib import bec_logger
logger = bec_logger.logger
except ModuleNotFoundError:
import logging
logger = logging.getLogger("GfCam")
class GigaFrostCameraMixin(CustomDetectorMixin):
"""Mixin class to setup TOMCAT specific implementations of the detector.
This class will be called by the custom_prepare_cls attribute of the
detector class.
"""
def _define_backend_ip(self):
    """Select the (north, south) backend IP addresses for the UDP stream.

    The pair is chosen by matching the configured backend URL against the
    known DAFL client endpoints from gfconstants.

    Raises:
        RuntimeError: If the configured backend URL is not recognized.
    """
    # NOTE(review): gfconstants defines BE3_DAFL_CLIENT as xbl-daq-26;
    # the "xbl-daq-33" label below looks stale (that is BE1) - confirm.
    if self.parent.backendUrl.get() == const.BE3_DAFL_CLIENT:  # xbl-daq-33
        return const.BE3_NORTH_IP, const.BE3_SOUTH_IP
    # BE999 is the MicroXAS test-stand backend (xbl-daq-28).
    if self.parent.backendUrl.get() == const.BE999_DAFL_CLIENT:
        return const.BE999_NORTH_IP, const.BE999_SOUTH_IP
    raise RuntimeError(f"Backend {self.parent.backendUrl.get()} not recognized.")
def _define_backend_mac(self):
    """Select the (north, south) backend MAC addresses for the UDP stream.

    Same backend-URL matching as _define_backend_ip, but returns the MAC
    byte lists instead of the IP octets.

    Raises:
        RuntimeError: If the configured backend URL is not recognized.
    """
    # NOTE(review): gfconstants defines BE3_DAFL_CLIENT as xbl-daq-26;
    # the "xbl-daq-33" label below looks stale (that is BE1) - confirm.
    if self.parent.backendUrl.get() == const.BE3_DAFL_CLIENT:  # xbl-daq-33
        return const.BE3_NORTH_MAC, const.BE3_SOUTH_MAC
    if self.parent.backendUrl.get() == const.BE999_DAFL_CLIENT:
        return const.BE999_NORTH_MAC, const.BE999_SOUTH_MAC
    raise RuntimeError(f"Backend {self.parent.backendUrl.get()} not recognized.")
def _set_udp_header_table(self):
    """Build the UDP header table and write it to the camera.

    Blocks until the cfgConnectionParam write has completed.
    """
    self.parent.cfgConnectionParam.set(self._build_udp_header_table()).wait()
def _build_udp_header_table(self):
    """Assemble the UDP header table for the camera connection parameters.

    Produces 64 x 8 entries: destination port 2000 + 8*block + slot maps to
    source port 3000 + slot; the first four slots of each block are routed
    to the south interface, the last four to the north one.
    """
    table = []
    for block in range(64):
        for slot in range(8):
            dest_port = 2000 + 8 * block + slot
            source_port = 3000 + slot
            # Read the interface parameters per entry, exactly as before.
            if slot < 4:
                mac = self.parent.macSouth.get()
                ip = self.parent.ipSouth.get()
            else:
                mac = self.parent.macNorth.get()
                ip = self.parent.ipNorth.get()
            extend_header_table(table, mac, ip, dest_port, source_port)
    return table
# def on_init(self) -> None:
# """ Initialize the camera, set channel values"""
# # ToDo: Not sure if it's a good idea to change camera settings upon
# # ophyd device startup, i.e. each deviceserver restart.
# self._init_gigafrost()
# self.parent._initialized = True
def _init_gigafrost(self) -> None:
    """Initialize the camera: stop acquisition, program the UDP table
    entry, optionally configure soft triggering, set line swaps, commit
    parameters, and write the backend network addresses.

    NOTE(review): the on_init() hook above is commented out, so this
    initializer is currently not invoked automatically on device startup;
    the trailing super().on_init() chaining looks vestigial - confirm.
    """
    ## Stop acquisition
    self.parent.cmdStartCamera.set(0).wait()
    ### set entry to UDP table
    # number of UDP ports to use
    self.parent.cfgUdpNumPorts.set(2).wait()
    # number of images to send to each UDP port before switching to next
    self.parent.cfgUdpNumFrames.set(5).wait()
    # offset in UDP table - where to find the first entry
    self.parent.cfgUdpHtOffset.set(0).wait()
    # activate changes
    self.parent.cmdWriteService.set(1).wait()
    # Configure software triggering if needed
    if self.parent.autoSoftEnable.get():
        # trigger modes
        self.parent.cfgCntStartBit.set(1).wait()
        self.parent.cfgCntEndBit.set(0).wait()
        # set modes
        self.parent.enable_mode = "soft"
        self.parent.trigger_mode = "auto"
        self.parent.exposure_mode = "timer"
    # line swap - on for west, off for east
    self.parent.cfgLineSwapSW.set(1).wait()
    self.parent.cfgLineSwapNW.set(1).wait()
    self.parent.cfgLineSwapSE.set(0).wait()
    self.parent.cfgLineSwapNE.set(0).wait()
    # Commit parameters
    self.parent.cmdSetParam.set(1).wait()
    # Initialize data backend
    # NOTE: addresses are written with put(force=True) rather than
    # set().wait() - presumably to bypass signal write checks; confirm.
    n, s = self._define_backend_ip()
    self.parent.ipNorth.put(n, force=True)
    self.parent.ipSouth.put(s, force=True)
    n, s = self._define_backend_mac()
    self.parent.macNorth.put(n, force=True)
    self.parent.macSouth.put(s, force=True)
    # Set udp header table
    self._set_udp_header_table()
    return super().on_init()
def on_stage(self) -> None:
"""Specify actions to be executed during stage
Specifies actions to be executed during the stage step in preparation
for a scan. self.parent.scaninfo already has all current parameters for
the upcoming scan.
The gigafrost camera IOC does not write data to disk, that's done by
the DAQ ophyd device.
IMPORTANT:
It must be safe to assume that the device is ready for the scan
to start immediately once this function is finished.
"""
# Gigafrost can finish a run without explicit unstaging
if self.parent.infoBusyFlag.value:
logger.warn("Camera is already busy, unstaging it first!")
self.parent.unstage()
sleep(0.5)
# Sync if out of sync
if self.parent.infoSyncFlag.value == 0:
self.parent.cmdSyncHw.set(1).wait()
# Switch to acquiring
self.parent.cmdStartCamera.set(1).wait()
    def on_unstage(self) -> None:
        """Specify actions to be executed during unstage.

        This step should include checking if the acquisition was successful,
        and publishing the file location and file event message,
        with flagged done to BEC.
        """
        # Switch to idle
        self.parent.cmdStartCamera.set(0).wait()
        # Release the soft enable line if this device drives it
        if self.parent.autoSoftEnable.get():
            self.parent.cmdSoftEnable.set(0).wait()
    def on_stop(self) -> None:
        """
        Specify actions to be executed during stop.
        This must also set self.parent.stopped to True.
        This step should include stopping the detector and backend service.
        """
        # Stopping equals unstaging here: idle the camera, drop soft enable.
        # NOTE(review): self.parent.stopped is NOT set here despite the
        # docstring contract — confirm whether the base class handles it.
        return self.on_unstage()
    def on_trigger(self) -> None | DeviceStatus:
        """
        Specify actions to be executed upon receiving trigger signal.
        Return a DeviceStatus object or None

        Raises RuntimeError if the camera is not acquiring.
        """
        if self.parent.infoBusyFlag.get() in (0, 'IDLE'):
            raise RuntimeError('GigaFrost must be running before triggering')
        # Soft triggering based on operation mode
        if self.parent.autoSoftEnable.get() and self.parent.trigger_mode=='auto' and self.parent.enable_mode=='soft':
            # BEC teststand operation mode: posedge of SoftEnable if Started
            self.parent.cmdSoftEnable.set(0).wait()
            self.parent.cmdSoftEnable.set(1).wait()
        else:
            # Otherwise issue a single software trigger pulse
            self.parent.cmdSoftTrigger.set(1).wait()
class GigaFrostCamera(PSIDetectorBase):
    """Ophyd device class to control Gigafrost cameras at Tomcat

    The actual hardware is implemented by an IOC based on an old fork of Helge's
    cameras. This means that the camera behaves differently than the SF cameras,
    in particular it provides even less feedback about its internal progress.
    The GigaFrost IOC is expected to be updated after beamline commissioning.

    The ophyd class is based on the 'gfclient' package and has a lot of Tomcat
    specific additions. It does behave differently though, as ophyd swallows the
    errors from failed PV writes.

    Parameters
    ----------
    auto_soft_enable : bool
        Flag to use the camera's soft enable (default: False)
    backend_url : str
        Backend url address necessary to set up the camera's udp header.
        (default: http://xbl-daq-23:8080)

    Bugs:
    ----------
    FRAMERATE : Ignored in soft trigger mode, period becomes 2xExposure time
    """
    # pylint: disable=too-many-instance-attributes
    custom_prepare_cls = GigaFrostCameraMixin
    # Fix: a duplicate `USER_ACCESS = [""]` used to be declared here and was
    # immediately shadowed by the real list further down; removed.
    # Set to True once initialize() has run the IOC setup sequence.
    _initialized = False
    # Camera status readbacks
    infoBusyFlag = Component(EpicsSignalRO, "BUSY_STAT", auto_monitor=True)
    infoSyncFlag = Component(EpicsSignalRO, "SYNC_FLAG", auto_monitor=True)
    cmdSyncHw = Component(EpicsSignal, "SYNC_SWHW.PROC", put_complete=True, kind=Kind.omitted)
    cmdStartCamera = Component(EpicsSignal, "START_CAM", put_complete=True, kind=Kind.omitted)
    cmdSetParam = Component(EpicsSignal, "SET_PARAM.PROC", put_complete=True, kind=Kind.omitted)
    # UDP header
    cfgUdpNumPorts = Component(EpicsSignal, "PORTS", put_complete=True, kind=Kind.config)
    cfgUdpNumFrames = Component(EpicsSignal, "FRAMENUM", put_complete=True, kind=Kind.config)
    cfgUdpHtOffset = Component(EpicsSignal, "HT_OFFSET", put_complete=True, kind=Kind.config)
    cmdWriteService = Component(EpicsSignal, "WRITE_SRV.PROC", put_complete=True, kind=Kind.omitted)
    # Standard camera configs
    cfgExposure = Component(
        EpicsSignal, "EXPOSURE", put_complete=True, auto_monitor=True, kind=Kind.config)
    cfgFramerate = Component(
        EpicsSignal, "FRAMERATE", put_complete=True, auto_monitor=True, kind=Kind.config)
    cfgRoiX = Component(
        EpicsSignal, "ROIX", put_complete=True, auto_monitor=True, kind=Kind.config)
    cfgRoiY = Component(
        EpicsSignal, "ROIY", put_complete=True, auto_monitor=True, kind=Kind.config)
    cfgScanId = Component(
        EpicsSignal, "SCAN_ID", put_complete=True, auto_monitor=True, kind=Kind.config)
    cfgCntNum = Component(
        EpicsSignal, "CNT_NUM", put_complete=True, auto_monitor=True, kind=Kind.config)
    cfgCorrMode = Component(
        EpicsSignal, "CORR_MODE", put_complete=True, auto_monitor=True, kind=Kind.config)
    # Software signals
    cmdSoftEnable = Component(EpicsSignal, "SOFT_ENABLE", put_complete=True)
    cmdSoftTrigger = Component(
        EpicsSignal, "SOFT_TRIG.PROC", put_complete=True, kind=Kind.omitted)
    cmdSoftExposure = Component(EpicsSignal, "SOFT_EXP", put_complete=True)
    # Trigger configuration PVs (read from *_RBV, write to the bare PV)
    cfgCntStartBit = Component(
        EpicsSignal,
        "CNT_STARTBIT_RBV",
        write_pv="CNT_STARTBIT",
        put_complete=True,
        kind=Kind.config,
    )
    cfgCntEndBit = Component(
        EpicsSignal,
        "CNT_ENDBIT_RBV",
        write_pv="CNT_ENDBIT",
        put_complete=True,
        kind=Kind.config
    )
    # Enable modes
    cfgTrigEnableExt = Component(
        EpicsSignal,
        "MODE_ENBL_EXT_RBV",
        write_pv="MODE_ENBL_EXT",
        put_complete=True,
        kind=Kind.config,
    )
    cfgTrigEnableSoft = Component(
        EpicsSignal,
        "MODE_ENBL_SOFT_RBV",
        write_pv="MODE_ENBL_SOFT",
        put_complete=True,
        kind=Kind.config,
    )
    cfgTrigEnableAuto = Component(
        EpicsSignal,
        "MODE_ENBL_AUTO_RBV",
        write_pv="MODE_ENBL_AUTO",
        put_complete=True,
        kind=Kind.config,
    )
    cfgTrigVirtEnable = Component(
        EpicsSignal,
        "MODE_ENBL_EXP_RBV",
        write_pv="MODE_ENBL_EXP",
        put_complete=True,
        kind=Kind.config,
    )
    # Trigger modes
    cfgTrigExt = Component(
        EpicsSignal,
        "MODE_TRIG_EXT_RBV",
        write_pv="MODE_TRIG_EXT",
        put_complete=True,
        kind=Kind.config,
    )
    cfgTrigSoft = Component(
        EpicsSignal,
        "MODE_TRIG_SOFT_RBV",
        write_pv="MODE_TRIG_SOFT",
        put_complete=True,
        kind=Kind.config,
    )
    cfgTrigTimer = Component(
        EpicsSignal,
        "MODE_TRIG_TIMER_RBV",
        write_pv="MODE_TRIG_TIMER",
        put_complete=True,
        kind=Kind.config,
    )
    cfgTrigAuto = Component(
        EpicsSignal,
        "MODE_TRIG_AUTO_RBV",
        write_pv="MODE_TRIG_AUTO",
        put_complete=True,
        kind=Kind.config,
    )
    # Exposure modes
    cfgTrigExpExt = Component(
        EpicsSignal,
        "MODE_EXP_EXT_RBV",
        write_pv="MODE_EXP_EXT",
        put_complete=True,
        kind=Kind.config,
    )
    cfgTrigExpSoft = Component(
        EpicsSignal,
        "MODE_EXP_SOFT_RBV",
        write_pv="MODE_EXP_SOFT",
        put_complete=True,
        kind=Kind.config,
    )
    cfgTrigExpTimer = Component(
        EpicsSignal,
        "MODE_EXP_TIMER_RBV",
        write_pv="MODE_EXP_TIMER",
        put_complete=True,
        kind=Kind.config,
    )
    # Line swap selection
    cfgLineSwapSW = Component(EpicsSignal, "LS_SW", put_complete=True, kind=Kind.config)
    cfgLineSwapNW = Component(EpicsSignal, "LS_NW", put_complete=True, kind=Kind.config)
    cfgLineSwapSE = Component(EpicsSignal, "LS_SE", put_complete=True, kind=Kind.config)
    cfgLineSwapNE = Component(EpicsSignal, "LS_NE", put_complete=True, kind=Kind.config)
    cfgConnectionParam = Component(
        EpicsSignal, "CONN_PARM", string=True, put_complete=True, kind=Kind.config
    )
    # HW settings as read only
    cfgSyncFlag = Component(EpicsSignalRO, "PIXRATE", auto_monitor=True, kind=Kind.config)
    cfgTrigDelay = Component(EpicsSignalRO, "TRIG_DELAY", auto_monitor=True, kind=Kind.config)
    cfgSyncoutDelay = Component(EpicsSignalRO, "SYNCOUT_DLY", auto_monitor=True, kind=Kind.config)
    cfgOutputPolarity0 = Component(EpicsSignalRO, "BNC0_RBV", auto_monitor=True, kind=Kind.config)
    cfgOutputPolarity1 = Component(EpicsSignalRO, "BNC1_RBV", auto_monitor=True, kind=Kind.config)
    cfgOutputPolarity2 = Component(EpicsSignalRO, "BNC2_RBV", auto_monitor=True, kind=Kind.config)
    cfgOutputPolarity3 = Component(EpicsSignalRO, "BNC3_RBV", auto_monitor=True, kind=Kind.config)
    cfgInputPolarity1 = Component(EpicsSignalRO, "BNC4_RBV", auto_monitor=True, kind=Kind.config)
    cfgInputPolarity2 = Component(EpicsSignalRO, "BNC5_RBV", auto_monitor=True, kind=Kind.config)
    infoBoardTemp = Component(EpicsSignalRO, "T_BOARD", auto_monitor=True)
    # Names exposed to BEC users
    USER_ACCESS = ["exposure_mode", "fix_nframes_mode", "trigger_mode", "enable_mode"]
    # Soft signals filled in by _init() from __init__ kwargs
    autoSoftEnable = Component(Signal, kind=Kind.config)
    backendUrl = Component(Signal, kind=Kind.config)
    macNorth = Component(Signal, kind=Kind.config)
    macSouth = Component(Signal, kind=Kind.config)
    ipNorth = Component(Signal, kind=Kind.config)
    ipSouth = Component(Signal, kind=Kind.config)
    def __init__(
        self,
        prefix="",
        *,
        name,
        auto_soft_enable=False,
        backend_url=const.BE999_DAFL_CLIENT,
        **kwargs,
    ):
        # Ugly hack to pass values before on_init(): stash the kwargs so
        # _init() can publish them on the read-only soft Signals.
        self._signals_to_be_set = {}
        self._signals_to_be_set['auto_soft_enable'] = auto_soft_enable
        self._signals_to_be_set['backend_url'] = backend_url
        # super() will call the mixin class
        super().__init__(prefix=prefix, name=name, **kwargs)
    def _init(self):
        """Ugly hack: values must be set before on_init() is called"""
        # Mark the soft signals read-only, then force the stashed values in
        self.autoSoftEnable._metadata["write_access"] = False
        self.backendUrl._metadata["write_access"] = False
        self.autoSoftEnable.put(self._signals_to_be_set['auto_soft_enable'], force=True)
        self.backendUrl.put(self._signals_to_be_set['backend_url'], force=True)
        return super()._init()
def initialize(self):
""" Initialization in separate command"""
self.custom_prepare_cls._init_gigafrost()
self._initialized = True
    def trigger(self) -> DeviceStatus:
        """Send a software trigger to GigaFrost and wait out the burst.

        There is no acquisition-status readback from the camera, so this
        method simply sleeps for the estimated burst duration before
        reporting the trigger finished.
        """
        super().trigger()
        # There's no status readback from the camera, so we just wait:
        # exposure [ms] * nimages, converted to seconds, plus 50 ms margin
        status = DeviceStatus(self)
        sleep_time = self.cfgExposure.value*self.cfgCntNum.value*0.001+0.050
        sleep(sleep_time)
        logger.info("[%s] Slept for %f seconds", self.name, sleep_time)
        status.set_finished()
        return status
def configure(self, d: dict=None):
"""Configure the next scan with the GigaFRoST camera
Parameters as 'd' dictionary
----------------------------
nimages : int, optional
Number of images to be taken during each scan. Set to -1 for an
unlimited number of images (limited by the ringbuffer size and
backend speed). (default = 10)
exposure : float, optional
Exposure time [ms]. (default = 0.2)
period : float, optional
Exposure period [ms], ignored in soft trigger mode. (default = 1.0)
pixel_width : int, optional
ROI size in the x-direction [pixels] (default = 2016)
pixel_height : int, optional
ROI size in the y-direction [pixels] (default = 2016)
scanid : int, optional
Scan identification number to be associated with the scan data
(default = 0)
correction_mode : int, optional
The correction to be applied to the imaging data. The following
modes are available (default = 5):
* 0: Bypass. No corrections are applied to the data.
* 1: Send correction factor A instead of pixel values
* 2: Send correction factor B instead of pixel values
* 3: Send correction factor C instead of pixel values
* 4: Invert pixel values, but do not apply any linearity correction
* 5: Apply the full linearity correction
"""
# Stop acquisition
self.unstage()
if not self._initialized:
pass
# If Bluesky style configure
if d is not None:
nimages = d.get('nimages', 10)
exposure = d.get('exposure', 0.2)
period = d.get('period', 1.0)
pixel_width = d.get('pixel_width', 2016)
pixel_height = d.get('pixel_height', 2016)
pixel_width = d.get('image_width', pixel_width)
pixel_height = d.get('image_height', pixel_height)
pixel_width = d.get('roix', pixel_width)
pixel_height = d.get('roiy', pixel_height)
scanid = d.get('scanid', 0)
correction_mode = d.get('correction_mode', 5)
# change settings
self.cfgExposure.set(exposure).wait()
self.cfgFramerate.set(period).wait()
self.cfgRoiX.set(pixel_width).wait()
self.cfgRoiY.set(pixel_height).wait()
self.cfgScanId.set(scanid).wait()
self.cfgCntNum.set(nimages).wait()
self.cfgCorrMode.set(correction_mode).wait()
# Commit parameter
self.cmdSetParam.set(1).wait()
def stage(self):
""" Standard stage command"""
if not self._initialized:
logger.warn(
"Ophyd device havent ran the initialization sequence,"
"IOC might be in unknown configuration."
)
return super().stage()
@property
def exposure_mode(self):
"""Returns the current exposure mode of the GigaFRost camera.
Returns
-------
exp_mode : {'external', 'timer', 'soft'}
The camera's active exposure mode.
If more than one mode is active at the same time, it returns None.
"""
mode_soft = self.cfgTrigExpSoft.get()
mode_timer = self.cfgTrigExpTimer.get()
mode_external = self.cfgTrigExpExt.get()
if mode_soft and not mode_timer and not mode_external:
return "soft"
if not mode_soft and mode_timer and not mode_external:
return "timer"
if not mode_soft and not mode_timer and mode_external:
return "external"
return None
@exposure_mode.setter
def exposure_mode(self, exp_mode):
"""Apply the exposure mode for the GigaFRoST camera.
Parameters
----------
exp_mode : {'external', 'timer', 'soft'}
The exposure mode to be set.
"""
if exp_mode == "external":
self.cfgTrigExpExt.set(1).wait()
self.cfgTrigExpSoft.set(0).wait()
self.cfgTrigExpTimer.set(0).wait()
elif exp_mode == "timer":
self.cfgTrigExpExt.set(0).wait()
self.cfgTrigExpSoft.set(0).wait()
self.cfgTrigExpTimer.set(1).wait()
elif exp_mode == "soft":
self.cfgTrigExpExt.set(0).wait()
self.cfgTrigExpSoft.set(1).wait()
self.cfgTrigExpTimer.set(0).wait()
else:
raise ValueError(
f"Invalid exposure mode! Valid modes are:\n{const.gf_valid_exposure_modes}"
)
# Commit parameters
self.cmdSetParam.set(1).wait()
@property
def fix_nframes_mode(self):
"""Return the current fixed number of frames mode of the GigaFRoST camera.
Returns
-------
fix_nframes_mode : {'off', 'start', 'end', 'start+end'}
The camera's active fixed number of frames mode.
"""
start_bit = self.cfgCntStartBit.get()
end_bit = self.cfgCntStartBit.get()
if not start_bit and not end_bit:
return "off"
if start_bit and not end_bit:
return "start"
if not start_bit and end_bit:
return "end"
if start_bit and end_bit:
return "start+end"
return None
@fix_nframes_mode.setter
def fix_nframes_mode(self, mode):
"""Apply the fixed number of frames settings to the GigaFRoST camera.
Parameters
----------
mode : {'off', 'start', 'end', 'start+end'}
The fixed number of frames mode to be applied.
"""
self._fix_nframes_mode = mode
if self._fix_nframes_mode == "off":
self.cfgCntStartBit.set(0).wait()
self.cfgCntEndBit.set(0).wait()
elif self._fix_nframes_mode == "start":
self.cfgCntStartBit.set(1).wait()
self.cfgCntEndBit.set(0).wait()
elif self._fix_nframes_mode == "end":
self.cfgCntStartBit.set(0).wait()
self.cfgCntEndBit.set(1).wait()
elif self._fix_nframes_mode == "start+end":
self.cfgCntStartBit.set(1).wait()
self.cfgCntEndBit.set(1).wait()
else:
raise ValueError(
f"Invalid fixed number of frames mode! Valid modes are:\n{const.gf_valid_fix_nframe_modes}"
)
# Commit parameters
self.cmdSetParam.set(1).wait()
@property
def trigger_mode(self):
"""Method to detect the current trigger mode set in the GigaFRost camera.
Returns
-------
mode : {'auto', 'external', 'timer', 'soft'}
The camera's active trigger mode. If more than one mode is active
at the moment, None is returned.
"""
mode_auto = self.cfgTrigAuto.get()
mode_external = self.cfgTrigExt.get()
mode_timer = self.cfgTrigTimer.get()
mode_soft = self.cfgTrigSoft.get()
if mode_auto:
return "auto"
if mode_soft:
return "soft"
if mode_timer:
return "timer"
if mode_external:
return "external"
return None
@trigger_mode.setter
def trigger_mode(self, mode):
"""Set the trigger mode for the GigaFRoST camera.
Parameters
----------
mode : {'auto', 'external', 'timer', 'soft'}
The GigaFRoST trigger mode.
"""
if mode == "auto":
self.cfgTrigAuto.set(1).wait()
self.cfgTrigSoft.set(0).wait()
self.cfgTrigTimer.set(0).wait()
self.cfgTrigExt.set(0).wait()
elif mode == "external":
self.cfgTrigAuto.set(0).wait()
self.cfgTrigSoft.set(0).wait()
self.cfgTrigTimer.set(0).wait()
self.cfgTrigExt.set(1).wait()
elif mode == "timer":
self.cfgTrigAuto.set(0).wait()
self.cfgTrigSoft.set(0).wait()
self.cfgTrigTimer.set(1).wait()
self.cfgTrigExt.set(0).wait()
elif mode == "soft":
self.cfgTrigAuto.set(0).wait()
self.cfgTrigSoft.set(1).wait()
self.cfgTrigTimer.set(0).wait()
self.cfgTrigExt.set(0).wait()
else:
raise ValueError(
"Invalid trigger mode! Valid modes are:\n{const.gf_valid_trigger_modes}"
)
# Commit parameters
self.cmdSetParam.set(1).wait()
    @property
    def enable_mode(self):
        """Return the enable mode set in the GigaFRost camera.

        Returns
        -------
        enable_mode: {'soft', 'external', 'soft+ext', 'always'}
            The camera's active enable mode, or None for inconsistent flag
            combinations (e.g. auto together with soft).
        """
        mode_soft = self.cfgTrigEnableSoft.get()
        mode_external = self.cfgTrigEnableExt.get()
        mode_auto = self.cfgTrigEnableAuto.get()
        if mode_soft and not mode_auto:
            return "soft+ext" if mode_external else "soft"
        if mode_auto and not mode_soft and not mode_external:
            return "always"
        if mode_external and not mode_soft and not mode_auto:
            return "external"
        return None
@enable_mode.setter
def enable_mode(self, mode):
"""Apply the enable mode for the GigaFRoST camera.
Parameters
----------
mode : {'soft', 'external', 'soft+ext', 'always'}
The enable mode to be applied.
"""
if mode not in const.gf_valid_enable_modes:
raise ValueError(
"Invalid enable mode! Valid modes are:\n{const.gf_valid_enable_modes}"
)
if mode == "soft":
self.cfgTrigEnableExt.set(0).wait()
self.cfgTrigEnableSoft.set(1).wait()
self.cfgTrigEnableAuto.set(0).wait()
elif mode == "external":
self.cfgTrigEnableExt.set(1).wait()
self.cfgTrigEnableSoft.set(0).wait()
self.cfgTrigEnableAuto.set(0).wait()
elif mode == "soft+ext":
self.cfgTrigEnableExt.set(1).wait()
self.cfgTrigEnableSoft.set(1).wait()
self.cfgTrigEnableAuto.set(0).wait()
elif mode == "always":
self.cfgTrigEnableExt.set(0).wait()
self.cfgTrigEnableSoft.set(0).wait()
self.cfgTrigEnableAuto.set(1).wait()
# Commit parameters
self.cmdSetParam.set(1).wait()
# Automatically connect to MicroSAXS testbench if directly invoked
# (hard-coded test-stand prefix and backend; not executed on import)
if __name__ == "__main__":
    gf = GigaFrostCamera(
        "X02DA-CAM-GF2:", name="gf2", backend_url="http://xbl-daq-28:8080", auto_soft_enable=True
    )
    gf.wait_for_connection()

View File

@@ -0,0 +1,198 @@
# -*- coding: utf-8 -*-
"""
GigaFrost client module that combines camera and DAQ
Created on Thu Jun 27 17:28:43 2024
@author: mohacsi_i
"""
from ophyd import Component, DeviceStatus
from ophyd_devices.interfaces.base_classes.psi_detector_base import (
CustomDetectorMixin,
PSIDetectorBase,
)
# Import sibling modules; fall back to absolute imports when the relative
# import fails (e.g. when this file is executed outside the package).
try:
    from . import gfconstants as const
    from . import stddaq_client as stddaq
    from . import gigafrostcamera as gfcam
except ImportError:
    # Bug fix: the fallback previously imported the *classes* directly,
    # leaving the module aliases `stddaq`/`gfcam` (used below as
    # `gfcam.GigaFrostCamera` and `stddaq.StdDaqClient`) undefined.
    # ImportError also covers ModuleNotFoundError and relative-import errors.
    import tomcat_bec.devices.gigafrost.gfconstants as const
    import tomcat_bec.devices.gigafrost.stddaq_client as stddaq
    import tomcat_bec.devices.gigafrost.gigafrostcamera as gfcam
class GigaFrostClientMixin(CustomDetectorMixin):
    """Mixin class to setup TOMCAT specific implementations of the detector.

    This class will be called by the custom_prepare_cls attribute of the detector class.
    """
    def on_stage(self) -> None:
        """
        Specify actions to be executed during stage in preparation for a scan.
        self.parent.scaninfo already has all current parameters for the upcoming scan.
        In case the backend service is writing data on disk, this step should include publishing
        a file_event and file_message to BEC to inform the system where the data is written to.
        IMPORTANT:
        It must be safe to assume that the device is ready for the scan
        to start immediately once this function is finished.
        """
        # Gigafrost can finish a run without explicit unstaging
        if self.parent._staged:
            self.parent.unstage()
        # Sub-devices are staged by ophyd's default machinery, not here:
        #self.parent.daq.stage()
        #self.parent.cam.stage()
    # def on_unstage(self) -> None:
    #     """
    #     Specify actions to be executed during unstage.
    #     This step should include checking if the acqusition was successful,
    #     and publishing the file location and file event message,
    #     with flagged done to BEC.
    #     """
    #     self.parent.cam.unstage()
    #     self.parent.daq.unstage()
    def on_stop(self) -> None:
        """
        Specify actions to be executed during stop.
        This must also set self.parent.stopped to True.
        This step should include stopping the detector and backend service.
        """
        # NOTE(review): on_unstage is commented out above, so this resolves
        # to the base-class implementation — confirm that is intended.
        return self.on_unstage()
    def on_trigger(self) -> None | DeviceStatus:
        """
        Specify actions to be executed upon receiving trigger signal.
        Return a DeviceStatus object or None
        """
        # Delegate triggering to the camera sub-device
        return self.parent.cam.trigger()
class GigaFrostClient(PSIDetectorBase):
    """Ophyd device class to control Gigafrost cameras at Tomcat

    Combines the GigaFrost camera and the standard DAQ backend as sub-devices.

    The actual hardware is implemented by an IOC based on an old fork of Helge's
    cameras. This means that the camera behaves differently than the SF cameras,
    in particular it provides even less feedback about its internal progress.

    The ophyd class is based on the 'gfclient' package and has a lot of Tomcat
    specific additions. It does behave differently though, as ophyd swallows the
    errors from failed PV writes.

    Parameters
    ----------
    auto_soft_enable : bool
        Flag to use the camera's soft enable (default: False)
    backend_url : str
        Backend url address necessary to set up the camera's udp header.
        (default: http://xbl-daq-23:8080)

    Usage:
    ----------
    gf = GigaFrostClient(
        "X02DA-CAM-GF2:", name="gf2", backend_url="http://xbl-daq-28:8080", auto_soft_enable=True,
        daq_ws_url="ws://xbl-daq-29:8080", daq_rest_url="http://xbl-daq-29:5000"
    )

    Bugs:
    ----------
    FRAMERATE : Ignored in soft trigger mode, period becomes 2xexposure time
    """
    # pylint: disable=too-many-instance-attributes
    custom_prepare_cls = GigaFrostClientMixin
    USER_ACCESS = [""]
    # NOTE(review): the camera prefix is hard-coded here and the `prefix`
    # passed to __init__ is not forwarded to it — confirm this is intended.
    cam = Component(gfcam.GigaFrostCamera, prefix="X02DA-CAM-GF2:", name="cam")
    daq = Component(stddaq.StdDaqClient, name="daq")
    # pylint: disable=too-many-arguments
    def __init__(
        self,
        prefix="",
        *,
        name,
        auto_soft_enable=False,
        backend_url=const.BE999_DAFL_CLIENT,
        daq_ws_url = "ws://localhost:8080",
        daq_rest_url = "http://localhost:5000",
        kind=None,
        **kwargs,
    ):
        # NOTE(review): mutating the Component kwargs on the *class* leaks the
        # last-constructed instance's URLs into every other instance of this
        # class — confirm only one instance is ever created per process.
        self.__class__.__dict__["cam"].kwargs['backend_url'] = backend_url
        self.__class__.__dict__["cam"].kwargs['auto_soft_enable'] = auto_soft_enable
        self.__class__.__dict__["daq"].kwargs['ws_url'] = daq_ws_url
        self.__class__.__dict__["daq"].kwargs['rest_url'] = daq_rest_url
        super().__init__(prefix=prefix, name=name, kind=kind, **kwargs)
    def configure(self, d: dict=None):
        """Configure the next scan with the GigaFRoST camera and standard DAQ backend.

        It also makes some simple checks for consistent configuration, but otherwise
        status feedback is missing on both sides.

        Parameters
        ----------
        ntotal : int, optional
            Total number of images to be taken by the DAQ during the whole scan.
            Set to -1 for an unlimited number of images (limited by the
            ringbuffer size and backend speed). (default = 10000)
        nimages : int, optional
            Number of images to be taken during each trigger (i.e. burst).
            Maximum is 16777215 images. (default = 10)
        exposure : float, optional
            Exposure time, max 40 ms. [ms]. (default = 0.2)
        period : float, optional
            Exposure period [ms], ignored in soft trigger mode. (default = 1.0)
        pixel_width : int, optional
            Image size in the x-direction, must be multiple of 48 [pixels] (default = 2016)
        pixel_height : int, optional
            Image size in the y-direction, must be multiple of 16 [pixels] (default = 2016)
        scanid : int, optional
            Scan identification number to be associated with the scan data.
            ToDo: This should be retrieved from the BEC. (default = 0)
        correction_mode : int, optional
            The correction to be applied to the imaging data. The following
            modes are available (default = 5):

        Returns
        -------
        tuple : (old, new) configuration snapshots, Bluesky-style.
        """
        # Unstage camera (reconfiguration will anyway stop camera)
        super().unstage()
        # If Bluesky style configure: forward the dict to both sub-devices
        old = self.read_configuration()
        self.cam.configure(d)
        self.daq.configure(d)
        new = self.read_configuration()
        return old, new
    def stage(self):
        """ Stages the current device and all sub-devices

        Cross-checks that the camera ROI matches the image geometry configured
        on the DAQ before staging.
        """
        px_daq_h = self.daq.config.cfg_pixel_height.get()
        px_daq_w = self.daq.config.cfg_pixel_width.get()
        px_gf_w = self.cam.cfgRoiX.get()
        px_gf_h = self.cam.cfgRoiY.get()
        if px_daq_h != px_gf_h or px_daq_w != px_gf_w:
            raise RuntimeError("Different image size configured on GF and the DAQ")
        return super().stage()
# def trigger(self) -> DeviceStatus:
# """ Triggers the current device and all sub-devices, i.e. the camera.
# """
# status = super().trigger()
# return status
# Automatically connect to MicroSAXS testbench if directly invoked
# (hard-coded test-stand addresses; not executed on import)
if __name__ == "__main__":
    gf = GigaFrostClient(
        "X02DA-CAM-GF2:", name="gf2", backend_url="http://xbl-daq-28:8080", auto_soft_enable=True,
        daq_ws_url="ws://xbl-daq-29:8080", daq_rest_url="http://xbl-daq-29:5000"
    )
    gf.wait_for_connection()

View File

@@ -0,0 +1,26 @@
# GigaFrost camera at Tomcat
The GigaFrost camera IOC is a fork of an ancient version of Helge's cameras.
As we're commissioning, the current folder also contains the standard DAQ client.
The ophyd implementation tries to balance between familiarity with the old
**gfclient** pyepics library and the BEC/bluesky event model.
# Examples
A simple code example with soft triggering:
```python
d = {'ntotal':100000, 'nimages':3009, 'exposure':10.0, 'period':20.0, 'pixel_width':2016, 'pixel_height':2016}
gfc.configure(d)
gfc.stage()
for ii in range(10):
    gfc.trigger()
gfc.unstage()
```
# Opening GigaFrost panel
The CaQtDM panel can be opened by:
```bash
caqtdm -macro "CAM=X02DA-CAM-GF2" X_X02DA_GIGAFROST_camControl_user.ui &
```

View File

@@ -0,0 +1,243 @@
# -*- coding: utf-8 -*-
"""
Standard DAQ control interface module through the websocket API
Created on Thu Jun 27 17:28:43 2024
@author: mohacsi_i
"""
import json
from time import sleep
from threading import Thread
from ophyd import Device, Signal, Component, Kind
from websockets.sync.client import connect
from websockets.exceptions import ConnectionClosedOK, ConnectionClosedError
try:
from stddaq_rest import StdDaqRestClient
except ModuleNotFoundError:
from tomcat_bec.devices.gigafrost.stddaq_rest import StdDaqRestClient
class StdDaqClient(Device):
    """StdDaq API

    This class combines the new websocket and REST interfaces of the stdDAQ that
    were meant to replace the documented python client. The websocket interface
    starts and stops the acquisition and provides status, while the REST
    interface can read and write the configuration. The DAQ needs to restart
    all services to reconfigure with a new config.

    The websocket provides status updates about a running acquisition but the
    interface breaks connection at the end of the run.

    The standard DAQ configuration is a single JSON file locally autodeployed
    to the DAQ servers (as root!!!). It can only be written through a REST API
    that is semi-supported. The DAQ might be distributed across several servers;
    we'll only interface with the primary REST server, which will synchronize
    with all secondary REST servers. In the past this was a source of problems.

    Example:
    '''
    daq = StdDaqClient(name="daq", ws_url="ws://xbl-daq-29:8080", rest_url="http://xbl-daq-29:5000")
    '''
    """
    # pylint: disable=too-many-instance-attributes
    # Status attributes
    # websocket URL; made read-only in __init__
    url = Component(Signal, kind=Kind.config)
    # last status string reported by the DAQ; made read-only in __init__
    status = Component(Signal, value="unknown", kind=Kind.normal)
    # images per run; -1 means unlimited
    n_total = Component(Signal, value=10000, kind=Kind.config)
    # output path on the DAQ side
    file_path = Component(Signal, value="/gpfs/test/test-beamline", kind=Kind.config)
    # Configuration (REST interface sub-device)
    config = Component(StdDaqRestClient, kind=Kind.config)
    def __init__(
        self,
        *args,
        ws_url: str = "ws://localhost:8080",
        rest_url: str="http://localhost:5000",
        parent: Device = None,
        **kwargs
    ) -> None:
        # NOTE(review): mutating the class-level Component kwargs leaks this
        # instance's rest_url into all instances — confirm singleton usage.
        self.__class__.__dict__['config'].kwargs['rest_url'] = rest_url
        super().__init__(*args, parent=parent, **kwargs)
        # Status and URL are informational; block writes from user code
        self.status._metadata["write_access"] = False
        self.url._metadata["write_access"] = False
        self.url.set(ws_url, force=True).wait()
        self._ws_url = ws_url
        self._mon = None
        # Connect to the DAQ websocket right away
        self._client = None
        self.connect()
def connect(self):
"""Connect to the StdDAQ's websockets interface
StdDAQ may reject connection for a few seconds after restart, or when
it wants so if it fails, wait a bit and try to connect again.
"""
num_retry = 0
while num_retry < 5:
try:
self._client = connect(self._ws_url)
break
except ConnectionRefusedError:
num_retry += 1
sleep(3)
if num_retry==5:
raise ConnectionRefusedError(
"The stdDAQ websocket interface refused connection 5 times.")
    def monitor(self):
        """Attach monitoring to the DAQ.

        Reopens the websocket and starts the background poll() thread that
        forwards DAQ status updates to the `status` signal.
        """
        self._client = connect(self._ws_url)
        self._mon = Thread(target=self.poll, daemon=True)
        self._mon.start()
def configure(self, d: dict=None) -> tuple:
"""Set the standard DAQ parameters for the next run
Note that full reconfiguration is not possible with the websocket
interface, only changing acquisition parameters. These changes are only
activated upon staging!
Example:
----------
std.configure(n_total=10000, file_path="/data/test/raw")
Parameters
----------
n_total : int, optional
Total number of images to be taken during each scan. Set to -1 for
an unlimited number of images (limited by the ringbuffer size and
backend speed). (default = 10000)
file_path : string, optional
Save file path. (default = '/gpfs/test/test-beamline')
"""
old_config = self.read_configuration()
if d is not None:
# Set acquisition parameters
if 'n_total' in d:
self.n_total.set(int(d['n_total']))
if 'ntotal' in d:
self.n_total.set(int(d['ntotal']))
if 'file_path' in d:
self.output_file.set(str(d['file_path']))
# Configure DAQ
if 'pixel_width' in d or 'pixel_height' in d:
self.stop()
sleep(1)
self.config.configure(d)
new_config = self.read_configuration()
return (old_config, new_config)
def stage(self) -> list:
"""Start a new run with the standard DAQ
Behavior: the StdDAQ can stop the previous run either by itself or
by calling unstage. So it might start from an already running state or
not, we can't query if not running.
"""
if self._staged:
self.unstage()
self._client.close()
file_path = self.file_path.get()
n_total = self.n_total.get()
message = {"command": "start", "path": file_path, "n_image": n_total}
reply = self.message(message)
if reply is not None:
reply = json.loads(reply)
if reply["status"] in ("creating_file"):
self.status.put(reply["status"], force=True)
elif reply["status"] in ("rejected"):
raise RuntimeError(
f"Start StdDAQ command rejected (might be already running): {reply['reason']}"
)
self._mon = Thread(target=self.poll, daemon=True)
self._mon.start()
return super().stage()
    def unstage(self):
        """ Stop a running acquisition

        WARN: This will also close the connection!!!
        """
        # The poller thread locks recv, raising a RuntimeError — best-effort
        # fire-and-forget stop command, swallow that specific error.
        try:
            message = {"command": "stop"}
            self.message(message, wait_reply=False)
        except RuntimeError:
            pass
        return super().unstage()
    def stop(self, *, success=False):
        """ Stop a running acquisition

        WARN: This will also close the connection!!!
        ``success`` is accepted for ophyd API compatibility but unused here.
        """
        self.unstage()
    def message(self, message: dict, timeout=1, wait_reply=True):
        """Send a message to the StdDAQ and receive a reply

        Note: finishing acquisition means StdDAQ will close connection, so
        there's no idle state polling.

        Parameters
        ----------
        message : dict or str
            Payload; dicts are JSON-encoded before sending.
        timeout : float, optional
            Seconds to wait for a reply (default = 1).
        wait_reply : bool, optional
            When False, fire-and-forget: returns None immediately after send.
        """
        if isinstance(message, dict):
            msg = json.dumps(message)
        else:
            msg = str(message)
        # Send message (reopen connection if needed)
        try:
            self._client.send(msg)
        except (ConnectionClosedError, ConnectionClosedOK):
            self.connect()
            self._client.send(msg)
        # Wait for reply; closed connection or timeout yields None
        reply = None
        if wait_reply:
            try:
                reply = self._client.recv(timeout)
                return reply
            except (ConnectionClosedError, ConnectionClosedOK, TimeoutError) as ex:
                print(ex)
        return reply
    def poll(self):
        """Monitor status messages while the connection is open.

        This will block the reply monitoring, so calling unstage() might
        throw. Status updates are sent every 1 seconds.
        Runs in a daemon thread started by stage()/monitor().
        """
        try:
            # Give stage()'s synchronous reply exchange a head start
            sleep(1.2)
            for msg in self._client:
                try:
                    message = json.loads(msg)
                    self.status.put(message["status"], force=True)
                except (ConnectionClosedError, ConnectionClosedOK):
                    return
                # pylint: disable=broad-except
                except Exception as ex:
                    print(ex)
                    return
        except (ConnectionClosedError, ConnectionClosedOK):
            # DAQ closes the socket at the end of a run; exit quietly
            return
        finally:
            # Mark the monitor thread as gone
            self._mon = None
# Automatically connect to microXAS testbench if directly invoked
# (hard-coded test-stand addresses; not executed on import)
if __name__ == "__main__":
    daq = StdDaqClient(name="daq", ws_url="ws://xbl-daq-29:8080", rest_url="http://xbl-daq-29:5000")
    daq.wait_for_connection()

View File

@@ -0,0 +1,184 @@
# -*- coding: utf-8 -*-
"""
Standard DAQ preview image stream module
Created on Thu Jun 27 17:28:43 2024
@author: mohacsi_i
"""
import json
import enum
from time import sleep, time
from threading import Thread
import zmq
import numpy as np
from ophyd import Device, Signal, Component, Kind
from ophyd_devices.interfaces.base_classes.psi_detector_base import (
CustomDetectorMixin,
PSIDetectorBase,
)
from bec_lib import bec_logger
logger = bec_logger.logger
ZMQ_TOPIC_FILTER = b''
class StdDaqPreviewState(enum.IntEnum):
    """States of the standard-DAQ preview ophyd device."""

    # State has not been determined yet.
    UNKNOWN = 0
    # Monitor thread is not attached to the stream.
    DETACHED = 1
    # Monitor thread is actively consuming preview frames.
    MONITORING = 2
class StdDaqPreviewMixin(CustomDetectorMixin):
    """Setup class for the standard DAQ preview stream.

    Runs a daemon thread that consumes the parent's ZMQ preview stream and
    republishes decoded frames on the parent device's signals.

    Parent class: CustomDetectorMixin
    """

    # Handle of the monitor thread; None while detached.
    _mon = None

    def on_stage(self):
        """Start listening for the preview data stream."""
        if self._mon is not None:
            # A previous monitor is still attached: tear it down first.
            self.parent.unstage()
            sleep(0.5)
        self.parent.connect()
        self._stop_polling = False
        self._mon = Thread(target=self.poll, daemon=True)
        self._mon.start()

    def on_unstage(self):
        """Stop a running preview."""
        if self._mon is not None:
            self._stop_polling = True
            # Might hang on recv_multipart
            self._mon.join(timeout=1)
            # So also disconnect the socket.
            # BUGFIX: zmq.Socket.disconnect() requires the endpoint address;
            # the original bare call would raise TypeError.
            self.parent._socket.disconnect(self.parent.url.get())

    def on_stop(self):
        """Stop a running preview."""
        self.on_unstage()

    def poll(self):
        """Collect streamed updates until told to stop."""
        self.parent.status.set(StdDaqPreviewState.MONITORING, force=True)
        try:
            t_last = time()
            while True:
                try:
                    # Exit loop and finish monitoring
                    if self._stop_polling:
                        logger.info(f"[{self.parent.name}]\tDetaching monitor")
                        break
                    # pylint: disable=no-member
                    r = self.parent._socket.recv_multipart(flags=zmq.NOBLOCK)
                    # Drop malformed messages (expecting [metadata, data]).
                    if len(r) != 2:
                        continue
                    t_curr = time()
                    t_elapsed = t_curr - t_last
                    # Throttling: skip frames that arrive sooner than
                    # `throttle` seconds after the last processed frame.
                    # BUGFIX: the original comparison was inverted
                    # (t_elapsed > throttle), which dropped every frame once a
                    # single gap longer than the throttle interval occurred.
                    if t_elapsed < self.parent.throttle.get():
                        sleep(0.1)
                        continue
                    # Unpack the Array V1 reply to metadata and array data
                    meta, data = r
                    header = json.loads(meta)
                    if header["type"] == "uint16":
                        image = np.frombuffer(data, dtype=np.uint16)
                        if image.size != np.prod(header['shape']):
                            err = f"Unexpected array size of {image.size} for header: {header}"
                            raise ValueError(err)
                        image = image.reshape(header['shape'])
                        # Update image signals and notify subscribers
                        self.parent.frame.put(header['frame'], force=True)
                        self.parent.image_shape.put(header['shape'], force=True)
                        self.parent.image.put(image, force=True)
                        self.parent._run_subs(sub_type=self.parent.SUB_MONITOR, value=image)
                        t_last = t_curr
                        logger.info(
                            f"[{self.parent.name}] Updated frame {header['frame']}\t"
                            f"Shape: {header['shape']}\tMean: {np.mean(image):.3f}"
                        )
                except ValueError:
                    # Happens when ZMQ partially delivers the multipart message
                    pass
                except zmq.error.Again:
                    # Happens when receive queue is empty
                    sleep(0.1)
        except Exception as ex:
            logger.info(f"[{self.parent.name}]\t{str(ex)}")
            raise
        finally:
            self._mon = None
            self.parent.status.set(StdDaqPreviewState.DETACHED, force=True)
            logger.info(f"[{self.parent.name}]\tDetaching monitor")
class StdDaqPreviewDetector(PSIDetectorBase):
    """Detector wrapper class around the StdDaq preview image stream.

    This was meant to provide a live image stream directly from the StdDAQ.
    Note that the preview stream must already be throttled in order to cope
    with the incoming data; the python class might throttle it further.

    You can add a preview widget to the dock by:
    cam_widget = gui.add_dock('cam_dock1').add_widget('BECFigure').image('daq_stream1')
    """

    # Subscriptions for plotting image
    SUB_MONITOR = "device_monitor_2d"
    _default_sub = SUB_MONITOR
    custom_prepare_cls = StdDaqPreviewMixin

    # Status attributes
    url = Component(Signal, kind=Kind.config)
    throttle = Component(Signal, value=0.1, kind=Kind.config)
    status = Component(Signal, value=StdDaqPreviewState.UNKNOWN, kind=Kind.omitted)
    image = Component(Signal, kind=Kind.normal)
    frame = Component(Signal, kind=Kind.hinted)
    image_shape = Component(Signal, kind=Kind.normal)

    def __init__(
        self, *args, url: str = "tcp://129.129.95.38:20000", parent: Device = None, **kwargs
    ) -> None:
        super().__init__(*args, parent=parent, **kwargs)
        # The informational signals are read-only for clients; internal code
        # bypasses this via force=True.
        self.url._metadata["write_access"] = False
        self.status._metadata["write_access"] = False
        self.image._metadata["write_access"] = False
        self.frame._metadata["write_access"] = False
        self.image_shape._metadata["write_access"] = False
        self.url.set(url, force=True).wait()
        # Connect to the DAQ
        self.connect()

    def connect(self):
        """Connect to the StdDAQ's PUB-SUB streaming interface.

        StdDAQ may reject connection for a few seconds when it restarts,
        so if it fails, wait a bit and try to connect again.
        """
        # pylint: disable=no-member
        # BUGFIX: close any previous socket before creating a new one; the
        # original leaked a socket (and its context) on every reconnect.
        sock = getattr(self, "_socket", None)
        if sock is not None and not sock.closed:
            sock.close()
        # Socket to talk to server
        context = zmq.Context()
        self._socket = context.socket(zmq.SUB)
        self._socket.setsockopt(zmq.SUBSCRIBE, ZMQ_TOPIC_FILTER)
        try:
            self._socket.connect(self.url.get())
        except ConnectionRefusedError:
            sleep(1)
            self._socket.connect(self.url.get())
# Automatically connect to MicroSAXS testbench if directly invoked
# (smoke test: subscribe to the beamline preview stream).
if __name__ == "__main__":
    daq = StdDaqPreviewDetector(url="tcp://129.129.95.38:20000", name="preview")
    daq.wait_for_connection()

View File

@@ -0,0 +1,211 @@
# -*- coding: utf-8 -*-
"""
Standard DAQ configuration interface module through the latrest REST API
Created on Thu Jun 27 17:28:43 2024
@author: mohacsi_i
"""
from time import sleep
from ophyd import Device, Signal, Component, Kind
import requests
try:
from bec_lib import bec_logger
logger = bec_logger.logger
except ModuleNotFoundError:
import logging
logger = logging.getLogger("GfCam")
class StdDaqRestClient(Device):
    """Wrapper class around the new StdDaq REST interface.

    This was meant to extend the websocket interface that replaced the documented
    python client. It is used as a part of the StdDaqClient aggregator class.
    Good to know that the stdDAQ restarts all services after reconfiguration.

    The standard DAQ configuration is a single JSON file locally autodeployed
    to the DAQ servers (as root!!!). It can only be written through the REST API
    via standard HTTP requests. The DAQ might be distributed across several
    servers; we only interface with the primary REST server, which will
    synchronize with all secondary REST servers. In the past this was a source
    of problems.

    Example:
    '''
    daqcfg = StdDaqRestClient(name="daqcfg", rest_url="http://xbl-daq-29:5000")
    '''
    """

    # pylint: disable=too-many-instance-attributes
    USER_ACCESS = ["write_daq_config"]

    # Status attributes: local mirrors of the DAQ's JSON configuration file.
    rest_url = Component(Signal, kind=Kind.config)
    cfg_detector_name = Component(Signal, kind=Kind.config)
    cfg_detector_type = Component(Signal, kind=Kind.config)
    cfg_n_modules = Component(Signal, kind=Kind.config)
    cfg_bit_depth = Component(Signal, kind=Kind.config)
    cfg_pixel_height = Component(Signal, kind=Kind.config)
    cfg_pixel_width = Component(Signal, kind=Kind.config)
    cfg_start_udp_port = Component(Signal, kind=Kind.config)
    cfg_writer_user_id = Component(Signal, kind=Kind.config)
    cfg_submodule_info = Component(Signal, kind=Kind.config)
    cfg_max_number_of_forwarders = Component(Signal, kind=Kind.config)
    cfg_use_all_forwarders = Component(Signal, kind=Kind.config)
    cfg_module_sync_queue_size = Component(Signal, kind=Kind.config)
    cfg_module_positions = Component(Signal, kind=Kind.config)

    def __init__(
        self, *args, rest_url: str = "http://localhost:5000", parent: Device = None, **kwargs
    ) -> None:
        super().__init__(*args, parent=parent, **kwargs)
        self.rest_url._metadata["write_access"] = False
        self.rest_url.put(rest_url, force=True)
        # Connect to the DAQ and initialize values; stay usable when offline.
        try:
            self.read_daq_config()
        except Exception as ex:
            logger.error(f"Failed to connect to the StdDAQ REST API\n{ex}")

    def get_daq_config(self) -> dict:
        """Read the current configuration from the DAQ.

        Raises
        ------
        ConnectionError
            If the REST call returns anything but HTTP 200.
        """
        r = requests.get(
            self.rest_url.get() + '/api/config/get',
            params={'config_file': "/etc/std_daq/configs/gf1.json", 'user': "ioc"},
            timeout=2
        )
        if r.status_code != 200:
            raise ConnectionError(f"[{self.name}] Error {r.status_code}:\t{r.text}")
        return r.json()

    def read_daq_config(self) -> None:
        """Extract the current configuration from the JSON file into signals."""
        cfg = self.get_daq_config()
        self.cfg_detector_name.set(cfg['detector_name']).wait()
        self.cfg_detector_type.set(cfg['detector_type']).wait()
        self.cfg_n_modules.set(cfg['n_modules']).wait()
        self.cfg_bit_depth.set(cfg['bit_depth']).wait()
        self.cfg_pixel_height.set(cfg['image_pixel_height']).wait()
        self.cfg_pixel_width.set(cfg['image_pixel_width']).wait()
        #self.cfg_submodule_info.set(cfg['submodule_info']).wait()
        self.cfg_start_udp_port.set(cfg['start_udp_port']).wait()
        self.cfg_writer_user_id.set(cfg['writer_user_id']).wait()
        self.cfg_max_number_of_forwarders.set(cfg['max_number_of_forwarders_spawned']).wait()
        self.cfg_use_all_forwarders.set(cfg['use_all_forwarders']).wait()
        self.cfg_module_sync_queue_size.set(cfg['module_sync_queue_size']).wait()
        #self.cfg_module_positions.set(cfg['module_positions']).wait()

    def _build_config(self, orig=None) -> dict:
        """Assemble the JSON config to POST, optionally merged onto ``orig``.

        Only the image size is currently writable; the remaining fields stay
        commented out until confirmed safe to change.
        """
        config = {
            # 'detector_name': str(self.cfg_detector_name.get()),
            # 'detector_type': str(self.cfg_detector_type.get()),
            # 'n_modules': int(self.cfg_n_modules.get()),
            # 'bit_depth': int(self.cfg_bit_depth.get()),
            'image_pixel_height': int(self.cfg_pixel_height.get()),
            'image_pixel_width': int(self.cfg_pixel_width.get()),
            # 'start_udp_port': int(self.cfg_start_udp_port.get()),
            # 'writer_user_id': int(self.cfg_writer_user_id.get()),
            # 'log_level': "info",
            # 'submodule_info': {},
            # 'max_number_of_forwarders_spawned': int(self.cfg_max_number_of_forwarders.get()),
            # 'use_all_forwarders': bool(self.cfg_use_all_forwarders.get()),
            # 'module_sync_queue_size': int(self.cfg_module_sync_queue_size.get()),
            # 'module_positions': {},
            # 'number_of_writers': 14
        }
        if orig is not None:
            # BUGFIX: dict.update() returns None, so the original
            # `config = orig.update(config)` discarded the merged result and
            # write_daq_config() would POST null. Merge explicitly instead.
            config = {**orig, **config}
        return config

    def write_daq_config(self):
        """Write configuration based on current PV values.

        Some fields might be unchangeable on the DAQ side.

        Raises
        ------
        ConnectionError
            If the REST call returns anything but HTTP 200.
        """
        orig = self.get_daq_config()
        config = self._build_config(orig)
        #params = {"user": "ioc", "config_file": "/etc/std_daq/configs/gf1.json"}
        params = {"user": "ioc"}
        r = requests.post(
            self.rest_url.get() + '/api/config/set',
            params=params,
            json=config,
            timeout=2,
            headers={"Content-Type": "application/json"}
        )
        if r.status_code != 200:
            raise ConnectionError(f"[{self.name}] Error {r.status_code}:\t{r.text}")
        return r

    def configure(self, d: dict = None):
        """Configure the next scan with the GigaFRoST camera.

        Parameters
        ----------
        d : dict, optional
            Bluesky-style configuration dict. Recognized keys:
            pixel_width / image_width : int, optional
                Image size in the x-direction [pixels] (default = 2016)
            pixel_height / image_height : int, optional
                Image size in the y-direction [pixels] (default = 2016)

        Returns
        -------
        tuple[dict, dict]
            (old, new) configuration readings (Bluesky convention).
        """
        # Reads the current config
        old = self.read_configuration()
        self.read_daq_config()
        # If Bluesky style configure
        if d is not None:
            # Only reconfigure if we're instructed
            if ('pixel_width' in d) or ('pixel_height' in d) or ('image_width' in d) or ('image_height' in d):
                pixel_width = d.get('pixel_width', 2016)
                pixel_height = d.get('pixel_height', 2016)
                # image_* aliases take precedence if present
                pixel_width = d.get('image_width', pixel_width)
                pixel_height = d.get('image_height', pixel_height)
                # Skip the (service-restarting!) write if nothing changed.
                if self.cfg_pixel_height.get() != pixel_height or self.cfg_pixel_width.get() != pixel_width:
                    self.cfg_pixel_height.set(pixel_height).wait()
                    self.cfg_pixel_width.set(pixel_width).wait()
                    self.write_daq_config()
                    # BUGFIX: the bec loguru logger uses brace formatting, so
                    # %-style lazy args were never substituted; use an f-string
                    # like the rest of the file.
                    logger.info(f"[{self.name}] Reconfigured the StdDAQ")
                    # No feedback on restart, we just sleep
                    sleep(3)
        new = self.read_configuration()
        return old, new

    def read(self):
        """Read op: refresh from the DAQ, then return the ophyd reading."""
        self.read_daq_config()
        return super().read()

    def read_configuration(self):
        """Configuration read op: refresh from the DAQ (best effort)."""
        try:
            self.read_daq_config()
        except Exception as ex:
            logger.error(f"Failed to connect to the StdDAQ REST API\n{ex}")
        return super().read_configuration()

    def stage(self) -> list:
        """Stage op: Read the current configuration from the DAQ."""
        self.read_daq_config()
        return super().stage()

    def unstage(self):
        """Unstage op: Read the current configuration from the DAQ."""
        self.read_daq_config()
        return super().unstage()

    def stop(self, *, success=False):
        """Stop op: Read the current configuration from the DAQ."""
        self.unstage()
# Automatically connect to MicroSAXS testbench if directly invoked
# (smoke test: read the live configuration from the beamline REST server).
if __name__ == "__main__":
    daqcfg = StdDaqRestClient(name="daqcfg", rest_url="http://xbl-daq-29:5000")
    daqcfg.wait_for_connection()

View File

@@ -0,0 +1,124 @@
""" Extension class for EpicsMotor
This module extends the basic EpicsMotor with additional functionality. It
exposes additional parameters of the EPICS MotorRecord and provides a more
detailed interface for motors using the new ECMC-based motion systems at PSI.
"""
import warnings
from ophyd import Component, EpicsMotor, EpicsSignal, EpicsSignalRO, Kind
from ophyd.status import DeviceStatus, MoveStatus
from ophyd.utils.errors import UnknownStatusFailure
from ophyd.utils.epics_pvs import AlarmSeverity
class SpmgStates:
    """Constants for the EPICS MotorRecord's SPMG (Stop/Pause/Move/Go) field."""

    # pylint: disable=too-few-public-methods
    STOP = 0   # Motion stopped immediately
    PAUSE = 1  # Motion paused, resumable
    MOVE = 2   # Execute moves without backlash correction
    GO = 3     # Normal operating mode
class EpicsMotorMR(EpicsMotor):
    """Extended EPICS Motor class.

    Special motor class that exposes additional motor record functionality.
    It extends the EpicsMotor base class to provide some simple status checks
    before movement.
    """

    # Highest alarm severity that is still tolerated by ophyd's status logic.
    tolerated_alarm = AlarmSeverity.INVALID

    # Retry deadband of the MotorRecord (.RDBD)
    motor_deadband = Component(
        EpicsSignalRO, ".RDBD", auto_monitor=True, kind=Kind.config)
    # Stop/Pause/Move/Go mode field (.SPMG), see SpmgStates
    motor_mode = Component(
        EpicsSignal, ".SPMG", auto_monitor=True, put_complete=True, kind=Kind.omitted)
    # MotorRecord alarm status (.STAT); non-zero indicates an alarm
    motor_status = Component(
        EpicsSignal, ".STAT", auto_monitor=True, kind=Kind.omitted)
    # Closed-loop enable field (.CNEN)
    motor_enable = Component(
        EpicsSignal, ".CNEN", auto_monitor=True, kind=Kind.omitted)

    def move(self, position, wait=True, **kwargs) -> MoveStatus:
        """Extended move function with a few sanity checks.

        Note that the default EpicsMotor only supports the 'GO' movement mode.
        """
        # Reset SPMG to GO before moving, as EpicsMotor requires it.
        spmg = self.motor_mode.get()
        if spmg != SpmgStates.GO:
            self.motor_mode.set(SpmgStates.GO).wait()
        # Warn if the EPICS MotorRecord claims an alarm state
        status = self.motor_status.get()
        if status:
            warnings.warn(f"EPICS MotorRecord is in alarm state {status}, ophyd will raise", RuntimeWarning)
        # Warn if trying to move beyond an active limit
        # if self.high_limit_switch and position > self.position:
        #     warnings.warn("Attempting to move above active HLS", RuntimeWarning)
        # if self.low_limit_switch and position < self.position:
        #     warnings.warn("Attempting to move below active LLS", RuntimeWarning)
        try:
            status = super().move(position, wait, **kwargs)
            return status
        except UnknownStatusFailure:
            # NOTE(review): an UnknownStatusFailure is treated as a finished
            # move here — confirm this matches beamline expectations.
            status = DeviceStatus(self)
            status.set_finished()
            return status
class EpicsMotorEC(EpicsMotorMR):
    """Detailed ECMC EPICS motor class.

    Special motor class to provide additional functionality for ECMC based motors.
    It exposes additional diagnostic fields and includes basic error management.
    """

    # Expose reset() to BEC users
    USER_ACCESS = ['reset']

    # ECMC-specific diagnostic PVs (suffixes relative to the axis prefix)
    enable_readback = Component(EpicsSignalRO, "-EnaAct", auto_monitor=True, kind=Kind.normal)
    enable = Component(
        EpicsSignal, "-EnaCmd-RB", write_pv="-EnaCmd", auto_monitor=True, kind=Kind.normal)
    homed = Component(EpicsSignalRO, "-Homed", auto_monitor=True, kind=Kind.normal)
    velocity_readback = Component(EpicsSignalRO, "-VelAct", auto_monitor=True, kind=Kind.normal)
    position_readback = Component(EpicsSignalRO, "-PosAct", auto_monitor=True, kind=Kind.normal)
    position_error = Component(EpicsSignalRO, "-PosErr", auto_monitor=True, kind=Kind.normal)
    #high_interlock = Component(EpicsSignalRO, "-SumIlockFwd", auto_monitor=True, kind=Kind.normal)
    #low_interlock = Component(EpicsSignalRO, "-SumIlockBwd", auto_monitor=True, kind=Kind.normal)
    #ecmc_status = Component(EpicsSignalRO, "-Status", auto_monitor=True, kind=Kind.normal)
    error = Component(EpicsSignalRO, "-ErrId", auto_monitor=True, kind=Kind.normal)
    error_msg = Component(EpicsSignalRO, "-MsgTxt", auto_monitor=True, kind=Kind.normal)
    error_reset = Component(EpicsSignal, "-ErrRst", put_complete=True, kind=Kind.omitted)

    def move(self, position, wait=True, **kwargs) -> MoveStatus:
        """Extended move function with a few sanity checks.

        Note that the default EpicsMotor only supports the 'GO' movement mode.

        Raises
        ------
        RuntimeError
            If the ECMC axis is in an error state before the move.
        """
        # Check ECMC error status before move
        error = self.error.get()
        if error:
            raise RuntimeError(f"Motor is in error state with message: '{self.error_msg.get()}'")
        return super().move(position, wait, **kwargs)

    def reset(self):
        """Resets an ECMC axis.

        Recovers an ECMC axis from a previous error. Note that this does not
        solve the cause of the error, that you'll have to do yourself.

        Common error causes:
        -------------------------
        - MAX_POSITION_LAG_EXCEEDED : The PID tuning is wrong.
        - MAX_VELOCITY_EXCEEDED : Either the PID is wrong or the motor is sticking-slipping
        - BOTH_LIMITS_ACTIVE : The motors are probably not connected

        Raises
        ------
        RuntimeError
            If the error persists after the reset attempt.
        """
        # Reset the error; settle_time gives the controller time to clear it.
        self.error_reset.set(1, settle_time=0.1).wait()
        # Check if it disappeared
        if self.error.get():
            raise RuntimeError(f"Failed to reset axis, error still present: '{self.error_msg.get()}'")

View File

@@ -1 +1,2 @@
from .tutorial_fly_scan import AcquireDark, AcquireFlat, TutorialFlyScanContLine
from .gigafrost_test import GigaFrostStepScan

View File

@@ -0,0 +1,545 @@
import time
import numpy as np
from bec_lib import bec_logger
from scan_server.scans import AsyncFlyScanBase, ScanArgType, ScanBase
logger = bec_logger.logger
class AeroSingleScan(AsyncFlyScanBase):
    """Single-line fly scan on the Aerotech stage with PSO output and DDC collection."""

    scan_name = "aero_single_scan"
    scan_report_hint = "scan_progress"
    required_kwargs = ["startpos", "scanrange", "psodist"]
    arg_input = {}
    arg_bundle_size = {"bundle": len(arg_input), "min": None, "max": None}

    def __init__(self, *args, parameter: dict = None, **kwargs):
        """Performs a single line scan with PSO output and data collection.

        Examples:
            >>> scans.aero_single_scan(startpos=42, scanrange=2*10+3*180, psodist=[10, 180, 0.01, 180, 0.01, 180])
        """
        super().__init__(parameter=parameter, **kwargs)
        self.axis = []
        self.scan_motors = []
        self.num_pos = 0
        # Scan geometry (user supplied, required)
        self.scanStart = self.caller_kwargs.get("startpos")
        self.scanEnd = self.scanStart + self.caller_kwargs.get("scanrange")
        self.psoBounds = self.caller_kwargs.get("psodist")
        # Motion and DAQ parameters (optional, with defaults)
        self.scanVel = self.caller_kwargs.get("velocity", 30)
        self.scanTra = self.caller_kwargs.get("travel", 80)
        self.scanAcc = self.caller_kwargs.get("acceleration", 500)
        self.scanExpNum = self.caller_kwargs.get("daqpoints", 5000)

    def pre_scan(self):
        # Move to start position before kicking off the fly scan
        st = yield from self.stubs.send_rpc_and_wait("es1_roty", "dmove", self.scanStart)
        st.wait()
        yield from self.stubs.pre_scan()

    def scan_report_instructions(self):
        """Scan report instructions for the progress bar, yields from mcs card"""
        if not self.scan_report_hint:
            yield None
            return
        yield from self.stubs.scan_report_instruction({"scan_progress": ["es1_roty"]})

    def scan_core(self):
        # Configure PSO, DDC and motor
        yield from self.stubs.send_rpc_and_wait(
            "es1_roty",
            "configure",
            {"velocity": self.scanVel, "acceleration": self.scanVel / self.scanAcc},
        )
        yield from self.stubs.send_rpc_and_wait(
            "es1_psod", "configure", {"distance": self.psoBounds, "wmode": "toggle"}
        )
        yield from self.stubs.send_rpc_and_wait(
            "es1_ddaq",
            "configure",
            {"npoints": self.scanExpNum},
        )
        # DAQ with real trigger
        # yield from self.stubs.send_rpc_and_wait(
        #     "es1_ddaq", "configure", {"npoints": self.scanExpNum, "trigger": "HSINP0_RISE"},
        # )
        # Kick off PSO and DDC so they are armed before the motion starts
        st = yield from self.stubs.send_rpc_and_wait("es1_psod", "kickoff")
        st.wait()
        st = yield from self.stubs.send_rpc_and_wait("es1_ddaq", "kickoff")
        st.wait()
        print("Start moving")
        # Start the actual movement
        yield from self.stubs.send_rpc_and_wait(
            "es1_roty",
            "configure",
            {"target": self.scanEnd},
        )
        yield from self.stubs.kickoff(
            device="es1_roty",
            parameter={"target": self.scanEnd},
        )
        # yield from self.stubs.set(device='es1_roty', value=self.scanEnd, wait_group="flyer")
        # DIID of the kickoff instruction just issued, used to poll its status
        target_diid = self.DIID - 1
        # Wait for motion to finish, reading primary devices once per second
        while True:
            yield from self.stubs.read_and_wait(group="primary", wait_group="readout_primary", pointID=self.pointID)
            self.pointID += 1
            status = self.stubs.get_req_status(
                device="es1_roty", RID=self.metadata["RID"], DIID=target_diid
            )
            progress = self.stubs.get_device_progress(device="es1_roty", RID=self.metadata["RID"])
            print(f"status: {status}\tprogress: {progress}")
            if progress:
                self.num_pos = progress
            if status:
                break
            time.sleep(1)
        print("Scan done\n\n")
        self.num_pos = self.pointID
class AeroSequenceScan(AsyncFlyScanBase):
    """Repeated back-and-forth fly scan on the Aerotech stage with PSO output."""

    scan_name = "aero_sequence_scan"
    scan_report_hint = "table"
    required_kwargs = ["startpos", "ranges"]
    arg_input = {}
    arg_bundle_size = {"bundle": len(arg_input), "min": None, "max": None}

    def __init__(self, *args, parameter: dict = None, **kwargs):
        """Performs a sequence scan with PSO output and data collection.

        Examples:
            >>> scans.aero_sequence_scan(startpos=42, ranges=([179.9, 0.1, 5]), expnum=3600, repnum=3, repmode="PosNeg")
        """
        super().__init__(parameter=parameter, **kwargs)
        self.axis = []
        self.scan_motors = ["es1_roty"]
        self.num_pos = 0
        # Scan geometry and repetition settings
        self.scanStart = self.caller_kwargs.get("startpos")
        self.scanRanges = self.caller_kwargs.get("ranges")
        self.scanExpNum = self.caller_kwargs.get("expnum", 25000)
        self.scanRepNum = self.caller_kwargs.get("repnum", 1)
        self.scanRepMode = self.caller_kwargs.get("repmode", "Pos")
        # Motion parameters
        self.scanVel = self.caller_kwargs.get("velocity", 30)
        self.scanTra = self.caller_kwargs.get("travel", 80)
        self.scanAcc = self.caller_kwargs.get("acceleration", 500)
        self.scanSafeDist = self.caller_kwargs.get("safedist", 10)
        # NOTE(review): this branch is a no-op (assigns scanRanges to itself);
        # presumably it was meant to wrap a flat [on, off, reps] triple in a
        # list. The flat case is instead handled by the TypeError fallback in
        # pre_scan — confirm before changing.
        if isinstance(self.scanRanges[0], (int, float)):
            self.scanRanges = self.scanRanges
        if self.scanRepMode not in ["PosNeg", "Pos", "NegPos", "Neg"]:
            raise RuntimeError(f"Unexpected sequence repetition mode: {self.scanRepMode}")

    def pre_scan(self):
        # Calculate PSO positions from tables.
        # AccDist: distance needed to reach scan velocity plus a safety margin.
        AccDist = 0.5 * self.scanVel * self.scanVel / self.scanAcc + self.scanSafeDist
        # Relative PSO bounds (alternating on/off distances)
        self.psoBoundsPos = [AccDist]
        try:
            for line in self.scanRanges:
                print(f"Line is: {line} of type {type(line)}")
                for rr in range(int(line[2])):
                    self.psoBoundsPos.append(line[0])
                    self.psoBoundsPos.append(line[1])
        except TypeError:
            # Fallback: scanRanges is a single flat [on, off, reps] triple
            line = self.scanRanges
            print(f"Line is: {line} of type {type(line)}")
            for rr in range(int(line[2])):
                self.psoBoundsPos.append(line[0])
                self.psoBoundsPos.append(line[1])
        # Drop the trailing 'off' distance after the last repetition
        del self.psoBoundsPos[-1]
        # Negative-direction bounds are the mirrored positive bounds
        self.psoBoundsNeg = [AccDist]
        self.psoBoundsNeg.extend(self.psoBoundsPos[::-1])
        # Total travel including run-up and run-out distances
        scanrange = 2 * AccDist + np.sum(self.psoBoundsPos)
        if self.scanRepMode in ["PosNeg", "Pos"]:
            self.PosStart = self.scanStart - AccDist
            self.PosEnd = self.scanStart + scanrange
        elif self.scanRepMode in ["NegPos", "Neg"]:
            self.PosStart = self.scanStart + AccDist
            self.PosEnd = self.scanStart - scanrange
        else:
            raise RuntimeError(f"Unexpected sequence repetition mode: {self.scanRepMode}")
        print(f"\tCalculated scan range: {self.PosStart} to {self.PosEnd} range {scanrange}")
        # ToDo: We could append all distances and write a much longer 'distance array'. this would elliminate the need of rearming...
        # Move roughly to start position (at travel velocity)
        yield from self.stubs.send_rpc_and_wait(
            "es1_roty",
            "configure",
            {"velocity": self.scanTra, "acceleration": self.scanTra / self.scanAcc},
        )
        st = yield from self.stubs.send_rpc_and_wait("es1_roty", "move", self.PosStart)
        st.wait()
        yield from self.stubs.pre_scan()

    def scan_core(self):
        # Move to start position (with travel velocity)
        yield from self.stubs.send_rpc_and_wait(
            "es1_roty",
            "configure",
            {"velocity": self.scanTra, "acceleration": self.scanTra / self.scanAcc},
        )
        yield from self.stubs.send_rpc_and_wait("es1_roty", "move", self.PosStart)
        # Configure PSO, DDC and motor
        yield from self.stubs.send_rpc_and_wait(
            "es1_psod", "configure", {"distance": self.psoBoundsPos, "wmode": "toggle"}
        )
        yield from self.stubs.send_rpc_and_wait(
            "es1_ddaq",
            "configure",
            {"npoints": self.scanExpNum},
        )
        # With real trigger
        # yield from self.stubs.send_rpc_and_wait(
        #     "es1_ddaq", "configure", {"npoints": self.scanExpNum, "trigger": "HSINP0_RISE"}
        # )
        yield from self.stubs.send_rpc_and_wait(
            "es1_roty",
            "configure",
            {"velocity": self.scanVel, "acceleration": self.scanVel / self.scanAcc},
        )
        # Kickoff pso and daq
        st = yield from self.stubs.send_rpc_and_wait("es1_psod", "kickoff")
        st.wait()
        st = yield from self.stubs.send_rpc_and_wait("es1_ddaq", "kickoff")
        st.wait()
        # Run the actual scan (haven't figured out the progress bar)
        print("Starting actual scan loop")
        for ii in range(self.scanRepNum):
            print(f"Scan segment {ii}")
            # No option to reset the index counter, so re-arm the PSO each pass
            yield from self.stubs.send_rpc_and_wait("es1_psod", "dstArrayRearm.set", 1)
            if self.scanRepMode in ["Pos", "Neg"]:
                # One-directional: scan forward at scan velocity, then return
                # to the start at travel velocity.
                yield from self.stubs.send_rpc_and_wait(
                    "es1_roty",
                    "configure",
                    {"velocity": self.scanVel, "acceleration": self.scanVel / self.scanAcc},
                )
                st = yield from self.stubs.send_rpc_and_wait("es1_roty", "move", self.PosEnd)
                st.wait()
                yield from self.stubs.send_rpc_and_wait(
                    "es1_roty",
                    "configure",
                    {"velocity": self.scanTra, "acceleration": self.scanTra / self.scanAcc},
                )
                st = yield from self.stubs.send_rpc_and_wait("es1_roty", "move", self.PosStart)
                st.wait()
            elif self.scanRepMode in ["PosNeg", "NegPos"]:
                # Bi-directional: alternate scan direction each repetition
                if ii % 2 == 0:
                    st = yield from self.stubs.send_rpc_and_wait("es1_roty", "move", self.PosEnd)
                    st.wait()
                else:
                    st = yield from self.stubs.send_rpc_and_wait("es1_roty", "move", self.PosStart)
                    st.wait()
            self.pointID += 1
            self.num_pos += 1
            time.sleep(0.2)
        # Complete (should complete instantly)
        yield from self.stubs.complete(device="es1_psod")
        yield from self.stubs.complete(device="es1_ddaq")
        st = yield from self.stubs.send_rpc_and_wait("es1_psod", "complete")
        st.wait()
        st = yield from self.stubs.send_rpc_and_wait("es1_ddaq", "complete")
        st.wait()
        # Collect - Throws a warning due to returning a generator
        # st = yield from self.stubs.send_rpc_and_wait("es1_psod", "collect")
        # st = yield from self.stubs.send_rpc_and_wait("es1_ddaq", "collect")
        yield from self.stubs.read_and_wait(group="primary", wait_group="readout_primary")
        # DIID of the kickoff instruction below, used for status polling
        target_diid = self.DIID - 1
        yield from self.stubs.kickoff(
            device="es1_roty",
            parameter={"target": self.PosStart},
        )
        yield from self.stubs.wait(device=["es1_roty"], wait_group="kickoff", wait_type="move")
        yield from self.stubs.complete(device="es1_roty")
        # Wait for motion to finish, polling device request statuses
        while True:
            pso_status = self.stubs.get_req_status(
                device="es1_psod", RID=self.metadata["RID"], DIID=target_diid
            )
            daq_status = self.stubs.get_req_status(
                device="es1_ddaq", RID=self.metadata["RID"], DIID=target_diid
            )
            mot_status = self.stubs.get_req_status(
                device="es1_roty", RID=self.metadata["RID"], DIID=target_diid
            )
            # NOTE(review): only the last (motor) progress value is kept; the
            # first two lookups are overwritten — confirm this is intended.
            progress = self.stubs.get_device_progress(device="es1_psod", RID=self.metadata["RID"])
            progress = self.stubs.get_device_progress(device="es1_ddaq", RID=self.metadata["RID"])
            progress = self.stubs.get_device_progress(device="es1_roty", RID=self.metadata["RID"])
            print(f"pso: {pso_status}\tdaq: {daq_status}\tmot: {mot_status}\tprogress: {progress}")
            if progress:
                self.num_pos = int(progress)
            if mot_status:
                break
            time.sleep(1)
        print("Scan done\n\n")

    def cleanup(self):
        """Set scan progress to 1 to finish the scan"""
        self.num_pos = 1
        return super().cleanup()
class AeroScriptedScan(AsyncFlyScanBase):
    """Fly scan that executes a jinja2-rendered AeroScript on the controller."""

    scan_name = "aero_scripted_scan"
    scan_report_hint = "table"
    required_kwargs = ["filename", "subs"]
    arg_input = {}
    arg_bundle_size = {"bundle": len(arg_input), "min": None, "max": None}

    def __init__(self, *args, parameter: dict = None, **kwargs):
        """Executes an AeroScript template as a flyer.

        The script is generated from a template file using jinja2.

        Examples:
            >>> scans.aero_scripted_scan(filename="AerotechSnapAndStepTemplate.ascript", subs={'startpos': 42, 'stepsize': 0.1, 'numsteps': 1800, 'exptime': 0.1})
        """
        super().__init__(parameter=parameter, **kwargs)
        self.axis = []
        self.scan_motors = ["es1_roty"]
        self.num_pos = 0
        # Template file path and the substitutions rendered into it
        self.filename = self.caller_kwargs.get("filename")
        self.subs = self.caller_kwargs.get("subs")
        # Aerotech controller task slot used to run the generated script
        self.taskIndex = self.caller_kwargs.get("taskindex", 4)

    def pre_scan(self):
        print("TOMCAT Loading Aeroscript template")
        # Load the template file
        with open(self.filename) as f:
            templatetext = f.read()
        # Substitute jinja template (local import keeps the module importable
        # without jinja2 installed)
        import jinja2

        tm = jinja2.Template(templatetext)
        self.scripttext = tm.render(scan=self.subs)
        yield from self.stubs.pre_scan()

    def scan_core(self):
        print("TOMCAT Sequeence scan (via Jinjad AeroScript)")
        t_start = time.time()
        # Configure by copying text to controller file and compiling it
        yield from self.stubs.send_rpc_and_wait(
            "es1_tasks",
            "configure",
            {"text": self.scripttext, "filename": "becExec.ascript", "taskIndex": self.taskIndex},
        )
        # Kickoff: start executing the compiled script
        st = yield from self.stubs.send_rpc_and_wait("es1_tasks", "kickoff")
        st.wait()
        time.sleep(0.5)
        # Complete: wait for the controller task to finish
        yield from self.stubs.complete(device="es1_tasks")
        # Collect - up to implementation
        t_end = time.time()
        t_elapsed = t_end - t_start
        print(f"Elapsed scan time: {t_elapsed}")

    def cleanup(self):
        """Set scan progress to 1 to finish the scan"""
        self.num_pos = self.pointID
        return super().cleanup()
class AeroSnapNStep(AeroScriptedScan):
    """Hardware step scan ('snap and step') generated from an AeroScript template."""

    scan_name = "aero_snapNstep"
    scan_report_hint = "table"
    required_kwargs = ["startpos", "expnum"]
    arg_input = {}
    arg_bundle_size = {"bundle": len(arg_input), "min": None, "max": None}

    def __init__(self, *args, parameter: dict = None, **kwargs):
        """Executes a scripted SnapNStep scan.

        This scan generates and executes an AeroScript file to run
        a hardware step scan on the Aerotech controller.
        The script is generated from a template file using jinja2.

        Examples:
            >>> scans.scans.aero_snapNstep(startpos=42, range=180, expnum=1800, exptime=0.1)
        """
        super().__init__(parameter=parameter, **kwargs)
        self.axis = []
        self.scan_motors = ["es1_roty"]
        self.num_pos = 0
        # Hard-coded beamline path of the AeroScript template
        # self.filename = "/afs/psi.ch/user/m/mohacsi_i/ophyd_devices/ophyd_devices/epics/devices/aerotech/AerotechSnapAndStepTemplate.ascript"
        self.filename = "/sls/X05LA/data/x05laop/bec/ophyd_devices/ophyd_devices/epics/devices/aerotech/AerotechSnapAndStepTemplate.ascript"
        self.scanTaskIndex = self.caller_kwargs.get("taskindex", 4)
        # Scan geometry and exposure settings
        self.scanStart = self.caller_kwargs.get("startpos")
        self.scanExpNum = self.caller_kwargs.get("expnum")
        self.scanRange = self.caller_kwargs.get("range", 180)
        self.scanExpTime = self.caller_kwargs.get("exptime", 0.1)
        self.scanStepSize = self.scanRange / self.scanExpNum
        # self.scanVel = self.caller_kwargs.get("velocity", 30)
        # self.scanTra = self.caller_kwargs.get("travel", 80)
        # self.scanAcc = self.caller_kwargs.get("acceleration", 500)
        # Substitutions rendered into the jinja2 template
        self.subs = {
            "startpos": self.scanStart,
            "stepsize": self.scanStepSize,
            "numsteps": self.scanExpNum,
            "exptime": self.scanExpTime,
        }

    def scan_core(self):
        print("TOMCAT Snap N Step scan (via Jinjad AeroScript)")
        # Read primary devices once, then run template execution from parent
        yield from self.stubs.read_and_wait(group="primary", wait_group="readout_primary", pointID=self.pointID)
        self.pointID += 1
        yield from super().scan_core()
        # Collect - Throws a warning due to returning a generator
        yield from self.stubs.send_rpc_and_wait("es1_ddaq", "npoints.put", self.scanExpNum)
        # st = yield from self.stubs.send_rpc_and_wait("es1_ddaq", "collect")
        # st.wait()
class AeroScriptedSequence(AeroScriptedScan):
    """Hardware sequence scan generated from an AeroScript template."""

    scan_name = "aero_scripted_sequence"
    scan_report_hint = "table"
    required_kwargs = ["startpos", "ranges"]
    arg_input = {}
    arg_bundle_size = {"bundle": len(arg_input), "min": None, "max": None}

    def __init__(self, *args, parameter: dict = None, **kwargs):
        """Executes a scripted sequence scan.

        This scan generates and executes an AeroScript file to run a hardware sequence scan on the
        Aerotech controller. You might win a few seconds this way, but it has some limtations...
        The script is generated from a template file using jinja2.

        Examples:
            >>> scans.aero_scripted_sequence(startpos=42, ranges=([179.9, 0.1, 5]), expnum=3600, repnum=3, repmode="PosNeg")
        """
        super().__init__(parameter=parameter, **kwargs)
        self.axis = []
        self.scan_motors = ["es1_roty"]
        self.num_pos = 0
        # Hard-coded path of the AeroScript sequence template
        self.filename = "/afs/psi.ch/user/m/mohacsi_i/ophyd_devices/ophyd_devices/epics/devices/aerotech/AerotechSimpleSequenceTemplate.ascript"
        self.scanTaskIndex = self.caller_kwargs.get("taskindex", 4)
        # Scan geometry and repetition settings
        self.scanStart = self.caller_kwargs.get("startpos")
        self.scanRanges = self.caller_kwargs.get("ranges")
        self.scanExpNum = self.caller_kwargs.get("expnum", 25000)
        self.scanRepNum = self.caller_kwargs.get("repnum", 1)
        self.scanRepMode = self.caller_kwargs.get("repmode", "Pos")
        # Motion parameters
        self.scanVel = self.caller_kwargs.get("velocity", 30)
        self.scanTra = self.caller_kwargs.get("travel", 80)
        self.scanAcc = self.caller_kwargs.get("acceleration", 500)
        self.scanSafeDist = self.caller_kwargs.get("safedist", 10)
        # Base substitutions for the jinja2 template; pre_scan adds PSO bounds
        self.subs = {
            "startpos": self.scanStart,
            "scandir": self.scanRepMode,
            "nrepeat": self.scanRepNum,
            "npoints": self.scanExpNum,
            "scanvel": self.scanVel,
            "jogvel": self.scanTra,
            "scanacc": self.scanAcc,
        }
        if self.scanRepMode not in ["PosNeg", "Pos", "NegPos", "Neg"]:
            raise RuntimeError(f"Unexpected sequence repetition mode: {self.scanRepMode}")
        # NOTE(review): no-op assignment (see AeroSequenceScan); the flat
        # [on, off, reps] case is handled by the TypeError fallback below.
        if isinstance(self.scanRanges[0], (int, float)):
            self.scanRanges = self.scanRanges

    def pre_scan(self):
        # Calculate PSO positions from tables.
        # AccDist: distance needed to reach scan velocity plus a safety margin.
        AccDist = 0.5 * self.scanVel * self.scanVel / self.scanAcc + self.scanSafeDist
        # Relative PSO bounds (alternating on/off distances)
        self.psoBoundsPos = [AccDist]
        try:
            for line in self.scanRanges:
                print(f"Line is: {line} of type {type(line)}")
                for rr in range(int(line[2])):
                    self.psoBoundsPos.append(line[0])
                    self.psoBoundsPos.append(line[1])
        except TypeError:
            # Fallback: scanRanges is a single flat [on, off, reps] triple
            line = self.scanRanges
            print(f"Line is: {line} of type {type(line)}")
            for rr in range(int(line[2])):
                self.psoBoundsPos.append(line[0])
                self.psoBoundsPos.append(line[1])
        # Drop the trailing 'off' distance after the last repetition
        del self.psoBoundsPos[-1]
        # Negative-direction bounds are the mirrored positive bounds
        self.psoBoundsNeg = [AccDist]
        self.psoBoundsNeg.extend(self.psoBoundsPos[::-1])
        # Total travel including run-up and run-out distances
        self.scanrange = 2 * AccDist + np.sum(self.psoBoundsPos)
        if self.scanRepMode in ["PosNeg", "Pos"]:
            self.PosStart = self.scanStart - AccDist
        elif self.scanRepMode in ["NegPos", "Neg"]:
            self.PosStart = self.scanStart + AccDist
        else:
            raise RuntimeError(f"Unexpected sequence repetition mode: {self.scanRepMode}")
        print(f"\tCalculated scan range: {self.PosStart} range {self.scanrange}")
        # ToDo: We could append all distances and write a much longer 'distance array'. this would elliminate the need of rearming...
        # Hand the computed bounds to the template renderer
        self.subs.update(
            {
                "psoBoundsPos": self.psoBoundsPos,
                "psoBoundsNeg": self.psoBoundsNeg,
                "scanrange": self.scanrange,
            }
        )
        # Move roughly to start position (at travel velocity)
        yield from self.stubs.send_rpc_and_wait(
            "es1_roty",
            "configure",
            {"velocity": self.scanTra, "acceleration": self.scanTra / self.scanAcc},
        )
        st = yield from self.stubs.send_rpc_and_wait("es1_roty", "move", self.PosStart)
        st.wait()
        # Parent renders the template and runs the generic pre_scan stubs
        yield from super().pre_scan()

    def scan_core(self):
        print("TOMCAT Sequence scan (via Jinjad AeroScript)")
        # Run template execution from parent
        yield from super().scan_core()
        # Collect - Throws a warning due to returning a generator
        yield from self.stubs.send_rpc_and_wait("es1_ddaq", "npoints.put", self.scanExpNum)
        # st = yield from self.stubs.send_rpc_and_wait("es1_ddaq", "collect")
        # st.wait()

View File

@@ -0,0 +1,122 @@
import time
import numpy as np
from bec_lib import bec_logger
from bec_server.scan_server.scans import AsyncFlyScanBase, ScanArgType, ScanBase
logger = bec_logger.logger
class GigaFrostStepScan(AsyncFlyScanBase):
    """Test scan for running the GigaFrost with standard DAQ."""

    scan_name = "gigafrost_line_scan"
    scan_report_hint = "table"
    required_kwargs = ["motor", "range"]
    arg_input = {}
    arg_bundle_size = {"bundle": len(arg_input), "min": None, "max": None}

    def __init__(self, *args, parameter: dict = None, **kwargs):
        """Example step scan

        Perform a simple step scan with a motor while software triggering the
        gigafrost burst sequence at each point and recording it to the StdDAQ.
        Actually only the configuration is gigafrost specific, everything else
        is just using standard Bluesky event model.

        Example
        -------
        >>> scans.gigafrost_line_scan(motor='eyex', range=(-5, 5), steps=10, exp_time=20, exp_burst=10, relative=True)

        Parameters
        ----------
        motor: str
            Scan motor name, moveable, mandatory.
        range : (float, float)
            Scan range of the axis.
        steps : int, optional
            Number of scan steps to cover the range. (default = 10)
        exp_time : float, optional [0.01 ... 40]
            Exposure time for each frame in [ms]. The IOC fixes the exposure
            period to be 2x this long so it doesn't matter. (default = 5)
        exp_burst : int, optional
            Number of images to be taken for each scan point. (default = 10)
        relative : boolean, optional
            Toggle between relative and absolute input coordinates.
            (default = False)

        Raises
        ------
        RuntimeError
            If ``steps`` is not at least 1.
        """
        super().__init__(parameter=parameter, **kwargs)
        self.axis = []
        self.num_pos = 0
        self.scan_motors = [self.caller_kwargs.get("motor")]
        self.scan_range = self.caller_kwargs.get("range")
        self.scan_relat = self.caller_kwargs.get("relative", False)
        self.scan_steps = int(self.caller_kwargs.get("steps", 10))
        self.scan_exp_t = float(self.caller_kwargs.get("exp_time", 5))
        # The IOC fixes the exposure period to twice the exposure time.
        self.scan_exp_p = 2 * self.scan_exp_t
        self.scan_exp_b = int(self.caller_kwargs.get("exp_burst", 10))
        if self.scan_steps <= 0:
            raise RuntimeError("Requires at least one scan step")

    def prepare_positions(self):
        """Compute the absolute target positions for all scan points.

        Populates ``self.start_pos``, ``self.step_size``, ``self.positions``
        (``steps + 1`` points from start to end of the range, inclusive) and
        ``self.num_positions``.
        """
        # Calculate scan start position; in relative mode, offset from the
        # current motor position.
        if self.scan_relat:
            pos = yield from self.stubs.send_rpc_and_wait(self.scan_motors[0], "read")
            self.start_pos = pos[self.scan_motors[0]].get("value") + self.scan_range[0]
        else:
            self.start_pos = self.scan_range[0]
        # Calculate step size
        self.step_size = (self.scan_range[1] - self.scan_range[0]) / self.scan_steps
        # steps + 1 evenly spaced points covering the full range. (Previously
        # the start position was duplicated and the end of the range was never
        # reached.)
        self.positions = [
            self.start_pos + ii * self.step_size for ii in range(self.scan_steps + 1)
        ]
        self.num_positions = len(self.positions)

    def pre_scan(self):
        """Move the scan motor to the start position before scanning."""
        # Move roughly to start position
        print(f"Scan start position: {self.start_pos}")
        st = yield from self.stubs.send_rpc_and_wait(self.scan_motors[0], "move", self.start_pos)
        st.wait()
        # NOTE(review): the sibling scans call super().pre_scan() here —
        # confirm stubs.pre_scan() is the intended call.
        yield from self.stubs.pre_scan()

    def stage(self):
        """Configure the GigaFrost client for the upcoming acquisition."""
        config = {
            "ntotal": self.scan_steps * self.scan_exp_b,
            "nimages": self.scan_exp_b,
            "exposure": self.scan_exp_t,
            "period": self.scan_exp_p,
            # NOTE(review): hard-coded ROI — confirm against the detector setup.
            "pixel_width": 480,
            "pixel_height": 128,
        }
        yield from self.stubs.send_rpc_and_wait("gfclient", "configure", config)
        yield from super().stage()

    def scan_core(self):
        """Step through all positions, software-triggering a burst at each point."""
        self.pointID = 0
        for ii in range(self.num_positions):
            print(f"Point: {ii}")
            st = yield from self.stubs.send_rpc_and_wait(self.scan_motors[0], "move", self.positions[ii])
            st.wait()
            st = yield from self.stubs.send_rpc_and_wait("gfclient", "trigger")
            st.wait()
            self.pointID += 1
            # Short settle time between points; longer wait after the last one
            # to let the DAQ drain.
            time.sleep(0.2)
        time.sleep(1)
        print("Scan done\n\n")

    def cleanup(self):
        """Set scan progress to 1 to finish the scan"""
        self.num_pos = 1
        print("Scan cleanup\n\n")
        return super().cleanup()