changes by Martin and John

This commit is contained in:
2024-05-15 15:22:01 +02:00
parent 5e954dec77
commit 153333f336
4 changed files with 151 additions and 57 deletions

View File

@@ -130,13 +130,13 @@ class AppCfg(QSettings):
"compression" : True, "compression" : True,
"factor" : 11.33, "factor" : 11.33,
"geometry" : True, "geometry" : True,
"double_pixel_action" : "mask", "double_pixels_action" : "mask",
"remove_raw_files" : False, "remove_raw_files" : False,
"save_dap_results" : True, "save_dap_results" : True,
"crystfel_lists_laser" : True, "crystfel_lists_laser" : True,
})) }))
if AppCfg.DAQ_LOC not in keys: if AppCfg.DAQ_LOC not in keys:
dflt.append((AppCfg.DAQ_LOC, {'end_station':"cristallina", 'p_group':"p20516"})) dflt.append((AppCfg.DAQ_LOC, {'end_station':"cristallina", 'p_group':"p20516", 'jungfraujoch' : False,}))
if AppCfg.DAQ_RUN not in keys: if AppCfg.DAQ_RUN not in keys:
dflt.append((AppCfg.DAQ_RUN, {'prefix':'jf', 'padding':10, 'cell_name' : 'na', 'block_size':1000,})) dflt.append((AppCfg.DAQ_RUN, {'prefix':'jf', 'padding':10, 'cell_name' : 'na', 'block_size':1000,}))
#if AppCfg.DAQ_BS_CH not in keys: #if AppCfg.DAQ_BS_CH not in keys:
@@ -145,7 +145,8 @@ class AppCfg(QSettings):
"SARFE10-PSSS059:FIT-COM", "SARFE10-PSSS059:FIT-FWHM", "SARFE10-PSSS059:FIT-RES", "SARFE10-PSSS059:FIT-RMS", "SARFE10-PSSS059:FIT_ERR", "SARFE10-PSSS059:SPECT-COM", "SARFE10-PSSS059:SPECT-RES", "SARFE10-PSSS059:FIT-COM", "SARFE10-PSSS059:FIT-FWHM", "SARFE10-PSSS059:FIT-RES", "SARFE10-PSSS059:FIT-RMS", "SARFE10-PSSS059:FIT_ERR", "SARFE10-PSSS059:SPECT-COM", "SARFE10-PSSS059:SPECT-RES",
"SARFE10-PSSS059:SPECT-RMS", "SAROP31-PBPS113:Lnk9Ch0-PP_VAL_PD0", "SAROP31-PBPS113:Lnk9Ch0-PP_VAL_PD1", "SAROP31-PBPS113:Lnk9Ch0-PP_VAL_PD2", "SAROP31-PBPS113:Lnk9Ch0-PP_VAL_PD3", "SAROP31-PBPS113:XPOS", "SARFE10-PSSS059:SPECT-RMS", "SAROP31-PBPS113:Lnk9Ch0-PP_VAL_PD0", "SAROP31-PBPS113:Lnk9Ch0-PP_VAL_PD1", "SAROP31-PBPS113:Lnk9Ch0-PP_VAL_PD2", "SAROP31-PBPS113:Lnk9Ch0-PP_VAL_PD3", "SAROP31-PBPS113:XPOS",
"SAROP31-PBPS113:YPOS", "SAROP31-PBPS113:INTENSITY", "SAROP31-PBPS113:INTENSITY_UJ", "SAROP31-PBPS149:Lnk9Ch0-PP_VAL_PD0", "SAROP31-PBPS149:Lnk9Ch0-PP_VAL_PD1", "SAROP31-PBPS149:Lnk9Ch0-PP_VAL_PD2", "SAROP31-PBPS113:YPOS", "SAROP31-PBPS113:INTENSITY", "SAROP31-PBPS113:INTENSITY_UJ", "SAROP31-PBPS149:Lnk9Ch0-PP_VAL_PD0", "SAROP31-PBPS149:Lnk9Ch0-PP_VAL_PD1", "SAROP31-PBPS149:Lnk9Ch0-PP_VAL_PD2",
"SAROP31-PBPS149:Lnk9Ch0-PP_VAL_PD3", "SAROP31-PBPS149:XPOS", "SAROP31-PBPS149:YPOS", "SAROP31-PBPS149:INTENSITY", "SAROP31-PBPS149:INTENSITY_UJ"))) #list of BS channels "SAROP31-PBPS149:Lnk9Ch0-PP_VAL_PD3", "SAROP31-PBPS149:XPOS", "SAROP31-PBPS149:YPOS", "SAROP31-PBPS149:INTENSITY", "SAROP31-PBPS149:INTENSITY_UJ", "SARES30-LSCP1-FNS:CH7:VAL_GET", "SARES30-LSCP1-FNS:CH6:VAL_GET",
"SARES30-LSCP1-FNS:CH5:VAL_GET", "SARES30-LSCP1-FNS:CH4:VAL_GET", "SARES30-LSCP1-FNS:CH3:VAL_GET", "SARES30-LSCP1-FNS:CH2:VAL_GET", "SARES30-LSCP1-FNS:CH1:VAL_GET", "SARES30-LSCP1-FNS:CH0:VAL_GET"))) #list of BS channels
#if AppCfg.DAQ_PV_CH not in keys: #if AppCfg.DAQ_PV_CH not in keys:
dflt.append((AppCfg.DAQ_PV_CH, ("SARUN03-UIND030:K_SET.VAL", "SARUN04-UIND030:K_SET.VAL", "SARUN05-UIND030:K_SET.VAL", "SARUN06-UIND030:K_SET.VAL", "SARUN07-UIND030:K_SET.VAL", "SARUN08-UIND030:K_SET.VAL", "SARUN09-UIND030:K_SET.VAL", dflt.append((AppCfg.DAQ_PV_CH, ("SARUN03-UIND030:K_SET.VAL", "SARUN04-UIND030:K_SET.VAL", "SARUN05-UIND030:K_SET.VAL", "SARUN06-UIND030:K_SET.VAL", "SARUN07-UIND030:K_SET.VAL", "SARUN08-UIND030:K_SET.VAL", "SARUN09-UIND030:K_SET.VAL",
"SARUN10-UIND030:K_SET.VAL", "SARUN11-UIND030:K_SET.VAL", "SARUN12-UIND030:K_SET.VAL", "SARUN13-UIND030:K_SET.VAL", "SARUN14-UIND030:K_SET.VAL", "SARUN15-UIND030:K_SET.VAL", "SARCL02-MBND100:P-READ", "SARUN10-UIND030:K_SET.VAL", "SARUN11-UIND030:K_SET.VAL", "SARUN12-UIND030:K_SET.VAL", "SARUN13-UIND030:K_SET.VAL", "SARUN14-UIND030:K_SET.VAL", "SARUN15-UIND030:K_SET.VAL", "SARCL02-MBND100:P-READ",
@@ -421,14 +422,15 @@ verbose bits:
{'name':'compression', 'value':daq_det['compression'], 'type':'bool'},
{'name':'factor', 'value':daq_det['factor'], 'type':'float','step':0.01},
{'name':'geometry', 'value':daq_det['geometry'], 'type':'bool'},
- {'name':'double_pixel_action', 'value':daq_det['double_pixel_action'], 'type':'str'},
+ {'name':'double_pixels_action', 'value':daq_det['double_pixels_action'], 'type':'str'},
{'name':'remove_raw_files', 'value':daq_det['remove_raw_files'], 'type':'bool'},
{'name':'save_dap_results', 'value':daq_det['save_dap_results'], 'type':'bool'},
{'name':'crystfel_lists_laser','value':daq_det['crystfel_lists_laser'],'type':'bool'}
]},
{'name':AppCfg.DAQ_LOC, 'title':'location', 'type':'group', 'children':[
{'name':'end_station', 'value':daq_loc['end_station'], 'type':'str',},
{'name':'p_group', 'value':daq_loc['p_group'], 'type':'str'},
+ {'name':'jungfraujoch', 'value':daq_loc['jungfraujoch'], 'type':'bool'},
]},
{'name':AppCfg.DAQ_RUN, 'title':'location', 'type':'group', 'children':[
{'name':'prefix', 'value':daq_run['prefix'], 'type':'str',},
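The new 'jungfraujoch' flag added to the DAQ_LOC defaults and to the location parameter group is read back from QSettings by the Jungfrau class in the third file of this commit. A minimal sketch of that read-back path (illustration only; the QApplication/_cfg wiring is assumed from the rest of the repo):

app = QApplication.instance()
cfg = app._cfg
loc = cfg.value(AppCfg.DAQ_LOC)  # e.g. {'end_station': "cristallina", 'p_group': "p20516", 'jungfraujoch': False}
if loc['jungfraujoch']:
    pass  # take the Jungfraujoch (JFJ) path instead of the plain sf-daq detector list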

View File

@@ -12,6 +12,8 @@ based on xxx code snipplets
import numpy as np
from tqdm import tqdm
+ from time import time
+ from time import sleep
from slic.core.acquisition import SFAcquisition
from slic.core.acquisition.sfacquisition import BSChannels, transpose_dicts, print_response
@@ -23,72 +25,95 @@ class CTAAcquisition(SFAcquisition):
def __init__(self, cta, *args, n_block_size=None, **kwargs):
self.cta = cta
self.n_block_size = n_block_size
+ self.wait_time=60.0
super().__init__(*args, **kwargs)
def acquire(self, filename, data_base_dir=None, detectors=None, channels=None, pvs=None, scan_info=None, n_block_size=None, n_pulses=100, n_repeat=1, is_scan_step=False, wait=True, **kwargs):
if n_repeat != 1:
raise NotImplementedError("Repetitions are not implemented") #TODO
if not is_scan_step:
run_number = self.client.next_run()
print(f"Advanced run number to {run_number}.")
else:
run_number = self.client.run_number
print(f"Continuing run number {run_number}.")
# if not filename or filename == "/dev/null":
# print("Skipping retrieval since no filename was given.")
# return
if detectors is None:
print("No detectors specified, using default detector list.")
detectors = self.default_detectors
if pvs is None:
print("No PVs specified, using default PV list.")
pvs = self.default_pvs
if channels is None:
print("No channels specified, using default channel list.")
channels = self.default_channels
n_block_size = n_block_size or self.n_block_size
- print("block size:", n_block_size)
+ #print("block size:", n_block_size)
bschs = BSChannels(*channels)
bschs.check()
client = self.client
+ print(kwargs)
client.set_config(n_pulses, filename, detectors=detectors, channels=channels, pvs=pvs, scan_info=scan_info, **kwargs)
#print('set client')
def _acquire():
- print('we even made it this far, woah')
- self.cta.stop()
- print('one more')
- self.cta.start()
- print('cta started')
- # sleep(0.1) ###?
- time_start = time()
- print('time')
- start_pid = self.cta.get_start_pid()
- print('reached start')
- print("CTA start pid:", start_pid)
- print("didn't print start")
- stop_pid = start_pid + n_pulses
- pids = np.arange(start_pid, stop_pid)
- pids_blocks = split(pids, n_block_size)
- print('did a block thing')
- for pb in tqdm(pids_blocks):
- # if filename:
- res = self.retrieve(filename, pb, run_number=run_number)
- # else:
- # res = {}
- res = transpose_dicts(res)
- filenames = res.pop("filenames")
+ try:
+ self.cta.stop()
+ self.cta.start()
+ sleep(0.1) ###?
+ time_start = time()
+ start_pid = self.cta.get_start_pid()
+ print("CTA start pid:", start_pid)
+ stop_pid = start_pid + n_pulses
+ #print("CTA stop pid:", stop_pid)
+ pids = np.arange(start_pid, stop_pid)
+ pids_blocks = split(pids, n_block_size)
+ #print(pids_blocks)
+ for pb in tqdm(pids_blocks):
+ #print(pb)
+ if filename:
+ sleep(n_block_size / 100 + 0.2)
+ res = self.retrieve(filename, pb, run_number=run_number, scan_info=scan_info, **kwargs) # error here
+ res = transpose_dicts(res)
+ filenames = res.pop("filenames")
+ #print(filenames)
+ else:
+ res = {}
+ #print(res)
print_response(res)
# possibly optional, because cta should be done!
while self.cta.is_running():
sleep(self.wait_time)
delta_time = time() - time_start
print(f"Waiting since {delta_time} seconds for CTA sequence to finish")
return filenames
+ except Exception as e:
+ print("error we should get: ", e)
def stopper():
client.stop()
self.cta.stop()
task = DAQTask(_acquire, stopper=stopper, filename=filename, hold=False)
self.current_task = task
if wait:
@@ -96,22 +121,25 @@ class CTAAcquisition(SFAcquisition):
task.wait()
except KeyboardInterrupt:
print("Stopped current DAQ task:")
return task
def split(a, block_size):
if block_size is None:
return [a]
length = len(a)
indices = np.arange(block_size, length, block_size) # must not start at 0, otherwise the first entry is an empty array
return np.array_split(a, indices)
if __name__ == "__main__": if __name__ == "__main__":
from slic.devices.timing.events import CTASequencer from slic.devices.timing.events import CTASequencer
cta = CTASequencer("SAT-CCTA-ESE") cta = CTASequencer("SAT-CCTA-ESC")
daq = CTAAcquisition(cta, "maloja", "p19509", default_channels=["SAT-CVME-TIFALL5:EvtSet"], append_user_tag_to_data_dir=True) daq = CTAAcquisition(cta, "cristallina", "p19150", default_channels=["SAR-CVME-TIFALL6:EvtSet"], append_user_tag_to_data_dir=True)
cta.cfg.repetitions = n_pulses # etc. etc. cta.cfg.repetitions = n_pulses # etc. etc.
#daq.acquire("test") daq.acquire("test")
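The split() helper above feeds the per-block retrieval loop in _acquire(). A minimal, self-contained sketch of its behaviour, with made-up pulse-id values for illustration:

import numpy as np

def split(a, block_size):
    if block_size is None:
        return [a]
    indices = np.arange(block_size, len(a), block_size)  # must not start at 0, else the first block is empty
    return np.array_split(a, indices)

pids = np.arange(1000, 1007)  # 7 pulse ids (made-up values)
print(split(pids, 3))         # [array([1000, 1001, 1002]), array([1003, 1004, 1005]), array([1006])]
print(split(pids, None))      # [array([1000, 1001, 1002, 1003, 1004, 1005, 1006])]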

View File

@@ -28,6 +28,7 @@ try:
from slic.core.acquisition import SFAcquisition
from slic.devices.timing.events import CTASequencer
from ctadaq import CTAAcquisition
+ from jfjoch_device import JFJ
except ImportError as e:
_log.warning(e)
@@ -35,6 +36,10 @@ class Shutter:
def __init__(self,mode=1):
self._mode=mode
+ app=QApplication.instance() #apologies! Wasn't sure how best to do this, could maybe feed sync_flag as a variable to open and close from swissmx.py
+ cfg=app._cfg
+ dt_misc = cfg.value(AppCfg.DT_MISC)
+ self.sync_flag=dt_misc['sync_flag']
def open(self):
mode=self._mode
@@ -43,8 +48,12 @@ class Shutter:
elif mode==1:
# open laser shutter
epics.caput("SLAAR31-LPSYS-ESC:LHX1_SHUT_OPEN", 1)
+ if self.sync_flag==0: #if using cta, sets pulse_picker output to follow pulser 3
# open fast shutter
+ epics.caput("SARES30-LTIM01-EVR0:RearUniv0_SNUMPD", 3)
+ epics.caput("SARES30-LTIM01-EVR0:RearUniv0_SNUMPD2", 3)
+ epics.caput("SARES30-LTIM01-EVR0:RearUniv0_SOURCE", 0)
+ epics.caput("SARES30-LTIM01-EVR0:RearUniv0_SOURCE2", 0)
epics.caput("SARES30-LTIM01-EVR0:RearUniv0-Ena-SP", 1)
_log.info('shutter opened')
@@ -53,13 +62,19 @@ class Shutter:
if mode==0:
_log.info('close simulated shutter')
elif mode==1:
- # close fast shutter
- epics.caput("SARES30-LTIM01-EVR0:RearUniv0-Ena-SP", 0)
# close laser shutter
epics.caput("SLAAR31-LPSYS-ESC:LHX1_SHUT_CLOSE", 1)
_log.info('shutter closed')
+ # close fast shutter
+ epics.caput("SARES30-LTIM01-EVR0:RearUniv0-Ena-SP", 0)
+ if self.sync_flag==0: #if using cta, sets pulse_picker output back to high low, could do this every time?
+ epics.caput("SARES30-LTIM01-EVR0:RearUniv0_SNUMPD", 1)
+ epics.caput("SARES30-LTIM01-EVR0:RearUniv0_SNUMPD2", 1)
+ epics.caput("SARES30-LTIM01-EVR0:RearUniv0_SOURCE", 3)
+ epics.caput("SARES30-LTIM01-EVR0:RearUniv0_SOURCE2", 4)
class Deltatau:
def __init__(self,sim=False):
app=QApplication.instance()
@@ -106,22 +121,66 @@ class Jungfrau:
_log.info(f'simulated')
app=QApplication.instance() #temproary fix, couldnt access these in function, maybe the bt above needs to be self.detectors ... etc
cfg=app._cfg
- detectors = [ cfg.value(AppCfg.DAQ_DET) ]
+ det = cfg.value(AppCfg.DAQ_DET)
+ print("det", repr(det))
+ detectors = [det] if det.get("name") else None
bs_channels = cfg.value(AppCfg.DAQ_BS_CH)
pv_channels = cfg.value(AppCfg.DAQ_PV_CH)
loc=cfg.value(AppCfg.DAQ_LOC)
+ dt_misc = cfg.value(AppCfg.DT_MISC)
code_gen=kwargs.get('code_gen',0)
- if code_gen==3:
+ sync_mode=dt_misc['sync_mode']
+ sync_flag=dt_misc['sync_flag']
+ if loc['jungfraujoch']:
+ try:
+ self.jfj = JFJ("http://sf-daq-2:5232")
+ self.detectors=None
+ except:
+ self.jfj = None
+ else:
+ self.jfj = None
+ if sync_flag==0:
grid_cnt=kwargs['grid']['count']
kwargs['grid']['count']
- self._cta=cta=CTASequencer("SAR-CCTA-ESC")
repetitions=grid_cnt[0] #'x' or number of columns
- cta_multiplier=grid_cnt[1]-1 #'y' or number of appertures in a column/number of rows
+ cta_multiplier=grid_cnt[1] #'y' or number of appertures in a column/number of rows
- #kwargs['tmove'] kwargs['twait']
+ cta=CTASequencer("SAR-CCTA-ESC")
- xray_seq=[0,]*(kwargs['twait']//kwargs['tmove'])+[1] # multiplier is proportional to wait_time i.e. 10 ms = 1, 20 ms =2, 30 ms =3.
- cta.seq[200]=xray_seq*cta_multiplier # x-ray_shutter
- laser_seq=[1,]*(kwargs['twait']//kwargs['tmove'])+[1]
- cta.seq[215]=laser_seq*cta_multiplier # laser_shutter
+ if code_gen==3:
+ wait_pulses=kwargs['twait']//kwargs['tmove']
+ #xray_seq=[0,]*wait_pulses+[1] # multiplier is proportional to wait_time i.e. 10 ms = 1, 20 ms =2, 30 ms =3.
+ xray_seq=[1,]+[0,]*wait_pulses
+ cta.seq[200]=xray_seq*cta_multiplier # x-ray_shutter
+ laser_seq=[1,]*wait_pulses+[1]
+ cta.seq[215]=laser_seq*cta_multiplier # laser_shutter
+ droplet_sequence=[0, 1, 0, 0] # sub 8 ms delay = [0, 0, 1, 0]
+ cta.seq[216]=droplet_sequence*(cta_multiplier//2)
+ cta.seq[214]=[1,]+[0,]*(len(laser_seq*cta_multiplier)-1)
+ else:
+ print('not code gen 3')
+ #cta_multiplier-=14
+ #cta.seq[200]=[0,0,0,0,0,0,0]+[1,]*cta_multiplier + [0,0,0,0,0,0,0] # for +7 row chip x-ray
+ #cta.seq[215]=[0,0,0,0,0,0,0]+[1,]*cta_multiplier + [0,0,0,0,0,0,0] # for +7 row chip laser
+ #cta.seq[214]=[1,0,0,0,0,0,0]+[0,]*cta_multiplier + [0,0,0,0,0,0,0] # for +7 row chip start
+ # no extra rows
+ cta.seq[214]=[1,]+[0,]*(cta_multiplier-1)
+ cta.seq[200]=[1,]*cta_multiplier # x-ray_shutter
+ #cta.seq[215]=[1,]*cta_multiplier # laser_shutter
+ #cta.seq[216]=[1,]*cta_multiplier # droplet_ejector all
+ cta.seq[216]=[1,0,]*(cta_multiplier//2) # droplet_ejector 1:1
+ #cta.seq[216]=[1,0,0,]*(cta_multiplier//3) # droplet_ejector 1:2
+ #cta.seq[216]=[1,0,0,0,0,0,]*(cta_multiplier//6) # droplet_ejector 1:5
+ #cta.seq[216]=[1,0,0,0,0,0,0,0,0,]*(cta_multiplier//9) # droplet_ejector 1:8
+ #cta.seq[216]=[1,0,0,0,0,0,0,0,0,0,0,0,]*(cta_multiplier//12) # droplet_ejector 1:11
+ # skip 6 rows add 6 on end -- first 2 and 1 216
+ #cta.seq[214]=[1,0,0,0,0,0,]+[0,]*(cta_multiplier-12)+[0,0,0,0,0,0,]
+ #cta.seq[200]=[0,0,0,0,0,0,]+[1,]*(cta_multiplier-12)+[0,0,0,0,0,0,] # x-ray_shutter
+ #cta.seq[216]=[0,0,0,0,0,0,]+[1,]*(cta_multiplier-12)+[0,0,0,0,0,0,] # droplet_ejector all
+ #cta.seq[216]=[0,0,0,0,0,0,]+[1,0,]*((cta_multiplier-12)//2)+[0,0,0,0,0,0,] # droplet_ejector 1:1
+ #cta.seq[216]=[0,0,0,0,0,0,]+[1,0,0,]*((cta_multiplier-12)//3)+[0,0,0,0,0,0] # droplet_ejector 1:2
+ #cta.seq[216]=[0,0,0,0,0,0,]+[1,0,0,0]*((cta_multiplier-12)//4)+[0,0,0,0,0,0] # droplet_ejector 1:3
+ #cta.seq[216]=[0,0,0,0,0,0,]+[1,0,0,0,0,0,]*((cta_multiplier-12)//6)+[0,0,0,0,0,0]# droplet_ejector 1:5
+ #cta.seq[216]=[0,0,0,0,0,0,]+[1,0,0,0,0,0,0,0,0,0,0,0,]*((cta_multiplier-12)//12)+[0,0,0,0,0,0] # droplet_ejector 1:11
cta.cfg.repetitions=repetitions # self._cta.cfg.repetitions = n_pulses_run/cta_multiplier
cta.seq.upload()
self._daq=CTAAcquisition(cta, loc['end_station'], loc['p_group'], default_detectors=detectors,
@@ -147,11 +206,15 @@ class Jungfrau:
except TypeError as e:
_log.warning(f'failed to get _pulse_id_start: {e}')
n_pulses_run = n_pulses + run['padding']
+ n_pulses_run*=2 # comment me out please when not using 10 ms wait (for stop and go)
+ print('number of triggers ', n_pulses_run, ' is greater than the number of appertures', n_pulses)
block_size = run['block_size']
+ if self.jfj:
+ self.jfj.acquire(beam_x_pxl = 1613, beam_y_pxl = 1666, detector_distance_mm = 151, photon_energy_keV = 12, sample_name = run['cell_name'], file_prefix = run['prefix'], ntrigger = n_pulses_run)
if type(self._daq) is CTAAcquisition:
- self._daq.acquire(run['prefix'], n_pulses=min(n_pulses_run, block_size), n_block_size=block_size, wait=False, cell_name=run['cell_name'])
+ self._daq.acquire(run['prefix'], n_pulses=max(n_pulses_run, block_size), n_block_size=block_size, wait=False, cell_name=run['cell_name'])
else:
- self._daq.acquire(run['prefix'], n_pulses=min(n_pulses_run, block_size), n_repeat=ceil(n_pulses_run/block_size), wait=False, cell_name=run['cell_name'])
+ self._daq.acquire(run['prefix'], n_pulses=max(n_pulses_run, block_size), n_repeat=ceil(n_pulses_run/block_size), wait=False, cell_name=run['cell_name'])
cfg.setValue(AppCfg.DAQ_RUN,run)
def gather_upload(self):
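A worked example of the new code_gen==3 sequence construction in the hunk above, with made-up timing and grid values to show how the list lengths come out (illustration only; the values and helper names are assumptions, not from the commit):

twait, tmove = 30, 10                         # ms, assumed
grid_cnt = (5, 4)                             # x columns, y rows per column, assumed
wait_pulses = twait // tmove                  # 3
xray_seq = [1,] + [0,]*wait_pulses            # [1, 0, 0, 0]
laser_seq = [1,]*wait_pulses + [1,]           # [1, 1, 1, 1]
repetitions = grid_cnt[0]                     # 5
cta_multiplier = grid_cnt[1]                  # 4 (no longer grid_cnt[1]-1)
seq_200 = xray_seq*cta_multiplier             # 16 entries, one x-ray trigger per aperture in a column
seq_215 = laser_seq*cta_multiplier            # 16 entries, laser shutter held open
seq_214 = [1,] + [0,]*(len(seq_215)-1)        # 16 entries, single start pulse per repetition
seq_216 = [0, 1, 0, 0]*(cta_multiplier//2)    # 8 entries, droplet ejector pattern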

View File

@@ -2418,6 +2418,7 @@ Author Thierry Zamofing (thierry.zamofing@psi.ch)
sp.run() # start motion program
sp.wait_armed() # wait until motors are at first position
shutter.open()
+ time.sleep(1.1)
jf.acquire(num_pts)
sp.trigger(1.0) # send a start trigger (if needed) after given time
_log.info('start trigger sent')
@@ -2491,7 +2492,7 @@ Author Thierry Zamofing (thierry.zamofing@psi.ch)
p_fx, p_fy, p_cx, p_cz,=pos_gonio['pos_'+pos]
except KeyError:
raise ValueError("Goniometer position *{}* is not known!!")
- _log.info(f"moving goniometer {pos} to fx:{p_fx:.5g},fy:{p_fy:.5g},cx:{p_cx:.5g},cz_{p_cz:.5g},ry:{p_ry:.5g}")
+ _log.info(f"moving goniometer {pos} to fx:{p_fx:.5g},fy:{p_fy:.5g},cx:{p_cx:.5g},cz_{p_cz:.5g}") #,ry:{p_ry:.5g}")
tw_fx.move_abs(p_fx)
tw_fy.move_abs(p_fy)