diff --git a/backlight.py b/backlight.py
index 6018b9d..203cd9d 100755
--- a/backlight.py
+++ b/backlight.py
@@ -56,8 +56,8 @@ if __name__ == "__main__":
     args = parser.parse_args()
 
     os.environ['EPICS_CA_ADDR_LIST'] = '129.129.244.255 sf-saresc-cagw.psi.ch:5062 sf-saresc-cagw.psi.ch:5066'
+    _log.info('Arguments:{}'.format(args.__dict__))
-    _log.debug(args)
     bl = Backlight()
     if args.test:
         bl.move('in')
diff --git a/camera.py b/camera.py
index a31c458..61c2813 100755
--- a/camera.py
+++ b/camera.py
@@ -94,13 +94,21 @@ class epics_cam(object):
         print('camera.new_frame_pv_cb')
 
     def get_image(self):
-        print('camera.get_image')
+        try:
+            pv_pic=self.getPv('FPICTURE')
+        except AttributeError:
+            imgSeq=self._sim['imgSeq']
+            idx=self._sim['imgIdx']
+            self._sim['imgIdx']=(idx + 1) % imgSeq.shape[0]
+            _log.info('simulated idx:{}'.format(idx))
+            self.pic=pic=imgSeq[idx]
+            return pic
         try:
             pv_pic=self.getPv('FPICTURE')
             sz=self._sz
             pic = pv_pic.get(count=sz[0]*sz[1], as_numpy=True).reshape(sz[::-1])
         except AttributeError as e:
-            logger.warning("failed to fetch image")
+            _log.warning("failed to fetch image")
         else:
             if pic.dtype==np.int16:
                 pic.dtype=np.uint16
@@ -125,12 +133,18 @@ class epics_cam(object):
             pv_cs.put(v, wait=True)
 
     def run(self,cb=None):
-        pv_cam=self.getPv('CAMERA')
+        try:
+            pv_cam=self.getPv('CAMERA')
+        except AttributeError:
+            _log.info('simulated mode')
+            if cb:
+                _log.error('simulated mode with callback not yet supported:{}'.format(cb))
+            return
         if pv_cam.value==Camera.OFF:
             pv_cs = self.getPv('CAMERASTATUS')
             pv_cs.put(CameraStatus.RUNNING, wait=True)
             while pv_cam.value==Camera.OFF:
-                print('wait...');time.sleep(.5)
+                _log.warning('CAMERASTATUS:OFF, retry...');time.sleep(.5)
         self.update_size()
         if cb is None:
             self._pv['pic'] = epics.PV(self._prefix + "FPICTURE")
@@ -185,6 +199,33 @@ class epics_cam(object):
 
         self.update_size()
 
+    def sim_gen(self,sz=(1500,1000),t=100,mode=0):
+        'generate simulation data'
+        _log.info('generate simulation images, mode:{}...'.format(mode))
+        w,h=sz
+        self._imgSeq=imgSeq=np.ndarray(shape=(t,h,w),dtype=np.uint16)
+        x = np.linspace(-5, 5, w)
+        y = np.linspace(-5, 5, h)
+        # full coordinate arrays
+        xx, yy = np.meshgrid(x, y)
+
+        for i in range(t):
+            #imgSeq[i,:,:] = 100*np.sqrt(np.sin(xx+.1*i)**2 + np.sin(yy+.01*i)**2)#+xx*t+yy*t)
+            #imgSeq[i,:,:] = 100*np.sqrt(np.sin(xx+.1*i)**2 + np.sin((1+.1*np.sin(.2*i))*yy+.001*i**2)**2)#+xx*t+yy*t)
+            #imgSeq[i,:,:] = 100*np.sqrt(np.sin(xx+2*np.sin(i/t*2*np.pi))**2 + np.sin(yy)**2)
+            px=2*np.sin(i/t*2*np.pi)
+            fx=1
+            py=2*np.sin(i/t*2*np.pi)
+            fy=1+.3*np.sin(i/t*2*np.pi*2)
+            imgSeq[i,:,:] = 100*np.sqrt(np.sin(xx*fx+px)**2 + np.sin(yy*fy+py)**2)
+        #np.random.bytes(100)
+        wr=w//4
+        hr=h//4
+        imgSeq[:,0:hr,0:wr]+=np.random.randint(0,100,(t,hr,wr),dtype=np.uint16)
+        self._sim['imgSeq']=imgSeq
+        self._sim['imgIdx']=0
+        _log.info('done')
+
 if __name__ == "__main__":
     import time, os, PIL.Image, platform, subprocess
     import argparse
@@ -203,17 +244,24 @@ if __name__ == "__main__":
 
     parser = argparse.ArgumentParser()
     parser.add_argument("--ui", "-u", help="qt test", type=int, default=0)
+    parser.add_argument("--sim", "-s", help="simulation mode", type=int, default=None)
    parser.add_argument("--prefix","-p",help="PV prefix for images: default=%(default)s",type=str,default="SARES30-CAMS156-SMX-OAV",)
     args = parser.parse_args()
 
-    _log.info('STARTING')
-    if args.prefix=='SwissMxSim':
-        os.environ['EPICS_CA_ADDR_LIST']='localhost'
+    _log.info('Arguments:{}'.format(args.__dict__))
+    if args.sim is not None:
+        args.prefix=None # use simulated camera
+    elif args.prefix=='SwissMxSim':
+        os.environ['EPICS_CA_ADDR_LIST']='localhost' #simulated epics camera
     else:
         os.environ['EPICS_CA_ADDR_LIST'] ='129.129.244.255 sf-saresc-cagw.psi.ch:5062 sf-saresc-cagw.psi.ch:5066'
+
     if not args.ui:
         cam = epics_cam(prefix=args.prefix)
+        if args.prefix is None:
+            cam.sim_gen(mode=args.sim)
+
         #sz=(2448,2048)
         #ctr=(1200,1400)
         #sz=(1200,1000)
@@ -256,7 +304,7 @@ if __name__ == "__main__":
             else:
                 sz=self.update_size()
                 pic=kwargs['value'].reshape(sz[::-1])
-                print('new_frame_pv_cb',pic[-1][-1],kwargs['count'])
+                _log.debug('new_frame_pv_cb count {}'.format(kwargs['count']))
                 if pic.dtype==np.int16:
                     pic.dtype=np.uint16
                 try:
@@ -268,11 +316,37 @@ if __name__ == "__main__":
                     pic=pic[::trf[0,0],::trf[1,1]]
                 else:
                     pic=pic[::trf[0,1],::trf[1,0]].T
+                #fiducial test
+                f=np.array(((0,0,0,0,0),
+                            (0,1,1,1,0),
+                            (0,1,0,0,0),
+                            (0,1,1,0,0),
+                            (0,1,0,0,0),
+                            (0,0,0,0,0),),pic.dtype)
+                pic[0:6,0:5]=f*pic.max()
                 imv.setImage(pic, autoRange=False, autoLevels=False)
 
+        def new_frame_sim_cb(self,arl=False):
+            imgSeq =self._sim['imgSeq']
+            idx    =self._sim['imgIdx']
+            fps    =self._sim['fps']
+            udt    =self._sim['updateTime']
+            self._sim['imgIdx']=(idx+1) % imgSeq.shape[0]
+            #_log.info('simulated idx:{}'.format(idx))
+            pic = imgSeq[idx]
+            imv.setImage(pic, autoRange=arl, autoLevels=arl)
+
+            QtCore.QTimer.singleShot(1, self.new_frame_sim_cb)
+            now = ptime.time()
+            fps2 = 1.0 / (now - udt)
+            self._sim['updateTime'] = now
+            self._sim['fps'] = fps * 0.9 + fps2 * 0.1
+            print("%d %0.1f fps" % (idx,fps))
+
     import sys
     from pyqtgraph.Qt import QtCore, QtGui
     import pyqtgraph as pg
+    import pyqtgraph.ptime as ptime
     print(pg.__version__)
 
     # Interpret image data as row-major instead of col-major
@@ -290,6 +364,8 @@ if __name__ == "__main__":
 
     ## Display the data and assign each frame a time value from 1.0 to 3.0
     cam = UIcamera(prefix=args.prefix)
+
+
     #cam.set_binning(4,4)
     #cam.run()
     #cam.get_image()
@@ -297,6 +373,12 @@ if __name__ == "__main__":
     #cam.stop(None)
 
     cam.run(cam.new_frame_pv_cb)
+    if args.prefix is None:
+        cam.sim_gen(mode=args.sim)
+        cam._sim['fps']=0
+        cam._sim['updateTime'] = ptime.time()
+        cam.new_frame_sim_cb(arl=True)
+
     ## Set a custom color map
     colors = [(0, 0, 0),(45, 5, 61),(84, 42, 55),(150, 87, 60),(208, 171, 141),(255, 255, 255)]
     cmap = pg.ColorMap(pos=np.linspace(0.0, 1.0, 6), color=colors)
diff --git a/geometry.py b/geometry.py
index 2e0507f..f76610e 100644
--- a/geometry.py
+++ b/geometry.py
@@ -9,7 +9,7 @@ coordinate systems, optical center, xray axis, pixel sizes etc.
 '''
 
-class gepmetry:
+class geometry:
     def __init__(self):
         pass
 
@@ -34,9 +34,8 @@ class gepmetry:
     # [pyx pyy]*[ny] results in a vector in meter of a vector [nx,ny] pixels in x and y direction
         pass
 
-    def set_zoom2pixsz():
-        #tx: 2d-vector in m when moving px pixel in x direction
-        #ty: 2d-vector in m when moving py pixel in y direction
+    def set_zoom2pixsz(self, meas):
+        #calculates _lut_z2p out of measurements
         # the _lut_z2p is dictionaty a lookuptable
         # zoom {1,200,400,600,800,1000}
         #[pxx pxy]
@@ -53,6 +52,14 @@ class gepmetry:
             ((10,0),(0,10)), # zoom 1000
             dtype=np.float32)}
 
+        n=len(meas)
+        zoom =np.ndarray(shape=n,dtype=np.float32)
+        pixsz=np.ndarray(shape=(n,2,2),dtype=np.float32)
+
+        for i,(k,v) in enumerate(meas.items()):
+            pass
+        self._lut_z2p={ 'zoom': zoom, 'pixsz': pixsz}
+
     def autofocus():
         # cam camera object
        # mot motor object
@@ -78,5 +85,63 @@ class gepmetry:
 
 
 
+if __name__ == "__main__":
+    import argparse
+    logging.basicConfig(level=logging.DEBUG,format='%(levelname)s:%(module)s:%(lineno)d:%(funcName)s:%(message)s ')
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-m', '--mode', help='mode')
+    parser.add_argument("-t", "--test", help="test sequence", action="store_true")
+
+    args = parser.parse_args()
+    _log.info('Arguments:{}'.format(args.__dict__))
+
+
+    # recorded data:
+    #                         x    y          x    y
+    # Zoom    1  x+1.2       97  543   ->  1100  507
+    #            y+1.0      607  941   ->   575  106
+    # Zoom  200  x+0.6       93  504   ->   853  510
+    #            y+0.4      475  897   ->   472  157
+    # Zoom  400  x+0.5       88  615   ->  1094  579
+    #            y+0.4      705  991   ->   673  190
+    # Zoom  600  x+0.3       32  460   ->   103  416
+    #            y+0.25     551  937   ->   520  106
+    # Zoom  800  x+0.18      65  524   ->  1050  484
+    #            y+0.14     632  946   ->   602  168
+    # Zoom 1000  x+0.1      121  632   ->  1044  592
+    #            y+0.08     593  883   ->   570  145
+
+
+    measure={
+           1:{'x': (+1.2  , (  97, 543),(1100, 507)),
+              'y': (+1.0  , ( 607, 941),( 575, 106))},
+         200:{'x': (+0.6  , (  93, 504),( 853, 510)),
+              'y': (+0.4  , ( 475, 897),( 472, 157))},
+         400:{'x': (+0.5  , (  88, 615),(1094, 579)),
+              'y': (+0.4  , ( 705, 991),( 673, 190))},
+         600:{'x': (+0.3  , (  32, 460),( 103, 416)),
+              'y': (+0.25 , ( 551, 937),( 520, 106))},
+         800:{'x': (+0.18 , (  65, 524),(1050, 484)),
+              'y': (+0.14 , ( 632, 946),( 602, 168))},
+        1000:{'x': (+0.1  , ( 121, 632),(1044, 592)),
+              'y': (+0.08 , ( 593, 883),( 570, 145))},
+        }
+
+    obj=geometry()
+    obj.set_zoom2pixsz(measure)
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/illumination.py b/illumination.py
index 3727913..c10f87d 100755
--- a/illumination.py
+++ b/illumination.py
@@ -131,8 +131,8 @@ if __name__ == "__main__":
     parser.add_argument("-t", "--test", help="test sequence", action="store_true")
 
     args = parser.parse_args()
+    _log.info('Arguments:{}'.format(args.__dict__))
-    _log.debug(args)
 
     host = "129.129.221.92"
     port = 1003
     leds = IlluminationControl(host,port)
diff --git a/zoom.py b/zoom.py
index c43c772..0d8f88c 100755
--- a/zoom.py
+++ b/zoom.py
@@ -108,7 +108,7 @@ class Zoom(QGroupBox, Ui_Zoom):
         lbox.layout().setSpacing(0)
         lbox.layout().setContentsMargins(0, 0, 0, 0)
         self._top_grid.layout().addWidget(lbox)
-        #self.blgt_button = QPushButton(qtawesome.icon("material.lightbulb_outline"), "Backlight")
+        #TODO: self.blgt_button = QPushButton(qtawesome.icon("material.lightbulb_outline"), "Backlight")
         self.blgt_button = QPushButton( "Backlight")
         self.blgt_button.clicked.connect(self.toggle_backlight)
@@ -329,17 +329,21 @@ if __name__ == "__main__":
     parser.add_argument("--sim", "-s", help="simulate all devices", action='store_true')
 
     args = parser.parse_args()
-
     import sys
     import backlight
     import illumination
     import camera
-    _log.info('Start')
+    _log.info('Arguments:{}'.format(args.__dict__))
     os.environ['EPICS_CA_ADDR_LIST'] = '129.129.244.255 sf-saresc-cagw.psi.ch:5062 sf-saresc-cagw.psi.ch:5066'
     app = QApplication(sys.argv)
     from app_config import settings, appsconf
+
+    #from PyQt5 import QtGui
+    #qtawesome.load_font("material","MaterialIcons-Regular.ttf","MaterialIcons-Regular.json","fonts/",)
+    #QtGui.QFontDatabase.addApplicationFont("fonts/Inconsolata-Bold.ttf")
+    #QtGui.QFontDatabase.addApplicationFont("fonts/Baloo-Regular.ttf")
+
     if args.sim:
         app._backlight = backlight.Backlight(None)
         app._illumination = illumination.IlluminationControl(None)
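
Note on geometry.py (not part of the diff): the new set_zoom2pixsz(self, meas) body is still a stub, its loop only executes pass, so _lut_z2p ends up holding uninitialised arrays. Below is a minimal sketch of how the lookup table could be filled from the recorded measurements. The helper name calc_pixsz_from_measurement, the mm-to-metre conversion, and the assumption that the motor x/y moves lie along the metric x/y axes are illustrative assumptions, not code from the repository.

    import numpy as np

    def calc_pixsz_from_measurement(meas):
        '''Build a zoom -> pixel-size lookup table from recorded moves.

        meas maps zoom -> {'x': (move, (x0, y0), (x1, y1)), 'y': (...)};
        the motor move is assumed to be in mm and converted to metres (assumption).
        '''
        keys = sorted(meas)
        zoom = np.array(keys, dtype=np.float32)
        pixsz = np.empty((len(keys), 2, 2), dtype=np.float32)
        for i, k in enumerate(keys):
            cols, dists = [], []
            for axis in ('x', 'y'):
                move, p0, p1 = meas[k][axis]
                cols.append(np.subtract(p1, p0, dtype=np.float32))  # pixel shift for this move
                dists.append(move * 1e-3)                           # mm -> m (assumed unit)
            N = np.stack(cols, axis=1)       # columns: pixel shift per x-move, per y-move
            D = np.diag(dists)               # metric displacement per move
            pixsz[i] = D @ np.linalg.inv(N)  # [[pxx, pxy], [pyx, pyy]] in metres per pixel
        return {'zoom': zoom, 'pixsz': pixsz}

Fed with the measure dictionary from geometry.py's __main__ block, and assuming the moves really are millimetres, this would give roughly 1.2e-3 m / 1003 px ≈ 1.2 µm per pixel for the x direction at zoom 1, which is the kind of sanity check the autofocus and click-to-centre code can be tested against.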