diff --git a/clen_tools/distance-refinement.py b/clen_tools/distance-refinement.py
new file mode 100644
index 0000000..f1af2e8
--- /dev/null
+++ b/clen_tools/distance-refinement.py
@@ -0,0 +1,155 @@
+# modules
+import pandas as pd
+import subprocess
+import os, errno
+import regex as re
+import numpy as np
+
+def h5_sample( lst, sample ):
+
+    # create a sample of images from the run
+    # read h5.lst - note: the separator strips the leading // from the image column
+    cols = [ "h5", "image" ]
+    sample_df = pd.read_csv( lst, sep=r"\s//", engine="python", names=cols )
+
+    # take the defined sample
+    sample_df = sample_df.sample( sample )
+
+    # restore the original file order
+    sample_df = sample_df.sort_index()
+
+    # re-add // to the image column
+    sample_df[ "image" ] = "//" + sample_df.image.astype( str )
+
+    # write sample to file
+    sample_file = "h5_{0}_sample.lst".format( sample )
+    sample_df.to_csv( sample_file, sep=" ", index=False, header=False )
+
+    # return sample file name
+    return sample_file
+
+def geom_amend( lab6_geom_file, clen ):
+
+    # read the LaB6 geom
+    lab6_geom = open( lab6_geom_file, "r" )
+
+    # use regex to find clen and replace it with the new value
+    # clen example => clen = 0.1217
+    clen_geom = re.sub( r"clen = 0\.\d+", "clen = {0}".format( clen ), lab6_geom.read() )
+
+    # close the LaB6 geom file
+    lab6_geom.close()
+
+    # write the new clen geom to file
+    clen_geom_file = "{0}.geom".format( clen )
+    geom = open( clen_geom_file, "w" )
+    geom.write( clen_geom )
+    geom.close()
+
+    # return clen geom file name
+    return clen_geom_file
+
+def write_crystfel_run( clen, sample_h5_file, clen_geom_file, cell_file ):
+
+    # crystfel run-script name
+    cryst_run_file = "{0}_cryst_run.sh".format( clen )
+
+    # write file
+    run_sh = open( cryst_run_file, "w" )
+    run_sh.write( "#!/bin/sh\n\n" )
+    run_sh.write( "module purge\n" )
+    run_sh.write( "module load crystfel/0.10.2\n" )
+#    run_sh.write( "module use MX unstable\n" )
+#    run_sh.write( "module load gcc/4.8.5 hdf5_serial/1.10.3 xds/20210205 DirAx/1.17 pinkindexer/2021.08\n" )
+#    run_sh.write( "module load xgandalf/2021.08 fdip/2021.08 mosflm/7.3.0 crystfel/0.10.0 HDF5_bitshuffle/2018.05 HDF5_LZ4/2018.05 ccp4\n\n" )
+    run_sh.write( "indexamajig -i {0} \\\n".format( sample_h5_file ) )
+    run_sh.write( "            --output={0}.stream \\\n".format( clen ) )
+    run_sh.write( "            --geometry={0} \\\n".format( clen_geom_file ) )
+    run_sh.write( "            --pdb={0} \\\n".format( cell_file ) )
+    run_sh.write( "            --indexing=xgandalf-latt-cell --peaks=peakfinder8 \\\n" )
+    run_sh.write( "            --integration=rings-grad --tolerance=10.0,10.0,10.0,2,3,2 --threshold=10 --min-snr=5 --int-radius=2,3,6 \\\n" )
+    run_sh.write( "            -j 36 --no-multi --no-retry --check-peaks --max-res=3000 --min-pix-count=1 --local-bg-radius=4 --min-res=85\n\n" )
+    run_sh.close()
+
+    # make file executable
+    subprocess.call( [ "chmod", "+x", "{0}".format( cryst_run_file ) ] )
+
+    # return crystfel run-script name
+    return cryst_run_file
+
+def main( lst, sample, lab6_geom_file, centre_clen, cell_file, steps, scan_name, step_size ):
+
+    # set current working directory
+    cwd = os.getcwd()
+
+    # make sample list
+    print( "making a {0}-image sample".format( sample ) )
+    sample_h5 = h5_sample( lst, sample )
+    sample_h5_file = "{0}/{1}".format( cwd, sample_h5 )
+    print( "done" )
+
+    # make an array of clen steps above and below the central clen
+    print( "make clen array around {0}".format( centre_clen ) )
+    span = step_size * steps
+    bottom_step = centre_clen - span / 2
+    top_step = bottom_step + span
+    step_range = np.arange( bottom_step, top_step, step_size )
+    step_range = step_range.round( 4 )  # important - otherwise np.arange sometimes gives 0.99999999... instead of 1
+    print( "done" )
+
+    # make directories for results
+    print( "begin CrystFEL analysis of different clens" )
+
+    # loop to cycle through clen steps
+    for clen in step_range:
+
+        # move back to cwd
+        os.chdir( cwd )
+
+        print( "processing clen = {0}".format( clen ) )
+        # define process directory
+        proc_dir = "{0}/{1}/{2}".format( cwd, scan_name, clen )
+
+        # make process directory
+        try:
+            os.makedirs( proc_dir )
+        except OSError as e:
+            if e.errno != errno.EEXIST:
+                raise
+
+        # move to process directory
+        os.chdir( proc_dir )
+
+        # make geom file
+        print( "amend .geom file" )
+        clen_geom_file = geom_amend( lab6_geom_file, clen )
+        print( "done" )
+
+        # make crystfel run file
+        print( "make crystfel file" )
+        cryst_run_file = write_crystfel_run( clen, sample_h5_file, clen_geom_file, cell_file )
+        print( "done" )
+
+        # run crystfel file - either locally or via SLURM
+        print( "run crystFEL" )
+        #subprocess.call( [ "./{0}".format( cryst_run_file ) ] )
+        subprocess.call( [ "sbatch", "-p", "day", "--cpus-per-task=32", "--", "./{0}".format( cryst_run_file ) ] )
+        print( "done" )
+
+# variables
+sample = 500
+lst = "/sf/cristallina/data/p20590/work/process/jhb/detector_refinement/acq0001.JF17T16V01.dark.lst"
+lab6_geom_file = "/sf/cristallina/data/p20590/work/process/jhb/detector_refinement/8M_p-op_c-op_p20590.geom"
+centre_clen = 0.122  # in m
+cell_file = "/sf/cristallina/data/p20590/work/process/jhb/detector_refinement/hewl.cell"
+steps = 10
+scan_name = "fine_scan"
+step_size = 0.0005  # m - 0.001 = coarse scan, 0.0005 = fine
+
+main( lst, sample, lab6_geom_file, centre_clen, cell_file, steps, scan_name, step_size )
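A note on the `.round( 4 )` call in `main` above: `np.arange` accumulates floating-point error, so without rounding the clen values leak into directory names and `.geom` files as long floats. A minimal standalone sketch, using the same values as the variables block above:

import numpy as np

steps, step_size, centre_clen = 10, 0.0005, 0.122
bottom = centre_clen - ( step_size * steps ) / 2
raw = np.arange( bottom, bottom + step_size * steps, step_size )
print( raw )             # entries can come out as e.g. 0.12200000000000001
print( raw.round( 4 ) )  # [0.1195 0.12 0.1205 ... 0.124] - clean 4-decimal clens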
diff --git a/clen_tools/distance-scan-analysis.py b/clen_tools/distance-scan-analysis.py
new file mode 100644
index 0000000..7bcbadc
--- /dev/null
+++ b/clen_tools/distance-scan-analysis.py
@@ -0,0 +1,155 @@
+# modules
+import pandas as pd
+import regex as re
+import os
+import numpy as np
+import matplotlib.pyplot as plt
+
+
+def scrub_clen( stream_pwd ):
+
+    # get clen from the stream path
+    # example - /sf/cristallina/data/p20590/work/process/jhb/detector_refinement/coarse_scan/0.115/0.115.stream
+    # scrub clen and return - NaN if the path doesn't match
+    try:
+        pattern = r"0\.\d+/(0\.\d+)\.stream"
+        clen = re.search( pattern, stream_pwd ).group( 1 )
+        return float( clen )
+    except AttributeError:
+        # re.search returned None - no clen in this path
+        return np.nan
+
+def find_streams( top_dir ):
+
+    # create df for streams
+    stream_df = pd.DataFrame()
+
+    # search for all files that end with .stream
+    for path, dirs, files in os.walk( top_dir ):
+        for name in files:
+            if name.endswith( ".stream" ):
+
+                # get stream pwd
+                stream_pwd = os.path.join( path, name )
+
+                # scrub clen from stream
+                clen = scrub_clen( stream_pwd )
+
+                # put clen and stream pwd into df
+                data = [ { "stream_pwd" : stream_pwd,
+                           "clen" : clen
+                         } ]
+                stream_df_1 = pd.DataFrame( data )
+                stream_df = pd.concat( ( stream_df, stream_df_1 ) )
+
+    # sort df based on clen
+    stream_df = stream_df.sort_values( by="clen" )
+
+    # reset df index
+    stream_df = stream_df.reset_index( drop=True )
+
+    # return df of streams and clens
+    return stream_df
+
+def scrub_uc( stream ):
+
+    # get unit-cell values from the stream file
+    # example - Cell parameters 7.71784 7.78870 3.75250 nm, 90.19135 90.77553 90.19243 deg
+    # findall returns one tuple per indexed crystal (an empty list if nothing matched)
+    pattern = r"Cell\sparameters\s(\d\.\d+)\s(\d\.\d+)\s(\d\.\d+)\snm,\s(\d+\.\d+)\s(\d+\.\d+)\s(\d+\.\d+)\sdeg"
+    cells = re.findall( pattern, stream )
+    return cells
+
+def main( top_dir ):
+
+    # find stream files from process directory
+    print( "finding stream files" )
+    stream_df = find_streams( top_dir )
+    print( "done" )
+
+    # make results df for unit-cell stats and indexed counts
+    results_df = pd.DataFrame()
+
+    # loop through stream files and collect unit-cell information
+    print( "looping through stream files to collect unit cell, indexed information" )
+    for index, row in stream_df.iterrows():
+
+        stream_pwd, clen = row[ "stream_pwd" ], row[ "clen" ]
+
+        # open stream file
+        print( "scrubbing stream for clen={0}".format( clen ) )
+        stream = open( stream_pwd, "r" ).read()
+
+        # scrub unit-cell information
+        cells = scrub_uc( stream )
+
+        # put cells in df
+        cols = [ "a", "b", "c", "alpha", "beta", "gamma" ]
+        cells_df = pd.DataFrame( cells, columns=cols )
+        cells_df = cells_df.astype( float )
+
+        # calc stats - indexed count and standard deviation of each cell axis
+        indexed = len( cells_df )
+        std_a = cells_df.a.std()
+        std_b = cells_df.b.std()
+        std_c = cells_df.c.std()
+
+        # put stats in results df
+        stats = [ { "clen" : clen,
+                    "indexed" : indexed,
+                    "std_a" : std_a,
+                    "std_b" : std_b,
+                    "std_c" : std_c
+                  } ]
+        results_df_1 = pd.DataFrame( stats )
+        results_df = pd.concat( ( results_df, results_df_1 ) )
+
+    print( "done" )
+
+    # reset index
+    results_df = results_df.reset_index( drop=True )
+
+    # plot results
+    fig, ax1 = plt.subplots()
+
+    # indexed images plot
+    color = "tab:red"
+    ax1.set_xlabel( "clen" )
+    ax1.set_ylabel( "indexed", color=color )
+    ax1.plot( results_df.clen, results_df.indexed, color=color )
+    ax1.tick_params( axis="y", labelcolor=color )
+
+    # instantiate a second axes that shares the same x-axis
+    ax2 = ax1.twinx()
+
+    # std_a plot
+    color = "tab:blue"
+    ax2.set_ylabel( "st.deviation", color=color )
+    ax2.plot( results_df.clen, results_df.std_a, color=color )
+    ax2.tick_params( axis="y", labelcolor=color )
+
+    # std_b plot
+    ax2.plot( results_df.clen, results_df.std_b, color=color )
+
+    # std_c plot
+    ax2.plot( results_df.clen, results_df.std_c, color=color )
+
+    fig.tight_layout()  # otherwise the right y-label is slightly clipped
+    plt.show()
+
+
+# variables
+top_dir = "/sf/cristallina/data/p20590/work/process/jhb/detector_refinement/coarse_scan"
+
+
+main( top_dir )
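As a standalone check of the unit-cell regex in `scrub_uc`, run against the example line quoted in its docstring (note the `(\d\.\d+)` axis groups assume cell axes under 10 nm, which holds for the HEWL cell used here):

import regex as re

line = "Cell parameters 7.71784 7.78870 3.75250 nm, 90.19135 90.77553 90.19243 deg"
pattern = r"Cell\sparameters\s(\d\.\d+)\s(\d\.\d+)\s(\d\.\d+)\snm,\s(\d+\.\d+)\s(\d+\.\d+)\s(\d+\.\d+)\sdeg"
print( re.findall( pattern, line ) )
# [('7.71784', '7.78870', '3.75250', '90.19135', '90.77553', '90.19243')]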
r"Cell\sparameters\s(\d\.\d+)\s(\d\.\d+)\s(\d\.\d+)\snm,\s(\d+\.\d+)\s(\d+\.\d+)\s(\d+\.\d+)\sdeg" + cells = re.findall( pattern, stream ) + if AttributeError: + return cells + except AttributeError: + return np.nan + +def main( top_dir ): + + # find stream files from process directory + print( "finding stream files" ) + stream_df = find_streams( top_dir ) + print( "done" ) + + # making results df for unit cell and index no. + results_df = pd.DataFrame() + + # loop through stream files and collect unit_cell information + print( "looping through stream files to collect unit cell, indexed information" ) + for index, row in stream_df.iterrows(): + + stream_pwd, clen = row[ "stream_pwd" ], row[ "clen" ] + + # open stream file + print( "scrubbing stream for clen={0}".format( clen ) ) + stream = open( stream_pwd, "r" ).read() + + # scrub unit cell information + cells = scrub_us( stream ) + + # put cells in df + cols = [ "a", "b", "c", "alpha", "beta", "gamma" ] + cells_df = pd.DataFrame( cells, columns=cols ) + cells_df = cells_df.astype( float ) + + # calc stats + indexed = len( cells_df ) + std_a = cells_df.a.std() + std_b = cells_df.b.std() + std_c = cells_df.c.std() + + # put stats in results df + stats = [ { "clen" : clen, + "indexed" : indexed, + "std_a" : std_a, + "std_b" : std_b, + "std_c" : std_c + } ] + results_df_1 = pd.DataFrame( stats ) + results_df = pd.concat( ( results_df, results_df_1 ) ) + + print( "done" ) + + # reset index + results_df = results_df.reset_index( drop=True ) + + # plot results + fig, ax1 = plt.subplots() + + # indexed images plot + color = "tab:red" + ax1.set_xlabel( "clen" ) + ax1.set_ylabel( "indexed", color=color ) + ax1.plot( results_df.clen, results_df.indexed, color=color) + ax1.tick_params( axis="y", labelcolor=color) + + # instantiate a second axes that shares the same x-axis + ax2 = ax1.twinx() + + # std_a plot + color = "tab:blue" + ax2.set_ylabel( "st.deviation", color=color ) + ax2.plot( results_df.clen, results_df.std_a, color=color ) + ax2.tick_params(axis='y', labelcolor=color) + + # std_b plot + ax2.plot( results_df.clen, results_df.std_b, color=color ) + ax2.tick_params(axis='y', labelcolor=color) + + # std_b plot + ax2.plot( results_df.clen, results_df.std_c, color=color ) + ax2.tick_params(axis='y', labelcolor=color) + + fig.tight_layout() # otherwise the right y-label is slightly clipped + plt.show() + + +# variables +top_dir = "/sf/cristallina/data/p20590/work/process/jhb/detector_refinement/coarse_scan" + + +main( top_dir ) diff --git a/geom_files/crmx8M_230322.geom b/geom_files/crmx8M_230322.geom new file mode 100644 index 0000000..011f9ab --- /dev/null +++ b/geom_files/crmx8M_230322.geom @@ -0,0 +1,217 @@ +; Optimized panel offsets can be found at the end of the file +; Optimized panel offsets can be found at the end of the file +; +clen = 0.1217 +photon_energy = 12290 + +flag_lessthan=-30000 + +; older, change it to adu_per_eV and set it to one over the photon energy in eV +adu_per_photon = 1 +max_adu=11000 ; jungfrau max value (for dynamic gain regime is 120.000eV) +res = 13333.3 ; 75 micron pixel size + +rigid_group_collection_c0 = g0,g1,g2,g3,g4,g5,g6,g7,g8,g9,g10,g11,g12,g13,g14,g15 + +rigid_group_g0 = p0 +rigid_group_g1 = p1 +rigid_group_g2 = p2 +rigid_group_g3 = p3 +rigid_group_g4 = p4 +rigid_group_g5 = p5 +rigid_group_g6 = p6 +rigid_group_g7 = p7 +rigid_group_g8 = p8 +rigid_group_g9 = p9 +rigid_group_g10 = p10 +rigid_group_g11 = p11 +rigid_group_g12 = p12 +rigid_group_g13 = p13 +rigid_group_g14 = p14 +rigid_group_g15 = p15 + +; 
+; These lines describe the data layout of the JUNGFRAU multi-event files
+dim0 = %
+dim1 = ss
+dim2 = fs
+data = /data/JF17T16V01/data
+
+; Separate bad pixel map (recommended!)
+mask_file = /sf/cristallina/data/p20558/raw/JF_pedestals/20221018_101546.pixel_mask.h5
+mask = /pixel_mask
+mask_good = 0x1
+mask_bad = 0x0
+
+
+; corner_{x,y} set the position of the corner of each panel (in pixels)
+; relative to the beam
+
+p0/min_fs = 0
+p0/min_ss = 0
+p0/max_fs = 1029
+p0/max_ss = 513
+p0/fs = +1.000000x +0.000814y
+p0/ss = -0.000814x +1.000000y
+p0/corner_x = -999.642458
+p0/corner_y = -1664.515501
+
+p1/min_fs = 0
+p1/min_ss = 514
+p1/max_fs = 1029
+p1/max_ss = 1027
+p1/fs = +1.000000x +0.000362y
+p1/ss = -0.000362x +1.000000y
+p1/corner_x = 39.799242
+p1/corner_y = -1594.665501
+
+p2/min_fs = 0
+p2/min_ss = 1028
+p2/max_fs = 1029
+p2/max_ss = 1541
+p2/fs = +1.000000x +0.000339y
+p2/ss = -0.000339x +1.000000y
+p2/corner_x = -999.172458
+p2/corner_y = -1113.325501
+
+p3/min_fs = 0
+p3/min_ss = 1542
+p3/max_fs = 1029
+p3/max_ss = 2055
+p3/fs = +1.000000x -0.000244y
+p3/ss = +0.000244x +1.000000y
+p3/corner_x = 39.292642
+p3/corner_y = -1043.965501
+
+p4/min_fs = 0
+p4/min_ss = 2056
+p4/max_fs = 1029
+p4/max_ss = 2569
+p4/fs = +1.000000x +0.000439y
+p4/ss = -0.000439x +1.000000y
+p4/corner_x = -998.792458
+p4/corner_y = -563.734501
+
+p5/min_fs = 0
+p5/min_ss = 2570
+p5/max_fs = 1029
+p5/max_ss = 3083
+p5/fs = +1.000000x -0.000061y
+p5/ss = +0.000061x +1.000000y
+p5/corner_x = 39.468642
+p5/corner_y = -493.653501
+
+p6/min_fs = 0
+p6/min_ss = 3084
+p6/max_fs = 1029
+p6/max_ss = 3597
+p6/fs = +1.000000x +0.000062y
+p6/ss = -0.000062x +1.000000y
+p6/corner_x = -1067.942458
+p6/corner_y = -13.120701
+
+p7/min_fs = 0
+p7/min_ss = 3598
+p7/max_fs = 1029
+p7/max_ss = 4111
+p7/fs = +1.000000x +0.000058y
+p7/ss = -0.000058x +1.000000y
+p7/corner_x = -29.805658
+p7/corner_y = 55.333999
+
+p8/min_fs = 0
+p8/min_ss = 4112
+p8/max_fs = 1029
+p8/max_ss = 4625
+p8/fs = +1.000000x +0.000505y
+p8/ss = -0.000505x +1.000000y
+p8/corner_x = -1068.292458
+p8/corner_y = 536.798499
+
+p9/min_fs = 0
+p9/min_ss = 4626
+p9/max_fs = 1029
+p9/max_ss = 5139
+p9/fs = +1.000000x +0.000463y
+p9/ss = -0.000463x +1.000000y
+p9/corner_x = -29.422758
+p9/corner_y = 605.193499
+
+p10/min_fs = 0
+p10/min_ss = 5140
+p10/max_fs = 1029
+p10/max_ss = 5653
+p10/fs = +1.000000x -0.000296y
+p10/ss = +0.000296x +1.000000y
+p10/corner_x = -1068.232458
+p10/corner_y = 1087.074499
+
+p11/min_fs = 0
+p11/min_ss = 5654
+p11/max_fs = 1029
+p11/max_ss = 6167
+p11/fs = +1.000000x -0.000162y
+p11/ss = +0.000162x +1.000000y
+p11/corner_x = -30.060558
+p11/corner_y = 1155.674499
+
+p12/min_fs = 0
+p12/min_ss = 6168
+p12/max_fs = 1029
+p12/max_ss = 6681
+p12/fs = -0.000746x +1.000000y
+p12/ss = -1.000000x -0.000746y
+p12/corner_x = -1021.782458
+p12/corner_y = -1066.215501
+
+p13/min_fs = 0
+p13/min_ss = 6682
+p13/max_fs = 1029
+p13/max_ss = 7195
+p13/fs = -0.000324x +1.000000y
+p13/ss = -1.000000x -0.000324y
+p13/corner_x = 1606.827542
+p13/corner_y = -996.073501
+
+p14/min_fs = 0
+p14/min_ss = 7196
+p14/max_fs = 1029
+p14/max_ss = 7709
+p14/fs = +0.002185x +0.999998y
+p14/ss = -0.999998x +0.002185y
+p14/corner_x = -1093.032458
+p14/corner_y = -27.189301
+
+p15/min_fs = 0
+p15/min_ss = 7710
+p15/max_fs = 1029
+p15/max_ss = 8223
+p15/fs = +0.000156x +1.000000y
+p15/ss = -1.000000x +0.000156y
+p15/corner_x = 1537.837542
+p15/corner_y = 42.438499
+
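+; The bad region below matches panel p11's full extent (ss rows 5654-6167),
+; i.e. the whole module is excluded from peak search and integration.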
+badregion/min_fs = 0
+badregion/min_ss = 5654
+badregion/max_fs = 1029
+badregion/max_ss = 6167
+badregion/panel = p11
+
+
+; per-panel offset (in m) added to clen to give each panel's true camera length
+p0/coffset = -0.000093
+p1/coffset = -0.000093
+p2/coffset = -0.000093
+p3/coffset = -0.000093
+p4/coffset = -0.000093
+p5/coffset = -0.000093
+p6/coffset = -0.000093
+p7/coffset = -0.000093
+p8/coffset = -0.000093
+p9/coffset = -0.000093
+p10/coffset = -0.000093
+p11/coffset = -0.000093
+p12/coffset = -0.000093
+p13/coffset = -0.000093
+p14/coffset = -0.000093
+p15/coffset = -0.000093
diff --git a/pyfai-tools/convert-scan-for-pyfai-16M.py b/pyfai-tools/convert-scan-for-pyfai-16M.py
new file mode 100644
index 0000000..436fddf
--- /dev/null
+++ b/pyfai-tools/convert-scan-for-pyfai-16M.py
@@ -0,0 +1,107 @@
+#!/usr/bin/env python3
+
+# author J.Beale
+
+"""
+# aim
+16M variant of convert-scan-for-pyfai.py for large detectors - steps through the
+scan in batches so the full image stack never has to fit in memory.
+make image file to input into pyFAI for initial detector beam-centre and detector distance calibration
+refer to Cristallina8M-calibration for complete protocol
+https://docs.google.com/document/d/1RoeUUogvRxX4M6uqGwkjf3dVJBabiMUx4ZxwcA5e9Dc/edit#
+
+# protocol
+take a scan of LaB6
+## IMPORTANT ##
+- save images as photon counts - in slic/run_control set scale=beam energy
+- detector_geometry=TRUE - saves detector panels in their correct orientation
+## scan inputs ##
+- <0.01 trans
+- motor scan > 10 um per step
+- 10 images per step, 100 steps
+- use scan.json as input for this script
+
+# usage
+python convert-scan-for-pyfai-16M.py -j <jungfrau> -s <scan.json> -n <output name>
+
+# output
+creates a .npy file that can be loaded directly into pyFAI
+"""
+
+# modules
+from matplotlib import pyplot as plt
+import numpy as np
+from sfdata import SFScanInfo
+from tqdm import tqdm
+import argparse
+
+def convert_image( path_to_json, jungfrau, name ):
+
+    # open scan
+    print( "opening scan" )
+    scan = SFScanInfo( path_to_json )
+
+    # steps in scan
+    nsteps = len( scan )
+
+    # define step channel and image shape
+    step = scan[0]
+    ch = step[jungfrau]
+    img_shape = ch[0].shape
+
+    print( "stepping through scan and averaging images at each step" )
+    # step through scan and average the files from each position
+    imgs_shape = ( nsteps, *img_shape )
+    imgs = np.empty( imgs_shape )
+    for i, subset in tqdm( enumerate( scan ), total=nsteps ):
+
+        # go through the data in batches so you don't run out of memory
+        ch = subset[jungfrau]
+        total = np.zeros( img_shape )
+        count = 0
+        for _indices, batch in ch.in_batches( size=2 ):
+            total += np.sum( batch, axis=0 )
+            count += len( batch )
+
+        # per-step mean, accumulated from batch sums so unequal batch sizes don't bias it
+        imgs[i] = total / count
+
+    print( "done" )
+    # average the per-step images
+    print( "final average" )
+    mean_image = imgs.mean( axis=0 )
+    print( "done" )
+
+    # output to file
+    print( "saving to .npy = {0}".format( name ) )
+    np.save( "{0}.npy".format( name ), mean_image )
+    print( "done" )
+
+    # create a plot of the averaged scan
+    fig, ax = plt.subplots()
+    ax.imshow( mean_image, vmin=0, vmax=1000 )
+    plt.show()
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "-j",
+        "--jungfrau",
+        help="name of the jungfrau used, i.e., JF17T16V01 for Cristallina MX",
+        type=str,
+        default="JF17T16V01"
+    )
+    parser.add_argument(
+        "-s",
+        "--scan",
+        help="path to json scan file",
+        type=str,
+        default="/sf/cristallina/data/p20590/raw/run0003/meta/scan.json"
+    )
+    parser.add_argument(
+        "-n",
+        "--name",
+        help="name of output file",
+        type=str,
+        default="mean_scan"
+    )
+    args = parser.parse_args()
+
+    convert_image( args.scan, args.jungfrau, args.name )
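Either converter ends in a `.npy` that pyFAI reads directly. A quick sanity check before calibrating (a sketch - `mean_scan.npy` is the default output name of the 16M script above):

import numpy as np

img = np.load( "mean_scan.npy" )
print( img.shape, np.nanmin( img ), np.nanmax( img ) )  # expect the full detector shape and positive photon counts

From there the image can be handed to the calibration GUI, e.g. `pyfai-calib2 -c LaB6 mean_scan.npy` (assuming a pyFAI build that ships the LaB6 calibrant).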
diff --git a/pyfai-tools/convert-scan-for-pyfai.py b/pyfai-tools/convert-scan-for-pyfai.py
new file mode 100644
index 0000000..0418aac
--- /dev/null
+++ b/pyfai-tools/convert-scan-for-pyfai.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python3
+
+# author J.Beale
+
+"""
+# aim
+make image file to input into pyFAI for initial detector beam-centre and detector distance calibration
+refer to Cristallina8M-calibration for complete protocol
+https://docs.google.com/document/d/1RoeUUogvRxX4M6uqGwkjf3dVJBabiMUx4ZxwcA5e9Dc/edit#
+
+# protocol
+take a scan of LaB6
+## IMPORTANT ##
+- save images as photon counts - in slic/run_control set scale=beam energy
+- detector_geometry=TRUE - saves detector panels in their correct orientation
+## scan inputs ##
+- <0.01 trans
+- motor scan > 10 um per step
+- 10 images per step, 100 steps
+- use scan.json as input for this script
+
+# usage
+python convert-scan-for-pyfai.py -j <jungfrau> -s <scan.json> -n <output name>
+
+# output
+creates a .npy file that can be loaded directly into pyFAI
+"""
+
+# modules
+from matplotlib import pyplot as plt
+import numpy as np
+from sfdata import SFScanInfo
+from tqdm import tqdm
+import argparse
+
+def convert_image( path_to_json, jungfrau, name ):
+
+    # open scan
+    scan = SFScanInfo( path_to_json )
+
+    # step through scan and average the files from each position
+    mean_image = []
+    for subset in tqdm( scan ):
+        # subset is an SFDataFiles object
+        mean = np.mean( subset[ jungfrau ].data, axis=0 )
+        mean_image.append( mean )
+
+    # sum the averaged images
+    sum_image = np.sum( mean_image, axis=0 )
+
+    # output to file
+    np.save( "{0}.npy".format( name ), sum_image )
+
+    # create a plot of the summed, averaged scan
+    fig, ax = plt.subplots()
+    ax.imshow( sum_image, vmin=0, vmax=1000 )
+    plt.show()
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "-j",
+        "--jungfrau",
+        help="name of the jungfrau used, i.e., JF17T16V01 for Cristallina MX",
+        type=str,
+        default="JF17T16V01"
+    )
+    parser.add_argument(
+        "-s",
+        "--scan",
+        help="path to json scan file",
+        type=str,
+        default="/sf/cristallina/data/p20590/raw/run0003/meta/scan.json"
+    )
+    parser.add_argument(
+        "-n",
+        "--name",
+        help="name of output file",
+        type=str,
+        default="sum_mean_scan"
+    )
+    args = parser.parse_args()
+
+    convert_image( args.scan, args.jungfrau, args.name )
diff --git a/pyfai-tools/sum_mean_scan.poni b/pyfai-tools/sum_mean_scan.poni
new file mode 100644
index 0000000..230eebc
--- /dev/null
+++ b/pyfai-tools/sum_mean_scan.poni
@@ -0,0 +1,12 @@
+# Nota: C-Order, 1 refers to the Y axis, 2 to the X axis
+# Calibration done at Thu Mar 16 16:56:31 2023
+poni_version: 2
+Detector: Detector
+Detector_config: {"pixel1": 7.5e-08, "pixel2": 7.5e-08, "max_shape": [3333, 3212]}
+Distance: 0.00012198044363190573
+Poni1: 0.0001245625996392014
+Poni2: 0.00012028397296269736
+Rot1: -0.0013017934228372007
+Rot2: 0.0011373661117621663
+Rot3: -0.00014487988888865335
+Wavelength: 1.0088217935980494e-10
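Two consistency notes on this `.poni` before it is fed to the updater below. First, the detector config records pixel1/pixel2 = 7.5e-08 rather than the physical 7.5e-05 m, so `Distance` and the Poni offsets come out a factor of 1000 small - which is why `scrub_poni` below divides by 7.5e-08 to get pixel coordinates and multiplies clen by 1000 to get metres. Second, the wavelength matches the photon energy in crmx8M_230322.geom; a quick check:

from scipy import constants

wavelength = 1.0088217935980494e-10  # the Wavelength: line above
eV = ( constants.c * constants.h / wavelength ) / constants.electron_volt
print( round( eV ) )  # 12290 - the photon_energy in crmx8M_230322.geom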
diff --git a/pyfai-tools/update-geom-from-lab6.py b/pyfai-tools/update-geom-from-lab6.py
new file mode 100644
index 0000000..3aa709b
--- /dev/null
+++ b/pyfai-tools/update-geom-from-lab6.py
@@ -0,0 +1,168 @@
+#!/usr/bin/python
+
+import pandas as pd
+import numpy as np
+import regex as re
+from scipy import constants
+import argparse
+from datetime import datetime
+date = datetime.today().strftime( '%y%m%d' )
+
+
+def calculate_new_corner_positions( beam_x, beam_y ):
+
+    # make df of the current corner positions (in pixels, one row per panel p0-p15)
+    positions = { "current_x" : [ 607, 1646, 607, 1646, 607, 1646, 538, 1577, 538, 1577, 538, 1577, 538, 3212, 514, 3143 ],
+                  "current_y" : [ 0, 69, 550, 619, 1100, 1169, 1650, 1719, 2200, 2269, 2750, 2819, 597, 667, 1636, 1706 ]
+                }
+    corner_df = pd.DataFrame( positions )
+
+    # calculate new corner positions relative to the beam centre
+    corner_df[ "new_x" ] = corner_df.current_x.subtract( beam_x )
+    corner_df[ "new_y" ] = corner_df.current_y.subtract( beam_y )
+
+    # drop old positions
+    corner_df = corner_df[ [ "new_x", "new_y" ] ]
+
+    return corner_df
+
+def scrub_poni( path_to_poni_file ):
+
+    # open poni file
+    poni_file = open( path_to_poni_file, "r" ).read()
+
+    # regex patterns to scrub poni data
+    clen_m_pattern = r"Distance:\s(\d\.\d*)"
+    poni1_m_pattern = r"Poni1:\s(\d\.\d*)"
+    poni2_m_pattern = r"Poni2:\s(\d\.\d*)"
+    wave_pattern = r"Wavelength:\s(\d\.\d*)e(-\d+)"
+
+    # regex search
+    clen = re.search( clen_m_pattern, poni_file ).group( 1 )
+    poni1_m = re.search( poni1_m_pattern, poni_file ).group( 1 )
+    poni2_m = re.search( poni2_m_pattern, poni_file ).group( 1 )
+    wave = re.search( wave_pattern, poni_file ).group( 1, 2 )
+
+    # calculate the proper wavelength from mantissa and exponent
+    wave = float( wave[0] ) * np.float_power( 10, int( wave[1] ) )
+
+    # convert the poni offsets to pixels - 7.5e-08 matches pixel1/pixel2 in the .poni
+    poni1_p = float( poni1_m ) / 0.000000075
+    poni2_p = float( poni2_m ) / 0.000000075
+
+    # calculate beam energy in eV
+    eV = ( ( constants.c * constants.h ) / wave ) / constants.electron_volt
+
+    # return poni1 = y, poni2 = x, energy, and clen rescaled to m (hence the *1000)
+    return poni1_p, poni2_p, eV, round( float( clen ) * 1000, 5 )
+
+
+def write_new_positions( path_to_geom, beam_x, beam_y, clen, energy ):
+
+    # open current geometry file
+    current_geom_file = open( path_to_geom, "r" ).read()
+
+    # calculate new corner positions
+    corner_df = calculate_new_corner_positions( beam_x, beam_y )
+
+    # replace the current corner positions with the new ones
+    for i in range( 0, 16 ):
+
+        # x and y positions
+        new_x, new_y = round( corner_df.new_x[i], 3 ), round( corner_df.new_y[i], 3 )
+
+        # input new x position
+        current_pattern_x = r"p" + str( i ) + r"/corner_x = -?\d+\.\d+"
+        new_pattern_x = "p" + str( i ) + "/corner_x = " + str( new_x )
+        current_geom_file = re.sub( current_pattern_x, new_pattern_x, current_geom_file )
+
+        # input new y position
+        current_pattern_y = r"p" + str( i ) + r"/corner_y = -?\d+\.\d+"
+        new_pattern_y = "p" + str( i ) + "/corner_y = " + str( new_y )
+        current_geom_file = re.sub( current_pattern_y, new_pattern_y, current_geom_file )
+
+    # input new clen
+    current_clen = r"clen = \d\.\d+"
+    new_clen = "clen = " + str( clen )
+    current_geom_file = re.sub( current_clen, new_clen, current_geom_file )
+
+    # input new energy
+    current_energy = r"photon_energy = \d+"
+    new_energy = "photon_energy = " + str( energy )
+    current_geom_file = re.sub( current_energy, new_energy, current_geom_file )
+
+    # create the new geom file name - strip ".geom" and append today's date
+    geom_start = path_to_geom[:-5]
+    new_geom_name = "{0}_{1}.geom".format( geom_start, date )
+
+    # write new geom file
+    f = open( new_geom_name, "w" )
+    f.write( current_geom_file )
+    f.close()
+
+    # return new_geom_name
+    return new_geom_name
+
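+# worked example with the argparse defaults below (beam_x, beam_y = 1603.73, 1661.99):
+# panel p0's corner moves from (607, 0) to
+# (607 - 1603.73, 0 - 1661.99) = (-996.73, -1661.99),
+# i.e. every corner is re-expressed relative to the refined beam centre.
+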
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "-g",
+        "--geom",
+        help="give the path to the cristallina 8M geom file to be updated",
+        type=str,
+        default="/sf/cristallina/data/p20558/work/geom/optimised_geom_file/p20558_hewl_op.geom",
+    )
+    parser.add_argument(
+        "-x",
+        "--beam_x",
+        help="beam_x in pixels",
+        type=float,
+        default=1603.73
+    )
+    parser.add_argument(
+        "-y",
+        "--beam_y",
+        help="beam_y in pixels",
+        type=float,
+        default=1661.99
+    )
+    parser.add_argument(
+        "-c",
+        "--clen",
+        help="detector distance in m",
+        type=float,
+        default=0.111
+    )
+    parser.add_argument(
+        "-e",
+        "--energy",
+        help="photon energy in eV",
+        type=int,
+        default=12400
+    )
+    parser.add_argument(
+        "-i",
+        "--poni",
+        help="path to poni file",
+        type=str,
+    )
+    parser.add_argument(
+        "-p",
+        "--p-group",
+        help="p-group name (accepted but currently unused)",
+        type=str,
+    )
+    args = parser.parse_args()
+
+    # run geom converter
+    if args.poni is not None:
+        print( "reading poni file" )
+        beam_y, beam_x, eV, clen = scrub_poni( args.poni )
+        print( "beam_x, beam_y = {0}, {1}\nphoton_energy = {2}\nclen = {3}".format( beam_x, beam_y, eV, clen ) )
+        new_geom_name = write_new_positions( args.geom, beam_x, beam_y, clen, eV )
+        print( "updated .geom file with poni calculations\nnew .geom = {0}".format( new_geom_name ) )
+    else:
+        print( "manually input positions" )
+        print( "beam_x, beam_y = {0}, {1}\nphoton_energy = {2}\nclen = {3}".format( args.beam_x, args.beam_y, args.energy, args.clen ) )
+        new_geom_name = write_new_positions( args.geom, args.beam_x, args.beam_y, args.clen, args.energy )
+        print( "updated .geom file with manual values\nnew .geom = {0}".format( new_geom_name ) )
\ No newline at end of file
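For reference, typical invocations of the updater (the geom path here is illustrative):

python update-geom-from-lab6.py -i sum_mean_scan.poni -g /path/to/current.geom
python update-geom-from-lab6.py -x 1603.73 -y 1661.99 -c 0.111 -e 12400 -g /path/to/current.geom

The first derives beam centre, photon energy, and clen from the pyFAI `.poni`; the second writes manually supplied values. Both produce a new `<geom>_<yymmdd>.geom` next to the input file.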