rectifying conflict
@@ -1,92 +0,0 @@
#!/usr/bin/env python3

# author J.Beale

"""
# aim
- the -16M variant is for large detectors
make an image file to input into pyFAI for initial detector beam-centre and detector-distance calibration
refer to Cristallina8M-calibration for the complete protocol
https://docs.google.com/document/d/1RoeUUogvRxX4M6uqGwkjf3dVJBabiMUx4ZxwcA5e9Dc/edit#

# protocol
take a scan of LaB6
## IMPORTANT ##
- save the images as photon counts - in slic/run_control set scale=beam energy
- detector_geometry=TRUE - saves the detector panels in their correct orientation
## scan inputs ##
- transmission < 0.01
- motor scan > 10 um per step
- 10 images per step, 100 steps
- use scan.json as the input for this script

# usage
python make-tiff.py -j <jungfrau-name> -s <path to scan file> -n <name of output file>

# output
creates a .npy file that can be loaded directly into pyFAI
"""

# modules
from matplotlib import pyplot as plt
import numpy as np
from sfdata import SFScanInfo
from tqdm import tqdm
import argparse


def convert_image( path_to_json, jungfrau, name ):

    # open the scan
    scan = SFScanInfo( path_to_json )

    # step through the scan and average the files from each position
    mean_image = []
    for _i, subset in tqdm( enumerate(scan) ):
        # subset is an SFDataFiles object

        # go through the data in batches so you don't run out of memory
        means = []
        for _indices, batch in subset[jungfrau].in_batches(size=2):
            means.append( np.mean(batch.data, axis=0) )

        # take the mean of the batch means (exact when all batches have the same size)
        mean_image.append( np.mean(means, axis=0) )

    # sum the averaged images
    sum_image = np.sum( mean_image, axis=0 )

    # output to file
    np.save( "{0}.npy".format( name ), sum_image )

    # create a plot of the summed, averaged scan
    fig, ax = plt.subplots()
    ax.imshow(sum_image, vmin=0, vmax=1000)
    plt.show()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-j",
        "--jungfrau",
        help="name of the jungfrau used, e.g., JF17T16V01 for Cristallina MX",
        type=str,
        default="JF17T16V01"
    )
    parser.add_argument(
        "-s",
        "--scan",
        help="path to the json scan file",
        type=str,
        default="/sf/cristallina/data/p20590/raw/run0003/meta/scan.json"
    )
    parser.add_argument(
        "-n",
        "--name",
        help="name of the output file",
        type=str,
        default="sum_mean_scan"
    )
    args = parser.parse_args()

    convert_image( args.scan, args.jungfrau, args.name )
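As a sanity check of the hand-off promised in the docstring, the saved array can be opened through fabio, which pyFAI uses for its file I/O. A minimal sketch, assuming the default output name "sum_mean_scan" (the pyFAI-calib2 call is illustrative, not part of this repo):

import fabio

img = fabio.open( "sum_mean_scan.npy" ).data   # fabio reads .npy directly
assert img.ndim == 2                           # a single summed detector image
# then start the calibration GUI on the same file, e.g.:
#   pyFAI-calib2 sum_mean_scan.npy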
@@ -1,85 +0,0 @@
#!/usr/bin/env python3

# author J.Beale

"""
# aim
make an image file to input into pyFAI for initial detector beam-centre and detector-distance calibration
refer to Cristallina8M-calibration for the complete protocol
https://docs.google.com/document/d/1RoeUUogvRxX4M6uqGwkjf3dVJBabiMUx4ZxwcA5e9Dc/edit#

# protocol
take a scan of LaB6
## IMPORTANT ##
- save the images as photon counts - in slic/run_control set scale=beam energy
- detector_geometry=TRUE - saves the detector panels in their correct orientation
## scan inputs ##
- transmission < 0.01
- motor scan > 10 um per step
- 10 images per step, 100 steps
- use scan.json as the input for this script

# usage
python make-tiff.py -j <jungfrau-name> -s <path to scan file> -n <name of output file>

# output
creates a .npy file that can be loaded directly into pyFAI
"""

# modules
from matplotlib import pyplot as plt
import numpy as np
from sfdata import SFScanInfo
from tqdm import tqdm
import argparse


def convert_image( path_to_json, jungfrau, name ):

    # open the scan
    scan = SFScanInfo( path_to_json )

    # step through the scan and average the files from each position
    mean_image = []
    for _i, subset in tqdm( enumerate(scan) ):
        # subset is an SFDataFiles object
        mean = np.mean( subset[ jungfrau ].data, axis=0 )
        mean_image.append( mean )

    # sum the averaged images
    sum_image = np.sum( mean_image, axis=0 )

    # output to file
    np.save( "{0}.npy".format( name ), sum_image )

    # create a plot of the summed, averaged scan
    fig, ax = plt.subplots()
    ax.imshow(sum_image, vmin=0, vmax=1000)
    plt.show()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-j",
        "--jungfrau",
        help="name of the jungfrau used, e.g., JF17T16V01 for Cristallina MX",
        type=str,
        default="JF17T16V01"
    )
    parser.add_argument(
        "-s",
        "--scan",
        help="path to the json scan file",
        type=str,
        default="/sf/cristallina/data/p20590/raw/run0003/meta/scan.json"
    )
    parser.add_argument(
        "-n",
        "--name",
        help="name of the output file",
        type=str,
        default="sum_mean_scan"
    )
    args = parser.parse_args()

    convert_image( args.scan, args.jungfrau, args.name )
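This variant loads a whole step's image stack into memory at once; the batched -16M variant above avoids that. A sketch of the equivalent streaming computation, assuming only an iterable of 2D frames (illustrative, not repo code):

import numpy as np

def streaming_mean( frames ):
    # mean of a sequence of 2D frames without holding them all in memory
    total, n = None, 0
    for f in frames:
        total = f.astype( np.float64 ) if total is None else total + f
        n += 1
    return total / n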
@@ -1,155 +0,0 @@
# modules
import pandas as pd
import subprocess
import os, errno
import regex as re
import numpy as np


def h5_sample( lst, sample ):

    # create a sample of images from the run
    # read h5.lst - note - removes // from the image column
    cols = [ "h5", "image" ]
    sample_df = pd.read_csv( lst, sep=r"\s//", engine="python", names=cols )

    # take the defined sample
    sample_df = sample_df.sample( sample )

    # sort the list
    sample_df = sample_df.sort_index()

    # re-add // to the image column
    sample_df[ "image" ] = "//" + sample_df.image.astype(str)

    # write the sample to file
    sample_file = "h5_{0}_sample.lst".format( sample )
    sample_df.to_csv( sample_file, sep=" ", index=False, header=False )

    # return the sample file name
    return sample_file


def geom_amend( lab6_geom_file, clen ):

    # read the LaB6 geom
    lab6_geom = open( lab6_geom_file, "r" )

    # use regex to find clen and replace it with the new value
    # clen example => clen = 0.1217
    clen_geom = re.sub( r"clen = 0\.\d+", "clen = {0}".format( clen ), lab6_geom.read() )

    # close the LaB6 geom file
    lab6_geom.close()

    # write the new clen geom to file
    clen_geom_file = "{0}.geom".format( clen )
    geom = open( clen_geom_file, "w" )
    geom.write( clen_geom )
    geom.close()

    # return the clen geom file name
    return clen_geom_file


def write_crystfel_run( clen, sample_h5_file, clen_geom_file, cell_file ):

    # crystfel file name
    cryst_run_file = "{0}_cryst_run.sh".format( clen )

    # write the file
    run_sh = open( cryst_run_file, "w" )
    run_sh.write( "#!/bin/sh\n\n" )
    run_sh.write( "module purge\n" )
    run_sh.write( "module load crystfel/0.10.2\n" )
    # run_sh.write( "module use MX unstable\n" )
    # run_sh.write( "module load gcc/4.8.5 hdf5_serial/1.10.3 xds/20210205 DirAx/1.17 pinkindexer/2021.08\n" )
    # run_sh.write( "module load xgandalf/2021.08 fdip/2021.08 mosflm/7.3.0 crystfel/0.10.0 HDF5_bitshuffle/2018.05 HDF5_LZ4/2018.05 ccp4\n\n" )
    run_sh.write( "indexamajig -i {0} \\\n".format( sample_h5_file ) )
    run_sh.write( "    --output={0}.stream \\\n".format( clen ) )
    run_sh.write( "    --geometry={0} \\\n".format( clen_geom_file ) )
    run_sh.write( "    --pdb={0} \\\n".format( cell_file ) )
    run_sh.write( "    --indexing=xgandalf-latt-cell --peaks=peakfinder8 \\\n" )
    run_sh.write( "    --integration=rings-grad --tolerance=10.0,10.0,10.0,2,3,2 --threshold=10 --min-snr=5 --int-radius=2,3,6 \\\n" )
    run_sh.write( "    -j 36 --no-multi --no-retry --check-peaks --max-res=3000 --min-pix-count=1 --local-bg-radius=4 --min-res=85\n\n" )
    run_sh.close()

    # make the file executable
    subprocess.call( [ "chmod", "+x", cryst_run_file ] )

    # return the crystfel run file name
    return cryst_run_file
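# For reference, the script written for, e.g., clen = 0.1195 comes out roughly
# as below (paths illustrative):
#
#   #!/bin/sh
#   module purge
#   module load crystfel/0.10.2
#   indexamajig -i /path/to/h5_500_sample.lst \
#       --output=0.1195.stream \
#       --geometry=0.1195.geom \
#       --pdb=/path/to/hewl.cell \
#       --indexing=xgandalf-latt-cell --peaks=peakfinder8 ...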
def main( lst, sample, lab6_geom_file, centre_clen, cell_file, steps, scan_name, step_size ):

    # set the current working directory
    cwd = os.getcwd()

    # make the sample list
    print( "making {0} sample of images".format( sample ) )
    sample_h5 = h5_sample( lst, sample )
    sample_h5_file = "{0}/{1}".format( cwd, sample_h5 )
    print( "done" )

    # make a list of clen steps above and below the central clen
    print( "make clen array around {0}".format( centre_clen ) )
    step_range = step_size*steps
    bottom_step = centre_clen-step_range/2
    top_step = bottom_step+step_range
    step_range = np.arange( bottom_step, top_step, step_size )
    step_range = step_range.round( 4 ) # important - otherwise np sometimes gives you 0.99999999 instead of 1
    print( "done" )

    # make directories for the results
    print( "begin CrystFEL analysis of different clens" )

    # loop to cycle through the clen steps
    for clen in step_range:

        # move back to cwd
        os.chdir( cwd )

        print( "processing clen = {0}".format( clen ) )
        # define the process directory
        proc_dir = "{0}/{1}/{2}".format( cwd, scan_name, clen )

        # make the process directory
        try:
            os.makedirs( proc_dir )
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise

        # move to the process directory
        os.chdir( proc_dir )

        # make the geom file
        print( "amend .geom file" )
        clen_geom_file = geom_amend( lab6_geom_file, clen )
        print( "done" )

        # make the crystfel run file
        print( "make crystfel file" )
        cryst_run_file = write_crystfel_run( clen, sample_h5_file, clen_geom_file, cell_file )
        print( "done" )

        # submit the crystfel run file
        print( "run crystFEL" )
        #subprocess.call( [ "./{0}".format( cryst_run_file ) ] )
        subprocess.call( [ "sbatch", "-p", "day", "--cpus-per-task=32", "--", "./{0}".format( cryst_run_file ) ] )
        print( "done" )


# variables
sample = 500
lst = "/sf/cristallina/data/p20590/work/process/jhb/detector_refinement/acq0001.JF17T16V01.dark.lst"
lab6_geom_file = "/sf/cristallina/data/p20590/work/process/jhb/detector_refinement/8M_p-op_c-op_p20590.geom"
centre_clen = 0.122 # in m
cell_file = "/sf/cristallina/data/p20590/work/process/jhb/detector_refinement/hewl.cell"
steps = 10
scan_name = "fine_scan"
step_size = 0.0005 # m - 0.001 = coarse scan, 0.0005 = fine

main( lst, sample, lab6_geom_file, centre_clen, cell_file, steps, scan_name, step_size )
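With the default variables above, the clen grid the script scans is easy to verify by hand. A worked example (values in metres):

import numpy as np

centre_clen, steps, step_size = 0.122, 10, 0.0005
step_range = step_size * steps                 # 0.005 m total span
bottom = centre_clen - step_range / 2          # 0.1195 m
grid = np.arange( bottom, bottom + step_range, step_size ).round( 4 )
print( grid )   # ten values centred on 0.122: [0.1195 0.12 ... 0.1235 0.124]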
@@ -1,155 +0,0 @@
# modules
import pandas as pd
import regex as re
import os
import numpy as np
import matplotlib.pyplot as plt


def scrub_clen( stream_pwd ):

    # get clen from the stream name
    # example - /sf/cristallina/data/p20590/work/process/jhb/detector_refinement/coarse_scan/0.115/0.115.stream
    # scrub clen and return it - else nan
    try:
        pattern = r"0\.\d+/(0\.\d+)\.stream"
        re_search = re.search( pattern, stream_pwd )
        return float( re_search.group( 1 ) )
    except AttributeError:
        # re.search returned None - no clen in this path
        return np.nan


def find_streams( top_dir ):

    # create a df for the streams
    stream_df = pd.DataFrame()

    # search for all files that end with .stream
    for path, dirs, files in os.walk( top_dir ):
        for name in files:
            if name.endswith( ".stream" ):

                # get the stream pwd
                stream_pwd = os.path.join( path, name )

                # scrub clen from the stream
                clen = scrub_clen( stream_pwd )

                # put clen and the stream pwd into the df
                data = [ { "stream_pwd" : stream_pwd,
                           "clen" : clen
                           } ]
                stream_df_1 = pd.DataFrame( data )
                stream_df = pd.concat( ( stream_df, stream_df_1 ) )

    # sort the df based on clen
    stream_df = stream_df.sort_values( by="clen" )

    # reset the df index
    stream_df = stream_df.reset_index( drop=True )

    # return the df of streams and clens
    return stream_df


def scrub_uc( stream ):

    # get the unit-cell values from the stream file
    # example - Cell parameters 7.71784 7.78870 3.75250 nm, 90.19135 90.77553 90.19243 deg
    # scrub the cells and return them (findall returns an empty list if nothing matches)
    pattern = r"Cell\sparameters\s(\d\.\d+)\s(\d\.\d+)\s(\d\.\d+)\snm,\s(\d+\.\d+)\s(\d+\.\d+)\s(\d+\.\d+)\sdeg"
    cells = re.findall( pattern, stream )
    return cells


def main( top_dir ):

    # find the stream files from the process directory
    print( "finding stream files" )
    stream_df = find_streams( top_dir )
    print( "done" )

    # make a results df for the unit cells and indexed numbers
    results_df = pd.DataFrame()

    # loop through the stream files and collect the unit-cell information
    print( "looping through stream files to collect unit cell, indexed information" )
    for _index, row in stream_df.iterrows():

        stream_pwd, clen = row[ "stream_pwd" ], row[ "clen" ]

        # open the stream file
        print( "scrubbing stream for clen={0}".format( clen ) )
        stream = open( stream_pwd, "r" ).read()

        # scrub the unit-cell information
        cells = scrub_uc( stream )

        # put the cells in a df
        cols = [ "a", "b", "c", "alpha", "beta", "gamma" ]
        cells_df = pd.DataFrame( cells, columns=cols )
        cells_df = cells_df.astype( float )

        # calc stats
        indexed = len( cells_df )
        std_a = cells_df.a.std()
        std_b = cells_df.b.std()
        std_c = cells_df.c.std()

        # put the stats in the results df
        stats = [ { "clen" : clen,
                    "indexed" : indexed,
                    "std_a" : std_a,
                    "std_b" : std_b,
                    "std_c" : std_c
                    } ]
        results_df_1 = pd.DataFrame( stats )
        results_df = pd.concat( ( results_df, results_df_1 ) )

    print( "done" )

    # reset the index
    results_df = results_df.reset_index( drop=True )

    # plot the results
    fig, ax1 = plt.subplots()

    # indexed-images plot
    color = "tab:red"
    ax1.set_xlabel( "clen" )
    ax1.set_ylabel( "indexed", color=color )
    ax1.plot( results_df.clen, results_df.indexed, color=color )
    ax1.tick_params( axis="y", labelcolor=color )

    # instantiate a second axes that shares the same x-axis
    ax2 = ax1.twinx()

    # std_a plot
    color = "tab:blue"
    ax2.set_ylabel( "st.deviation", color=color )
    ax2.plot( results_df.clen, results_df.std_a, color=color )
    ax2.tick_params( axis="y", labelcolor=color )

    # std_b plot
    ax2.plot( results_df.clen, results_df.std_b, color=color )

    # std_c plot
    ax2.plot( results_df.clen, results_df.std_c, color=color )

    fig.tight_layout()  # otherwise the right y-label is slightly clipped
    plt.show()


# variables
top_dir = "/sf/cristallina/data/p20590/work/process/jhb/detector_refinement/coarse_scan"

main( top_dir )
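The optimal clen is the one where the indexed count peaks while the unit-cell spread dips. A sketch of reading that off results_df automatically, assuming the loop above has filled it (the combined score is illustrative, not the author's method):

score = ( results_df.indexed / results_df.indexed.max()
          - results_df.std_a / results_df.std_a.max() )
best = results_df.loc[ score.idxmax() ]
print( "optimal clen ~ {0} m".format( best.clen ) )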
@@ -1,107 +0,0 @@
#!/usr/bin/env python3

# author J.Beale

"""
# aim
- the -16M variant is for large detectors
make an image file to input into pyFAI for initial detector beam-centre and detector-distance calibration
refer to Cristallina8M-calibration for the complete protocol
https://docs.google.com/document/d/1RoeUUogvRxX4M6uqGwkjf3dVJBabiMUx4ZxwcA5e9Dc/edit#

# protocol
take a scan of LaB6
## IMPORTANT ##
- save the images as photon counts - in slic/run_control set scale=beam energy
- detector_geometry=TRUE - saves the detector panels in their correct orientation
## scan inputs ##
- transmission < 0.01
- motor scan > 10 um per step
- 10 images per step, 100 steps
- use scan.json as the input for this script

# usage
python make-tiff.py -j <jungfrau-name> -s <path to scan file> -n <name of output file>

# output
creates a .npy file that can be loaded directly into pyFAI
"""

# modules
from matplotlib import pyplot as plt
import numpy as np
from sfdata import SFScanInfo
from tqdm import tqdm
import argparse


def convert_image( path_to_json, jungfrau, name ):

    # open the scan
    print( "opening scan" )
    scan = SFScanInfo( path_to_json )

    # number of steps in the scan
    nsteps = len(scan)

    # define step, ch and img_shape
    step = scan[0]
    ch = step[jungfrau]
    img_shape = ch[0].shape

    print( "stepping through scan and averaging images at each step" )
    # step through the scan and average the files from each position
    imgs_shape = (nsteps, *img_shape)
    imgs = np.empty(imgs_shape)
    for i, subset in tqdm(enumerate(scan)):

        # go through the data in batches so you don't run out of memory
        ch = subset[jungfrau]
        batch_means = []
        for _indices, batch in ch.in_batches(size=2):
            batch_means.append( np.mean(batch.data, axis=0) )

        # take the mean of the batch means (exact when all batches have the same size)
        imgs[i] = np.mean( batch_means, axis=0 )

    print( "done" )

    # average the per-step means
    print( "final average" )
    mean_image = imgs.mean(axis=0)
    print( "done" )

    # output to file
    print( "saving to .npy = {0}".format( name ) )
    np.save( "{0}.npy".format( name ), mean_image )
    print( "done" )

    # create a plot of the averaged scan
    fig, ax = plt.subplots()
    ax.imshow(mean_image, vmin=0, vmax=1000)
    plt.show()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-j",
        "--jungfrau",
        help="name of the jungfrau used, e.g., JF17T16V01 for Cristallina MX",
        type=str,
        default="JF17T16V01"
    )
    parser.add_argument(
        "-s",
        "--scan",
        help="path to the json scan file",
        type=str,
        default="/sf/cristallina/data/p20590/raw/run0003/meta/scan.json"
    )
    parser.add_argument(
        "-n",
        "--name",
        help="name of the output file",
        type=str,
        default="mean_scan"
    )
    args = parser.parse_args()

    convert_image( args.scan, args.jungfrau, args.name )
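A quick self-check of the batch-averaging logic, with a small random array standing in for the detector channel (illustrative only; here the batch size divides the frame count, so the mean of batch means is exact):

import numpy as np

data = np.random.rand( 10, 4, 4 )   # 10 fake frames
batch_means = [ data[ i:i+2 ].mean( axis=0 ) for i in range( 0, 10, 2 ) ]
assert np.allclose( np.mean( batch_means, axis=0 ), data.mean( axis=0 ) )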
BIN pyfai-tools/mean_scan.npy (new file)
Binary file not shown.
@@ -1,164 +0,0 @@
#!/usr/bin/python

import pandas as pd
import numpy as np
import regex as re
from scipy import constants
import argparse
from datetime import datetime
date = datetime.today().strftime('%y%m%d')


def calculate_new_corner_positions( beam_x, beam_y ):

    # make a df of the current corner positions
    positions = { "current_x" : [ 607, 1646, 607, 1646, 607, 1646, 538, 1577, 538, 1577, 538, 1577, 538, 3212, 514, 3143 ],
                  "current_y" : [ 0, 69, 550, 619, 1100, 1169, 1650, 1719, 2200, 2269, 2750, 2819, 597, 667, 1636, 1706 ]
                  }
    corner_df = pd.DataFrame( positions )

    # calculate the new corner positions
    corner_df[ "new_x" ] = corner_df.current_x.subtract( beam_x )
    corner_df[ "new_y" ] = corner_df.current_y.subtract( beam_y )

    # drop the old positions
    corner_df = corner_df[[ "new_x", "new_y" ]]

    return corner_df


def scrub_poni( path_to_poni_file ):

    # open the poni file
    poni_file = open( path_to_poni_file, "r" ).read()

    # regex patterns to scrub the poni data
    clen_m_pattern = r"Distance:\s(\d\.\d*)"
    poni1_m_pattern = r"Poni1:\s(\d\.\d*)"
    poni2_m_pattern = r"Poni2:\s(\d\.\d*)"
    wave_pattern = r"Wavelength:\s(\d\.\d*)e(-\d+)"

    # regex search
    clen = re.search( clen_m_pattern, poni_file ).group( 1 )
    poni1_m = re.search( poni1_m_pattern, poni_file ).group( 1 )
    poni2_m = re.search( poni2_m_pattern, poni_file ).group( 1 )
    wave = re.search( wave_pattern, poni_file ).group( 1, 2 )

    # calculate the proper wavelength
    wave = float(wave[0]) * np.float_power( 10, int( wave[1] ) )

    # calculate the beam centre in pixels (JUNGFRAU pixel size = 75 um = 0.000075 m)
    pixel_size = 0.000075
    poni1_p = float( poni1_m ) / pixel_size
    poni2_p = float( poni2_m ) / pixel_size

    # calculate the beam energy in eV
    eV = ( ( constants.c * constants.h ) / wave ) / constants.electron_volt

    # return poni1 = y, poni2 = x, the energy, and clen in m (as in the geom file)
    return poni1_p, poni2_p, eV, round( float( clen ), 5 )
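# worked example of the energy conversion above: a poni wavelength of
# 1e-10 m (1 Angstrom) gives
#   ( constants.c * constants.h ) / 1e-10 / constants.electron_volt
#   = ( 2.998e8 * 6.626e-34 ) / 1e-10 / 1.602e-19 ~= 12398 eV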
def write_new_positions( path_to_geom, beam_x, beam_y, clen, energy ):

    # open the current geometry file
    current_geom_file = open( path_to_geom, "r" ).read()

    # calculate the new corner positions
    corner_df = calculate_new_corner_positions( beam_x, beam_y )

    # replace the current corner positions with the new ones
    for i in range(0, 16):

        # x and y positions
        new_x, new_y = round( corner_df.new_x[i], 3 ), round( corner_df.new_y[i], 3 )

        # input the new x position
        current_pattern_x = r"p" + re.escape( str(i) ) + r"/corner_x = -?\d+\.\d+"
        new_pattern_x = r"p" + re.escape( str(i) ) + r"/corner_x = " + str( new_x )
        current_geom_file = re.sub( current_pattern_x, new_pattern_x, current_geom_file )

        # input the new y position
        current_pattern_y = r"p" + re.escape( str(i) ) + r"/corner_y = -?\d+\.\d+"
        new_pattern_y = r"p" + re.escape( str(i) ) + r"/corner_y = " + str( new_y )
        current_geom_file = re.sub( current_pattern_y, new_pattern_y, current_geom_file )

    # input the new clen
    current_clen = r"clen = \d\.\d+"
    new_clen = "clen = " + str( clen )
    current_geom_file = re.sub( current_clen, new_clen, current_geom_file )

    # input the new energy
    current_energy = r"photon_energy = \d+"
    new_energy = "photon_energy = " + str( energy )
    current_geom_file = re.sub( current_energy, new_energy, current_geom_file )

    # create the new geom file name
    geom_start = path_to_geom[:-5]
    new_geom_name = "{0}_{1}.geom".format( geom_start, date )

    # write the new geom file
    f = open( new_geom_name, "w" )
    f.write( current_geom_file )
    f.close()

    # return new_geom_name
    return new_geom_name


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-g",
        "--path_to_geom",
        help="give the path to the cristallina 8M geom file to be updated",
        type=str,
        default="/sf/cristallina/data/p20558/work/geom/optimised_geom_file/p20558_hewl_op.geom",
    )
    parser.add_argument(
        "-x",
        "--beam_x",
        help="beam_x in pixels",
        type=float,
        default=1603.73
    )
    parser.add_argument(
        "-y",
        "--beam_y",
        help="beam_y in pixels",
        type=float,
        default=1661.99
    )
    parser.add_argument(
        "-c",
        "--clen",
        help="detector distance in m",
        type=float,
        default=0.111
    )
    parser.add_argument(
        "-e",
        "--energy",
        help="photon energy in eV",
        type=int,
        default=12400
    )
    parser.add_argument(
        "-p",
        "--poni",
        help="path to the poni file",
        type=str,
    )
    args = parser.parse_args()

    # run the geom converter
    if args.poni is not None:
        print( "reading poni file" )
        beam_y, beam_x, eV, clen = scrub_poni( args.poni )
        print( "beam_x, beam_y = {0}, {1}\nphoton_energy = {2}\nclen = {3}".format( beam_x, beam_y, eV, clen ) )
        new_geom_name = write_new_positions( args.path_to_geom, beam_x, beam_y, clen, eV )
        print( "updated .geom file with poni calculations\nnew .geom = {0}".format( new_geom_name ) )
    else:
        print( "manually input positions" )
        print( "beam_x, beam_y = {0}, {1}\nphoton_energy = {2}\nclen = {3}".format( args.beam_x, args.beam_y, args.energy, args.clen ) )
        new_geom_name = write_new_positions( args.path_to_geom, args.beam_x, args.beam_y, args.clen, args.energy )
        print( "updated .geom file with manual inputs\nnew .geom = {0}".format( new_geom_name ) )
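Typical invocations of the script above, assuming it is saved as update_geom.py (the file name is hypothetical; the flags are from the argparse block):

# from a pyFAI .poni calibration:
#   python update_geom.py -p lab6.poni -g current.geom
# or with a manually supplied beam centre, clen and energy:
#   python update_geom.py -g current.geom -x 1603.73 -y 1661.99 -c 0.111 -e 12400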