add files for public distribution
based on internal repository 0a462b6 2017-11-22 14:41:39 +0100
This commit is contained in:
157
bin/pmsco.ra.template
Normal file
157
bin/pmsco.ra.template
Normal file
@ -0,0 +1,157 @@
|
||||
#!/bin/bash
#
# Slurm script template for PMSCO calculations on the Ra cluster
# based on run_mpi_HPL_nodes-2.sl by V. Markushin 2016-03-01
#
# Use:
# - enter the appropriate parameters and save as a new file.
# - call the sbatch command to pass the job script.
#   request a specific number of nodes and tasks.
#   example:
#     sbatch --nodes=2 --ntasks-per-node=24 --time=02:00:00 run_pmsco.sl
#
# PMSCO arguments
# copy this template to a new file, and set the arguments
# (the _PMSCO_* placeholders are replaced by sed in the submission script).
#
# PMSCO_WORK_DIR
#   path to be used as working directory.
#   contains the script derived from this template.
#   receives output and temporary files.
#
# PMSCO_PROJECT_FILE
#   python module that declares the project and starts the calculation.
#   must include the file path relative to $PMSCO_WORK_DIR.
#
# PMSCO_SOURCE_DIR
#   path to the pmsco source directory
#   (the directory which contains the bin, lib, pmsco sub-directories)
#
# PMSCO_SCAN_FILES
#   list of scan files.
#
# PMSCO_OUT
#   name of output file. should not include a path.
#
# all paths are relative to $PMSCO_WORK_DIR or (better) absolute.
#
#
# Further arguments
#
# PMSCO_JOBNAME (required)
#   the job name is the base name for output files.
#
# PMSCO_WALLTIME_HR (integer, required)
#   wall time limit in hours. must be integer, minimum 1.
#   this value is passed to PMSCO.
#   it should specify the same amount of wall time as requested from the scheduler.
#
# PMSCO_MODE (optional)
#   calculation mode: single, swarm, grid, gradient
#
# PMSCO_CODE (optional)
#   calculation code: edac, msc, test
#
# PMSCO_LOGLEVEL (optional)
#   request log level: DEBUG, INFO, WARNING, ERROR
#   create a log file based on the job name.
#
# PMSCO_PROJECT_ARGS (optional)
#   extra arguments that are parsed by the project module.
#
#SBATCH --job-name="_PMSCO_JOBNAME"
#SBATCH --output="_PMSCO_JOBNAME.o.%j"
#SBATCH --error="_PMSCO_JOBNAME.e.%j"

PMSCO_WORK_DIR="_PMSCO_WORK_DIR"
PMSCO_JOBNAME="_PMSCO_JOBNAME"
PMSCO_WALLTIME_HR=_PMSCO_WALLTIME_HR

PMSCO_PROJECT_FILE="_PMSCO_PROJECT_FILE"
PMSCO_MODE="_PMSCO_MODE"
PMSCO_CODE="_PMSCO_CODE"
PMSCO_SOURCE_DIR="_PMSCO_SOURCE_DIR"
PMSCO_SCAN_FILES="_PMSCO_SCAN_FILES"
PMSCO_OUT="_PMSCO_JOBNAME"
PMSCO_LOGLEVEL="_PMSCO_LOGLEVEL"
PMSCO_PROJECT_ARGS="_PMSCO_PROJECT_ARGS"

module load psi-python27/2.4.1
module load gcc/4.8.5
module load openmpi/1.10.2
source activate pmsco

echo '================================================================================'
echo "=== Running $0 at the following time and place:"
date
/bin/hostname
# abort if the working directory is not reachable - otherwise the job
# would silently run (and litter output files) in the wrong directory.
cd "$PMSCO_WORK_DIR" || exit 1
pwd
ls -lA

# the intel compiler is currently not compatible with mpi4py. -mm 170131
#echo
#echo '================================================================================'
#echo "=== Setting the environment to use Intel Cluster Studio XE 2016 Update 2 intel/16.2:"
#cmd="source /opt/psi/Programming/intel/16.2/bin/compilervars.sh intel64"
#echo $cmd
#$cmd

echo
echo '================================================================================'
echo "=== The environment is set as following:"
env

echo
echo '================================================================================'
echo "BEGIN test"
echo "=== Intel native mpirun will get the number of nodes and the machinefile from Slurm"
which mpirun
# $cmd is expanded unquoted on purpose: word splitting separates the
# program from its arguments.
cmd="mpirun /bin/hostname"
echo $cmd
$cmd
echo "END test"

echo
echo '================================================================================'
echo "BEGIN mpirun pmsco"
echo "Intel native mpirun will get the number of nodes and the machinefile from Slurm"
echo
echo "code revision"
cd "$PMSCO_SOURCE_DIR" || exit 1
git log --pretty=tformat:'%h %ai %d' -1
# pre-compile so the parallel ranks do not race to write .pyc files
python -m compileall pmsco
python -m compileall projects
cd "$PMSCO_WORK_DIR" || exit 1
echo

# assemble the PMSCO command line from the optional arguments
PMSCO_CMD="python $PMSCO_PROJECT_FILE"
PMSCO_ARGS="$PMSCO_PROJECT_ARGS"
if [ -n "$PMSCO_SCAN_FILES" ]; then
    PMSCO_ARGS="-s $PMSCO_SCAN_FILES $PMSCO_ARGS"
fi
if [ -n "$PMSCO_CODE" ]; then
    PMSCO_ARGS="-c $PMSCO_CODE $PMSCO_ARGS"
fi
if [ -n "$PMSCO_MODE" ]; then
    PMSCO_ARGS="-m $PMSCO_MODE $PMSCO_ARGS"
fi
if [ -n "$PMSCO_OUT" ]; then
    PMSCO_ARGS="-o $PMSCO_OUT $PMSCO_ARGS"
fi
if [ "$PMSCO_WALLTIME_HR" -ge 1 ]; then
    PMSCO_ARGS="-t $PMSCO_WALLTIME_HR $PMSCO_ARGS"
fi
if [ -n "$PMSCO_LOGLEVEL" ]; then
    PMSCO_ARGS="--log-level $PMSCO_LOGLEVEL --log-file $PMSCO_JOBNAME.log $PMSCO_ARGS"
fi

which mpirun
ls -l "$PMSCO_SOURCE_DIR"
ls -l "$PMSCO_PROJECT_FILE"
# Do no use the OpenMPI specific options, like "-x LD_LIBRARY_PATH", with the Intel mpirun.
cmd="mpirun $PMSCO_CMD $PMSCO_ARGS"
echo $cmd
$cmd
echo "END mpirun pmsco"
echo '================================================================================'
date
ls -lAtr
echo '================================================================================'

exit 0
|
178
bin/pmsco.sge.template
Normal file
178
bin/pmsco.sge.template
Normal file
@ -0,0 +1,178 @@
|
||||
#!/bin/bash
#
# SGE script template for MSC calculations
#
# This script uses the tight integration of openmpi-1.4.5-gcc-4.6.3 in SGE
# using the parallel environment (PE) "orte".
# This script must be used only with qsub command - do NOT run it as a stand-alone
# shell script because it will start all processes on the local node.
#
# PhD arguments
# copy this template to a new file, and set the arguments
# (the _PHD_* placeholders are replaced by sed in the submission script).
#
# PHD_WORK_DIR
#   path to be used as working directory.
#   contains the SGE script derived from this template.
#   receives output and temporary files.
#
# PHD_PROJECT_FILE
#   python module that declares the project and starts the calculation.
#   must include the file path relative to $PHD_WORK_DIR.
#
# PHD_SOURCE_DIR
#   path to the pmsco source directory
#   (the directory which contains the bin, lib, pmsco sub-directories)
#
# PHD_SCAN_FILES
#   list of scan files.
#
# PHD_OUT
#   name of output file. should not include a path.
#
# all paths are relative to $PHD_WORK_DIR or (better) absolute.
#
#
# Further arguments
#
# PHD_JOBNAME (required)
#   the job name is the base name for output files.
#
# PHD_NODES (required)
#   number of computing nodes (processes) to allocate for the job.
#
# PHD_WALLTIME_HR (required)
#   wall time limit (hours)
#
# PHD_WALLTIME_MIN (required)
#   wall time limit (minutes)
#
# PHD_MODE (optional)
#   calculation mode: single, swarm, grid, gradient
#
# PHD_CODE (optional)
#   calculation code: edac, msc, test
#
# PHD_LOGLEVEL (optional)
#   request log level: DEBUG, INFO, WARNING, ERROR
#   create a log file based on the job name.
#
# PHD_PROJECT_ARGS (optional)
#   extra arguments that are parsed by the project module.
#

PHD_WORK_DIR="_PHD_WORK_DIR"
PHD_JOBNAME="_PHD_JOBNAME"
PHD_NODES=_PHD_NODES
PHD_WALLTIME_HR=_PHD_WALLTIME_HR
PHD_WALLTIME_MIN=_PHD_WALLTIME_MIN

PHD_PROJECT_FILE="_PHD_PROJECT_FILE"
PHD_MODE="_PHD_MODE"
PHD_CODE="_PHD_CODE"
PHD_SOURCE_DIR="_PHD_SOURCE_DIR"
PHD_SCAN_FILES="_PHD_SCAN_FILES"
PHD_OUT="_PHD_JOBNAME"
PHD_LOGLEVEL="_PHD_LOGLEVEL"
PHD_PROJECT_ARGS="_PHD_PROJECT_ARGS"

# Define your job name, parallel environment with the number of slots, and run time:
#$ -cwd
#$ -N _PHD_JOBNAME.job
#$ -pe orte _PHD_NODES
#$ -l ram=2G
#$ -l s_rt=_PHD_WALLTIME_HR:_PHD_WALLTIME_MIN:00
#$ -l h_rt=_PHD_WALLTIME_HR:_PHD_WALLTIME_MIN:30
#$ -V

###################################################
# Fix the SGE environment-handling bug (bash):
source /usr/share/Modules/init/sh
export -n -f module

# Load the environment modules for this job (the order may be important):
module load python/python-2.7.5
module load gcc/gcc-4.6.3
module load mpi/openmpi-1.4.5-gcc-4.6.3
module load blas/blas-20110419-gcc-4.6.3
module load lapack/lapack-3.4.2-gcc-4.6.3
export LD_LIBRARY_PATH=$PHD_SOURCE_DIR/lib/:$LD_LIBRARY_PATH

###################################################
# Set the environment variables:
MPIEXEC=$OPENMPI/bin/mpiexec
# OPENMPI is set by the mpi/openmpi-* module.

export OMP_NUM_THREADS=1
export OMPI_MCA_btl='openib,sm,self'
# export OMPI_MCA_orte_process_binding=core

##############
# BEGIN DEBUG
# Print the SGE environment on master host:
echo "================================================================"
echo "=== SGE job JOB_NAME=$JOB_NAME JOB_ID=$JOB_ID"
echo "================================================================"
echo DATE=`date`
echo HOSTNAME=`hostname`
echo PWD=`pwd`
echo "NSLOTS=$NSLOTS"
echo "PE_HOSTFILE=$PE_HOSTFILE"
cat $PE_HOSTFILE
echo "================================================================"
echo "Running environment:"
env
echo "================================================================"
echo "Loaded environment modules:"
module list 2>&1
echo
# END DEBUG
##############

##############
# Setup
# abort if a directory is unreachable - otherwise compileall or mpiexec
# would silently run in the wrong directory.
cd "$PHD_SOURCE_DIR" || exit 1
# pre-compile so the parallel ranks do not race to write .pyc files
python -m compileall .

cd "$PHD_WORK_DIR" || exit 1
# disable core dumps
ulimit -c 0

###################################################
# The command to run with mpiexec:
CMD="python $PHD_PROJECT_FILE"
ARGS="$PHD_PROJECT_ARGS"

if [ -n "$PHD_SCAN_FILES" ]; then
    ARGS="-s $PHD_SCAN_FILES -- $ARGS"
fi

if [ -n "$PHD_CODE" ]; then
    ARGS="-c $PHD_CODE $ARGS"
fi

if [ -n "$PHD_MODE" ]; then
    ARGS="-m $PHD_MODE $ARGS"
fi

if [ -n "$PHD_OUT" ]; then
    ARGS="-o $PHD_OUT $ARGS"
fi

if [ "$PHD_WALLTIME_HR" -ge 1 ]; then
    ARGS="-t $PHD_WALLTIME_HR $ARGS"
else
    ARGS="-t 0.5 $ARGS"
fi

if [ -n "$PHD_LOGLEVEL" ]; then
    ARGS="--log-level $PHD_LOGLEVEL --log-file $PHD_JOBNAME.log $ARGS"
fi

# The MPI command to run:
MPICMD="$MPIEXEC --prefix $OPENMPI -x PATH -x LD_LIBRARY_PATH -x OMP_NUM_THREADS -x OMPI_MCA_btl -np $NSLOTS $CMD $ARGS"
echo "Command to run:"
echo "$MPICMD"
echo
# exec replaces the shell; $MPICMD is expanded unquoted on purpose
# so that word splitting separates the program from its arguments.
exec $MPICMD

exit 0
|
145
bin/qpmsco.ra.sh
Executable file
145
bin/qpmsco.ra.sh
Executable file
@ -0,0 +1,145 @@
|
||||
#!/bin/sh
#
# submission script for PMSCO calculations on the Ra cluster
#
# generates a Slurm job script from bin/pmsco.ra.template and submits it
# with sbatch. POSIX sh only - do not introduce bashisms.

if [ $# -lt 1 ]; then
    echo "Usage: $0 [NOSUB] JOBNAME NODES TASKS_PER_NODE WALLTIME:HOURS PROJECT MODE [ARGS [ARGS [...]]]"
    echo ""
    echo "  NOSUB (optional): do not submit the script to the queue. default: submit."
    echo "  JOBNAME (text): name of job. use only alphanumeric characters, no spaces."
    echo "  NODES (integer): number of computing nodes. (1 node = 24 or 32 processors)."
    echo "    do not specify more than 2."
    echo "  TASKS_PER_NODE (integer): 1...24, or 32."
    echo "    24 or 32 for full-node allocation."
    echo "    1...23 for shared node allocation."
    echo "  WALLTIME:HOURS (integer): requested wall time."
    echo "    1...24 for day partition"
    echo "    24...192 for week partition"
    echo "    1...192 for shared partition"
    echo "  PROJECT: python module (file path) that declares the project and starts the calculation."
    echo "  MODE: PMSCO calculation mode (single|swarm|gradient|grid)."
    echo "  ARGS (optional): any number of further PMSCO or project arguments (except mode and time)."
    echo ""
    echo "the job script complete with the program code and input/output data is generated in ~/jobs/\$JOBNAME"
    exit 1
fi

# location of the pmsco package is derived from the path of this script
SCRIPTDIR="$(dirname "$(readlink -f "$0")")"
SOURCEDIR="$SCRIPTDIR/.."
PMSCO_SOURCE_DIR="$SOURCEDIR"

# read arguments
# note: '==' is a bashism - use POSIX '=' under #!/bin/sh
if [ "$1" = "NOSUB" ]; then
    NOSUB="true"
    shift
else
    NOSUB="false"
fi

PMSCO_JOBNAME=$1
shift

PMSCO_NODES=$1
PMSCO_TASKS_PER_NODE=$2
# informational only - the job script derives the task count from Slurm
PMSCO_TASKS=$((PMSCO_NODES * PMSCO_TASKS_PER_NODE))
shift 2

PMSCO_WALLTIME_HR=$1
PMSCO_WALLTIME_MIN=$((PMSCO_WALLTIME_HR * 60))
shift

# select partition based on wall time and node sharing
if [ "$PMSCO_WALLTIME_HR" -ge 25 ]; then
    PMSCO_PARTITION="week"
else
    PMSCO_PARTITION="day"
fi
if [ "$PMSCO_TASKS_PER_NODE" -lt 24 ]; then
    PMSCO_PARTITION="shared"
fi

PMSCO_PROJECT_FILE="$(readlink -f "$1")"
shift

PMSCO_MODE="$1"
shift

PMSCO_PROJECT_ARGS="$*"

# use defaults, override explicitly in PMSCO_PROJECT_ARGS if necessary
PMSCO_SCAN_FILES=""
PMSCO_LOGLEVEL=""
PMSCO_CODE=""

# set up working directory ~/jobs/$PMSCO_JOBNAME
cd ~ || exit 1
mkdir -p "jobs/$PMSCO_JOBNAME" || exit 1
cd "jobs/$PMSCO_JOBNAME" || exit 1
WORKDIR="$(pwd)"
PMSCO_WORK_DIR="$WORKDIR"

# provide revision information, requires git repository
cd "$SOURCEDIR" || exit 1
PMSCO_REV=$(git log --pretty=format:"Data revision %h, %ai" -1)
if [ $? -ne 0 ]; then
    PMSCO_REV="Data revision unknown, "$(date +"%F %T %z")
fi
cd "$WORKDIR" || exit 1
echo "$PMSCO_REV" > revision.txt

# generate job script from template
# note: ':' is the sed delimiter, so substituted values must not contain colons.
sed -e "s:_PMSCO_WORK_DIR:$PMSCO_WORK_DIR:g" \
    -e "s:_PMSCO_JOBNAME:$PMSCO_JOBNAME:g" \
    -e "s:_PMSCO_NODES:$PMSCO_NODES:g" \
    -e "s:_PMSCO_WALLTIME_HR:$PMSCO_WALLTIME_HR:g" \
    -e "s:_PMSCO_PROJECT_FILE:$PMSCO_PROJECT_FILE:g" \
    -e "s:_PMSCO_PROJECT_ARGS:$PMSCO_PROJECT_ARGS:g" \
    -e "s:_PMSCO_CODE:$PMSCO_CODE:g" \
    -e "s:_PMSCO_MODE:$PMSCO_MODE:g" \
    -e "s:_PMSCO_SOURCE_DIR:$PMSCO_SOURCE_DIR:g" \
    -e "s:_PMSCO_SCAN_FILES:$PMSCO_SCAN_FILES:g" \
    -e "s:_PMSCO_LOGLEVEL:$PMSCO_LOGLEVEL:g" \
    "$SCRIPTDIR/pmsco.ra.template" > "$PMSCO_JOBNAME.job"

chmod u+x "$PMSCO_JOBNAME.job"

# request nodes and tasks
#
# The option --ntasks-per-node is meant to be used with the --nodes option.
# (For the --ntasks option, the default is one task per node, use the --cpus-per-task option to change this default.)
#
# sbatch options
# --cores-per-socket=16
#   32 cores per node
# --partition=[shared|day|week]
# --time=8-00:00:00
#   override default time limit (2 days in long queue)
#   time formats: "minutes", "minutes:seconds", "hours:minutes:seconds", "days-hours", "days-hours:minutes", "days-hours:minutes:seconds"
# --mail-type=ALL
# --test-only
#   check script but do not submit
#
SLURM_ARGS="--nodes=$PMSCO_NODES --ntasks-per-node=$PMSCO_TASKS_PER_NODE"

if [ "$PMSCO_TASKS_PER_NODE" -gt 24 ]; then
    SLURM_ARGS="--cores-per-socket=16 $SLURM_ARGS"
fi

SLURM_ARGS="--partition=$PMSCO_PARTITION $SLURM_ARGS"
SLURM_ARGS="--time=$PMSCO_WALLTIME_HR:00:00 $SLURM_ARGS"

# $CMD is expanded unquoted on purpose: word splitting separates
# sbatch from its arguments.
CMD="sbatch $SLURM_ARGS $PMSCO_JOBNAME.job"
echo $CMD
if [ "$NOSUB" != "true" ]; then
    $CMD
fi

exit 0
|
128
bin/qpmsco.sge
Executable file
128
bin/qpmsco.sge
Executable file
@ -0,0 +1,128 @@
|
||||
#!/bin/sh
#
# submission script for PMSCO calculations on Merlin cluster
#
# generates an SGE job script from bin/pmsco.sge.template and submits it
# with qsub. POSIX sh only - do not introduce bashisms.

if [ $# -lt 1 ]; then
    echo "Usage: $0 [NOSUB] JOBNAME NODES WALLTIME:HOURS PROJECT MODE [LOG_LEVEL]"
    echo ""
    echo "  NOSUB (optional): do not submit the script to the queue. default: submit."
    echo "  WALLTIME:HOURS (integer): sets the wall time limits."
    echo "    soft limit = HOURS:00:00"
    echo "    hard limit = HOURS:00:30"
    echo "    for short.q: HOURS = 0 (-> MINUTES=30)"
    echo "    for all.q: HOURS <= 24"
    echo "    for long.q: HOURS <= 96"
    echo "  PROJECT: python module (file path) that declares the project and starts the calculation."
    echo "  MODE: PMSCO calculation mode (single|swarm|gradient|grid)."
    echo "  LOG_LEVEL (optional): one of DEBUG, INFO, WARNING, ERROR if log files should be produced."
    echo ""
    echo "the job script complete with the program code and input/output data is generated in ~/jobs/\$JOBNAME"
    exit 1
fi

# location of the pmsco package is derived from the path of this script
SCRIPTDIR="$(dirname "$(readlink -f "$0")")"
SOURCEDIR="$SCRIPTDIR/.."
PHD_SOURCE_DIR="$SOURCEDIR"

PHD_CODE="edac"

# read arguments
# note: '==' is a bashism - use POSIX '=' under #!/bin/sh
if [ "$1" = "NOSUB" ]; then
    NOSUB="true"
    shift
else
    NOSUB="false"
fi

PHD_JOBNAME=$1
shift

PHD_NODES=$1
shift

PHD_WALLTIME_HR=$1
PHD_WALLTIME_MIN=0
shift

PHD_PROJECT_FILE="$(readlink -f "$1")"
PHD_PROJECT_ARGS=""
shift

PHD_MODE="$1"
shift

PHD_LOGLEVEL=""
if [ "$1" = "DEBUG" ] || [ "$1" = "INFO" ] || [ "$1" = "WARNING" ] || [ "$1" = "ERROR" ]; then
    PHD_LOGLEVEL="$1"
    shift
fi

# ignore remaining arguments
PHD_SCAN_FILES=""

# select allowed queues
QUEUE=short.q,all.q,long.q

# for short queue (limit 30 minutes)
if [ "$PHD_WALLTIME_HR" -lt 1 ]; then
    PHD_WALLTIME_HR=0
    PHD_WALLTIME_MIN=30
fi

# set up working directory ~/jobs/$PHD_JOBNAME
cd ~ || exit 1
mkdir -p "jobs/$PHD_JOBNAME" || exit 1
cd "jobs/$PHD_JOBNAME" || exit 1
WORKDIR="$(pwd)"
PHD_WORK_DIR="$WORKDIR"

# provide revision information, requires git repository
cd "$SOURCEDIR" || exit 1
PHD_REV=$(git log --pretty=format:"Data revision %h, %ad" --date=iso -1)
if [ $? -ne 0 ]; then
    PHD_REV="Data revision unknown, "$(date +"%F %T %z")
fi
cd "$WORKDIR" || exit 1
echo "$PHD_REV" > revision.txt

# generate job script from template
# note: ':' is the sed delimiter, so substituted values must not contain colons.
sed -e "s:_PHD_WORK_DIR:$PHD_WORK_DIR:g" \
    -e "s:_PHD_JOBNAME:$PHD_JOBNAME:g" \
    -e "s:_PHD_NODES:$PHD_NODES:g" \
    -e "s:_PHD_WALLTIME_HR:$PHD_WALLTIME_HR:g" \
    -e "s:_PHD_WALLTIME_MIN:$PHD_WALLTIME_MIN:g" \
    -e "s:_PHD_PROJECT_FILE:$PHD_PROJECT_FILE:g" \
    -e "s:_PHD_PROJECT_ARGS:$PHD_PROJECT_ARGS:g" \
    -e "s:_PHD_CODE:$PHD_CODE:g" \
    -e "s:_PHD_MODE:$PHD_MODE:g" \
    -e "s:_PHD_SOURCE_DIR:$PHD_SOURCE_DIR:g" \
    -e "s:_PHD_SCAN_FILES:$PHD_SCAN_FILES:g" \
    -e "s:_PHD_LOGLEVEL:$PHD_LOGLEVEL:g" \
    "$SCRIPTDIR/pmsco.sge.template" > "$PHD_JOBNAME.job"

chmod u+x "$PHD_JOBNAME.job"

if [ "$NOSUB" != "true" ]; then

    # suppress bash error [stackoverflow.com/questions/10496758]
    unset module

    # submit the job script
    # EMAIL must be defined in the environment
    if [ -n "$EMAIL" ]; then
        qsub -q $QUEUE -m ae -M "$EMAIL" "$PHD_JOBNAME.job"
    else
        qsub -q $QUEUE "$PHD_JOBNAME.job"
    fi

fi

exit 0
|
Reference in New Issue
Block a user