#!/bin/bash # # Slurm script template for PMSCO calculations on the Ra cluster # based on run_mpi_HPL_nodes-2.sl by V. Markushin 2016-03-01 # # Use: # - enter the appropriate parameters and save as a new file. # - call the sbatch command to pass the job script. # request a specific number of nodes and tasks. # example: # sbatch --nodes=2 --ntasks-per-node=24 --time=02:00:00 run_pmsco.sl # # PMSCO arguments # copy this template to a new file, and set the arguments # # PMSCO_WORK_DIR # path to be used as working directory. # contains the script derived from this template. # receives output and temporary files. # # PMSCO_PROJECT_FILE # python module that declares the project and starts the calculation. # must include the file path relative to $PMSCO_WORK_DIR. # # PMSCO_SOURCE_DIR # path to the pmsco source directory # (the directory which contains the bin, lib, pmsco sub-directories) # # PMSCO_SCAN_FILES # list of scan files. # # PMSCO_OUT # name of output file. should not include a path. # # all paths are relative to $PMSCO_WORK_DIR or (better) absolute. # # # Further arguments # # PMSCO_JOBNAME (required) # the job name is the base name for output files. # # PMSCO_WALLTIME_HR (integer, required) # wall time limit in hours. must be integer, minimum 1. # this value is passed to PMSCO. # it should specify the same amount of wall time as requested from the scheduler. # # PMSCO_MODE (optional) # calculation mode: single, swarm, grid, gradient # # PMSCO_CODE (optional) # calculation code: edac, msc, test # # PMSCO_LOGLEVEL (optional) # request log level: DEBUG, INFO, WARNING, ERROR # create a log file based on the job name. # # PMSCO_PROJECT_ARGS (optional) # extra arguments that are parsed by the project module. 
#
#SBATCH --job-name="_PMSCO_JOBNAME"
#SBATCH --output="_PMSCO_JOBNAME.o.%j"
#SBATCH --error="_PMSCO_JOBNAME.e.%j"

# Template placeholders (_PMSCO_*) are substituted before submission
# (see the header comment for the meaning of each variable).
PMSCO_WORK_DIR="_PMSCO_WORK_DIR"
PMSCO_JOBNAME="_PMSCO_JOBNAME"
PMSCO_WALLTIME_HR=_PMSCO_WALLTIME_HR
PMSCO_PROJECT_FILE="_PMSCO_PROJECT_FILE"
PMSCO_MODE="_PMSCO_MODE"
PMSCO_CODE="_PMSCO_CODE"
PMSCO_SOURCE_DIR="_PMSCO_SOURCE_DIR"
PMSCO_SCAN_FILES="_PMSCO_SCAN_FILES"
PMSCO_OUT="_PMSCO_JOBNAME"
PMSCO_LOGLEVEL="_PMSCO_LOGLEVEL"
PMSCO_PROJECT_ARGS="_PMSCO_PROJECT_ARGS"

# Toolchain for the Ra cluster: python 3.6, GCC, OpenMPI, plus the pmsco3 conda env.
module load psi-python36/4.4.0
module load gcc/4.8.5
module load openmpi/3.1.3
source activate pmsco3

echo '================================================================================'
echo "=== Running $0 at the following time and place:"
date
/bin/hostname
# Quote the path and abort if the working directory is missing:
# continuing in the wrong directory would scatter output and temp files.
cd "$PMSCO_WORK_DIR" || exit 1
pwd
ls -lA

# the intel compiler is currently not compatible with mpi4py. -mm 170131
#echo
#echo '================================================================================'
#echo "=== Setting the environment to use Intel Cluster Studio XE 2016 Update 2 intel/16.2:"
#cmd="source /opt/psi/Programming/intel/16.2/bin/compilervars.sh intel64"
#echo $cmd
#$cmd

echo
echo '================================================================================'
echo "=== The environment is set as following:"
env

echo
echo '================================================================================'
echo "BEGIN test"
echo "=== Intel native mpirun will get the number of nodes and the machinefile from Slurm"
which mpirun
# echo-then-run pattern: log the exact command line before executing it.
# Word splitting of the unquoted $cmd is intentional here.
cmd="mpirun /bin/hostname"
echo $cmd
$cmd
echo "END test"

echo
echo '================================================================================'
echo "BEGIN mpirun pmsco"
echo "Intel native mpirun will get the number of nodes and the machinefile from Slurm"
echo
echo "code revision"
# Record the git revision of the code and byte-compile the python sources
# once per job, so the parallel ranks do not race on .pyc generation.
cd "$PMSCO_SOURCE_DIR" || exit 1
git log --pretty=tformat:'%h %ai %d' -1
python -m compileall pmsco
python -m compileall projects
cd "$PMSCO_WORK_DIR" || exit 1
echo
PMSCO_CMD="python $PMSCO_SOURCE_DIR/pmsco $PMSCO_PROJECT_FILE"
PMSCO_ARGS="$PMSCO_PROJECT_ARGS"

# Prepend each optional argument only when the corresponding variable is
# non-empty, so empty placeholders do not produce dangling flags.
if [ -n "$PMSCO_SCAN_FILES" ]; then
    PMSCO_ARGS="-s $PMSCO_SCAN_FILES $PMSCO_ARGS"
fi
if [ -n "$PMSCO_CODE" ]; then
    PMSCO_ARGS="-c $PMSCO_CODE $PMSCO_ARGS"
fi
if [ -n "$PMSCO_MODE" ]; then
    PMSCO_ARGS="-m $PMSCO_MODE $PMSCO_ARGS"
fi
if [ -n "$PMSCO_OUT" ]; then
    PMSCO_ARGS="-o $PMSCO_OUT $PMSCO_ARGS"
fi
# Validate before the numeric comparison: with an empty or unsubstituted
# placeholder (e.g. the literal "_PMSCO_WALLTIME_HR"), a bare
# [ ... -ge 1 ] would abort with "integer expression expected".
if [[ "$PMSCO_WALLTIME_HR" =~ ^[0-9]+$ ]] && [ "$PMSCO_WALLTIME_HR" -ge 1 ]; then
    PMSCO_ARGS="-t $PMSCO_WALLTIME_HR $PMSCO_ARGS"
fi
if [ -n "$PMSCO_LOGLEVEL" ]; then
    PMSCO_ARGS="--log-level $PMSCO_LOGLEVEL --log-file $PMSCO_JOBNAME.log $PMSCO_ARGS"
fi

which mpirun
ls -l "$PMSCO_SOURCE_DIR"
ls -l "$PMSCO_PROJECT_FILE"

# Do not use the OpenMPI specific options, like "-x LD_LIBRARY_PATH", with the Intel mpirun.
# echo-then-run pattern: log the exact command line before executing it.
# Word splitting of the unquoted $cmd is intentional here.
cmd="mpirun $PMSCO_CMD $PMSCO_ARGS"
echo $cmd
$cmd
echo "END mpirun pmsco"
echo '================================================================================'
date
ls -lAtr
echo '================================================================================'

exit 0