Files
cristallina_analysis_package/cluster_tools/jupyter_on_sf.sh

52 lines
1.9 KiB
Bash
Executable File

#!/bin/bash
# Slurm submission script to start a JupyterLab instance on the SF cluster for
# data analysis of Cristallina @ SwissFEL.
# Requirements: user account on SF and access to /sf/cristallina
# To execute from the Cristallina console use:
#   ssh your_username@ra.psi.ch "srun /sf/cristallina/applications/conda/jupyter_on_ra.sh"
# or, when more computing power is needed, submit a batch job, which takes the
# #SBATCH options below into account:
#   ssh your_username@ra.psi.ch "sbatch /sf/cristallina/applications/conda/jupyter_on_ra.sh"
# Alternatively we can also run on the SwissFEL computing nodes: sf-cn-1
#
# NOTE: #SBATCH directives are only honored by sbatch (ignored under srun or a
# plain shell) and must precede the first executable command.
#SBATCH --job-name=analysis # Job name
#SBATCH --partition prod-aramis # or week, shared, hour, day-rhel8
#SBATCH --nodes=1 # Run all processes on a single node
#SBATCH --ntasks=1 # Run a single task
#SBATCH --output=jupyterlab_%j_%N.log # Standard output and error log

# Fail fast: abort on command errors and failed pipeline stages so a broken
# environment setup does not silently fall through to 'jupyter lab'.
# NOTE(review): '-u' is intentionally omitted — conda activation scripts are
# not guaranteed to be set -u clean; confirm before tightening.
set -eo pipefail

echo "Starting Jupyterlab..."
echo "Date = $(date)"
echo "Hostname = $(hostname -s)"
echo "Working Directory = $(pwd)"
echo ""
# SLURM_* variables are only defined inside a Slurm allocation; default to
# "n/a" so an interactive (non-Slurm) run still prints a sensible line.
echo "Number of Nodes Allocated = ${SLURM_JOB_NUM_NODES:-n/a}"
echo "Number of Tasks Allocated = ${SLURM_NTASKS:-n/a}"
echo "Number of Cores/Task Allocated = ${SLURM_CPUS_PER_TASK:-n/a}"

# Load the minimal bootstrap conda environment (provides the 'conda' function)...
source /sf/cristallina/applications/conda/envs/miniconda/etc/profile.d/conda.sh
# ...and activate the actual analysis environment.
# A more conservative alternative:
#   conda activate /sf/cristallina/applications/conda/envs/analysis_forge
conda activate /sf/cristallina/applications/conda/envs/analysis_edge

# Password equivalent for the JupyterLab web UI.
# NOTE(review): this token is hardcoded and shared; anyone who can reach the
# node's port can log in — consider a per-user secret.
export JUPYTER_TOKEN=cristallina

# Single-user alternative: jupyter lab --no-browser --ip 0.0.0.0
# Experimental: use the collaboration environment with a project-provided
# SQLite-backed YStore.
jupyter lab --no-browser --ip 0.0.0.0 --YDocExtension.ystore_class=cristallina.jupyter_helper.MySQLiteYStore

echo "Jupyterlab finished."