cristallina_analysis_package/cluster_tools/start_jupyter.sh

#!/bin/bash
# clean up old log files (create the archive directory if it does not exist yet)
mkdir -p ~/old_logs
mv jupyterlab*.log ~/old_logs 2>/dev/null
# shortname, used in slurm queue
SHORT=$(echo $USER | cut -c1-7)
# JupyterLab does not cope well with several instances running as the same
# user on an NFS filesystem, as this corrupts its internal databases.
# So we abort here in that case.
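# (squeue's third column is the job name; the batch job submitted below is
# assumed to carry the name "analysis", set inside jupyter_on_ra.sh)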
if [ "$(squeue | grep $SHORT | awk '{print $3}')" == "analysis" ]; then
echo "Jupyter instance already running. Aborting."
exit 1
fi
# submits the batch job to the RA cluster
sbatch jupyter_on_ra.sh
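# jupyter_on_ra.sh is expected to live in the current directory and, presumably,
# writes its output into a jupyterlab*.log file that is parsed below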
# prepare spinner
i=1
sp="/-\|"
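# ${sp:i++%${#sp}:1} prints one character of the spinner string per iteration,
# cycling through "/-\|" as i increases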
printf 'Waiting for Jupyterlab to start. '
# wait until the Jupyterlab instance shows up
while [ "$(squeue | grep $SHORT | wc -w )" -lt 2 ]
do
sleep 0.25
printf "\b${sp:i++%${#sp}:1}"
done
printf '\nWaiting for Jupyterlab logfile. '
# and wait till there is a logfile (parsing ls, I know...)
while [ "$(ls jupyterlab*.log 2>/dev/null | xargs cat | wc -w )" -lt 50 ]
do
sleep 0.25
printf "\b${sp:i++%${#sp}:1}"
done
# wait a bit till the startup of the jupyterlab server is complete
sleep 2
printf '\nScanning for corresponding node in logfile. '
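# pick the most recently modified jupyterlab logfile (sorted by modification time)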
LOGFILE=$(find . -maxdepth 1 -name "jupyterlab*" -printf "%T@ %Tc %p\n" | sort -n | tail -n 1 | awk '{print $NF}')
until grep -q -E "Jupyter Server.*is running at" "$LOGFILE"
do
    sleep 0.25
    printf "\b${sp:i++%${#sp}:1}"
done
# and output the corresponding entry
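# the logfile shows the token only as the placeholder "token=..."; the batch
# script presumably configures the fixed token "cristallina", so substitute it
# back in for convenience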
OUTPUT=$(grep -o "http://r.*" "$LOGFILE")
printf "\nJupyter instances running at: \n${OUTPUT/token=.../token=cristallina}\n"