From c69800e4be568cf0605681e0c228c11da7b0560b Mon Sep 17 00:00:00 2001
From: caubet_m
Date: Wed, 30 Jul 2025 17:16:13 +0200
Subject: [PATCH] Update MOTD

---
 .../interactive-jobs.md | 38 ++++++++++++-------
 1 file changed, 24 insertions(+), 14 deletions(-)

diff --git a/pages/merlin7/03-Slurm-General-Documentation/interactive-jobs.md b/pages/merlin7/03-Slurm-General-Documentation/interactive-jobs.md
index bf1a081..362a9b0 100644
--- a/pages/merlin7/03-Slurm-General-Documentation/interactive-jobs.md
+++ b/pages/merlin7/03-Slurm-General-Documentation/interactive-jobs.md
@@ -8,6 +8,16 @@ sidebar: merlin7_sidebar
 permalink: /merlin7/interactive-jobs.html
 ---
 
+### The Merlin7 'interactive' partition
+
+On `merlin7`, it is recommended to always run interactive jobs on the `interactive` partition. This partition oversubscribes
+CPUs (up to 4 users can share the same CPU) and has the highest priority. In general, access to this
+partition should be quick, and it can be used as an extension of the login nodes.
+
+Other interactive partitions are available on the `gmerlin7` cluster; however, these are mainly intended for CPU access only.
+Since GPU resources are very expensive and limited, please do not submit interactive allocations that use GPUs on GPU
+nodes unless strongly justified.
+
 ## Running interactive jobs
 
 There are two different ways for running interactive jobs in Slurm. This is possible by using
@@ -40,7 +50,7 @@ Refer to ``man srun`` for exploring all possible options for that command.
[Show 'srun' example]: Running 'hostname' command on 3 nodes, using 2 cores (1 task/core) per node
-caubet_m@login001:~> srun --clusters=merlin7 --ntasks=6 --ntasks-per-node=2 --nodes=3 hostname
+caubet_m@login001:~> srun --clusters=merlin7 --partition=interactive --ntasks=6 --ntasks-per-node=2 --nodes=3 hostname
 cn001.merlin7.psi.ch
 cn001.merlin7.psi.ch
 cn002.merlin7.psi.ch
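For a quick check that the `interactive` partition responds as promptly as described above, a short single-task job can be submitted before starting larger interactive work. The following is only a sketch; the 5-minute time limit is an illustrative value, not a site default:

```bash
# Minimal single-task test on the interactive partition
srun --clusters=merlin7 --partition=interactive --ntasks=1 --time=00:05:00 hostname

# Inspect the partition configuration (time limits, oversubscription, node list)
scontrol --clusters=merlin7 show partition interactive
sinfo --clusters=merlin7 --partition=interactive
```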
@@ -64,17 +74,17 @@ a shell (`$SHELL`) at the end of the `salloc` command. In example:
 
 ```bash
 # Typical 'salloc' call 
-salloc --clusters=merlin7 -N 2 -n 2
+salloc --clusters=merlin7 --partition=interactive -N 2 -n 2
 
 # Custom 'salloc' call
 #   - $SHELL will open a local shell on the login node from where ``salloc`` is running
-salloc --clusters=merlin7 -N 2 -n 2 $SHELL
+salloc --clusters=merlin7 --partition=interactive -N 2 -n 2 $SHELL
 ```
 
 
[Show 'salloc' example]: Allocating 2 cores (1 task/core) in 2 nodes (1 core/node) - Default
-caubet_m@login001:~> salloc --clusters=merlin7 -N 2 -n 2
+caubet_m@login001:~> salloc --clusters=merlin7 --partition=interactive -N 2 -n 2
 salloc: Granted job allocation 161
 salloc: Nodes cn[001-002] are ready for job
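Since the partition is shared and oversubscribed, it helps to make allocations explicit about runtime and memory so that resources are released predictably. A sketch with illustrative values (the `--time` and `--mem-per-cpu` figures are assumptions, not documented defaults):

```bash
# Request 2 tasks on 2 nodes for at most 1 hour, with an explicit per-CPU memory limit
salloc --clusters=merlin7 --partition=interactive -N 2 -n 2 --time=01:00:00 --mem-per-cpu=2G

# Then, inside the shell opened by salloc:
srun hostname   # runs on the allocated nodes
exit            # releases the allocation
```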
 
@@ -91,7 +101,7 @@ salloc: Relinquishing job allocation 161
 
[Show 'salloc' example]: Allocating 2 cores (1 task/core) in 2 nodes (1 core/node) - $SHELL
-caubet_m@login001:~> salloc --clusters=merlin7 --ntasks=2 --nodes=2 $SHELL
+caubet_m@login001:~> salloc --clusters=merlin7 --partition=interactive --ntasks=2 --nodes=2 $SHELL
 salloc: Granted job allocation 165
 salloc: Nodes cn[001-002] are ready for job
 caubet_m@login001:~> srun hostname
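Inside an `salloc` session (with or without the trailing `$SHELL`), Slurm exports environment variables describing the allocation, which is a convenient way to confirm which nodes were actually granted. A small sketch:

```bash
# Job ID and node list of the current allocation
echo $SLURM_JOB_ID
echo $SLURM_JOB_NODELIST

# srun launched here runs on the allocated nodes, not on the login node
srun hostname
```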
@@ -126,7 +136,7 @@ Merlin6 and merlin7 clusters allow running any windows based applications. For t
 add the option ``--x11`` to the ``srun`` command. In example:
 
 ```bash
-srun --clusters=merlin7 --x11 sview
+srun --clusters=merlin7 --partition=interactive --x11 sview
 ```
 
 will popup a X11 based slurm view of the cluster.
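For `--x11` to work, the SSH connection to the login node itself must have X11 forwarding enabled, and an X server must be running on the local machine. A minimal sketch; the fully qualified hostname below is an assumption based on the `login001` prompt shown in the examples:

```bash
# Connect to the login node with X11 forwarding enabled
ssh -X $USER@login001.merlin7.psi.ch   # hostname assumed from the 'login001' prompt; adjust if needed

# DISPLAY should be set after login; --x11 forwards it to the allocated node
echo $DISPLAY
```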
@@ -136,17 +146,17 @@ to add the option ``--pty`` to the ``srun --x11`` command. Once resource is allo
 there you can interactively run X11 and non-X11 based commands.
 
 ```bash
-srun --clusters=merlin7 --x11 --pty bash
+srun --clusters=merlin7 --partition=interactive --x11 --pty bash
 ```
 
 
[Show 'srun' with X11 support examples]
-caubet_m@login001:~> srun --clusters=merlin7 --x11 sview
+caubet_m@login001:~> srun --clusters=merlin7 --partition=interactive --x11 sview
 
 caubet_m@login001:~> 
 
-caubet_m@login001:~> srun --clusters=merlin7 --x11 --pty bash
+caubet_m@login001:~> srun --clusters=merlin7 --partition=interactive --x11 --pty bash
 
 caubet_m@cn003:~> sview
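Once the `--pty` shell is open on the compute node, the forwarded display can be verified before launching a heavier GUI application. A short sketch; the availability of small test clients such as `xclock` depends on the X11 packages installed on the node:

```bash
# Inside the interactive shell on the compute node
echo $DISPLAY   # should point to the forwarded display
xclock          # simple test client, if installed
```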
 
@@ -164,28 +174,28 @@ exit
 add the option ``--x11`` to the ``salloc`` command. In example:
 
 ```bash
-salloc --clusters=merlin7 --x11 sview
+salloc --clusters=merlin7 --partition=interactive --x11 sview
 ```
 
 will popup a X11 based slurm view of the cluster.
 
 In the same manner, you can create a bash shell with x11 support. For doing that, you need
-to add to run just ``salloc --clusters=merlin7 --x11``. Once resource is allocated, from 
+to just run ``salloc --clusters=merlin7 --partition=interactive --x11``. Once the resource is allocated, from 
 there you can interactively run X11 and non-X11 based commands.
 
 ```bash
-salloc --clusters=merlin7 --x11
+salloc --clusters=merlin7 --partition=interactive --x11
 ```
 
 
[Show 'salloc' with X11 support examples]
-caubet_m@login001:~> salloc --clusters=merlin7 --x11 sview
+caubet_m@login001:~> salloc --clusters=merlin7 --partition=interactive --x11 sview
 salloc: Granted job allocation 174
 salloc: Nodes cn001 are ready for job
 salloc: Relinquishing job allocation 174
 
-caubet_m@login001:~> salloc --clusters=merlin7 --x11 
+caubet_m@login001:~> salloc --clusters=merlin7 --partition=interactive --x11 
 salloc: Granted job allocation 175
 salloc: Nodes cn001 are ready for job
 caubet_m@cn001:~>
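Interactive allocations keep nodes reserved until they are released, so they should be ended as soon as the work is done, either with `exit` (as in the examples above) or by cancelling the job explicitly. A short sketch using standard Slurm commands; the job ID `175` is simply the one from the example above:

```bash
# List your jobs on the merlin7 cluster
squeue --clusters=merlin7 --user=$USER

# Cancel a lingering interactive allocation by job ID
scancel --clusters=merlin7 175
```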