ADD: cpu sbatch script for Gromacs
All checks were successful
Build and Deploy Documentation / build-and-deploy (push) Successful in 6s
All checks were successful
Build and Deploy Documentation / build-and-deploy (push) Successful in 6s
This commit is contained in:
@@ -33,6 +33,30 @@ module load gcc/12.3 openmpi/5.0.7-3vzj-A100-gpu gromacs/2025.2-vbj4-A100-gpu-om
module use Spack unstable

module load gcc/12.3 openmpi/5.0.7-blxc-GH200-gpu gromacs/2025.2-cjnq-GH200-gpu-omp
```

### SBATCH CPU, 4 MPI ranks, 16 OMP threads

```bash
#!/bin/bash
# CPU sbatch recipe for GROMACS: 4 MPI ranks x 16 OpenMP threads on one node.
#SBATCH --time=00:10:00                  # maximum execution time of 10 minutes
#SBATCH --nodes=1                        # requesting 1 compute node
#SBATCH --ntasks=4                       # use 4 MPI ranks (tasks)
#SBATCH --partition=hourly
#SBATCH --cpus-per-task=16               # modify this number of CPU cores per MPI task
#SBATCH --output=_scheduler-stdout.txt
#SBATCH --error=_scheduler-stderr.txt

unset PMODULES_ENV
module purge
module use Spack unstable
# NOTE(review): this is the CPU recipe, yet the MPI module is the A100-gpu
# build (openmpi/5.0.7-ax23-A100-gpu) — confirm this is the intended toolchain.
module load gcc/12.3 openmpi/5.0.7-ax23-A100-gpu gromacs/2025.2-whcq-omp

export FI_CXI_RX_MATCH_MODE=software

# Add your input (tpr) file in the command below.
# grompp is a serial preprocessing step: pin it to a single task. A bare
# `srun` here would start it once per MPI rank (4 concurrent copies), all
# racing to write the same step6.0_minimization.tpr output file.
srun --ntasks=1 gmx_mpi grompp -f step6.0_minimization.mdp -o step6.0_minimization.tpr -c step5_input.gro -r step5_input.gro -p topol.top -n index.ndx
# mdrun uses all 4 ranks; one OpenMP thread per allocated core, pinned.
srun gmx_mpi mdrun -s step6.0_minimization.tpr -pin on -ntomp "${SLURM_CPUS_PER_TASK}"
```
### SBATCH A100, 4 GPU, 16 OMP threads, 4 MPI ranks

```bash
#!/bin/bash
|
||||
Reference in New Issue
Block a user