first stab at mkdocs migration

docs/merlin7/05-Software-Support/gromacs.md

---
title: GROMACS
keywords: GROMACS software, compile
summary: "GROMACS (GROningen Machine for Chemical Simulations) is a versatile and widely-used open source package to perform molecular dynamics"
sidebar: merlin7_sidebar
toc: false
permalink: /merlin7/gromacs.html
---
## GROMACS

GROMACS (GROningen Machine for Chemical Simulations) is a versatile and widely-used open source package to perform molecular dynamics, i.e. to simulate the Newtonian equations of motion for systems with hundreds to millions of particles.

It is primarily designed for biochemical molecules like proteins, lipids and nucleic acids that have a lot of complicated bonded interactions. But since GROMACS is extremely fast at calculating the nonbonded interactions (which usually dominate simulations), many groups also use it for research on non-biological systems, e.g. polymers.

## Licensing Terms and Conditions

GROMACS is a joint effort, with contributions from developers around the world: users agree to acknowledge use of GROMACS in any reports or publications of results obtained with the Software (see the GROMACS homepage for details).

## How to run on Merlin7

### 2025.2

#### CPU nodes

```bash
module use Spack unstable
module load gcc/12.3 openmpi/5.0.7-ax23-A100-gpu gromacs/2025.2-whcq-omp
```

#### A100 nodes

```bash
module use Spack unstable
module load gcc/12.3 openmpi/5.0.7-3vzj-A100-gpu gromacs/2025.2-vbj4-A100-gpu-omp
```

#### GH nodes

```bash
module use Spack unstable
module load gcc/12.3 openmpi/5.0.7-blxc-GH200-gpu gromacs/2025.2-cjnq-GH200-gpu-omp
```

### 2025.3

#### CPU nodes

```bash
module use Spack unstable
module load gcc/12.3 openmpi/5.0.9-n4yf-A100-gpu gromacs/2025.3-6ken-omp
```

#### A100 nodes

```bash
module use Spack unstable
module load gcc/12.3 openmpi/5.0.9-xqhy-A100-gpu gromacs/2025.3-ohlj-A100-gpu-omp
```

#### GH nodes

```bash
module use Spack unstable
module load gcc/12.3 openmpi/5.0.9-inxi-GH200-gpu gromacs/2025.3-yqlu-GH200-gpu-omp
```
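
The short hashes in the module names (for example `whcq`, `vbj4`) come from Spack and can change when the software stacks are rebuilt. If a `module load` line fails, checking what is currently available usually resolves it; a minimal sketch:

```bash
# List the GROMACS builds currently provided through Spack
module use Spack unstable
module avail gromacs

# Confirm which build is active and how it was configured
gmx_mpi --version
```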

### SBATCH CPU, 4 MPI ranks, 16 OMP threads

```bash
#!/bin/bash
#SBATCH --time=00:10:00                 # maximum execution time of 10 minutes
#SBATCH --nodes=1                       # request 1 compute node
#SBATCH --ntasks=4                      # use 4 MPI ranks (tasks)
#SBATCH --partition=hourly
#SBATCH --cpus-per-task=16              # number of CPU cores per MPI rank; adjust as needed
#SBATCH --output=_scheduler-stdout.txt
#SBATCH --error=_scheduler-stderr.txt

unset PMODULES_ENV
module purge
module use Spack unstable
module load gcc/12.3 openmpi/5.0.7-ax23-A100-gpu gromacs/2025.2-whcq-omp

export FI_CXI_RX_MATCH_MODE=software

# Add your input (tpr) file in the commands below.
# grompp is a serial preprocessing step, so run it on a single task.
srun -n 1 gmx_mpi grompp -f step6.0_minimization.mdp -o step6.0_minimization.tpr -c step5_input.gro -r step5_input.gro -p topol.top -n index.ndx
srun gmx_mpi mdrun -s step6.0_minimization.tpr -pin on -ntomp ${SLURM_CPUS_PER_TASK}
```
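
Assuming the script is saved as `gromacs_cpu.sh` (the name is arbitrary), submission and monitoring follow the usual Slurm pattern:

```bash
sbatch gromacs_cpu.sh            # submit to the hourly CPU partition
squeue -u $USER                  # check the job state
tail -f _scheduler-stdout.txt    # follow the job output once it starts
```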

### SBATCH A100, 4 GPU, 16 OMP threads, 4 MPI ranks

```bash
#!/bin/bash
#SBATCH --time=00:10:00                 # maximum execution time of 10 minutes
#SBATCH --output=_scheduler-stdout.txt
#SBATCH --error=_scheduler-stderr.txt
#SBATCH --nodes=1                       # request 1 A100 node
#SBATCH --ntasks-per-node=4             # 4 MPI ranks per node
#SBATCH --cpus-per-task=16              # 16 OMP threads per MPI rank
#SBATCH --cluster=gmerlin7
#SBATCH --hint=nomultithread
#SBATCH --partition=a100-hourly
#SBATCH --gpus=4

unset PMODULES_ENV
module purge
module use Spack unstable
module load gcc/12.3 openmpi/5.0.7-3vzj-A100-gpu gromacs/2025.2-vbj4-A100-gpu-omp

export FI_CXI_RX_MATCH_MODE=software

# Keep the simulation state on the GPU and use direct, GPU-aware MPI communication
export GMX_GPU_DD_COMMS=true
export GMX_GPU_PME_PP_COMMS=true
export GMX_FORCE_UPDATE_DEFAULT_GPU=true
export GMX_ENABLE_DIRECT_GPU_COMM=1
export GMX_FORCE_GPU_AWARE_MPI=1

# Add your input (tpr) file in the commands below.
# grompp is a serial preprocessing step, so run it on a single task.
srun -n 1 gmx_mpi grompp -f step6.0_minimization.mdp -o step6.0_minimization.tpr -c step5_input.gro -r step5_input.gro -p topol.top -n index.ndx
srun gmx_mpi mdrun -s step6.0_minimization.tpr -ntomp ${SLURM_CPUS_PER_TASK}
```
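
Note that the job is routed to the separate `gmerlin7` cluster, so Slurm queries need the matching cluster flag; for example (the script name is arbitrary):

```bash
sbatch gromacs_a100.sh           # submit to the GPU cluster
squeue -M gmerlin7 -u $USER      # query the GPU cluster explicitly
sacct -M gmerlin7 -j <jobid>     # accounting data after the run (fill in the job id)
```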

### SBATCH GH, 2 GPU, 18 OMP threads, 2 MPI ranks

```bash
#!/bin/bash
#SBATCH --time=00:10:00                 # maximum execution time of 10 minutes
#SBATCH --output=_scheduler-stdout.txt
#SBATCH --error=_scheduler-stderr.txt
#SBATCH --nodes=1                       # number of GH200 nodes, each with 4 CPU+GPU modules
#SBATCH --ntasks-per-node=2             # 2 MPI ranks per node
#SBATCH --cpus-per-task=18              # 18 OMP threads per MPI rank
#SBATCH --cluster=gmerlin7
#SBATCH --hint=nomultithread
#SBATCH --partition=gh-hourly
#SBATCH --gpus=2

unset PMODULES_ENV
module purge
module use Spack unstable
module load gcc/12.3 openmpi/5.0.7-blxc-GH200-gpu gromacs/2025.2-cjnq-GH200-gpu-omp

export FI_CXI_RX_MATCH_MODE=software

# Keep the simulation state on the GPU and use direct, GPU-aware MPI communication
export GMX_GPU_DD_COMMS=true
export GMX_GPU_PME_PP_COMMS=true
export GMX_FORCE_UPDATE_DEFAULT_GPU=true
export GMX_ENABLE_DIRECT_GPU_COMM=1
export GMX_FORCE_GPU_AWARE_MPI=1

# Add your input (tpr) file in the commands below.
# grompp is a serial preprocessing step, so run it on a single task.
srun -n 1 gmx_mpi grompp -f step6.0_minimization.mdp -o step6.0_minimization.tpr -c step5_input.gro -r step5_input.gro -p topol.top -n index.ndx
srun gmx_mpi mdrun -s step6.0_minimization.tpr -ntomp ${SLURM_CPUS_PER_TASK}
```
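
Since each GH200 node carries four CPU+GPU modules, the example above uses only half a node. A full-node variant (an untested sketch, keeping one MPI rank per GPU) only changes the resource requests:

```bash
#SBATCH --ntasks-per-node=4      # one MPI rank per CPU+GPU module
#SBATCH --gpus=4                 # all four GPUs of the node
```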

## Developing your own GPU code

### A100

```bash
module purge
module use Spack unstable
module load gcc/12.3 openmpi/5.0.7-3vzj-A100-gpu gromacs/2025.2-vbj4-A100-gpu-omp cmake/3.31.6-o3lb python/3.13.1-cyro

git clone https://github.com/gromacs/gromacs.git
cd gromacs

mkdir build && cd build
# Use -DGMX_CUDA_TARGET_SM=90 for the Hopper (GH200) GPUs.
# Turn on double precision (-DGMX_DOUBLE=on) only if your workflow really needs it.
cmake -DCMAKE_C_COMPILER=gcc-12 \
      -DCMAKE_CXX_COMPILER=g++-12 \
      -DGMX_MPI=on \
      -DGMX_GPU=CUDA \
      -DGMX_CUDA_TARGET_SM="80" \
      -DGMX_DOUBLE=off \
      ..

make
```
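
To actually use the freshly built binaries, one option (a sketch; the install prefix is an arbitrary choice) is to install into your home directory and source the environment script that GROMACS ships:

```bash
# From the build directory: re-run cmake with a user-writable prefix, then install
cmake -DCMAKE_INSTALL_PREFIX=$HOME/gromacs-custom ..
make install

# Make the custom gmx_mpi available in the current shell
source $HOME/gromacs-custom/bin/GMXRC
gmx_mpi --version
```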