FIX: runtime documentation for GROMACS
@@ -33,22 +33,51 @@ module load gcc/12.3 openmpi/5.0.7-3vzj-A100-gpu gromacs/2025.2-vbj4-A100-gpu-om
module use Spack unstable
module load gcc/12.3 openmpi/5.0.7-blxc-GH200-gpu gromacs/2025.2-cjnq-GH200-gpu-omp
```
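
A quick way to confirm that the intended GROMACS build is active before submitting anything (a minimal check; on some systems the MPI binary prints an MPI warning when run outside `srun`):

```bash
# List the loaded modules and print the GROMACS version banner
module list
gmx_mpi --version
```
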
### SBATCH A100, 4 GPU, 16 OMP threads, 4 MPI ranks

```bash
#!/bin/bash
#SBATCH --get-user-env
#SBATCH --time=00:10:00 # maximum execution time of 10 minutes
#SBATCH --output=_scheduler-stdout.txt
#SBATCH --error=_scheduler-stderr.txt
#SBATCH --job-name="Testing GROMACS A100"
#SBATCH --nodes=1 # single node; all 4 GPUs below are requested on this node
#SBATCH --ntasks-per-node=4 # 4 MPI ranks per node
#SBATCH --cpus-per-task=16 # 16 OMP threads per MPI rank
#SBATCH --cluster=gmerlin7
#SBATCH --hint=nomultithread
#SBATCH --partition=a100-hourly
#SBATCH --gpus=4
#SBATCH --gpus-per-task=1
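
# Resource layout requested above: 4 MPI ranks x 16 OpenMP threads = 64 CPU cores
# on a single node, with one GPU bound to each rank (--gpus-per-task=1).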
unset PMODULES_ENV
module purge
module use Spack unstable
module load gcc/12.3 openmpi/5.0.7-3vzj-A100-gpu gromacs/2025.2-vbj4-A100-gpu-omp

export FI_CXI_RX_MATCH_MODE=software

export GMX_GPU_DD_COMMS=true
export GMX_GPU_PME_PP_COMMS=true
export GMX_FORCE_UPDATE_DEFAULT_GPU=true
export GMX_ENABLE_DIRECT_GPU_COMM=1
export GMX_FORCE_GPU_AWARE_MPI=1

# Add your input (tpr) file in the command below
srun gmx_mpi grompp -f step6.0_minimization.mdp -o step6.0_minimization.tpr -c step5_input.gro -r step5_input.gro -p topol.top -n index.ndx
srun gmx_mpi mdrun -s step6.0_minimization.tpr -ntomp ${SLURM_CPUS_PER_TASK}
```
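
Saved to a file, the script above is submitted with `sbatch`; the file name below is only a placeholder:

```bash
# Submit the A100 job script and check its state on the gmerlin7 cluster
sbatch submit_gromacs_a100.sh
squeue --clusters=gmerlin7 --me
```
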
### SBATCH GH, 2 GPU, 18 OMP threads, 2 MPI ranks

```bash
#!/bin/bash
#SBATCH --time=00:10:00 # maximum execution time of 10 minutes
#SBATCH --output=_scheduler-stdout.txt
#SBATCH --error=_scheduler-stderr.txt
#SBATCH --nodes=1 # number of GH200 nodes with each node having 4 CPU+GPU
#SBATCH --ntasks-per-node=2 # 2 MPI ranks per node
#SBATCH --cpus-per-task=18 # 18 OMP threads per MPI rank
#SBATCH --cluster=gmerlin7
#SBATCH --hint=nomultithread
#SBATCH --partition=gh-hourly
#SBATCH --gpus=2

unset PMODULES_ENV
module purge
@@ -63,7 +92,9 @@ export GMX_FORCE_UPDATE_DEFAULT_GPU=true
export GMX_ENABLE_DIRECT_GPU_COMM=1
export GMX_FORCE_GPU_AWARE_MPI=1

# Add your input (tpr) file in the command below
srun gmx_mpi grompp -f step6.0_minimization.mdp -o step6.0_minimization.tpr -c step5_input.gro -r step5_input.gro -p topol.top -n index.ndx
srun gmx_mpi mdrun -s step6.0_minimization.tpr -ntomp ${SLURM_CPUS_PER_TASK}
```
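
The grompp/mdrun pair above is a short minimization smoke test. For production runs, work is usually offloaded to the GPU explicitly; the sketch below combines the standard mdrun offload flags used elsewhere in this documentation and assumes your own `input.tpr`, so tune `-nsteps`, `-npme` and `-nstlist` for your system:

```bash
# Offload non-bonded, bonded, PME and coordinate updates to the GPU, pin threads,
# and enable dynamic load balancing; input.tpr and the step counts are placeholders.
srun gmx_mpi mdrun -s input.tpr -ntomp ${SLURM_CPUS_PER_TASK} \
     -nb gpu -bonded gpu -pme gpu -update gpu -npme 1 \
     -pin on -dlb yes -nstlist 300 -nsteps 10000 -noconfout -v
```
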
## Developing your own GPU code
@@ -74,7 +105,7 @@ module use Spack unstable
module load gcc/12.3 openmpi/5.0.7-3vzj-A100-gpu gromacs/2025.2-vbj4-A100-gpu-omp cmake/3.31.6-o3lb python/3.13.1-cyro

git clone https://github.com/gromacs/gromacs.git
cd gromacs

mkdir build && cd build
cmake -DCMAKE_C_COMPILER=gcc-12 \
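
The cmake command above is cut off by the diff and is not reproduced here. As orientation only, the options that typically complete an MPI- and CUDA-enabled GROMACS configuration are shown below; this is a generic upstream example, not the site-specific command, and the install prefix is a placeholder:

```bash
# Generic MPI + CUDA configuration for GROMACS, built out-of-source in ./build
cmake .. -DGMX_MPI=ON -DGMX_GPU=CUDA -DCMAKE_INSTALL_PREFIX=$HOME/opt/gromacs
make -j 16
make install
```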