diff --git a/pages/merlin7/05-Software-Support/cp2k.md b/pages/merlin7/05-Software-Support/cp2k.md
index 5a72fa9..e4a1103 100644
--- a/pages/merlin7/05-Software-Support/cp2k.md
+++ b/pages/merlin7/05-Software-Support/cp2k.md
@@ -28,6 +28,11 @@ module load gcc/12.3 openmpi/5.0.8-hgej cp2k/2025.2-yb6g-omp
 module use unstable Spack
 module load gcc/12.3 openmpi/5.0.8-5tb3-A100-gpu cp2k/2025.2-osvk-A100-gpu-omp
 ```
+### GH nodes
+```bash
+module use unstable Spack
+module load gcc/12.3 openmpi/5.0.8-v4cd-GH200-gpu cp2k/2025.2-vvak-GH200-gpu-omp
+```
 
 ### SBATCH CPU, 4 MPI ranks, 16 OMP threads
 ```bash
@@ -76,3 +81,75 @@ export OMP_NUM_THREADS=$((SLURM_CPUS_PER_TASK - 1))
 srun cp2k.psmp -i <input> -o <output>
 ```
 
+### SBATCH GH, 2 GPU, 18 OMP threads, 2 MPI ranks
+```bash
+#!/bin/bash
+#SBATCH --time=00:10:00 # maximum execution time of 10 minutes
+#SBATCH --output=_scheduler-stdout.txt
+#SBATCH --error=_scheduler-stderr.txt
+#SBATCH --nodes=1 # number of GH200 nodes (each node has 4 CPU+GPU superchips)
+#SBATCH --ntasks-per-node=2 # 2 MPI ranks per node
+#SBATCH --cpus-per-task=18 # 18 OMP threads per MPI rank
+#SBATCH --cluster=gmerlin7
+#SBATCH --hint=nomultithread
+#SBATCH --partition=gh-hourly
+#SBATCH --gpus=2
+
+unset PMODULES_ENV
+module purge
+module use unstable Spack
+module load gcc/12.3 openmpi/5.0.8-v4cd-GH200-gpu cp2k/2025.2-vvak-GH200-gpu-omp
+
+export FI_CXI_RX_MATCH_MODE=software
+export OMP_NUM_THREADS=$((SLURM_CPUS_PER_TASK - 1))
+
+srun cp2k.psmp -i <input> -o <output>
+```
+
+## Developing your own CPU code
+[![Pipeline](https://gitea.psi.ch/HPCE/spack-psi/actions/workflows/cp2k_cpu_merlin7.yml/badge.svg?branch=main)](https://gitea.psi.ch/HPCE/spack-psi)
+```bash
+module purge
+module use Spack unstable
+module load gcc/12.3 openmpi/5.0.8-hgej dbcsr/2.8.0-4yld-omp openblas/0.3.30-gye6-omp netlib-scalapack/2.2.2-2trj libxsmm/1.17-hwwi libxc/7.0.0-mibp libint/2.11.1-nxhl hdf5/1.14.6-tgzo fftw/3.3.10-t7bo-omp py-fypp/3.1-bteo sirius/7.8.0-uh3i-omp cmake/3.31.6-2ajs ninja/1.12.1-afxy
+
+git clone https://github.com/cp2k/cp2k.git
+cd cp2k
+
+mkdir build && cd build
+CC=mpicc CXX=mpic++ FC=mpifort cmake -GNinja -DCMAKE_CUDA_HOST_COMPILER=mpicc -DCP2K_USE_LIBXC=ON -DCP2K_USE_LIBINT2=ON -DCP2K_USE_SIRIUS=ON -DCP2K_USE_SPLA=ON -DCP2K_USE_SPGLIB=ON -DCP2K_USE_HDF5=ON -DCP2K_USE_FFTW3=ON ..
+ninja -j 16
+
+```
+
+## Developing your own GPU code
+#### A100
+[![Pipeline](https://gitea.psi.ch/HPCE/spack-psi/actions/workflows/cp2k_gpu_merlin7.yml/badge.svg?branch=main)](https://gitea.psi.ch/HPCE/spack-psi)
+```bash
+module purge
+module use Spack unstable
+module load gcc/12.3 openmpi/5.0.8-5tb3-A100-gpu dbcsr/2.8.0-xcn2-A100-gpu-omp fftw/3.3.10-v4mq-omp libint/2.11.1-3lxv libxc/7.0.0-u556 netlib-scalapack/2.2.2-enjp openblas/0.3.30-ynou-omp py-fypp/3.1-z25p py-numpy/2.3.2-45ay python/3.13.5-qivs cmake/3.31.6-2ajs ninja/1.12.1-afxy
+
+git clone https://github.com/cp2k/cp2k.git
+cd cp2k
+
+mkdir build && cd build
+CC=mpicc CXX=mpic++ FC=mpifort cmake -GNinja -DCMAKE_CUDA_HOST_COMPILER=mpicc -DCP2K_USE_LIBXC=ON -DCP2K_USE_LIBINT2=ON -DCP2K_USE_ACCEL=CUDA -DCMAKE_CUDA_ARCHITECTURES=80 -DCP2K_USE_FFTW3=ON ..
+ninja -j 16
+
+```
+#### GH200
+[![Pipeline](https://gitea.psi.ch/HPCE/spack-psi/actions/workflows/cp2k_gh_merlin7.yml/badge.svg?branch=main)](https://gitea.psi.ch/HPCE/spack-psi)
+```bash
+module purge
+module use Spack unstable
+module load gcc/12.3 openmpi/5.0.8-v4cd-GH200-gpu dbcsr/2.8.0-lzj6-GH200-gpu-omp fftw/3.3.10-ajlu-omp hdf5/1.14.6-62wt libint/2.11.1-dpqq libxc/7.0.0-ojgl netlib-scalapack/2.2.2-k7uz openblas/0.3.30-rv46-omp py-fypp/3.1-j4yw py-numpy/2.3.2-yoqr python/3.13.5-xbg5 cmake/3.31.8-2jne ninja/1.13.0-xn4a
+
+git clone https://github.com/cp2k/cp2k.git
+cd cp2k
+
+mkdir build && cd build
+CC=mpicc CXX=mpic++ FC=mpifort cmake -GNinja -DCMAKE_CUDA_HOST_COMPILER=mpicc -DCP2K_USE_LIBXC=ON -DCP2K_USE_LIBINT2=ON -DCP2K_USE_ACCEL=CUDA -DCMAKE_CUDA_ARCHITECTURES=90 ..
+ninja -j 16
+
+```
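
Assuming one of the SBATCH examples above is saved to a script file (the name `cp2k_gh200.sbatch` below is only a placeholder), it can be submitted and monitored as in this minimal sketch:

```bash
# Submit the GH200 example script (placeholder filename); the #SBATCH directives
# inside the script already select the gmerlin7 cluster and the gh-hourly partition.
sbatch cp2k_gh200.sbatch

# Check the state of your jobs on the GPU cluster
squeue --clusters=gmerlin7 --user=$USER
```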