---
title: CP2K
keywords: CP2K software, compile
summary: "CP2K is a quantum chemistry and solid state physics software package"
sidebar: merlin7_sidebar
toc: false
permalink: /merlin7/cp2k.html
---
# CP2K
CP2K is a quantum chemistry and solid state physics software package that can perform atomistic simulations of solid state, liquid, molecular, periodic, material, crystal, and biological systems.
CP2K provides a general framework for different modeling methods, such as DFT using the mixed Gaussian and plane waves approaches (GPW and GAPW). Supported theory levels include DFTB, LDA, GGA, MP2, RPA, semi-empirical methods (AM1, PM3, PM6, RM1, MNDO, …), and classical force fields (AMBER, CHARMM, …). CP2K can perform simulations of molecular dynamics, metadynamics, Monte Carlo, Ehrenfest dynamics, vibrational analysis, core level spectroscopy, energy minimization, and transition state optimization using the NEB or dimer method.
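The job scripts further below expect a prepared CP2K input file. As a purely illustrative reference (not part of the Merlin7 installation), a minimal GPW/DFT input for a single water molecule could look as follows; the file name `h2o.inp`, the PBE functional, basis sets, potentials, and cell size are placeholder choices to adapt to your own system.

```bash
# Illustrative only: write a minimal CP2K GPW input (PBE, single water
# molecule in a 10 Å box) to h2o.inp. Adjust basis sets, potentials and
# the cell to your actual system.
cat > h2o.inp << 'EOF'
&GLOBAL
  PROJECT h2o
  RUN_TYPE ENERGY
  PRINT_LEVEL LOW
&END GLOBAL
&FORCE_EVAL
  METHOD Quickstep
  &DFT
    BASIS_SET_FILE_NAME BASIS_MOLOPT
    POTENTIAL_FILE_NAME GTH_POTENTIALS
    &XC
      &XC_FUNCTIONAL PBE
      &END XC_FUNCTIONAL
    &END XC
  &END DFT
  &SUBSYS
    &CELL
      ABC 10.0 10.0 10.0
    &END CELL
    &COORD
      O   0.000   0.000   0.000
      H   0.757   0.586   0.000
      H  -0.757   0.586   0.000
    &END COORD
    &KIND O
      BASIS_SET DZVP-MOLOPT-SR-GTH
      POTENTIAL GTH-PBE-q6
    &END KIND
    &KIND H
      BASIS_SET DZVP-MOLOPT-SR-GTH
      POTENTIAL GTH-PBE-q1
    &END KIND
  &END SUBSYS
&END FORCE_EVAL
EOF
```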
## Licensing Terms and Conditions
CP2K is a joint effort, with contributions from developers around the world: users agree to acknowledge the use of CP2K in any reports or publications of results obtained with the Software (see the CP2K homepage for details).
## How to run on Merlin7
### CPU nodes

```bash
module use unstable Spack
module load gcc/12.3 openmpi/5.0.8-hgej cp2k/2025.2-yb6g-omp
```
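A quick, optional sanity check after loading the modules (assuming an interactive shell): the MPI/OpenMP binary `cp2k.psmp` should now be on the `PATH` and report its version.

```bash
# Optional check that the module environment is usable
which cp2k.psmp
cp2k.psmp --version
```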
### A100 nodes

```bash
module use unstable Spack
module load gcc/12.3 openmpi/5.0.8-r5lz-A100-gpu cp2k/2025.2-hkub-A100-gpu-omp
```
### GH nodes

```bash
module use unstable Spack
module load gcc/12.3 openmpi/5.0.8-tx2w-GH200-gpu cp2k/2025.2-xk4q-GH200-gpu-omp
```
### SBATCH CPU, 4 MPI ranks, 16 OMP threads

```bash
#!/bin/bash
#SBATCH --time=00:10:00          # maximum execution time of 10 minutes
#SBATCH --nodes=1                # request 1 compute node
#SBATCH --ntasks=4               # use 4 MPI ranks (tasks)
#SBATCH --partition=hourly
#SBATCH --cpus-per-task=16       # number of CPU cores per MPI task; adjust as needed
#SBATCH --output=_scheduler-stdout.txt
#SBATCH --error=_scheduler-stderr.txt

unset PMODULES_ENV
module purge
module use unstable Spack
module load gcc/12.3 openmpi/5.0.8-hgej cp2k/2025.2-yb6g-omp

export FI_CXI_RX_MATCH_MODE=software
export OMP_NUM_THREADS=$((SLURM_CPUS_PER_TASK - 1))

srun cp2k.psmp -i <CP2K_INPUT> -o <CP2K_OUTPUT>
```
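To submit and follow the job (the script name `cp2k_cpu.slurm` is just a placeholder for wherever you saved the script above):

```bash
# Submit the CPU job script and check its state in the queue
sbatch cp2k_cpu.slurm
squeue -u $USER
```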
### SBATCH A100, 4 GPU, 16 OMP threads, 4 MPI ranks

```bash
#!/bin/bash
#SBATCH --time=00:10:00          # maximum execution time of 10 minutes
#SBATCH --output=_scheduler-stdout.txt
#SBATCH --error=_scheduler-stderr.txt
#SBATCH --nodes=1                # number of A100 nodes
#SBATCH --ntasks-per-node=4      # 4 MPI ranks per node
#SBATCH --cpus-per-task=16       # 16 OMP threads per MPI rank
#SBATCH --cluster=gmerlin7
#SBATCH --hint=nomultithread
#SBATCH --partition=a100-hourly
#SBATCH --gpus=4

unset PMODULES_ENV
module purge
module use unstable Spack
module load gcc/12.3 openmpi/5.0.8-r5lz-A100-gpu cp2k/2025.2-hkub-A100-gpu-omp

export FI_CXI_RX_MATCH_MODE=software
export OMP_NUM_THREADS=$((SLURM_CPUS_PER_TASK - 1))

srun cp2k.psmp -i <CP2K_INPUT> -o <CP2K_OUTPUT>
```
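The GPU examples target the `gmerlin7` cluster (note the `--cluster` directive above), so queue queries also need the cluster flag; the script name is again a placeholder:

```bash
# Submit the A100 job and check it on the GPU cluster
sbatch cp2k_a100.slurm
squeue --clusters=gmerlin7 -u $USER
```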
### SBATCH GH, 2 GPU, 18 OMP threads, 2 MPI ranks

```bash
#!/bin/bash
#SBATCH --time=00:10:00          # maximum execution time of 10 minutes
#SBATCH --output=_scheduler-stdout.txt
#SBATCH --error=_scheduler-stderr.txt
#SBATCH --nodes=1                # number of GH200 nodes, each node having 4 CPU+GPU
#SBATCH --ntasks-per-node=2      # 2 MPI ranks per node
#SBATCH --cpus-per-task=18       # 18 OMP threads per MPI rank
#SBATCH --cluster=gmerlin7
#SBATCH --hint=nomultithread
#SBATCH --partition=gh-hourly
#SBATCH --gpus=2

unset PMODULES_ENV
module purge
module use unstable Spack
module load gcc/12.3 openmpi/5.0.8-tx2w-GH200-gpu cp2k/2025.2-xk4q-GH200-gpu-omp

export FI_CXI_RX_MATCH_MODE=software
export OMP_NUM_THREADS=$((SLURM_CPUS_PER_TASK - 1))

srun cp2k.psmp -i <CP2K_INPUT> -o <CP2K_OUTPUT>
```
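Before running production jobs it can be worth confirming that the requested GPUs are actually visible to a job step. A quick interactive check (cluster and partition names as used above, resource values chosen arbitrarily) could look like this:

```bash
# List the GPUs visible to a short test step on a GH200 node
srun --clusters=gmerlin7 --partition=gh-hourly --gpus=1 --time=00:05:00 nvidia-smi -L
```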
## Developing your own CPU code

```bash
module purge
module use Spack unstable
module load gcc/12.3 openmpi/5.0.8-hgej dbcsr/2.8.0-4yld-omp openblas/0.3.30-gye6-omp netlib-scalapack/2.2.2-2trj libxsmm/1.17-hwwi libxc/7.0.0-mibp libint/2.11.1-nxhl hdf5/1.14.6-tgzo fftw/3.3.10-t7bo-omp py-fypp/3.1-bteo sirius/7.8.0-uh3i-omp cmake/3.31.8-j47l ninja/1.12.1-afxy

git clone https://github.com/cp2k/cp2k.git
cd cp2k
mkdir build && cd build

CC=mpicc CXX=mpic++ FC=mpifort cmake -GNinja -DCMAKE_CUDA_HOST_COMPILER=mpicc -DCP2K_USE_LIBXC=ON -DCP2K_USE_LIBINT2=ON -DCP2K_USE_SIRIUS=ON -DCP2K_USE_SPLA=ON -DCP2K_USE_SPGLIB=ON -DCP2K_USE_HDF5=ON -DCP2K_USE_FFTW3=ON ..
ninja -j 16
```
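After a successful build, a quick smoke test of the resulting binary can catch missing libraries early; the CMake build typically places the executables under `bin/` inside the build directory, so adjust the path if your layout differs:

```bash
# Smoke test of the freshly built CP2K binary (path is an assumption)
./bin/cp2k.psmp --version
```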
## Developing your own GPU code

### A100

```bash
module purge
module use Spack unstable
module load gcc/12.3 openmpi/5.0.8-r5lz-A100-gpu dbcsr/2.8.0-3r22-A100-gpu-omp cosma/2.7.0-y2tr-gpu cuda/12.6.0-3y6a dftd4/3.7.0-4k4c-omp elpa/2025.01.002-bovg-A100-gpu-omp fftw/3.3.10-syba-omp hdf5/1.14.6-pcsd libint/2.11.1-3lxv libxc/7.0.0-u556 libxsmm/1.17-2azz netlib-scalapack/2.2.2-rmcf openblas/0.3.30-ynou-omp plumed/2.9.2-47hk py-fypp/3.1-z25p py-numpy/2.3.2-45ay python/3.13.5-qivs sirius/develop-qz4c-A100-gpu-omp spglib/2.5.0-jl5l-omp spla/1.6.1-hrgf-gpu cmake/3.31.8-j47l ninja/1.12.1-afxy

git clone https://github.com/cp2k/cp2k.git
cd cp2k
mkdir build && cd build

CC=mpicc CXX=mpic++ FC=mpifort cmake -GNinja -DCMAKE_CUDA_HOST_COMPILER=mpicc -DCP2K_USE_LIBXC=ON -DCP2K_USE_LIBINT2=ON -DCP2K_USE_SPGLIB=ON -DCP2K_USE_ELPA=ON -DCP2K_USE_SPLA=ON -DCP2K_USE_SIRIUS=ON -DCP2K_USE_PLUMED=ON -DCP2K_USE_DFTD4=ON -DCP2K_USE_COSMA=ON -DCP2K_USE_ACCEL=CUDA -DCMAKE_CUDA_ARCHITECTURES=80 -DCP2K_USE_FFTW3=ON ..
ninja -j 16
```
### GH200

```bash
salloc --partition=gh-daily --clusters=gmerlin7 --time=08:00:00 --ntasks=4 --nodes=1 --gpus=1 --mem=40000 $SHELL
ssh <allocated_gpu>

module purge
module use Spack unstable
module load gcc/12.3 openmpi/5.0.8-tx2w-GH200-gpu dbcsr/2.8.0-h3bo-GH200-gpu-omp cosma/2.7.0-dc23-gpu cuda/12.6.0-wak5 dftd4/3.7.0-aa6l-omp elpa/2025.01.002-nybd-GH200-gpu-omp fftw/3.3.10-alp3-omp hdf5/1.14.6-qjob libint/2.11.1-dpqq libxc/7.0.0-ojgl netlib-scalapack/2.2.2-cj5m openblas/0.3.30-rv46-omp plumed/2.9.2-nbay py-fypp/3.1-j4yw py-numpy/2.3.2-yoqr python/3.13.5-xbg5 sirius/develop-v5tb-GH200-gpu-omp spglib/2.5.0-da2i-omp spla/1.6.1-uepy-gpu cmake/3.31.8-2jne ninja/1.13.0-xn4a

git clone https://github.com/cp2k/cp2k.git
cd cp2k
mkdir build && cd build

CC=mpicc CXX=mpic++ FC=mpifort cmake -GNinja -DCMAKE_CUDA_HOST_COMPILER=mpicc -DCP2K_USE_LIBXC=ON -DCP2K_USE_LIBINT2=ON -DCP2K_USE_SPGLIB=ON -DCP2K_USE_ELPA=ON -DCP2K_USE_SPLA=ON -DCP2K_USE_SIRIUS=ON -DCP2K_USE_PLUMED=ON -DCP2K_USE_DFTD4=ON -DCP2K_USE_COSMA=ON -DCP2K_USE_ACCEL=CUDA -DCMAKE_CUDA_ARCHITECTURES=90 -DCP2K_USE_FFTW3=ON -DCP2K_USE_HDF5=ON ..
ninja -j 16
```
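Since this build runs on the allocated GH200 node, the new binary can be checked directly there before submitting real jobs. A minimal sketch, assuming the `bin/` layout of the CMake build above; the version banner should list the enabled features, which ought to include the CUDA/offload options:

```bash
# On the allocated GH200 node: confirm the binary runs and the GPU is visible
./bin/cp2k.psmp --version
nvidia-smi -L
```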