Gromacs 2022.6 with GPU support

Webpage

http://www.gromacs.org/

Version

2022.6

Build Environment

  • GCC 11.2.1 (gcc-toolset-11)
  • HPC-X 2.11 (Open MPI 4.1.4)
  • CUDA 12.0

Files Required

  • gromacs-2022.6.tar.gz
  • regressiontests-2022.6.tar.gz
  • fftw-3.3.8.tar.gz
    • (the computation nodes cannot access the download site, so the tarballs must be fetched in advance; see the sketch below)
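
The tarballs are fetched in advance on the login (frontend) node, which has outbound network access. A minimal sketch, assuming the usual GROMACS and FFTW download locations (the URLs below are not taken from this page and should be verified):

# run on a login node; BASEDIR matches the build script below
BASEDIR=/home/users/${USER}/Software/Gromacs/2022.6
mkdir -p ${BASEDIR} && cd ${BASEDIR}
wget https://ftp.gromacs.org/gromacs/gromacs-2022.6.tar.gz
wget https://ftp.gromacs.org/regressiontests/regressiontests-2022.6.tar.gz
wget http://www.fftw.org/fftw-3.3.8.tar.gz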

Build Procedure

An interactive job with 24 cores and 2 GPUs (2 chunks of 12 cores and 1 GPU each) is used for the build.

Interactive job script

#!/bin/sh
#PBS -l select=2:ncpus=12:mpiprocs=12:ompthreads=1:ngpus=1
#PBS -l walltime=03:00:00
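
The resource request above can also be given directly on the command line when starting the interactive session. A minimal sketch, assuming a standard PBS Professional frontend where qsub -I opens an interactive job:

qsub -I -l select=2:ncpus=12:mpiprocs=12:ompthreads=1:ngpus=1 \
     -l walltime=03:00:00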

Gromacs

#!/bin/sh

VERSION=2022.6
INSTALL_PREFIX=/apl/gromacs/${VERSION}-CUDA

BASEDIR=/home/users/${USER}/Software/Gromacs/${VERSION}/
GROMACS_TARBALL=${BASEDIR}/gromacs-${VERSION}.tar.gz
REGRESSION_TARBALL=${BASEDIR}/regressiontests-${VERSION}.tar.gz
WORKDIR=/gwork/users/${USER}
REGRESSION_PATH=${WORKDIR}/regressiontests-${VERSION}

PARALLEL=12
export LANG=C

FFTW_VER=3.3.8
FFTW_PATH=${BASEDIR}/fftw-${FFTW_VER}.tar.gz

#---------------------------------------------------------------------
umask 0022

module purge
module load gcc-toolset/11
#module load mpi/intelmpi/2019.8.254
module load openmpi/4.1.4-hpcx/gcc11
module load cuda/12.0

export CUDA_VISIBLE_DEVICES=0,1
unset OMP_NUM_THREADS

cd ${WORKDIR}
if [ -d gromacs-${VERSION} ]; then
  mv gromacs-${VERSION} gromacs_erase
  rm -rf gromacs_erase &
fi

if [ -d regressiontests-${VERSION} ]; then
  mv regressiontests-${VERSION} regressiontests_erase
  rm -rf regressiontests_erase &
fi

tar xzf ${GROMACS_TARBALL}
tar xzf ${REGRESSION_TARBALL}
cd gromacs-${VERSION}

# single precision, no MPI
mkdir rccs-s
cd rccs-s
cmake .. \
   -DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} \
   -DCMAKE_VERBOSE_MAKEFILE=ON \
   -DCMAKE_C_COMPILER=gcc \
   -DCMAKE_CXX_COMPILER=g++ \
   -DGMX_MPI=OFF \
   -DGMX_GPU=CUDA \
   -DGMX_DOUBLE=OFF \
   -DGMX_THREAD_MPI=ON \
   -DGMX_BUILD_OWN_FFTW=ON \
   -DGMX_BUILD_OWN_FFTW_URL=${FFTW_PATH} \
   -DREGRESSIONTEST_DOWNLOAD=OFF \
   -DREGRESSIONTEST_PATH=${REGRESSION_PATH}
make -j${PARALLEL} && make check && make install
cd ..

# single precision, with MPI
mkdir rccs-mpi-s
cd rccs-mpi-s
cmake .. \
   -DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} \
   -DCMAKE_VERBOSE_MAKEFILE=ON \
   -DCMAKE_C_COMPILER=mpicc \
   -DCMAKE_CXX_COMPILER=mpicxx \
   -DGMX_MPI=ON \
   -DGMX_GPU=CUDA \
   -DGMX_DOUBLE=OFF \
   -DGMX_THREAD_MPI=OFF \
   -DGMX_BUILD_OWN_FFTW=ON \
   -DGMX_BUILD_OWN_FFTW_URL=${FFTW_PATH} \
   -DREGRESSIONTEST_DOWNLOAD=OFF \
   -DREGRESSIONTEST_PATH=${REGRESSION_PATH}
make -j${PARALLEL} && make check && make install
cd ..
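
After installation, the thread-MPI build provides gmx and the MPI build provides gmx_mpi under ${INSTALL_PREFIX}/bin. A minimal usage sketch; the input name, thread counts, and process count are placeholders, and the site may provide a module instead of sourcing GMXRC directly:

# set up the environment of the installed version
source /apl/gromacs/2022.6-CUDA/bin/GMXRC

# single-node run with the thread-MPI binary, offloading nonbonded work to the GPU
gmx mdrun -ntmpi 2 -ntomp 6 -nb gpu -deffnm topol

# multi-node run with the MPI binary (launcher options depend on the system)
mpirun -np 24 gmx_mpi mdrun -nb gpu -deffnm topol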

Notes

  • unset OMP_NUM_THREADS is necessary to avoid an error in MdrunFEPTests; the check can be repeated as sketched below.
    • (A mismatch between the environment variable and the actual number of threads is considered a fatal error, not just a critical one.)
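
If MdrunFEPTests fails during make check because OMP_NUM_THREADS was exported, the variable can be unset and the affected test repeated. A minimal sketch, assuming the test is registered under that name with CTest:

# from the build directory (rccs-s or rccs-mpi-s)
unset OMP_NUM_THREADS
ctest -R MdrunFEPTests --output-on-failure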