DFTB+ 25.1 (MPI)

Webpage

https://dftbplus.org/

Build Environment

  • Intel oneAPI HPC Toolkit 2025.3.3.15 (icx/icpx, ifx)
  • Intel MKL 2025.3
  • Open MPI 4.1.8
  • CUDA 13.1 Update 1 

Files Required

  • elsi_interface-v2.12.0.tar.gz
  • plumed-src-2.10.0.tgz
  • arpack-ng 3.9.1 (pre-installed under /apl/arpack-ng/3.9.1)
  • libmbd-0.12.8 (source tree expected under ${HOME}/libmbd/0.12.8)
  • dftbplus-25.1.tar.xz

Build Procedure

ELSI

#!/bin/bash
# Build and install ELSI (ELectronic Structure Infrastructure) with the
# Intel oneAPI compilers, MKL (ScaLAPACK/BLACS), Open MPI and CUDA solvers.
# NOTE: bash is required — `set -o pipefail` is not guaranteed under POSIX sh.
set -eo pipefail

ELSI_VERSION=2.12.0
ELSI_DIRNAME=elsi_interface-v${ELSI_VERSION}
PARALLEL=16
COMPILER=intel2025.3.3
MPI=ompi
BASEDIR=${HOME}/Software/elsi/${ELSI_VERSION}
ELSI_TARBALL=${BASEDIR}/${ELSI_DIRNAME}.tar.gz
WORKDIR=/gwork/users/${USER}
INSTALLDIR=/apl/elsi/${ELSI_VERSION}/${COMPILER}/${MPI}
OPENMPI_MODULE=openmpi/4.1.8/intel2025
CUDA_ARCH=sm_80  # NVIDIA A30 (Ampere)
#-----------------------------------------------------------------------------
cd "${WORKDIR}" || { echo "ERROR: cannot cd to ${WORKDIR}" >&2; exit 1; }

module -s purge
if [ ! -f "${ELSI_TARBALL}" ]; then
   echo "ERROR: tarball not found: ${ELSI_TARBALL}" >&2
   exit 1
fi
# Unpack only once; reuse an existing source tree on reruns.
if [ ! -d "${ELSI_DIRNAME}" ]; then
   tar xzf "${ELSI_TARBALL}"
fi

cd "${ELSI_DIRNAME}" || { echo "ERROR: cannot cd to ${ELSI_DIRNAME}" >&2; exit 1; }
# Always configure from a clean build tree.
rm -rf build
mkdir -p build

cd build || { echo "ERROR: cannot cd to build" >&2; exit 1; }
module load mkl/2025.3
module load cuda/13.1u1

# setvars.sh is noisy and returns non-zero on re-entry; --force re-sources it.
source "${HOME}/intel/oneapi/setvars.sh" --force > /dev/null 2>&1
module load "${OPENMPI_MODULE}"

command -v ifx > /dev/null || { echo "ERROR: ifx not found. Intel oneAPI not installed?" >&2; exit 1; }

# Make the Open MPI wrappers use the Intel compilers underneath.
export OMPI_FC=ifx
export OMPI_CC=icx
export OMPI_CXX=icpx

cmake \
 -DCMAKE_TOOLCHAIN_FILE=../toolchains/intel_gpu.cmake \
 -DCMAKE_INSTALL_PREFIX="${INSTALLDIR}" \
 -DCMAKE_Fortran_COMPILER=mpifort \
 -DCMAKE_C_COMPILER=mpicc \
 -DCMAKE_CXX_COMPILER=mpicxx \
 -DCMAKE_Fortran_FLAGS="-O3 -fp-model precise -fpp" \
 -DCMAKE_C_FLAGS="-O3 -fp-model precise -std=c99 -diag-disable=10441 -Wno-error=implicit-function-declaration" \
 -DCMAKE_CXX_FLAGS="-O3 -fp-model precise -std=c++11 -diag-disable=10441 -Wno-error=implicit-function-declaration" \
 -DLIBS="mkl_scalapack_lp64 mkl_blacs_openmpi_lp64 mkl_intel_lp64 mkl_sequential mkl_core cusolver cublas cublasLt cudart" \
 -DBUILD_SHARED_LIBS=ON \
 -DCMAKE_CUDA_FLAGS="-O3 -arch=${CUDA_ARCH}" \
 -DENABLE_PEXSI=ON \
 ..

make -j "${PARALLEL}" || { echo "ERROR: make failed" >&2; exit 1; }
make test   # a failure here aborts via `set -e`
make install || { echo "ERROR: make install failed" >&2; exit 1; }
echo "Successfully installed to ${INSTALLDIR}"

PLUMED (MPI)

#!/bin/bash
# Build and install PLUMED (MPI) with the Intel oneAPI compilers and MKL.
set -eo pipefail

# setvars.sh is noisy and returns non-zero on re-entry; --force re-sources it.
source "${HOME}/intel/oneapi/setvars.sh" --force > /dev/null 2>&1

PLUMED_VERSION=2.10.0
PLUMED_DIRNAME=plumed-${PLUMED_VERSION}
PARALLEL=16
COMPILER=intel2025.3.3
MPI_VERSION="4.1.8/intel2025"
BASEDIR=${HOME}/Software/plumed/${PLUMED_VERSION}
PLUMED_TARBALL=${BASEDIR}/plumed-src-${PLUMED_VERSION}.tgz
WORKDIR=/gwork/users/${USER}
INSTALLDIR=/apl/plumed/${PLUMED_VERSION}/${COMPILER}/ompi

cd "${WORKDIR}" || { echo "ERROR: cannot cd to ${WORKDIR}" >&2; exit 1; }
module -s purge
module load "openmpi/${MPI_VERSION}"

# Unpack only once; reuse an existing source tree on reruns.
if [ ! -d "${PLUMED_DIRNAME}" ]; then
   [ ! -f "${PLUMED_TARBALL}" ] && { echo "ERROR: tarball not found: ${PLUMED_TARBALL}" >&2; exit 1; }
   tar xzf "${PLUMED_TARBALL}"
fi

cd "${PLUMED_DIRNAME}" || { echo "ERROR: cannot cd to ${PLUMED_DIRNAME}" >&2; exit 1; }

# Make the Open MPI wrappers use the Intel compilers underneath.
export OMPI_CC=icx
export OMPI_CXX=icpx
export OMPI_FC=ifx

FC=mpifort \
CC=mpicc \
CXX=mpicxx \
./configure \
 --prefix="${INSTALLDIR}" \
 --enable-libsearch=no \
 LIBS="-lmkl_rt -lpthread -lm -ldl -liomp5 -lstdc++ -lstdc++fs" \
 CXXFLAGS="-O3 -qopenmp" \
 FCFLAGS="" \
 FFLAGS="" \
 LDFLAGS="-qopenmp"

make clean
make -j "${PARALLEL}" 2>&1 | tee "${WORKDIR}/plumed-build.log"

# Prepend the just-built library dir so `make test` finds libplumed.
# Use ${VAR:+:$VAR} so an unset LD_LIBRARY_PATH does not leave an empty
# entry (":path" would silently add the current directory to the search path).
export LD_LIBRARY_PATH="${WORKDIR}/${PLUMED_DIRNAME}/src/lib${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}"
make test 2>&1 | tee "${WORKDIR}/plumed-test.log" || { echo "ERROR: make test failed. Aborting install." >&2; exit 1; }
make install 2>&1 | tee "${WORKDIR}/plumed-install.log"

DFTB+

#!/bin/bash
# Build and install DFTB+ (MPI) against the previously installed ELSI,
# PLUMED and ARPACK-NG, with MKL, CUDA and the bundled libMBD sources.
set -eo pipefail
# setvars.sh is noisy and returns non-zero on re-entry; --force re-sources it.
source "${HOME}/intel/oneapi/setvars.sh" --force > /dev/null 2>&1
DFTBPLUS_VERSION=25.1
DFTBPLUS_DIRNAME=dftbplus-${DFTBPLUS_VERSION}
PARALLEL=64
BASEDIR=${HOME}/Software/dftbplus/${DFTBPLUS_VERSION}
DFTBPLUS_TARBALL=${BASEDIR}/${DFTBPLUS_DIRNAME}.tar.xz
WORKDIR=/gwork/users/${USER}
INSTALLDIR=/apl/dftb+/${DFTBPLUS_VERSION}/mpi
ELSI_DIR=/apl/elsi/2.12.0/intel2025.3.3/ompi
PLUMED_DIR=/apl/plumed/2.10.0/intel2025.3.3/ompi
ARPACK_DIR=/apl/arpack-ng/3.9.1
LIBMBD=${HOME}/libmbd/0.12.8/libmbd-0.12.8
#-----------------------------------------------------------------------------
cd "$WORKDIR" || { echo "ERROR: Cannot cd to $WORKDIR" >&2; exit 1; }
module -s purge
module load mkl/2025.3
module load cuda/13.1u1
module load openmpi/4.1.8/intel2025

# Let CMake find ARPACK-NG through pkg-config.
export PKG_CONFIG_PATH="${ARPACK_DIR}/lib/pkgconfig:${PKG_CONFIG_PATH:-}"

# Unpack only once; reuse an existing source tree on reruns.
if [ ! -d "$DFTBPLUS_DIRNAME" ]; then
   if [ ! -f "${DFTBPLUS_TARBALL}" ]; then
      echo "ERROR: tarball not found: ${DFTBPLUS_TARBALL}" >&2
      exit 1
   fi
   tar xJf "${DFTBPLUS_TARBALL}" || { echo "ERROR: tar failed" >&2; exit 1; }
fi

cd "$DFTBPLUS_DIRNAME" || { echo "ERROR: Cannot cd to $DFTBPLUS_DIRNAME" >&2; exit 1; }

# Always configure from a clean build tree.
if [ -f _build/CMakeCache.txt ]; then
   echo "INFO: Existing CMakeCache.txt found. Cleaning _build..."
   rm -rf _build
fi
mkdir -p _build

# Provide the libMBD sources in place of the (unfetched) git submodule.
rm -rf external/mbd/origin
cp -a "${LIBMBD}" external/mbd/origin \
    || { echo "ERROR: Failed to copy libMBD from ${LIBMBD}" >&2; exit 1; }

# ---------------------------------------------------------------------------
# CMake configure
# ---------------------------------------------------------------------------
# OMPI_* make the Open MPI wrappers use the Intel compilers underneath.
OMPI_FC=ifx OMPI_CC=icx OMPI_CXX=icpx \
FC=mpifort CC=mpicc CXX=mpicxx \
CMAKE_PREFIX_PATH="${ELSI_DIR};${PLUMED_DIR};${ARPACK_DIR}" \
cmake \
 -S . -B _build \
 -DCMAKE_INSTALL_PREFIX="${INSTALLDIR}" \
 -DBLAS_LIBRARIES="mkl_rt;pthread;m;dl" \
 -DLAPACK_LIBRARIES="mkl_rt;pthread;m;dl" \
 -DWITH_MPI=TRUE \
 -DWITH_ELSI=TRUE \
 -DWITH_ARPACK=TRUE \
 -DWITH_GPU=TRUE \
 -DWITH_TRANSPORT=TRUE \
 -DWITH_TBLITE=TRUE \
 -DWITH_SOCKETS=TRUE \
 -DWITH_SDFTD3=TRUE \
 -DWITH_MBD=TRUE \
 -DWITH_PLUMED=TRUE \
 -DWITH_CHIMES=TRUE

cmake --build _build --parallel "${PARALLEL}" \
   || { echo "ERROR: Build failed" >&2; exit 1; }

# Install
mkdir -p "${INSTALLDIR}"
cmake --install _build \
   || { echo "ERROR: Install failed" >&2; exit 1; }

# Sanity check: the main binary must exist and be executable.
[ -x "${INSTALLDIR}/bin/dftb+" ] || { echo "ERROR: ${INSTALLDIR}/bin/dftb+ missing after install" >&2; exit 1; }
echo "Install complete: ${INSTALLDIR}/bin/dftb+"