CP2K 2024.2
Web page
https://www.cp2k.org/
Version
2024.2
Build environment
- GCC 13.1.1 (gcc-toolset-13)
- HPC-X 2.16 (Open MPI 4.1.5)
Files required for the build
- cp2k-2024.2.tar.bz2
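The tarball can be fetched from the CP2K GitHub releases page. A minimal sketch, assuming the usual CP2K release URL pattern (verify the URL against the official release page before use); the target directory matches the SOURCE_ROOT used in the build script below:

mkdir -p /home/users/${USER}/Software/CP2K/2024.2
cd /home/users/${USER}/Software/CP2K/2024.2
wget https://github.com/cp2k/cp2k/releases/download/v2024.2/cp2k-2024.2.tar.bz2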
Build procedure
#!/bin/sh
VERSION=2024.2
INSTDIR=/apl/cp2k/2024.2
SOURCE_ROOT=/home/users/${USER}/Software/CP2K/${VERSION}
TARBALL=${SOURCE_ROOT}/cp2k-${VERSION}.tar.bz2
PARALLEL=32
# ------------------------------------------------------------------------
umask 0022
export LANG=C
export LC_ALL=C
ulimit -s unlimited
module -s purge
module -s load gcc-toolset/13
module -s load openmpi/4.1.5-hpcx2.16/gcc13
cd $INSTDIR
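# clear out any leftover cp2k-${VERSION} directory (removal runs in the background),
# then unpack the tarball and move its contents into ${INSTDIR}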
if [ -d cp2k-${VERSION} ]; then
mv cp2k-${VERSION} cp2k-erase
rm -rf cp2k-erase &
fi
tar jxf ${TARBALL}
sleep 5
mv cp2k-${VERSION}/* .
sleep 5
rm -rf cp2k-${VERSION}/.dockerignore
rmdir cp2k-${VERSION}
cd ${INSTDIR}/tools/toolchain
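# patch the toolchain's SuperLU_DIST script so that its CMake step uses the MPI compiler wrappers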
sed -i -e "/PARMETISLIB=FALSE/"a'-DCMAKE_C_COMPILER=${MPICC} -DCMAKE_CXX_COMPILER=${MPICXX} \\' scripts/stage5/install_superlu.sh
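# compiler settings picked up by the toolchain build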
export CC=gcc
export CXX=g++
export FC=gfortran
export MPICC=mpicc
export MPICXX=mpicxx
export MPIFC=mpif90
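# build the external dependencies with the toolchain: use the system GCC, CMake, and
# Open MPI, and build the remaining libraries from source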
./install_cp2k_toolchain.sh --mpi-mode=openmpi \
--math-mode=openblas \
--with-gcc=system \
--with-cmake=system \
--with-openmpi=system \
--with-mpich=no \
--with-intelmpi=no \
--with-libxc=install \
--with-libint=install \
--with-fftw=install \
--with-acml=no \
--with-mkl=no \
--with-openblas=install \
--with-scalapack=install \
--with-libxsmm=install \
--with-elpa=install \
--with-ptscotch=install \
--with-superlu=install \
--with-pexsi=install \
--with-quip=install \
--with-plumed=install \
--with-sirius=install \
--with-gsl=install \
--with-libvdwxc=install \
--with-spglib=install \
--with-hdf5=install \
--with-spfft=install \
--with-spla=install \
--with-cosma=install \
--with-libvori=install \
--with-libtorch=install
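# the toolchain generates arch files under install/arch/; use the psmp (MPI + OpenMP)
# one as the site arch file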
cp install/arch/local.psmp ../../arch/rccs.psmp
cd ${INSTDIR}
# dbcsr source code is already available
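# build the psmp (MPI + OpenMP) executables and the libcp2k library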
make -j ${PARALLEL} ARCH=rccs VERSION=psmp
make -j ${PARALLEL} ARCH=rccs VERSION=psmp libcp2k
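For reference, a minimal interactive run sketch, assuming the standard CP2K layout in which the build above places binaries under exe/rccs/ (input.inp and the process/thread counts are placeholders):

#!/bin/sh
module -s purge
module -s load gcc-toolset/13
module -s load openmpi/4.1.5-hpcx2.16/gcc13
export OMP_NUM_THREADS=1
# cp2k.psmp is the MPI + OpenMP binary produced by the build above
mpirun -np 16 /apl/cp2k/2024.2/exe/rccs/cp2k.psmp -i input.inp -o output.out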
Testing
Run with the following job script.
#!/bin/sh
#PBS -l select=1:ncpus=16:mpiprocs=16:ompthreads=1
#PBS -l walltime=12:00:00

export LC_ALL=C
export LANG=""
export OMP_STACKSIZE=64M

module -s purge
module -s load gcc-toolset/13
module -s load openmpi/4.1.5-hpcx2.16/gcc13

CP2K=/apl/cp2k/2024.2
CP2K_ARCH=rccs
CP2K_VER=psmp
TIMEOUT=600
PARALLEL=16

ulimit -s unlimited
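# each do_regtest.py invocation below runs the regression suite for one
# MPI-rank / OpenMP-thread combination; --maxtasks limits the number of
# concurrently running test instances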
cd ${CP2K}/regtesting/${CP2K_ARCH}/${CP2K_VER} || { echo "Failed to change directory"; exit 1; }
rm -rf LAST-${CP2K_ARCH}-${CP2K_VER}

# serial test
echo "Starting serial test at $(date)"
../../../tests/do_regtest.py \
--mpiranks 1 \
--ompthreads 1 \
--timeout ${TIMEOUT} \
--workbasedir ../../../ \
--maxtasks ${PARALLEL} \
${CP2K_ARCH} ${CP2K_VER} >& regtest_mpi1_omp1.log
rm -rf LAST-${CP2K_ARCH}-${CP2K_VER}

# omp test
../../../tests/do_regtest.py \
--mpiranks 1 \
--ompthreads 2 \
--timeout ${TIMEOUT} \
--workbasedir ../../../ \
--maxtasks ${PARALLEL} \
${CP2K_ARCH} ${CP2K_VER} >& regtest_mpi1_omp2.log
rm -rf LAST-${CP2K_ARCH}-${CP2K_VER}

# mpi test
../../../tests/do_regtest.py \
--mpiranks 2 \
--ompthreads 1 \
--timeout ${TIMEOUT} \
--workbasedir ../../../ \
--maxtasks ${PARALLEL} \
${CP2K_ARCH} ${CP2K_VER} >& regtest_mpi2_omp1.log
rm -rf LAST-${CP2K_ARCH}-${CP2K_VER}

# mpi/openmp test
../../../tests/do_regtest.py \
--mpiranks 2 \
--ompthreads 2 \
--timeout ${TIMEOUT} \
--workbasedir ../../../ \
--maxtasks ${PARALLEL} \
${CP2K_ARCH} ${CP2K_VER} >& regtest_mpi2_omp2.log
rm -rf LAST-${CP2K_ARCH}-${CP2K_VER}

# yet another mpi test
../../../tests/do_regtest.py \
--mpiranks 8 \
--ompthreads 1 \
--timeout ${TIMEOUT} \
--workbasedir ../../../ \
--maxtasks ${PARALLEL} \
${CP2K_ARCH} ${CP2K_VER} >& regtest_mpi8_omp1.log
rm -rf LAST-${CP2K_ARCH}-${CP2K_VER}

# yet another mpi/openmp test
../../../tests/do_regtest.py \
--mpiranks 8 \
--ompthreads 2 \
--timeout ${TIMEOUT} \
--workbasedir ../../../ \
--maxtasks ${PARALLEL} \
${CP2K_ARCH} ${CP2K_VER} >& regtest_mpi8_omp2.log
rm -rf LAST-${CP2K_ARCH}-${CP2K_VER}
Notes
- For details of the tests, please check the information in the directories under /apl/cp2k/2024.2/regtesting/rccs/psmp.
- summary.txt, error_summary, and timings.txt should be the most useful references (a quick-look sketch follows these notes).
- (2024/7/26) When the HPC-X 2.11 or 2.13.1 runtime libraries are used, we observed a significant slowdown in 128-process runs of the H2O-256 benchmark. The problem does not occur with Open MPI 4.1.5, 4.1.6, or HPC-X 2.16.
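A quick way to check the outcome of each MPI/OpenMP combination, assuming only the log files written by the job script above (the layout of the other result files under the regtesting directory may differ):

cd /apl/cp2k/2024.2/regtesting/rccs/psmp
ls regtest_mpi*_omp*.log            # one log per do_regtest.py run
tail -n 30 regtest_mpi2_omp2.log    # do_regtest.py prints a pass/fail summary at the end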