AMBER20 update 13
Webpage
Version
Amber20 update 13, AmberTools 21 update 12
Build Environment
- GCC 9.2.1 (gcc-toolset-9)
- MKL 2022.2.1
- CUDA 11.2
- OpenMPI 4.1.4 (HPC-X 2.11)
- OpenMPI 4.1.5 (HPC-X 2.13.1) was used in the actual build, but the runtime library was changed to HPC-X 2.11 to avoid errors on GPU parallel runs.
- HPC-X 2.11 is employed in the following procedure.
Files Required
- Amber20.tar.bz2
- AmberTools20.tar.bz2
- (Amber20 update.1-13 & AmberTools20 update.1-15 & AmberTools21 update.1-12; downloaded during the installation.)
- patch-nmrat-gpu.cpp
--- src/pmemd/src/cuda/gpu.cpp.org 2022-01-06 16:02:15.915217989 +0900
+++ src/pmemd/src/cuda/gpu.cpp 2022-01-06 16:02:26.857121731 +0900
@@ -2849,7 +2849,7 @@
}
// torsions, resttype = 3
else if (resttype[i] == 3) {
- if (nmrat[i][0] >= 0 && nmrat[i][1] >= 0 && nmrat[i][2] >= 0 && nmrat[3] >= 0) {
+ if (nmrat[i][0] >= 0 && nmrat[i][1] >= 0 && nmrat[i][2] >= 0 && nmrat[i][3] >= 0) {
torsions++;
}
else {
- patch-cpptraj-configure
--- AmberTools/src/cpptraj/configure.org 2022-01-13 22:56:46.000000000 +0900
+++ AmberTools/src/cpptraj/configure 2022-01-13 22:57:10.000000000 +0900
@@ -1771,8 +1771,6 @@
if [ "${LIB_STAT[$LPARANC]}" != 'off' -a $USE_MPI -eq 0 ] ; then
WrnMsg "Parallel NetCDF enabled but MPI not specified. Assuming '-mpi'."
USE_MPI=1
- elif [ $USE_MPI -ne 0 -a "${LIB_STAT[$LPARANC]}" = 'off' ] ; then
- LIB_STAT[$LPARANC]='enabled'
fi
# If we are using the bundled ARPACK then we will need C++/Fortran linking.
if [ "${LIB_STAT[$LARPACK]}" = 'bundled' ] ; then
- patch-configure_python (to use miniforge instead of miniconda)
--- AmberTools/src/configure_python.org 2022-01-12 15:46:09.042775250 +0900
+++ AmberTools/src/configure_python 2022-01-12 15:48:09.177986821 +0900
@@ -107,8 +107,7 @@
echo "Downloading the latest Miniconda distribution"
if [ $mac -eq 1 ]; then
- curl -L -# https://repo.continuum.io/miniconda/Miniconda${version}-${MINICONDA_VERSION}-MacOSX-x86_64.sh > \
- miniconda.sh
+ :
else
# Try to figure out if the machine builds 32- or 64-bit binaries,
# respectively.
@@ -145,23 +144,23 @@
;;
esac
if [ $bit -eq 32 ]; then
- wget https://repo.continuum.io/miniconda/Miniconda${version}-${MINICONDA_VERSION}-Linux-x86.sh \
- -O miniconda.sh
+ exit 0
else
- wget https://repo.continuum.io/miniconda/Miniconda${version}-${MINICONDA_VERSION}-Linux-x86_64.sh \
- -O miniconda.sh
+ wget https://github.com/conda-forge/miniforge/releases/download/${MINICONDA_VERSION}/Miniforge${version}-${MINICONDA_VERSION}-Linux-x86_64.sh \
+ -O miniforge.sh
fi
fi
-if [ -d "$prefix/miniconda" ]; then
- echo "Deleting existing miniconda at $prefix/miniconda"
- /bin/rm -fr "$prefix/miniconda"
+if [ -d "$prefix/miniforge" ]; then
+ echo "Deleting existing miniforge at $prefix/miniforge"
+ /bin/rm -fr "$prefix/miniforge"
fi
echo "Installing Miniconda Python."
-bash miniconda.sh -b -p "$prefix/miniconda" || error "Installing miniconda failed"
+bash miniforge.sh -b -p "$prefix/miniforge" || error "Installing miniconda failed"
+ln -s ./miniforge ./miniconda
-export PATH="$prefix/miniconda/bin":"$PATH"
+export PATH="$prefix/miniforge/bin":"$PATH"
echo "Updating and installing required and optional packages..."
$prefix/miniconda/bin/python -m pip install pip --upgrade
@@ -172,7 +171,7 @@
# Use pip to install matplotlib so we don't have to pull in the entire Qt
# dependency. And cache inside the Miniconda directory, since we don't want to
# be writing outside $AMBERHOME unless specifically requested to
-$prefix/miniconda/bin/python -m pip --cache-dir=$prefix/miniconda/pkgs \
+$prefix/miniconda/bin/python -m pip --cache-dir=$prefix/miniforge/pkgs \
install matplotlib || install_matplotlib='yes'
if [ ! -z "$install_matplotlib" ]; then
@@ -183,22 +182,22 @@
mkdir -p $prefix/lib
cwd=`pwd`
cd "$prefix/bin"
-ln -sf ../miniconda/bin/python amber.python || error "Linking Amber's Miniconda Python"
-ln -sf ../miniconda/bin/conda amber.conda || error "Linking Amber's Miniconda conda"
-ln -sf ../miniconda/bin/ipython amber.ipython || error "Linking Amber's Miniconda ipython"
-ln -sf ../miniconda/bin/jupyter amber.jupyter || error "Linking Amber's Miniconda jupyter"
-ln -sf ../miniconda/bin/pip amber.pip || error "Linking Amber's Miniconda pip"
+ln -sf ../miniforge/bin/python amber.python || error "Linking Amber's Miniconda Python"
+ln -sf ../miniforge/bin/conda amber.conda || error "Linking Amber's Miniconda conda"
+ln -sf ../miniforge/bin/ipython amber.ipython || error "Linking Amber's Miniconda ipython"
+ln -sf ../miniforge/bin/jupyter amber.jupyter || error "Linking Amber's Miniconda jupyter"
+ln -sf ../miniforge/bin/pip amber.pip || error "Linking Amber's Miniconda pip"
cd "$prefix/lib"
-for dir in ../miniconda/lib/tcl*; do
+for dir in ../miniforge/lib/tcl*; do
ln -sf "$dir" || error "Linking TCL library folder $dir"
done
-for dir in ../miniconda/lib/tk*; do
+for dir in ../miniforge/lib/tk*; do
ln -sf "$dir" || error "Linking TK library folder $dir"
done
cd $cwd
echo ""
-echo "Done. Miniconda installed in $prefix/miniconda"
+echo "Done. Miniforge installed in $prefix/miniforge"
-/bin/rm -f miniconda.sh
+/bin/rm -f miniforge.sh
-$prefix/miniconda/bin/conda clean --all --yes
+$prefix/miniforge/bin/conda clean --all --yes
Build Procedure
#!/bin/sh
# Build script for Amber20 update 13 + AmberTools 21 update 12 on the
# GCC 9 / MKL / CUDA 11.2 / OpenMPI (HPC-X) toolchain. Requires network
# access (tarball unpack, miniforge download, update_amber) and an
# existing, empty install directory.
VERSION=20
TOOLSVERSION=20 # AmberTools 20 is unpacked, then upgraded to 21 by update_amber --upgrade below
MINIFORGE_VERSION="4.11.0-4" # ad hoc custom version, injected into configure_python via sed below
INSTALL_DIR="/apl/amber/20u13"
TARBALL_DIR="/home/users/${USER}/Software/AMBER/20"
PATCH0=${TARBALL_DIR}/patch-nmrat-gpu.cpp
PATCH1=${TARBALL_DIR}/patch-cpptraj-configure
PATCHX=${TARBALL_DIR}/patch-configure_python
PARALLEL=12
#----------------------------------------------------------------------
# Toolchain modules; these match the runtime environment documented above.
module purge
module load gcc-toolset/9
module load mkl/2022.2.1
module load cuda/11.2
module load openmpi/4.1.4-hpcx/gcc9
export AMBERHOME=${INSTALL_DIR}
export CUDA_HOME="/apl/cuda/11.2"
export LANG=C
export LC_ALL=C
# install directory has to be prepared (created) before running this script
if [ ! -d ${AMBERHOME} ]; then
echo "Create ${AMBERHOME} before running this script."
exit 1
fi
# the install directory must be empty, to avoid mixing with a previous install
if [ "$(ls -A ${AMBERHOME})" ]; then
echo "Target directory ${AMBERHOME} not empty"
exit 2
fi
ulimit -s unlimited
# prep files: unpack both source tarballs and flatten them into AMBERHOME
cd ${AMBERHOME}
bunzip2 -c ${TARBALL_DIR}/Amber${VERSION}.tar.bz2 | tar xf -
bunzip2 -c ${TARBALL_DIR}/AmberTools${TOOLSVERSION}.tar.bz2 | tar xf -
mv amber${VERSION}_src/* .
rmdir amber${VERSION}_src
# switch configure_python from miniconda to miniforge, and pin its version
patch -p0 < $PATCHX
sed -i -e "s/=latest/=${MINIFORGE_VERSION}/" AmberTools/src/configure_python
# install python first; otherwise update_amber fails to connect to ambermd.org
./AmberTools/src/configure_python -v 3
AMBER_PYTHON=$AMBERHOME/bin/amber.python
# cheat /usr/bin/env python... (update scripts expect a "python" on PATH)
cd bin && ln -s amber.python ./python && cd ..
OLDPATH=${PATH}
export PATH="${AMBERHOME}/bin:${PATH}"
# apply official patches: --upgrade moves AmberTools 20 -> 21, --update applies remaining updates
echo y | $AMBER_PYTHON ./update_amber --upgrade
$AMBER_PYTHON ./update_amber --update
# remove evidence: drop the temporary "python" symlink and restore the original PATH
rm -f ${AMBERHOME}/bin/python
export PATH="${OLDPATH}"
# local patch for the GPU NMR-restraint torsion indexing bug (nmrat[3] -> nmrat[i][3])
# see http://archive.ambermd.org/202110/0206.html
patch -p0 < $PATCH0
# ad hoc fix for pnetcdf handling in the cpptraj configure script
patch -p0 < $PATCH1
# ad hoc fix for cuda 11.2 (configure2 only recognizes CUDA up to 11.1)
sed -i -e "s/11\.1/11.2/" AmberTools/src/configure2
# reaxff-puremd is openmp only (tentatively)
# quick is tentatively not enabled; libstdc++ related issue?
# Build each edition in turn: configure, build+install, then clean the tree.
echo "[GPU serial edition (two versions)]"
LANG=C ./configure --no-updates -cuda gnu
make -j${PARALLEL} install && make clean
echo "[GPU parallel edition (two versions)]"
LANG=C ./configure --no-updates -mpi -cuda gnu
make -j${PARALLEL} install && make clean
echo "[CPU serial edition]"
LANG=C ./configure --no-updates gnu
make -j${PARALLEL} install && make clean
echo "[CPU openmp edition]"
LANG=C ./configure --no-updates -mkl -reaxff-puremd-openmp -openmp gnu
make -j${PARALLEL} install && make clean
echo "[CPU parallel edition]"
LANG=C ./configure --no-updates -mkl -mpi gnu
make -j${PARALLEL} install && make clean
# run tests; source amber.sh so AMBERHOME/PATH point at the fresh install
. ${AMBERHOME}/amber.sh
cd ${AMBERHOME}
# ad hoc work-around: expose package subdirectories at the top of site-packages
# https://github.com/Amber-MD/pdb4amber/issues/85#issuecomment-672812778
cd ${AMBERHOME}/lib/python*/site-packages
if [ -d ParmEd*/parmed ]; then
ln -s ParmEd*/parmed ./parmed
fi
if [ -d pdb4amber*/pdb4amber ]; then
ln -s pdb4amber*/pdb4amber ./pdb4amber
fi
if [ -d pytraj*/pytraj ]; then
ln -s pytraj*/pytraj ./pytraj
fi
# parallel (CPU MPI) tests first; GPU tests are run separately on a GPU node
cd ${AMBERHOME}
export DO_PARALLEL="mpirun -np 2"
make test.parallel && make clean.test
export DO_PARALLEL="mpirun -np 4"
cd test; make test.parallel.4proc; make clean; cd ../
unset DO_PARALLEL
# openmp tests
make test.openmp && make clean.test
# serial tests
make test.serial && make clean.test
cd ${AMBERHOME}
# restrict access to the source tree after installation
chmod 700 src
Tests
- GPU tests can't be performed on the frontend node. We ran the following script on a GPU-equipped computation node.
#!/bin/sh
# GPU test script for the Amber20u13 install. Must be run on a
# GPU-equipped computation node, with the same module environment
# that was used for the build.
module purge
module load gcc-toolset/9
module load mkl/2022.2.1
module load cuda/11.2
module load openmpi/4.1.4-hpcx/gcc9
export AMBERHOME="/apl/amber/20u13"
export CUDA_HOME="/apl/cuda/11.2"
export LANG=C
export LC_ALL=C
ulimit -s unlimited
# parallel tests first
cd ${AMBERHOME}
. ${AMBERHOME}/amber.sh
## gpu tests: each suite is run twice, once per precision model (DPFP, SPFP)
export DO_PARALLEL="mpirun -np 2"
make test.cuda_parallel && make clean.test # DPFP (default precision model)
cd test; ./test_amber_cuda_parallel.sh SPFP; make clean; cd ../
unset DO_PARALLEL
make test.cuda_serial && make clean.test # DPFP (default precision model)
cd test; ./test_amber_cuda_serial.sh SPFP; make clean; cd ../
- Test results are available at /apl/amber/20u13/logs/. Minor numerical errors are reported.
Notes
- The configure script was employed as in the previous case; CMake was not tested.
- CUDA 11.2 is not officially supported in configure. But it seems to work.
- HPC-X 2.13.1 version failed on GPU multinode parallel run (> 4 nodes). Switching runtime library to HPC-X 2.11 fixed the problem.
- There is a problem with the PYTHONPATH of MMPBSA.py. We added the following two lines to MMPBSA.py and MMPBSA.py.MPI.
- This problem did not occur in the case on the previous system.
import sys
sys.path.append( "/apl/amber/20u13/AmberTools/src/mmpbsa_py/MMPBSA_mods" )