+
+if [ "$NVIDIA_GPU_SUPPORT" == "1" ]; then
+ # We need a kernel and matching headers
+ if [[ "$DIST" =~ ^debian ]]; then
+ $SUDO apt-get -y install linux-image-cloud-amd64 linux-headers-cloud-amd64
+ elif [ "$CLOUD" == "azure" ]; then
+ $SUDO apt-get -y install linux-image-azure linux-headers-azure
+ elif [ "$CLOUD" == "aws" ]; then
+ $SUDO apt-get -y install linux-image-aws linux-headers-aws
+ fi
+
+ # Install CUDA from NVIDIA's repository for this distribution
+ $SUDO apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/$DIST/x86_64/7fa2af80.pub
+ $SUDO apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/$DIST/x86_64/3bf863cc.pub
+ $SUDO apt-get -y install software-properties-common
+ $SUDO add-apt-repository "deb https://developer.download.nvidia.com/compute/cuda/repos/$DIST/x86_64/ /"
+ # Ubuntu 18.04's add-apt-repository does not understand 'contrib'
+ $SUDO add-apt-repository contrib || true
+ $SUDO apt-get update
+ $SUDO apt-get -y install cuda
+
+ # Install libnvidia-container, the tooling for Docker/Singularity.
+ # curl -f: fail with a non-zero exit status on an HTTP error instead of
+ # piping an HTML error page into apt-key / the sources list.
+ curl -s -f -L https://nvidia.github.io/libnvidia-container/gpgkey | \
+ $SUDO apt-key add -
+ if [ "$DIST" == "debian11" ]; then
+ # As of 2021-12-16 libnvidia-container and friends are only available for
+ # Debian 10, not yet Debian 11. Install experimental rc1 package as per this
+ # workaround:
+ # https://github.com/NVIDIA/nvidia-docker/issues/1549#issuecomment-989670662
+ curl -s -f -L https://nvidia.github.io/libnvidia-container/debian10/libnvidia-container.list | \
+ $SUDO tee /etc/apt/sources.list.d/libnvidia-container.list
+ $SUDO sed -i -e '/experimental/ s/^#//g' /etc/apt/sources.list.d/libnvidia-container.list
+ else
+ # The libnvidia-container repo path wants the dotted form from /etc/os-release
+ # (e.g. ubuntu18.04), unlike the $DIST used above. Compute it into its own
+ # variable instead of overwriting $DIST, so any later code that reads $DIST
+ # still sees the original value.
+ nvrepodist=$(. /etc/os-release; echo $ID$VERSION_ID)
+ curl -s -f -L https://nvidia.github.io/libnvidia-container/$nvrepodist/libnvidia-container.list | \
+ $SUDO tee /etc/apt/sources.list.d/libnvidia-container.list
+ fi
+
+ $SUDO apt-get update
+ $SUDO apt-get -y install libnvidia-container1 libnvidia-container-tools nvidia-container-toolkit
+ # This service fails to start when the image is booted without Nvidia GPUs present, which makes
+ # `systemctl is-system-running` respond with "degraded" and since that command is our default
+ # BootProbeCommand, compute nodes never finish booting from Arvados' perspective.
+ # Disable the service to avoid this. This should be fine because crunch-run does its own basic
+ # CUDA initialization.
+ $SUDO systemctl disable nvidia-persistenced.service
+fi
+
+# Get Go and build singularity
+mkdir -p /var/lib/arvados
+rm -rf /var/lib/arvados/go/
+# curl -f: fail with a non-zero exit status on an HTTP error so tar does not
+# silently try to unpack an HTML error page; -S still reports the error even
+# though -s suppresses the progress meter.
+curl -s -S -f https://storage.googleapis.com/golang/go${GOVERSION}.linux-amd64.tar.gz | tar -C /var/lib/arvados -xzf -
+ln -sf /var/lib/arvados/go/bin/* /usr/local/bin/
+
+singularityversion=3.10.4
+cd /var/lib/arvados
+git clone --recurse-submodules https://github.com/sylabs/singularity
+cd singularity
+git checkout v${singularityversion}
+
+# build dependencies for singularity
+wait_for_apt_locks && $SUDO DEBIAN_FRONTEND=noninteractive apt-get -qq --yes install \
+ make build-essential libssl-dev uuid-dev cryptsetup \
+ squashfs-tools libglib2.0-dev libseccomp-dev
+
+
+echo $singularityversion > VERSION
+./mconfig --prefix=/var/lib/arvados
+make -C ./builddir
+make -C ./builddir install
+ln -sf /var/lib/arvados/bin/* /usr/local/bin/
+
+# If the builder requested a cap on the memory mksquashfs may use when
+# packing container images, record it in the singularity config file.
+if [[ -n "$MKSQUASHFS_MEM" ]]; then
+ printf 'mksquashfs mem = %s\n' "${MKSQUASHFS_MEM}" >> /var/lib/arvados/etc/singularity/singularity.conf
+fi
+
+# Report which singularity version ended up on the PATH.
+singularity --version
+
+# Drop the cached .deb files to keep the image small.
+$SUDO apt-get clean