X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/055b9792b7692d6c42f4e13d38dc6cd008396a6f..0e2f3e506566b1ceb54bd764d3f32c004e45f8b3:/tools/compute-images/scripts/base.sh

diff --git a/tools/compute-images/scripts/base.sh b/tools/compute-images/scripts/base.sh
index b7d0d0f363..90b845f1ac 100644
--- a/tools/compute-images/scripts/base.sh
+++ b/tools/compute-images/scripts/base.sh
@@ -4,6 +4,8 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 
+set -eu -o pipefail
+
 SUDO=sudo
 
 wait_for_apt_locks() {
@@ -64,12 +66,11 @@ wait_for_apt_locks && $SUDO DEBIAN_FRONTEND=noninteractive apt-get -qq --yes ins
 # Install the Arvados packages we need
 wait_for_apt_locks && $SUDO DEBIAN_FRONTEND=noninteractive apt-get -qq --yes install \
   python3-arvados-fuse \
-  crunch-run \
   arvados-docker-cleaner \
   docker.io
 
 # Get Go and build singularity
-goversion=1.16.3
+goversion=1.17.1
 mkdir -p /var/lib/arvados
 rm -rf /var/lib/arvados/go/
 curl -s https://storage.googleapis.com/golang/go${goversion}.linux-amd64.tar.gz | tar -C /var/lib/arvados -xzf -
@@ -89,6 +90,11 @@ make -C ./builddir
 make -C ./builddir install
 ln -sf /var/lib/arvados/bin/* /usr/local/bin/
 
+# set `mksquashfs mem` in the singularity config file if it is configured
+if [ "$MKSQUASHFS_MEM" != "" ]; then
+  echo "mksquashfs mem = ${MKSQUASHFS_MEM}" >> /var/lib/arvados/etc/singularity/singularity.conf
+fi
+
 # Print singularity version installed
 singularity --version
 
@@ -115,7 +121,7 @@ $SUDO sed "s/ExecStart=\(.*\)/ExecStart=\1 --default-ulimit nofile=10000:10000 $
 $SUDO systemctl daemon-reload
 
 # docker should not start on boot: we restart it inside /usr/local/bin/ensure-encrypted-partitions.sh,
-# and the BootProbeCommand defaults to "docker ps -q"
+# and the BootProbeCommand might be "docker ps -q"
 $SUDO systemctl disable docker
 
 # Make sure user_allow_other is set in fuse.conf
@@ -138,9 +144,98 @@ $SUDO chmod 700 /home/crunch/.ssh/
 if [ "x$RESOLVER" != "x" ]; then
   $SUDO sed -i "s/#prepend domain-name-servers 127.0.0.1;/prepend domain-name-servers ${RESOLVER};/" /etc/dhcp/dhclient.conf
 fi
-# Set up the cloud-init script that will ensure encrypted disks
-$SUDO mv /tmp/usr-local-bin-ensure-encrypted-partitions.sh /usr/local/bin/ensure-encrypted-partitions.sh
+
+if [ "$AWS_EBS_AUTOSCALE" != "1" ]; then
+  # Set up the cloud-init script that will ensure encrypted disks
+  $SUDO mv /tmp/usr-local-bin-ensure-encrypted-partitions.sh /usr/local/bin/ensure-encrypted-partitions.sh
+else
+  wait_for_apt_locks && $SUDO DEBIAN_FRONTEND=noninteractive apt-get -qq --yes install jq unzip
+
+  curl -s "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "/tmp/awscliv2.zip"
+  unzip -q /tmp/awscliv2.zip -d /tmp && $SUDO /tmp/aws/install
+  # Pinned to v2.4.5 because we apply a patch below
+  #export EBS_AUTOSCALE_VERSION=$(curl --silent "https://api.github.com/repos/awslabs/amazon-ebs-autoscale/releases/latest" | jq -r .tag_name)
+  export EBS_AUTOSCALE_VERSION="v2.4.5"
+  cd /opt && $SUDO git clone https://github.com/awslabs/amazon-ebs-autoscale.git
+  cd /opt/amazon-ebs-autoscale && $SUDO git checkout $EBS_AUTOSCALE_VERSION
+  $SUDO patch -p1 < /tmp/create-ebs-volume-nvme.patch
+
+  # This script really requires bash and the shebang line is wrong
+  $SUDO sed -i 's|^#!/bin/sh|#!/bin/bash|' /opt/amazon-ebs-autoscale/bin/ebs-autoscale
+
+  # Set up the cloud-init script that makes use of the AWS EBS autoscaler
+  $SUDO mv /tmp/usr-local-bin-ensure-encrypted-partitions-aws-ebs-autoscale.sh /usr/local/bin/ensure-encrypted-partitions.sh
+fi
+
 $SUDO chmod 755 /usr/local/bin/ensure-encrypted-partitions.sh
 $SUDO chown root:root /usr/local/bin/ensure-encrypted-partitions.sh
 $SUDO mv /tmp/etc-cloud-cloud.cfg.d-07_compute_arvados_dispatch_cloud.cfg /etc/cloud/cloud.cfg.d/07_compute_arvados_dispatch_cloud.cfg
 $SUDO chown root:root /etc/cloud/cloud.cfg.d/07_compute_arvados_dispatch_cloud.cfg
+
+if [ "$NVIDIA_GPU_SUPPORT" == "1" ]; then
+  # $DIST should not have a dot if there is one in /etc/os-release (e.g. 18.04)
+  DIST=$(. /etc/os-release; echo $ID$VERSION_ID | tr -d '.')
+  # We need a kernel and matching headers
+  if [[ "$DIST" =~ ^debian ]]; then
+    $SUDO apt-get -y install linux-image-cloud-amd64 linux-headers-cloud-amd64
+  elif [ "$CLOUD" == "azure" ]; then
+    $SUDO apt-get -y install linux-image-azure linux-headers-azure
+  elif [ "$CLOUD" == "aws" ]; then
+    $SUDO apt-get -y install linux-image-aws linux-headers-aws
+  fi
+
+  # Install CUDA
+  $SUDO apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/$DIST/x86_64/7fa2af80.pub
+  $SUDO apt-get -y install software-properties-common
+  $SUDO add-apt-repository "deb https://developer.download.nvidia.com/compute/cuda/repos/$DIST/x86_64/ /"
+  $SUDO add-apt-repository contrib
+  $SUDO apt-get update
+  $SUDO apt-get -y install cuda
+
+  # Install libnvidia-container, the tooling for Docker/Singularity
+  curl -s -L https://nvidia.github.io/libnvidia-container/gpgkey | \
+    $SUDO apt-key add -
+  if [ "$DIST" == "debian11" ]; then
+    # As of 2021-12-16 libnvidia-container and friends are only available for
+    # Debian 10, not yet Debian 11. Install experimental rc1 package as per this
+    # workaround:
+    # https://github.com/NVIDIA/nvidia-docker/issues/1549#issuecomment-989670662
+    curl -s -L https://nvidia.github.io/libnvidia-container/debian10/libnvidia-container.list | \
+      $SUDO tee /etc/apt/sources.list.d/libnvidia-container.list
+    $SUDO sed -i -e '/experimental/ s/^#//g' /etc/apt/sources.list.d/libnvidia-container.list
+  else
+    # here, $DIST should have a dot if there is one in /etc/os-release (e.g. 18.04)...
+    DIST=$(. /etc/os-release; echo $ID$VERSION_ID)
+    curl -s -L https://nvidia.github.io/libnvidia-container/$DIST/libnvidia-container.list | \
+      $SUDO tee /etc/apt/sources.list.d/libnvidia-container.list
+  fi
+
+  if [ "$DIST" == "debian10" ]; then
+    # Debian 10 comes with Docker 18.xx, we need 19.03 or later
+    curl -fsSL https://download.docker.com/linux/debian/gpg | $SUDO gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
+    echo deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian/ buster stable | \
+      $SUDO tee /etc/apt/sources.list.d/docker.list
+    $SUDO apt-get update
+    $SUDO apt-get -yq --no-install-recommends install docker-ce=5:19.03.15~3-0~debian-buster
+
+    $SUDO sed "s/ExecStart=\(.*\)/ExecStart=\1 --default-ulimit nofile=10000:10000 ${SET_RESOLVER}/g" \
+      /lib/systemd/system/docker.service \
+      > /etc/systemd/system/docker.service
+
+    $SUDO systemctl daemon-reload
+
+    # docker should not start on boot: we restart it inside /usr/local/bin/ensure-encrypted-partitions.sh,
+    # and the BootProbeCommand might be "docker ps -q"
+    $SUDO systemctl disable docker
+  fi
+  $SUDO apt-get update
+  $SUDO apt-get -y install libnvidia-container1 libnvidia-container-tools nvidia-container-toolkit
+  # This service fails to start when the image is booted without Nvidia GPUs present, which makes
+  # `systemctl is-system-running` respond with "degraded" and since that command is our default
+  # BootProbeCommand, compute nodes never finish booting from Arvados' perspective.
+  # Disable the service to avoid this. This should be fine because crunch-run does its own basic
+  # CUDA initialization.
+  $SUDO systemctl disable nvidia-persistenced.service
+fi
+
+$SUDO apt-get clean
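
Note: with "set -eu -o pipefail" now in effect at the top of the script, every variable it expands (MKSQUASHFS_MEM, AWS_EBS_AUTOSCALE, NVIDIA_GPU_SUPPORT, CLOUD, RESOLVER, SET_RESOLVER) must be defined when the script runs, even if only as an empty string, or the unset-variable check aborts the build. A minimal invocation sketch follows; the variable names are taken from the script above, but the values, the /tmp/base.sh path, and the presence of the /tmp support files the script moves into place are only illustrative assumptions, since the image build tooling normally supplies all of this:

    # Hypothetical manual run of the provisioning script; values are examples only.
    export CLOUD=aws                # cloud flavor used by the kernel/header install branch
    export AWS_EBS_AUTOSCALE=0      # "1" selects the amazon-ebs-autoscale branch instead of the plain ensure-encrypted-partitions.sh
    export NVIDIA_GPU_SUPPORT=0     # "1" installs CUDA and libnvidia-container
    export MKSQUASHFS_MEM=""        # e.g. "512M" to set "mksquashfs mem" in singularity.conf
    export RESOLVER="" SET_RESOLVER=""
    bash /tmp/base.sh               # assumed location; adjust to wherever the script was copied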