X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/9d706659aa362728270f02b7d18d12b1f68f3e36..2efd88cf64130bb0ebb0549d30053b85baaae2f9:/tools/salt-install/provision.sh

diff --git a/tools/salt-install/provision.sh b/tools/salt-install/provision.sh
index 669d04d92f..f4660be370 100755
--- a/tools/salt-install/provision.sh
+++ b/tools/salt-install/provision.sh
@@ -52,13 +52,13 @@ usage() {
   echo >&2 " -h, --help Display this help and exit"
   echo >&2 " --dump-config Dumps the pillars and states to a directory"
   echo >&2 " This parameter does not perform any installation at all. It's"
-  echo >&2 " intended to give you a parsed sot of configuration files so"
+  echo >&2 " intended to give you a parsed set of configuration files so"
   echo >&2 " you can inspect them or use them in you Saltstack infrastructure."
   echo >&2 " It"
   echo >&2 " - parses the pillar and states templates,"
   echo >&2 " - downloads the helper formulas with their desired versions,"
   echo >&2 " - prepares the 'top.sls' files both for pillars and states"
-  echo >&2 " for the selected role/s"
+  echo >&2 " for the selected role(s)"
   echo >&2 " - writes the resulting files into "
   echo >&2 " -v, --vagrant Run in vagrant and use the /vagrant shared dir"
   echo >&2 " --development Run in dev mode, using snakeoil certs"
@@ -200,7 +200,7 @@ WORKBENCH2_EXT_SSL_PORT=3001
 
 SSL_MODE="self-signed"
 USE_LETSENCRYPT_ROUTE53="no"
-CUSTOM_CERTS_DIR="${SCRIPT_DIR}/certs"
+CUSTOM_CERTS_DIR="${SCRIPT_DIR}/local_config_dir/certs"
 
 ## These are ARVADOS-related parameters
 # For a stable release, change RELEASE "production" and VERSION to the
@@ -237,6 +237,8 @@ T_DIR="/tmp/cluster_tests"
 
 arguments ${@}
 
+declare -A NODES
+
 if [ -s ${CONFIG_FILE} ]; then
   source ${CONFIG_FILE}
 else
@@ -255,14 +257,14 @@ if [ ! -d ${CONFIG_DIR} ]; then
   exit 1
 fi
 
-if grep -q 'fixme_or_this_wont_work' ${CONFIG_FILE} ; then
+if grep -rni 'fixme' ${CONFIG_FILE} ${CONFIG_DIR} ; then
   echo >&2 "The config file ${CONFIG_FILE} has some parameters that need to be modified."
   echo >&2 "Please, fix them and re-run the provision script."
   exit 1
 fi
 
 if ! grep -qE '^[[:alnum:]]{5}$' <<<${CLUSTER} ; then
-  echo >&2 "ERROR: must be exactly 5 alphanumeric characters long"
+  echo >&2 "ERROR: must be exactly 5 lowercase alphanumeric characters long"
   echo >&2 "Fix the cluster name in the 'local.params' file and re-run the provision script"
   exit 1
 fi
@@ -274,7 +276,7 @@ if [ ! -z "${HOSTNAME_EXT}" ] ; then
   # Make sure that the value configured as IP_INT is a real IP on the system.
   # If we don't error out early here when there is a mismatch, the formula will
   # fail with hard to interpret nginx errors later on.
-  ip addr list |grep -q "${IP_INT}/"
+  ip addr list |grep "${IP_INT}/" >/dev/null
   if [[ $? -ne 0 ]]; then
     echo "Unable to find the IP_INT address '${IP_INT}' on the system, please correct the value in local.params. Exiting..."
     exit 1
@@ -302,7 +304,10 @@ else
       yum install -y curl git jq
       ;;
     "debian"|"ubuntu")
-      DEBIAN_FRONTEND=noninteractive apt update
+      # Wait 2 minutes for any apt locks to clear
+      # This option is supported from apt 1.9.1 and ignored in older apt versions.
+      # Cf. https://blog.sinjakli.co.uk/2021/10/25/waiting-for-apt-locks-without-the-hacky-bash-scripts/
+      DEBIAN_FRONTEND=noninteractive apt -o DPkg::Lock::Timeout=120 update
       DEBIAN_FRONTEND=noninteractive apt install -y curl git jq
       ;;
   esac
@@ -565,6 +570,7 @@ if [ -z "${ROLES}" ]; then
   echo " - arvados" >> ${S_DIR}/top.sls
   echo " - extra.shell_sudo_passwordless" >> ${S_DIR}/top.sls
   echo " - extra.shell_cron_add_login_sync" >> ${S_DIR}/top.sls
+  echo " - extra.passenger_rvm" >> ${S_DIR}/top.sls
 
   # Pillars
   echo " - docker" >> ${P_DIR}/top.sls
@@ -589,11 +595,27 @@ if [ -z "${ROLES}" ]; then
     fi
     grep -q "letsencrypt" ${P_DIR}/top.sls || echo " - letsencrypt" >> ${P_DIR}/top.sls
 
-    # As the pillar differ whether we use LE or custom certs, we need to do a final edition on them
-    for c in controller websocket workbench workbench2 webshell download collections keepproxy; do
-      sed -i "s/__CERT_REQUIRES__/cmd: create-initial-cert-${c}.${CLUSTER}.${DOMAIN}*/g;
-              s#__CERT_PEM__#/etc/letsencrypt/live/${c}.${CLUSTER}.${DOMAIN}/fullchain.pem#g;
-              s#__CERT_KEY__#/etc/letsencrypt/live/${c}.${CLUSTER}.${DOMAIN}/privkey.pem#g" \
+    hosts=("controller" "websocket" "workbench" "workbench2" "webshell" "keepproxy")
+    if [ ${USE_SINGLE_HOSTNAME} = "no" ]; then
+      hosts+=("download" "collections")
+    else
+      hosts+=("keepweb")
+    fi
+
+    for c in "${hosts[@]}"; do
+      # Are we in a single-host-single-hostname env?
+      if [ "${USE_SINGLE_HOSTNAME}" = "yes" ]; then
+        # Are we in a single-host-single-hostname env?
+        CERT_NAME=${HOSTNAME_EXT}
+      else
+        # We are in a multiple-hostnames env
+        CERT_NAME=${c}.${CLUSTER}.${DOMAIN}
+      fi
+
+      # As the pillar differs whether we use LE or custom certs, we need to do a final edition on them
+      sed -i "s/__CERT_REQUIRES__/cmd: create-initial-cert-${CERT_NAME}*/g;
+              s#__CERT_PEM__#/etc/letsencrypt/live/${CERT_NAME}/fullchain.pem#g;
+              s#__CERT_KEY__#/etc/letsencrypt/live/${CERT_NAME}/privkey.pem#g" \
       ${P_DIR}/nginx_${c}_configuration.sls
     done
   else
@@ -657,6 +679,7 @@ else
       else
         echo " - nginx.passenger" >> ${S_DIR}/top.sls
       fi
+      echo " - extra.passenger_rvm" >> ${S_DIR}/top.sls
       ### If we don't install and run LE before arvados-api-server, it fails and breaks everything
       ### after it. So we add this here as we are, after all, sharing the host for api and controller
       if [ "${SSL_MODE}" = "lets-encrypt" ]; then