echo >&2 " controller"
echo >&2 " dispatcher"
echo >&2 " keepproxy"
+ echo >&2 " keepbalance"
echo >&2 " keepstore"
echo >&2 " keepweb"
echo >&2 " shell"
for i in ${2//,/ }
do
# Verify the role exists
- if [[ ! "database,api,controller,keepstore,websocket,keepweb,workbench2,webshell,keepproxy,shell,workbench,dispatcher" == *"$i"* ]]; then
+ if [[ ! "database,api,controller,keepstore,websocket,keepweb,workbench2,webshell,keepbalance,keepproxy,shell,workbench,dispatcher" == *"$i"* ]]; then
echo "The role '${i}' is not a valid role"
usage
exit 1
# BRANCH="main"
# Other formula versions we depend on
-POSTGRES_TAG="v0.43.0"
+POSTGRES_TAG="v0.44.0"
NGINX_TAG="v2.8.0"
-DOCKER_TAG="v2.0.7"
+DOCKER_TAG="v2.4.0"
LOCALE_TAG="v0.3.4"
LETSENCRYPT_TAG="v2.1.0"
if [ ! -z "${HOSTNAME_EXT}" ] ; then
# We need to add some extra control vars to manage a single certificate vs. multiple
USE_SINGLE_HOSTNAME="yes"
+ # Make sure that the value configured as IP_INT is a real IP on the system.
+ # If we don't error out early here when there is a mismatch, the formula will
+ # fail with hard to interpret nginx errors later on.
+ # Test the command's exit status directly instead of the `cmd; [[ $? -ne 0 ]]`
+ # anti-pattern (ShellCheck SC2181), and send the diagnostic to stderr.
+ if ! ip addr list | grep -q " ${IP_INT}/"; then
+   echo "Unable to find the IP_INT address '${IP_INT}' on the system, please correct the value in local.params. Exiting..." >&2
+   exit 1
+ fi
else
USE_SINGLE_HOSTNAME="no"
# We set this variable, anyway, so sed lines do not fail and we don't need to add more
mkdir -p ${T_DIR}
# Replace cluster and domain name in the test files
for f in $(ls "${SOURCE_TESTS_DIR}"/*); do
- sed "s#__CLUSTER__#${CLUSTER}#g;
+ FILTERS="s#__CLUSTER__#${CLUSTER}#g;
s#__CONTROLLER_EXT_SSL_PORT__#${CONTROLLER_EXT_SSL_PORT}#g;
s#__DOMAIN__#${DOMAIN}#g;
s#__IP_INT__#${IP_INT}#g;
s#__INITIAL_USER_PASSWORD__#${INITIAL_USER_PASSWORD}#g
s#__INITIAL_USER__#${INITIAL_USER}#g;
s#__DATABASE_PASSWORD__#${DATABASE_PASSWORD}#g;
- s#__SYSTEM_ROOT_TOKEN__#${SYSTEM_ROOT_TOKEN}#g" \
- "${f}" > ${T_DIR}/$(basename "${f}")
+ s#__SYSTEM_ROOT_TOKEN__#${SYSTEM_ROOT_TOKEN}#g"
+ if [ "$USE_SINGLE_HOSTNAME" = "yes" ]; then
+ FILTERS="s#__CLUSTER__.__DOMAIN__#${HOSTNAME_EXT}#g;
+ $FILTERS"
+ fi
+ sed "$FILTERS" \
+ "${f}" > ${T_DIR}/$(basename "${f}")
done
chmod 755 ${T_DIR}/run-test.sh
grep -q "letsencrypt" ${P_DIR}/top.sls || echo " - letsencrypt" >> ${P_DIR}/top.sls
# As the pillar differ whether we use LE or custom certs, we need to do a final edition on them
- for c in controller websocket workbench workbench2 webshell keepweb keepproxy; do
- if [ "${USE_SINGLE_HOSTNAME}" = "yes" ]; then
- # Are we in a single-host-single-hostname env?
- CERT_NAME=${HOSTNAME_EXT}
- else
- # We are in a single-host-multiple-hostnames env
- CERT_NAME=${c}.${CLUSTER}.${DOMAIN}
- fi
-
- sed -i "s/__CERT_REQUIRES__/cmd: create-initial-cert-${CERT_NAME}*/g;
- s#__CERT_PEM__#/etc/letsencrypt/live/${CERT_NAME}/fullchain.pem#g;
- s#__CERT_KEY__#/etc/letsencrypt/live/${CERT_NAME}/privkey.pem#g" \
+ for c in controller websocket workbench workbench2 webshell download collections keepproxy; do
+ sed -i "s/__CERT_REQUIRES__/cmd: create-initial-cert-${c}.${CLUSTER}.${DOMAIN}*/g;
+ s#__CERT_PEM__#/etc/letsencrypt/live/${c}.${CLUSTER}.${DOMAIN}/fullchain.pem#g;
+ s#__CERT_KEY__#/etc/letsencrypt/live/${c}.${CLUSTER}.${DOMAIN}/privkey.pem#g" \
${P_DIR}/nginx_${c}_configuration.sls
done
else
CERT_NAME=${c}
fi
- if [[ "${SSL_MODE}" = "bring-your-own" || "${SSL_MODE}" == "self-signed" ]]; then
+ if [[ "$SSL_MODE" == "bring-your-own" ]]; then
copy_custom_cert ${CUSTOM_CERTS_DIR} ${CERT_NAME}
fi
# Pillars
grep -q "docker" ${P_DIR}/top.sls || echo " - docker" >> ${P_DIR}/top.sls
;;
- "dispatcher")
- # States
- grep -q "arvados.${R}" ${S_DIR}/top.sls || echo " - arvados.${R}" >> ${S_DIR}/top.sls
- # Pillars
- # ATM, no specific pillar needed
- ;;
- "keepstore")
+ "dispatcher" | "keepbalance" | "keepstore")
# States
grep -q "arvados.${R}" ${S_DIR}/top.sls || echo " - arvados.${R}" >> ${S_DIR}/top.sls
# Pillars
exit 0
fi
-# FIXME! #16992 Temporary fix for psql call in arvados-api-server
-if [ -e /root/.psqlrc ]; then
- if ! ( grep 'pset pager off' /root/.psqlrc ); then
- RESTORE_PSQL="yes"
- cp /root/.psqlrc /root/.psqlrc.provision.backup
- fi
-else
- DELETE_PSQL="yes"
-fi
-
-echo '\pset pager off' >> /root/.psqlrc
-# END FIXME! #16992 Temporary fix for psql call in arvados-api-server
-
# Now run the install
salt-call --local state.apply -l ${LOG_LEVEL}
-# FIXME! #16992 Temporary fix for psql call in arvados-api-server
-if [ "x${DELETE_PSQL}" = "xyes" ]; then
- echo "Removing .psql file"
- rm /root/.psqlrc
-fi
-
-if [ "x${RESTORE_PSQL}" = "xyes" ]; then
- echo "Restoring .psql file"
- mv -v /root/.psqlrc.provision.backup /root/.psqlrc
+# Finally, make sure that /etc/hosts is not overwritten on reboot
+if [ -d /etc/cloud/cloud.cfg.d ]; then
+ # TODO: will this work on CentOS?
+ sed -i 's/^manage_etc_hosts: true/#manage_etc_hosts: true/g' /etc/cloud/cloud.cfg.d/*
fi
-# END FIXME! #16992 Temporary fix for psql call in arvados-api-server
# Leave a copy of the Arvados CA so the user can copy it where it's required
if [ "$DEV_MODE" = "yes" ]; then