# This will be populated by loadconfig()
declare -A NODES
+# A bash associative array listing each role and mapping to the nodes
+# that should be provisioned with this role.
+# This will be populated by loadconfig()
+declare -A ROLE2NODES
+
# The ssh user we'll use
# This will be populated by loadconfig()
declare DEPLOY_USER
# This will be populated by loadconfig()
declare USE_SSH_JUMPHOST
+# The temp file that will be used to disable envvar forwarding, to avoid
+# locale issues in Debian distros.
+# This will be populated by loadconfig()
+declare SSH_CONFFILE
+
checktools() {
local MISSING=''
for a in git ip ; do
deploynode() {
local NODE=$1
local ROLES=$2
+ local BRANCH=$3
# Deploy a node. This runs the provision script on the node, with
# the appropriate roles.
+ sync $NODE $BRANCH
+
if [[ -z "$ROLES" ]] ; then
echo "No roles specified for $NODE, will deploy all roles"
else
fi
$SUDO ./provision.sh --config ${CONFIG_FILE} ${ROLES} 2>&1 | tee $logfile
else
- $SSH $DEPLOY_USER@$NODE "cd ${GITTARGET} && git log -n1 HEAD && sudo ./provision.sh --config ${CONFIG_FILE} ${ROLES}" 2>&1 | tee $logfile
+ $SSH $DEPLOY_USER@$NODE "cd ${GITTARGET} && git log -n1 HEAD && DISABLED_CONTROLLER=\"$DISABLED_CONTROLLER\" sudo --preserve-env=DISABLED_CONTROLLER ./provision.sh --config ${CONFIG_FILE} ${ROLES}" 2>&1 | tee $logfile
cleanup $NODE
fi
}
loadconfig() {
- if [[ -s ${CONFIG_FILE} && -s ${CONFIG_FILE}.secrets ]]; then
+ if ! [[ -s ${CONFIG_FILE} && -s ${CONFIG_FILE}.secrets ]]; then
echo "Must be run from initialized setup dir, maybe you need to 'initialize' first?"
fi
- source ${CONFIG_FILE}.secrets
- source ${CONFIG_FILE}
+ source common.sh
GITTARGET=arvados-deploy-config-${CLUSTER}
+
+ # Set up SSH so that it doesn't forward any environment variable. This is to avoid
+ # getting "setlocale" errors on the first run, depending on the distro being used
+ # to run the installer (like Debian).
+ SSH_CONFFILE=$(mktemp)
+ echo "Include config SendEnv -*" > ${SSH_CONFFILE}
}
ssh_cmd() {
local NODE=$1
if [ -z "${USE_SSH_JUMPHOST}" -o "${NODE}" == "${USE_SSH_JUMPHOST}" -o "${NODE}" == "localhost" ]; then
- echo "ssh"
+ echo "ssh -F ${SSH_CONFFILE}"
else
- echo "ssh -J ${DEPLOY_USER}@${USE_SSH_JUMPHOST}"
+ echo "ssh -F ${SSH_CONFFILE} -J ${DEPLOY_USER}@${USE_SSH_JUMPHOST}"
fi
}
git commit -m"prepare for deploy"
fi
+ # Used for rolling updates to disable individual nodes at the
+ # load balancer.
+ export DISABLED_CONTROLLER=""
if [[ -z "$NODE" ]]; then
for NODE in "${!NODES[@]}"
do
- # First, push the git repo to each node. This also
- # confirms that we have git and can log into each
- # node.
- sync $NODE $BRANCH
+ # First, just confirm we can ssh to each node.
+ `ssh_cmd "$NODE"` $DEPLOY_USER@$NODE true
done
for NODE in "${!NODES[@]}"
do
# Do 'database' role first,
if [[ "${NODES[$NODE]}" =~ database ]] ; then
- deploynode $NODE "${NODES[$NODE]}"
+ deploynode $NODE "${NODES[$NODE]}" $BRANCH
unset NODES[$NODE]
fi
done
- for NODE in "${!NODES[@]}"
- do
- # then 'api' or 'controller' roles
- if [[ "${NODES[$NODE]}" =~ (api|controller) ]] ; then
- deploynode $NODE "${NODES[$NODE]}"
- unset NODES[$NODE]
- fi
- done
+ BALANCER=${ROLE2NODES['balancer']:-}
+
+    # Check if there are multiple controllers; they'll be comma-separated
+    # in ROLE2NODES.
+ if [[ ${ROLE2NODES['controller']} =~ , ]] ;
+ then
+        # If we have multiple controllers then there must be a
+        # load balancer. We want to do a rolling update, taking
+        # down each node at the load balancer before updating
+        # it.
+
+ for NODE in "${!NODES[@]}"
+ do
+ if [[ "${NODES[$NODE]}" =~ controller ]] ; then
+ export DISABLED_CONTROLLER=$NODE
+
+ # Update balancer that the node is disabled
+ deploynode $BALANCER "${NODES[$BALANCER]}" $BRANCH
+
+ # Now update the node itself
+ deploynode $NODE "${NODES[$NODE]}" $BRANCH
+ unset NODES[$NODE]
+ fi
+ done
+ else
+ # Only one controller
+ NODE=${ROLE2NODES['controller']}
+ deploynode $NODE "${NODES[$NODE]}" $BRANCH
+ unset NODES[$NODE]
+ fi
+
+ if [[ -n "$BALANCER" ]] ; then
+ # Deploy balancer. In the rolling update case, this
+ # will re-enable all the controllers at the balancer.
+ export DISABLED_CONTROLLER=""
+ deploynode $BALANCER "${NODES[$BALANCER]}" $BRANCH
+ unset NODES[$BALANCER]
+ fi
for NODE in "${!NODES[@]}"
do
# Everything else (we removed the nodes that we
# already deployed from the list)
- deploynode $NODE "${NODES[$NODE]}"
+ deploynode $NODE "${NODES[$NODE]}" $BRANCH
done
else
# Just deploy the node that was supplied on the command line.
- sync $NODE $BRANCH
- deploynode $NODE "${NODES[$NODE]}"
+ deploynode $NODE "${NODES[$NODE]}" $BRANCH
fi
set +x