From: Lucas Di Pentima
Date: Tue, 28 Mar 2023 17:19:57 +0000 (-0300)
Subject: 20270: Removes keep1 & keepproxy nodes. Uses SSH jumphost to deploy nodes.
X-Git-Tag: 2.6.0~8^2
X-Git-Url: https://git.arvados.org/arvados.git/commitdiff_plain/7aeaedbd8009c596bfc159432bb7b1f09c19ed72

20270: Removes keep1 & keepproxy nodes. Uses SSH jumphost to deploy nodes.

Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima
---

diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/arvados.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/arvados.sls
index 25f68ca047..b33282f180 100644
--- a/tools/salt-install/config_examples/multi_host/aws/pillars/arvados.sls
+++ b/tools/salt-install/config_examples/multi_host/aws/pillars/arvados.sls
@@ -169,7 +169,6 @@ arvados:
       Keepstore:
         InternalURLs:
           'http://__KEEPSTORE0_INT_IP__:25107': {}
-          'http://__KEEPSTORE1_INT_IP__:25107': {}
       RailsAPI:
         InternalURLs:
           'http://localhost:8004': {}
diff --git a/tools/salt-install/config_examples/multi_host/aws/states/host_entries.sls b/tools/salt-install/config_examples/multi_host/aws/states/host_entries.sls
index 6e0deb49c6..68aeab3abb 100644
--- a/tools/salt-install/config_examples/multi_host/aws/states/host_entries.sls
+++ b/tools/salt-install/config_examples/multi_host/aws/states/host_entries.sls
@@ -69,9 +69,3 @@ extra_extra_hosts_entries_etc_hosts_keep0_host_present:
     - ip: __KEEPSTORE0_INT_IP__
     - names:
       - keep0.{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}
-
-extra_extra_hosts_entries_etc_hosts_keep1_host_present:
-  host.present:
-    - ip: __KEEPSTORE1_INT_IP__
-    - names:
-      - keep1.{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}
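
With the keep1 state gone, host_entries.sls leaves a single keepstore line to be
written to /etc/hosts once provision.sh fills in the __KEEPSTORE0_INT_IP__
placeholder. A quick sanity check on a re-provisioned node, sketched with
hypothetical values (xarv1/example.com and 10.1.2.13 stand in for whatever
local.params actually defines):

    # Only the keep0 entry should remain in /etc/hosts:
    grep keep /etc/hosts
    # expected, with the hypothetical values above:
    # 10.1.2.13    keep0.xarv1.example.com
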
diff --git a/tools/salt-install/installer.sh b/tools/salt-install/installer.sh
index e06c0237a1..0f1d16ddee 100755
--- a/tools/salt-install/installer.sh
+++ b/tools/salt-install/installer.sh
@@ -43,6 +43,10 @@ declare DEPLOY_USER
 # This will be populated by loadconfig()
 declare GITTARGET
 
+# The public host used as an SSH jump host
+# This will be populated by loadconfig()
+declare USE_SSH_JUMPHOST
+
 checktools() {
     local MISSING=''
     for a in git ip ; do
@@ -64,31 +68,33 @@ sync() {
     # each node, pushing our branch, and updating the checkout.
 
     if [[ "$NODE" != localhost ]] ; then
-	if ! ssh $DEPLOY_USER@$NODE test -d ${GITTARGET}.git ; then
-
-	    # Initialize the git repository (1st time case). We're
-	    # actually going to make two repositories here because git
-	    # will complain if you try to push to a repository with a
-	    # checkout. So we're going to create a "bare" repository
-	    # and then clone a regular repository (with a checkout)
-	    # from that.
-
-	    ssh $DEPLOY_USER@$NODE git init --bare --shared=0600 ${GITTARGET}.git
-	    if ! git remote add $NODE $DEPLOY_USER@$NODE:${GITTARGET}.git ; then
-		git remote set-url $NODE $DEPLOY_USER@$NODE:${GITTARGET}.git
-	    fi
-	    git push $NODE $BRANCH
-	    ssh $DEPLOY_USER@$NODE "umask 0077 && git clone ${GITTARGET}.git ${GITTARGET}"
-	fi
+        SSH=`ssh_cmd "$NODE"`
+        GIT="eval `git_cmd $NODE`"
+        if ! $SSH $DEPLOY_USER@$NODE test -d ${GITTARGET}.git ; then
+
+            # Initialize the git repository (1st time case). We're
+            # actually going to make two repositories here because git
+            # will complain if you try to push to a repository with a
+            # checkout. So we're going to create a "bare" repository
+            # and then clone a regular repository (with a checkout)
+            # from that.
+
+            $SSH $DEPLOY_USER@$NODE git init --bare --shared=0600 ${GITTARGET}.git
+            if ! $GIT remote add $NODE $DEPLOY_USER@$NODE:${GITTARGET}.git ; then
+                $GIT remote set-url $NODE $DEPLOY_USER@$NODE:${GITTARGET}.git
+            fi
+            $GIT push $NODE $BRANCH
+            $SSH $DEPLOY_USER@$NODE "umask 0077 && git clone ${GITTARGET}.git ${GITTARGET}"
+        fi
 
-	# The update case.
-	#
-	# Push to the bare repository on the remote node, then in the
-	# remote node repository with the checkout, pull the branch
-	# from the bare repository.
+        # The update case.
+        #
+        # Push to the bare repository on the remote node, then in the
+        # remote node repository with the checkout, pull the branch
+        # from the bare repository.
 
-	git push $NODE $BRANCH
-	ssh $DEPLOY_USER@$NODE "git -C ${GITTARGET} checkout ${BRANCH} && git -C ${GITTARGET} pull"
+        $GIT push $NODE $BRANCH
+        $SSH $DEPLOY_USER@$NODE "git -C ${GITTARGET} checkout ${BRANCH} && git -C ${GITTARGET} pull"
     fi
 }
@@ -100,32 +106,47 @@ deploynode() {
     # the appropriate roles.
 
     if [[ -z "$ROLES" ]] ; then
-	echo "No roles specified for $NODE, will deploy all roles"
+        echo "No roles specified for $NODE, will deploy all roles"
     else
-	ROLES="--roles ${ROLES}"
+        ROLES="--roles ${ROLES}"
     fi
 
     logfile=deploy-${NODE}-$(date -Iseconds).log
+    SSH=`ssh_cmd "$NODE"`
 
     if [[ "$NODE" = localhost ]] ; then
 	SUDO=''
-	if [[ $(whoami) != 'root' ]] ; then
-	    SUDO=sudo
-	fi
-	$SUDO ./provision.sh --config ${CONFIG_FILE} ${ROLES} 2>&1 | tee $logfile
-    else
-	ssh $DEPLOY_USER@$NODE "cd ${GITTARGET} && sudo ./provision.sh --config ${CONFIG_FILE} ${ROLES}" 2>&1 | tee $logfile
+        if [[ $(whoami) != 'root' ]] ; then
+            SUDO=sudo
+        fi
+        $SUDO ./provision.sh --config ${CONFIG_FILE} ${ROLES} 2>&1 | tee $logfile
+    else
+        $SSH $DEPLOY_USER@$NODE "cd ${GITTARGET} && sudo ./provision.sh --config ${CONFIG_FILE} ${ROLES}" 2>&1 | tee $logfile
     fi
 }
 
 loadconfig() {
     if [[ ! -s $CONFIG_FILE ]] ; then
-	echo "Must be run from initialized setup dir, maybe you need to 'initialize' first?"
+        echo "Must be run from initialized setup dir, maybe you need to 'initialize' first?"
     fi
     source ${CONFIG_FILE}
     GITTARGET=arvados-deploy-config-${CLUSTER}
 }
 
+ssh_cmd() {
+    local NODE=$1
+    if [ -z "${USE_SSH_JUMPHOST}" -o "${NODE}" == "${USE_SSH_JUMPHOST}" -o "${NODE}" == "localhost" ]; then
+        echo "ssh"
+    else
+        echo "ssh -J ${DEPLOY_USER}@${USE_SSH_JUMPHOST}"
+    fi
+}
+
+git_cmd() {
+    local NODE=$1
+    echo "GIT_SSH_COMMAND=\"`ssh_cmd ${NODE}`\" git"
+}
+
 set +u
 subcmd="$1"
 set -u
@@ -208,9 +229,9 @@ case "$subcmd" in
 
     terraform)
         logfile=terraform-$(date -Iseconds).log
-        (cd terraform/vpc && terraform apply) 2>&1 | tee -a $logfile
-        (cd terraform/data-storage && terraform apply) 2>&1 | tee -a $logfile
-        (cd terraform/services && terraform apply) 2>&1 | grep -v letsencrypt_iam_secret_access_key | tee -a $logfile
+        (cd terraform/vpc && terraform apply -auto-approve) 2>&1 | tee -a $logfile
+        (cd terraform/data-storage && terraform apply -auto-approve) 2>&1 | tee -a $logfile
+        (cd terraform/services && terraform apply -auto-approve) 2>&1 | grep -v letsencrypt_iam_secret_access_key | tee -a $logfile
         (cd terraform/services && echo -n 'letsencrypt_iam_secret_access_key = ' && terraform output letsencrypt_iam_secret_access_key) 2>&1 | tee -a $logfile
         ;;
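
For context on the two new helpers above: -J is OpenSSH's ProxyJump option, and
GIT_SSH_COMMAND is git's standard environment hook for overriding its ssh
transport, so together ssh_cmd() and git_cmd() route every remote operation
through the jump host whenever USE_SSH_JUMPHOST is set. A sketch of what sync()
effectively executes for one node, with hypothetical user, host, and branch
names:

    # With DEPLOY_USER=admin, USE_SSH_JUMPHOST=controller.xarv1.example.com,
    # NODE=keep0.xarv1.example.com and BRANCH=main (all hypothetical):
    #   ssh_cmd prints:  ssh -J admin@controller.xarv1.example.com
    #   git_cmd prints:  GIT_SSH_COMMAND="ssh -J admin@controller.xarv1.example.com" git
    # so "$GIT push $NODE $BRANCH" expands to roughly:
    GIT_SSH_COMMAND="ssh -J admin@controller.xarv1.example.com" \
        git push keep0.xarv1.example.com main
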
diff --git a/tools/salt-install/local.params.example.multiple_hosts b/tools/salt-install/local.params.example.multiple_hosts
index 0064a78c5e..01a321c4a0 100644
--- a/tools/salt-install/local.params.example.multiple_hosts
+++ b/tools/salt-install/local.params.example.multiple_hosts
@@ -21,10 +21,8 @@ DEPLOY_USER=root
 # it for the specified roles.
 NODES=(
   [controller.${CLUSTER}.${DOMAIN}]=database,api,controller,websocket,dispatcher,keepbalance
+  [workbench.${CLUSTER}.${DOMAIN}]=workbench,workbench2,webshell,keepproxy,keepweb
   [keep0.${CLUSTER}.${DOMAIN}]=keepstore
-  [keep1.${CLUSTER}.${DOMAIN}]=keepstore
-  [keep.${CLUSTER}.${DOMAIN}]=keepproxy,keepweb
-  [workbench.${CLUSTER}.${DOMAIN}]=workbench,workbench2,webshell
   [shell.${CLUSTER}.${DOMAIN}]=shell
 )
 
@@ -48,16 +46,15 @@ CLUSTER_INT_CIDR=10.1.0.0/16
 # https://doc.arvados.org/main/install/salt-multi-host.html
 CONTROLLER_INT_IP=10.1.1.11
 WEBSOCKET_INT_IP=10.1.1.11
-KEEP_INT_IP=10.1.1.12
+KEEP_INT_IP=10.1.1.15
 # Both for collections and downloads
-KEEPWEB_INT_IP=10.1.1.12
-KEEPSTORE0_INT_IP=10.1.1.13
-KEEPSTORE1_INT_IP=10.1.1.14
+KEEPWEB_INT_IP=10.1.1.15
+KEEPSTORE0_INT_IP=10.1.2.13
 WORKBENCH1_INT_IP=10.1.1.15
 WORKBENCH2_INT_IP=10.1.1.15
 WEBSHELL_INT_IP=10.1.1.15
 DATABASE_INT_IP=10.1.1.11
-SHELL_INT_IP=10.1.1.17
+SHELL_INT_IP=10.1.2.17
 
 INITIAL_USER="admin"
 
@@ -66,6 +63,12 @@ INITIAL_USER="admin"
 INITIAL_USER_EMAIL="admin@cluster_fixme_or_this_wont_work.domain_fixme_or_this_wont_work"
 INITIAL_USER_PASSWORD="fixmepassword"
 
+# Use a public node as a jump host for SSH sessions. This allows running the
+# installer from the outside of the cluster's local network and still reach
+# the internal servers for configuration deployment.
+# Comment out to disable.
+USE_SSH_JUMPHOST="controller.${CLUSTER}.${DOMAIN}"
+
 # YOU SHOULD CHANGE THESE TO SOME RANDOM STRINGS
 BLOB_SIGNING_KEY=fixmeblobsigningkeymushaveatleast32characters
 MANAGEMENT_TOKEN=fixmemanagementtokenmushaveatleast32characters
diff --git a/tools/salt-install/provision.sh b/tools/salt-install/provision.sh
index 86335ff8ec..05a41ded60 100755
--- a/tools/salt-install/provision.sh
+++ b/tools/salt-install/provision.sh
@@ -428,7 +428,6 @@ for f in $(ls "${SOURCE_PILLARS_DIR}"/*); do
       s#__WEBSOCKET_INT_IP__#${WEBSOCKET_INT_IP}#g;
       s#__KEEP_INT_IP__#${KEEP_INT_IP}#g;
       s#__KEEPSTORE0_INT_IP__#${KEEPSTORE0_INT_IP}#g;
-      s#__KEEPSTORE1_INT_IP__#${KEEPSTORE1_INT_IP}#g;
       s#__KEEPWEB_INT_IP__#${KEEPWEB_INT_IP}#g;
       s#__WEBSHELL_INT_IP__#${WEBSHELL_INT_IP}#g;
       s#__SHELL_INT_IP__#${SHELL_INT_IP}#g;
@@ -498,7 +497,6 @@ if [ -d "${SOURCE_STATES_DIR}" ]; then
       s#__WEBSOCKET_INT_IP__#${WEBSOCKET_INT_IP}#g;
       s#__KEEP_INT_IP__#${KEEP_INT_IP}#g;
       s#__KEEPSTORE0_INT_IP__#${KEEPSTORE0_INT_IP}#g;
-      s#__KEEPSTORE1_INT_IP__#${KEEPSTORE1_INT_IP}#g;
       s#__KEEPWEB_INT_IP__#${KEEPWEB_INT_IP}#g;
       s#__WEBSHELL_INT_IP__#${WEBSHELL_INT_IP}#g;
       s#__WORKBENCH1_INT_IP__#${WORKBENCH1_INT_IP}#g;
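
Both provision.sh hunks drop __KEEPSTORE1_INT_IP__ from the sed programs that
render the pillar and state templates, leaving keep0 as the only keepstore
placeholder. A minimal sketch of that substitution step, run against a
throwaway file instead of the installer's real template tree (the IP is the
example value from local.params above):

    # Render a template line the way provision.sh does:
    KEEPSTORE0_INT_IP=10.1.2.13
    echo "'http://__KEEPSTORE0_INT_IP__:25107': {}" > /tmp/template.sls
    sed "s#__KEEPSTORE0_INT_IP__#${KEEPSTORE0_INT_IP}#g" /tmp/template.sls
    # prints: 'http://10.1.2.13:25107': {}
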