X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/87c0aff0cdbaf9b0779bb253fa707dfba1bfebb9..5720f268fb6d6995042dd689ae760770fa3cf54e:/tools/salt-install/local.params.example.multiple_hosts

diff --git a/tools/salt-install/local.params.example.multiple_hosts b/tools/salt-install/local.params.example.multiple_hosts
index c39351d74f..17e937f2e3 100644
--- a/tools/salt-install/local.params.example.multiple_hosts
+++ b/tools/salt-install/local.params.example.multiple_hosts
@@ -21,10 +21,8 @@ DEPLOY_USER=root
 # it for the specified roles.
 NODES=(
   [controller.${CLUSTER}.${DOMAIN}]=database,api,controller,websocket,dispatcher,keepbalance
+  [workbench.${CLUSTER}.${DOMAIN}]=workbench,workbench2,webshell,keepproxy,keepweb
   [keep0.${CLUSTER}.${DOMAIN}]=keepstore
-  [keep1.${CLUSTER}.${DOMAIN}]=keepstore
-  [keep.${CLUSTER}.${DOMAIN}]=keepproxy,keepweb
-  [workbench.${CLUSTER}.${DOMAIN}]=workbench,workbench2,webshell
   [shell.${CLUSTER}.${DOMAIN}]=shell
 )
 
@@ -42,22 +40,21 @@ WORKBENCH1_EXT_SSL_PORT=443
 WORKBENCH2_EXT_SSL_PORT=443
 
 # Internal IPs for the configuration
-CLUSTER_INT_CIDR=10.0.0.0/16
+CLUSTER_INT_CIDR=10.1.0.0/16
 
 # Note the IPs in this example are shared between roles, as suggested in
 # https://doc.arvados.org/main/install/salt-multi-host.html
-CONTROLLER_INT_IP=10.0.0.1
-WEBSOCKET_INT_IP=10.0.0.1
-KEEP_INT_IP=10.0.0.2
+CONTROLLER_INT_IP=10.1.1.11
+WEBSOCKET_INT_IP=10.1.1.11
+KEEP_INT_IP=10.1.1.15
 # Both for collections and downloads
-KEEPWEB_INT_IP=10.1.1.2
-KEEPSTORE0_INT_IP=10.1.1.3
-KEEPSTORE1_INT_IP=10.1.1.4
-WORKBENCH1_INT_IP=10.1.1.5
-WORKBENCH2_INT_IP=10.1.1.5
-WEBSHELL_INT_IP=10.1.1.5
-DATABASE_INT_IP=10.1.1.1
-SHELL_INT_IP=10.1.1.7
+KEEPWEB_INT_IP=10.1.1.15
+KEEPSTORE0_INT_IP=10.1.2.13
+WORKBENCH1_INT_IP=10.1.1.15
+WORKBENCH2_INT_IP=10.1.1.15
+WEBSHELL_INT_IP=10.1.1.15
+DATABASE_INT_IP=10.1.1.11
+SHELL_INT_IP=10.1.2.17
 
 INITIAL_USER="admin"
 
@@ -66,6 +63,12 @@ INITIAL_USER="admin"
 INITIAL_USER_EMAIL="admin@cluster_fixme_or_this_wont_work.domain_fixme_or_this_wont_work"
 INITIAL_USER_PASSWORD="fixmepassword"
 
+# Use a public node as a jump host for SSH sessions. This allows running the
+# installer from the outside of the cluster's local network and still reach
+# the internal servers for configuration deployment.
+# Comment out to disable.
+USE_SSH_JUMPHOST="controller.${CLUSTER}.${DOMAIN}"
+
 # YOU SHOULD CHANGE THESE TO SOME RANDOM STRINGS
 BLOB_SIGNING_KEY=fixmeblobsigningkeymushaveatleast32characters
 MANAGEMENT_TOKEN=fixmemanagementtokenmushaveatleast32characters
@@ -114,6 +117,12 @@ LE_AWS_SECRET_ACCESS_KEY="thisistherandomstringthatisyoursecretkey"
 # ${CUSTOM_CERTS_DIR}/keepproxy.crt
 # ${CUSTOM_CERTS_DIR}/keepproxy.key
 
+# Set the following to "yes" if the key files are encrypted and optionally set
+# a custom AWS secret name for each node to retrieve the password.
+SSL_KEY_ENCRYPTED="no"
+SSL_KEY_AWS_SECRET_NAME="${CLUSTER}-arvados-ssl-privkey-password"
+SSL_KEY_AWS_REGION="us-east-1"
+
 # The directory to check for the config files (pillars, states) you want to use.
 # There are a few examples under 'config_examples'.
 # CONFIG_DIR="local_config_dir"
@@ -141,3 +150,4 @@ RELEASE="production"
 # DOCKER_TAG="v2.4.2"
 # LOCALE_TAG="v0.3.4"
 # LETSENCRYPT_TAG="v2.1.0"
+# PROMETHEUS_TAG="v5.6.5"