16379: Moves prometheus to its own subdomain instead of 'mon.prefix.domain.tld'
[arvados.git] tools/salt-install/local.params.example.multiple_hosts
index 5e7ae7ca103998ef78228aa569d17923c10fc335..17e937f2e317a7cf44e291065f634cda689732ff 100644
--- a/tools/salt-install/local.params.example.multiple_hosts
+++ b/tools/salt-install/local.params.example.multiple_hosts
@@ -21,10 +21,8 @@ DEPLOY_USER=root
 # it for the specified roles.
 NODES=(
   [controller.${CLUSTER}.${DOMAIN}]=database,api,controller,websocket,dispatcher,keepbalance
+  [workbench.${CLUSTER}.${DOMAIN}]=workbench,workbench2,webshell,keepproxy,keepweb
   [keep0.${CLUSTER}.${DOMAIN}]=keepstore
-  [keep1.${CLUSTER}.${DOMAIN}]=keepstore
-  [keep.${CLUSTER}.${DOMAIN}]=keepproxy,keepweb
-  [workbench.${CLUSTER}.${DOMAIN}]=workbench,workbench2,webshell
   [shell.${CLUSTER}.${DOMAIN}]=shell
 )
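
Each key in NODES is a host the installer reaches over SSH as DEPLOY_USER, and each value is the comma-separated role list to provision there. A minimal sketch of that iteration in bash, with a hypothetical provisioning invocation (the real logic lives in the installer script and its flags may differ):

    declare -A NODES             # make sure the map is associative before sourcing
    source local.params
    for host in "${!NODES[@]}"; do
      roles="${NODES[$host]}"
      echo "deploying roles '${roles}' to ${host}"
      # hypothetical invocation; the actual script and options may differ:
      # ssh "${DEPLOY_USER}@${host}" ./provision.sh --roles "${roles}"
    done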
 
@@ -42,29 +40,34 @@ WORKBENCH1_EXT_SSL_PORT=443
 WORKBENCH2_EXT_SSL_PORT=443
 
 # Internal IPs for the configuration
-CLUSTER_INT_CIDR=10.0.0.0/16
+CLUSTER_INT_CIDR=10.1.0.0/16
 
 # Note the IPs in this example are shared between roles, as suggested in
 # https://doc.arvados.org/main/install/salt-multi-host.html
-CONTROLLER_INT_IP=10.0.0.1
-WEBSOCKET_INT_IP=10.0.0.1
-KEEP_INT_IP=10.0.0.2
+CONTROLLER_INT_IP=10.1.1.11
+WEBSOCKET_INT_IP=10.1.1.11
+KEEP_INT_IP=10.1.1.15
 # Both for collections and downloads
-KEEPWEB_INT_IP=10.0.0.2
-KEEPSTORE0_INT_IP=10.0.0.3
-KEEPSTORE1_INT_IP=10.0.0.4
-WORKBENCH1_INT_IP=10.0.0.5
-WORKBENCH2_INT_IP=10.0.0.5
-WEBSHELL_INT_IP=10.0.0.5
-DATABASE_INT_IP=10.0.0.6
-SHELL_INT_IP=10.0.0.7
+KEEPWEB_INT_IP=10.1.1.15
+KEEPSTORE0_INT_IP=10.1.2.13
+WORKBENCH1_INT_IP=10.1.1.15
+WORKBENCH2_INT_IP=10.1.1.15
+WEBSHELL_INT_IP=10.1.1.15
+DATABASE_INT_IP=10.1.1.11
+SHELL_INT_IP=10.1.2.17
 
 INITIAL_USER="admin"
 
 # If not specified, the initial user email will be composed as
 # INITIAL_USER@CLUSTER.DOMAIN
 INITIAL_USER_EMAIL="admin@cluster_fixme_or_this_wont_work.domain_fixme_or_this_wont_work"
-INITIAL_USER_PASSWORD="password"
+INITIAL_USER_PASSWORD="fixmepassword"
+
+# Use a public node as a jump host for SSH sessions. This allows running the
+# installer from outside the cluster's local network while still reaching the
+# internal servers for configuration deployment.
+# Comment out to disable.
+USE_SSH_JUMPHOST="controller.${CLUSTER}.${DOMAIN}"
 
 # YOU SHOULD CHANGE THESE TO SOME RANDOM STRINGS
 BLOB_SIGNING_KEY=fixmeblobsigningkeymusthaveatleast32characters
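
Every *_INT_IP defined above has to fall inside CLUSTER_INT_CIDR, so a throwaway check can catch typos before deploying. A minimal sketch, assuming the /16 netmask used in this example:

    declare -A NODES
    source local.params
    prefix="${CLUSTER_INT_CIDR%.*.*/16}"   # "10.1" from "10.1.0.0/16"
    for v in CONTROLLER_INT_IP WEBSOCKET_INT_IP KEEP_INT_IP KEEPWEB_INT_IP \
             KEEPSTORE0_INT_IP WORKBENCH1_INT_IP WORKBENCH2_INT_IP \
             WEBSHELL_INT_IP DATABASE_INT_IP SHELL_INT_IP; do
      [[ "${!v}" == ${prefix}.* ]] || echo "WARNING: ${v}=${!v} is outside ${CLUSTER_INT_CIDR}"
    done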
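USE_SSH_JUMPHOST presumably maps onto OpenSSH's ProxyJump (-J) mechanism: sessions to the private addresses are tunneled through a host reachable from outside. As an illustration of the equivalent manual session, using DEPLOY_USER=root and the shell node's IP from this example:

    ssh -J "root@controller.${CLUSTER}.${DOMAIN}" root@10.1.2.17

    # or persistently, in ~/.ssh/config (placeholders for the real names):
    #   Host 10.1.*
    #     ProxyJump root@controller.<cluster>.<domain>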
@@ -114,6 +117,12 @@ LE_AWS_SECRET_ACCESS_KEY="thisistherandomstringthatisyoursecretkey"
 # ${CUSTOM_CERTS_DIR}/keepproxy.crt
 # ${CUSTOM_CERTS_DIR}/keepproxy.key
 
+# Set the following to "yes" if the key files are encrypted, and optionally
+# set a custom AWS secret name each node will use to retrieve the password.
+SSL_KEY_ENCRYPTED="no"
+SSL_KEY_AWS_SECRET_NAME="${CLUSTER}-arvados-ssl-privkey-password"
+SSL_KEY_AWS_REGION="us-east-1"
+
 # The directory to check for the config files (pillars, states) you want to use.
 # There are a few examples under 'config_examples'.
 # CONFIG_DIR="local_config_dir"
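
To illustrate the mechanism behind SSL_KEY_ENCRYPTED (a sketch only, not the installer's actual code; the key file name is the keepproxy example from the list above): a node can fetch the passphrase from AWS Secrets Manager and decrypt its key with openssl:

    passphrase=$(aws secretsmanager get-secret-value \
      --region "${SSL_KEY_AWS_REGION}" \
      --secret-id "${SSL_KEY_AWS_SECRET_NAME}" \
      --query SecretString --output text)
    openssl pkey -in keepproxy.key -passin "pass:${passphrase}" -out keepproxy.key.plain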
@@ -141,3 +150,4 @@ RELEASE="production"
 # DOCKER_TAG="v2.4.2"
 # LOCALE_TAG="v0.3.4"
 # LETSENCRYPT_TAG="v2.1.0"
+# PROMETHEUS_TAG="v5.6.5"