20889: Adds configurable data retention parameter for Prometheus.
[arvados.git] / tools / salt-install / local.params.example.multiple_hosts
index c6f196ca9aca113ff751959ba55b10aa21f16ba2..26cd16ed574b5264b62ad33e3cfe3bcbb8fda87f 100644 (file)
@@ -5,84 +5,65 @@
 
 # These are the basic parameters to configure the installation
 
-# The FIVE ALPHANUMERIC CHARACTERS name you want to give your cluster
+# The Arvados cluster ID; it must be 5 lowercase alphanumeric characters.
 CLUSTER="cluster_fixme_or_this_wont_work"
 
-# The domainname you want tou give to your cluster's hosts
+# The domain name you want to give to your cluster's hosts;
+# the resulting hostnames will be $SERVICE.$DOMAIN
 DOMAIN="domain_fixme_or_this_wont_work"
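+# For example, with DOMAIN="xarv1.example.com" (an illustrative value only),
+# the controller host would end up as controller.xarv1.example.com.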
 
-# Host SSL port where you want to point your browser to access Arvados
-# Defaults to 443 for regular runs, and to 8443 when called in Vagrant.
-# You can point it to another port if desired
-# In Vagrant, make sure it matches what you set in the Vagrantfile (8443)
-CONTROLLER_EXT_SSL_PORT=443
-KEEP_EXT_SSL_PORT=443
-# Both for collections and downloads
-KEEPWEB_EXT_SSL_PORT=443
-WEBSHELL_EXT_SSL_PORT=443
-WEBSOCKET_EXT_SSL_PORT=443
-WORKBENCH1_EXT_SSL_PORT=443
-WORKBENCH2_EXT_SSL_PORT=443
-
-# Internal IPs for the configuration
-CLUSTER_INT_CIDR=10.0.0.0/16
+# For multi-node installs, the SSH login user for each node
+# must be root or able to sudo.
+DEPLOY_USER=admin
 
-# Note the IPs in this example are shared between roles, as suggested in
-# https://doc.arvados.org/main/install/salt-multi-host.html
-CONTROLLER_INT_IP=10.0.0.1
-WEBSOCKET_INT_IP=10.0.0.1
-KEEP_INT_IP=10.0.0.2
-# Both for collections and downloads
-KEEPWEB_INT_IP=10.0.0.2
-KEEPSTORE0_INT_IP=10.0.0.3
-KEEPSTORE1_INT_IP=10.0.0.4
-WORKBENCH1_INT_IP=10.0.0.5
-WORKBENCH2_INT_IP=10.0.0.5
-WEBSHELL_INT_IP=10.0.0.5
-DATABASE_INT_IP=10.0.0.6
-SHELL_INT_IP=10.0.0.7
-
-INITIAL_USER="admin"
+INITIAL_USER=admin
 
 # If not specified, the initial user email will be composed as
-# INITIAL_USER@CLUSTER.DOMAIN
+# INITIAL_USER@DOMAIN
 INITIAL_USER_EMAIL="admin@cluster_fixme_or_this_wont_work.domain_fixme_or_this_wont_work"
-INITIAL_USER_PASSWORD="password"
 
-# YOU SHOULD CHANGE THESE TO SOME RANDOM STRINGS
-BLOB_SIGNING_KEY=blobsigningkeymushaveatleast32characters
-MANAGEMENT_TOKEN=managementtokenmushaveatleast32characters
-SYSTEM_ROOT_TOKEN=systemroottokenmushaveatleast32characters
-ANONYMOUS_USER_TOKEN=anonymoususertokenmushaveatleast32characters
-WORKBENCH_SECRET_KEY=workbenchsecretkeymushaveatleast32characters
-DATABASE_PASSWORD=please_set_this_to_some_secure_value
+# Use a public node as a jump host for SSH sessions. This allows running the
+# installer from outside the cluster's local network while still reaching
+# the internal servers for configuration deployment.
+# Comment out to disable.
+USE_SSH_JUMPHOST="controller.${DOMAIN}"
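+# With the jump host enabled, the installer's SSH sessions behave roughly like
+# the following (illustrative sketch only, assuming OpenSSH's -J/ProxyJump):
+#   ssh -J ${DEPLOY_USER}@controller.${DOMAIN} ${DEPLOY_USER}@<internal node IP>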
+
+AWS_REGION="fixme_or_this_wont_work"
 
 # SSL CERTIFICATES
-# Arvados REQUIRES valid SSL to work correctly. Otherwise, some components will fail
-# to communicate and can silently drop traffic. You can try to use the Letsencrypt
-# salt formula (https://github.com/saltstack-formulas/letsencrypt-formula) to try to
-# automatically obtain and install SSL certificates for your instances or set this
-# variable to "no", provide and upload your own certificates to the instances and
-# modify the 'nginx_*' salt pillars accordingly (see CUSTOM_CERTS_DIR below)
-USE_LETSENCRYPT="yes"
-USE_LETSENCRYPT_IAM_USER="yes"
+# Arvados requires SSL certificates to work correctly. This installer supports these options:
+# * self-signed: let the installer create self-signed certificate(s)
+# * bring-your-own: supply your own certificate(s) in the `certs` directory
+# * lets-encrypt: automatically obtain and install SSL certificates for your hostname(s)
+#
+# See https://doc.arvados.org/install/salt-multi-host.html for more information.
+SSL_MODE="lets-encrypt"
+USE_LETSENCRYPT_ROUTE53="yes"
 # For collections, we need to obtain a wildcard certificate for
 # '*.collections.<cluster>.<domain>'. This is only possible through a DNS-01 challenge.
 # For that reason, you'll need to provide AWS credentials with permissions to manage
 # RRs in the route53 zone for the cluster.
 # WARNING!: If AWS credentials files already exist on the hosts, they won't be replaced.
-LE_AWS_REGION="us-east-1"
-LE_AWS_ACCESS_KEY_ID="AKIABCDEFGHIJKLMNOPQ"
-LE_AWS_SECRET_ACCESS_KEY="thisistherandomstringthatisyoursecretkey"
+LE_AWS_REGION="${AWS_REGION}"
+
+# Compute node configurations
+COMPUTE_AMI="ami_id_fixme_or_this_wont_work"
+COMPUTE_SG="security_group_fixme_or_this_wont_work"
+COMPUTE_SUBNET="subnet_fixme_or_this_wont_work"
+COMPUTE_AWS_REGION="${AWS_REGION}"
+COMPUTE_USER="${DEPLOY_USER}"
+
+# Keep S3 backend region
+KEEP_AWS_REGION="${AWS_REGION}"
 
 # If you're going to provide your own certificates for Arvados, the provision script can
-# help you deploy them. In order to do that, you need to set `USE_LETSENCRYPT=no` above,
+# help you deploy them. In order to do that, you need to set `SSL_MODE=bring-your-own` above,
 # and copy the required certificates under the directory specified in the next line.
 # The certs will be copied from this directory by the provision script.
 # Please set it to the FULL PATH to the certs dir if you're going to use a different dir
 # Default is "${SCRIPT_DIR}/certs", where the variable "SCRIPT_DIR" has the path to the
 # directory where the "provision.sh" script was copied on the destination host.
-# CUSTOM_CERTS_DIR="${SCRIPT_DIR}/certs"
+# CUSTOM_CERTS_DIR="${SCRIPT_DIR}/local_config_dir/certs"
 # The script expects cert/key files with these basenames (matching the role except for
 # keepweb, which is split into "download" and "collections"):
 #  "controller"
@@ -93,13 +74,85 @@ LE_AWS_SECRET_ACCESS_KEY="thisistherandomstringthatisyoursecretkey"
 #  "download"         # Part of keepweb
 #  "collections"      # Part of keepweb
 #  "keepproxy"        # Keepproxy
+#  "prometheus"
+#  "grafana"
 # E.g., for "keepproxy", the script will look for
 # ${CUSTOM_CERTS_DIR}/keepproxy.crt
 # ${CUSTOM_CERTS_DIR}/keepproxy.key
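+# Likewise, for the "controller" role the script would expect
+# ${CUSTOM_CERTS_DIR}/controller.crt and ${CUSTOM_CERTS_DIR}/controller.key.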
 
+# Set the following to "yes" if the key files are encrypted and optionally set
+# a custom AWS secret name for each node to retrieve the password.
+SSL_KEY_ENCRYPTED="no"
+SSL_KEY_AWS_SECRET_NAME="${CLUSTER}-arvados-ssl-privkey-password"
+SSL_KEY_AWS_REGION="${AWS_REGION}"
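+# Conceptually, each node retrieves the key password with something like this
+# (illustrative only; assumes the AWS CLI and a Secrets Manager secret):
+#   aws secretsmanager get-secret-value --region "${SSL_KEY_AWS_REGION}" \
+#     --secret-id "${SSL_KEY_AWS_SECRET_NAME}" --query SecretString --output text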
+
+# Customize Prometheus & Grafana web UI access credentials
+MONITORING_USERNAME=${INITIAL_USER}
+MONITORING_EMAIL=${INITIAL_USER_EMAIL}
+
+# Sets the directory for Grafana dashboards
+# GRAFANA_DASHBOARDS_DIR="${SCRIPT_DIR}/local_config_dir/dashboards"
+
+# Sets the amount of data (expressed in time) Prometheus keeps in its
+# time-series database. The default is 15 days.
+# PROMETHEUS_DATA_RETENTION_TIME="180d"
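+# This presumably maps to Prometheus' --storage.tsdb.retention.time setting,
+# so Prometheus-style durations (e.g. "12w", "1y") should also work.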
+
+# The mapping of nodes to roles.
+# installer.sh will log in to each of these nodes and provision
+# it for the specified roles.
+NODES=(
+  [controller.${DOMAIN}]=database,controller
+  [workbench.${DOMAIN}]=monitoring,workbench,workbench2,webshell,keepproxy,keepweb,websocket,dispatcher,keepbalance
+  [keep0.${DOMAIN}]=keepstore
+  [shell.${DOMAIN}]=shell
+)
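+# For example, an extra keepstore node could be added with an entry like
+# (illustrative hostname):
+#   [keep1.${DOMAIN}]=keepstore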
+
+# Host SSL port where you want to point your browser to access Arvados
+# Defaults to 443 for regular runs, and to 8443 when called in Vagrant.
+# You can point it to another port if desired
+# In Vagrant, make sure it matches what you set in the Vagrantfile (8443)
+CONTROLLER_EXT_SSL_PORT=443
+KEEP_EXT_SSL_PORT=443
+# Both for collections and downloads
+KEEPWEB_EXT_SSL_PORT=443
+WEBSHELL_EXT_SSL_PORT=443
+WEBSOCKET_EXT_SSL_PORT=443
+WORKBENCH1_EXT_SSL_PORT=443
+WORKBENCH2_EXT_SSL_PORT=443
+
+# Internal IPs for the configuration
+CLUSTER_INT_CIDR=10.1.0.0/16
+
+# Note the IPs in this example are shared between roles, as suggested in
+# https://doc.arvados.org/main/install/salt-multi-host.html
+CONTROLLER_INT_IP=10.1.1.11
+DATABASE_INT_IP=${CONTROLLER_INT_IP}
+WORKBENCH1_INT_IP=10.1.1.15
+DISPATCHER_INT_IP=${WORKBENCH1_INT_IP}
+KEEPBALANCE_INT_IP=${WORKBENCH1_INT_IP}
+WEBSOCKET_INT_IP=${WORKBENCH1_INT_IP}
+# Both for collections and downloads
+KEEPWEB_INT_IP=${WORKBENCH1_INT_IP}
+WORKBENCH2_INT_IP=${WORKBENCH1_INT_IP}
+WEBSHELL_INT_IP=${WORKBENCH1_INT_IP}
+KEEP_INT_IP=${WORKBENCH1_INT_IP}
+KEEPSTORE0_INT_IP=10.1.2.13
+SHELL_INT_IP=10.1.2.17
+
+# In a load-balanced deployment, you can do rolling upgrades by specifying one
+# controller node name at a time, so that it gets removed from the pool and can
+# be upgraded.
+DISABLED_CONTROLLER=""
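+# For example, in a deployment with two controller nodes you might set
+# (illustrative hostname): DISABLED_CONTROLLER="controller1.${DOMAIN}"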
+
+# Performance tuning parameters. If these are not set, the number of workers
+# defaults to the number of CPUs and the maximum number of queued requests
+# defaults to 128.
+#CONTROLLER_MAX_WORKERS=
+#CONTROLLER_MAX_QUEUED_REQUESTS=
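+# Example (illustrative values only):
+#CONTROLLER_MAX_WORKERS=8
+#CONTROLLER_MAX_QUEUED_REQUESTS=256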
+
 # The directory to check for the config files (pillars, states) you want to use.
 # There are a few examples under 'config_examples'.
 # CONFIG_DIR="local_config_dir"
+
 # Extra states to apply. If you use your own subdir, change this value accordingly
 # EXTRA_STATES_DIR="${CONFIG_DIR}/states"
 
@@ -119,8 +172,10 @@ RELEASE="production"
 
 # Formulas versions
 # ARVADOS_TAG="2.2.0"
-# POSTGRES_TAG="v0.41.6"
-# NGINX_TAG="temp-fix-missing-statements-in-pillar"
-# DOCKER_TAG="v2.0.7"
+# POSTGRES_TAG="v0.44.0"
+# NGINX_TAG="v2.8.1"
+# DOCKER_TAG="v2.4.2"
 # LOCALE_TAG="v0.3.4"
 # LETSENCRYPT_TAG="v2.1.0"
+# PROMETHEUS_TAG="v5.6.5"
+# GRAFANA_TAG="v3.1.3"