+# For multi-node installs, the SSH login for each node
+# must be root or a user able to sudo.
+DEPLOY_USER=admin
+
+INITIAL_USER=admin
+
+# If not specified, the initial user email will be composed as
+# INITIAL_USER@CLUSTER.DOMAIN
+INITIAL_USER_EMAIL="admin@cluster_fixme_or_this_wont_work.domain_fixme_or_this_wont_work"
+INITIAL_USER_PASSWORD="fixmepassword"
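+# For example, if INITIAL_USER_EMAIL were left unset and CLUSTER=xarv1,
+# DOMAIN=example.com (illustrative values, not defaults), the composed
+# address would be:
+#   admin@xarv1.example.com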
+
+# Use a public node as a jump host for SSH sessions. This allows running the
+# installer from outside the cluster's local network while still reaching the
+# internal servers for configuration deployment.
+# Comment out to disable.
+USE_SSH_JUMPHOST="controller.${CLUSTER}.${DOMAIN}"
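+# This is roughly equivalent to reaching each internal node manually through
+# an SSH jump, e.g. (illustrative command, assuming the hostnames above):
+#   ssh -J ${DEPLOY_USER}@controller.${CLUSTER}.${DOMAIN} ${DEPLOY_USER}@keep0.${CLUSTER}.${DOMAIN}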
+
+# YOU SHOULD CHANGE THESE TO SOME RANDOM STRINGS
+BLOB_SIGNING_KEY=fixmeblobsigningkeymusthaveatleast32characters
+MANAGEMENT_TOKEN=fixmemanagementtokenmusthaveatleast32characters
+SYSTEM_ROOT_TOKEN=fixmesystemroottokenmusthaveatleast32characters
+ANONYMOUS_USER_TOKEN=fixmeanonymoususertokenmusthaveatleast32characters
+WORKBENCH_SECRET_KEY=fixmeworkbenchsecretkeymusthaveatleast32characters
+DATABASE_PASSWORD=fixmeplease_set_this_to_some_secure_value
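+# One way to generate suitable random values (a sketch; any source of 32+
+# random alphanumeric characters works just as well):
+#   tr -dc 'A-Za-z0-9' </dev/urandom | head -c 32; echo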
+
+# SSL CERTIFICATES
+# Arvados requires SSL certificates to work correctly. This installer supports these options:
+# * self-signed: let the installer create self-signed certificate(s)
+# * bring-your-own: supply your own certificate(s) in the `certs` directory
+# * lets-encrypt: automatically obtain and install SSL certificates for your hostname(s)
+#
+# See https://doc.arvados.org/install/salt-multi-host.html for more information.
+SSL_MODE="lets-encrypt"
+USE_LETSENCRYPT_ROUTE53="yes"
+# For collections, we need to obtain a wildcard certificate for
+# '*.collections.<cluster>.<domain>', which is only possible through a DNS-01
+# challenge. For that reason, you'll need to provide AWS credentials with
+# permission to manage resource records in the Route 53 zone for the cluster.
+# WARNING: if AWS credentials files already exist on the hosts, they won't be replaced.
+LE_AWS_REGION="us-east-1"
+LE_AWS_ACCESS_KEY_ID="AKIABCDEFGHIJKLMNOPQ"
+LE_AWS_SECRET_ACCESS_KEY="thisistherandomstringthatisyoursecretkey"
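+# The credentials above need, at minimum, Route 53 permissions along the lines
+# of the certbot dns-route53 plugin's documented policy (a sketch; scope the
+# record-change permission to the cluster's hosted zone where possible):
+#   route53:ListHostedZones
+#   route53:GetChange
+#   route53:ChangeResourceRecordSets   (on the cluster's hosted zone)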
+
+# If you're going to provide your own certificates for Arvados, the provision
+# script can help you deploy them. To do that, set `SSL_MODE=bring-your-own`
+# above and copy the required certificates into the directory specified on the
+# next line; the provision script will copy them from there. If you use a
+# different directory, set this to its FULL PATH. The default is
+# "${SCRIPT_DIR}/certs", where "SCRIPT_DIR" is the path of the directory the
+# "provision.sh" script was copied to on the destination host.
+# CUSTOM_CERTS_DIR="${SCRIPT_DIR}/local_config_dir/certs"
+# The script expects cert/key files with these basenames (matching the role
+# name, except for keepweb, which is split into download/collections):
+# "controller"
+# "websocket"
+# "workbench"
+# "workbench2"
+# "webshell"
+# "download" # Part of keepweb
+# "collections" # Part of keepweb
+# "keepproxy" # Keepproxy
+# "prometheus"
+# "grafana"
+# E.g., for 'keepproxy', the script will look for
+# ${CUSTOM_CERTS_DIR}/keepproxy.crt
+# ${CUSTOM_CERTS_DIR}/keepproxy.key
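+# A quick sanity check that every expected cert/key pair is present (an
+# illustrative snippet, not part of the installer; uncomment to run):
+# for f in controller websocket workbench workbench2 webshell download collections keepproxy prometheus grafana; do
+#   [ -f "${CUSTOM_CERTS_DIR}/${f}.crt" ] && [ -f "${CUSTOM_CERTS_DIR}/${f}.key" ] || echo "missing: ${f}"
+# done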
+
+# Set the following to "yes" if the key files are encrypted and optionally set
+# a custom AWS secret name for each node to retrieve the password.
+SSL_KEY_ENCRYPTED="no"
+SSL_KEY_AWS_SECRET_NAME="${CLUSTER}-arvados-ssl-privkey-password"
+SSL_KEY_AWS_REGION="us-east-1"
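+# With SSL_KEY_ENCRYPTED="yes", the key password is retrieved from AWS Secrets
+# Manager under the name above. A sketch of creating that secret with the
+# standard AWS CLI (illustrative values):
+#   aws secretsmanager create-secret --region us-east-1 \
+#     --name "${CLUSTER}-arvados-ssl-privkey-password" \
+#     --secret-string "your-key-passphrase"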
+
+# Customize Prometheus & Grafana web UI access credentials
+MONITORING_USERNAME=${INITIAL_USER}
+MONITORING_PASSWORD=${INITIAL_USER_PASSWORD}
+MONITORING_EMAIL=${INITIAL_USER_EMAIL}
+# Sets the directory for Grafana dashboards
+# GRAFANA_DASHBOARDS_DIR="${SCRIPT_DIR}/local_config_dir/dashboards"
+
+# The mapping of nodes to roles
+# installer.sh will log in to each of these nodes and then provision
+# it for the specified roles.
+declare -A NODES=(
+ [controller.${CLUSTER}.${DOMAIN}]=database,api,controller,websocket,dispatcher,keepbalance
+ [workbench.${CLUSTER}.${DOMAIN}]=monitoring,workbench,workbench2,webshell,keepproxy,keepweb
+ [keep0.${CLUSTER}.${DOMAIN}]=keepstore
+ [shell.${CLUSTER}.${DOMAIN}]=shell
+)
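+# NODES is a bash associative array (hence `declare -A` above). A quick way to
+# review the host-to-roles mapping before deploying (illustrative snippet):
+#   for host in "${!NODES[@]}"; do echo "${host} => ${NODES[$host]}"; done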
+