Merge branch '21774-package-dependencies'
[arvados.git] / tools / salt-install / local.params.example.multiple_hosts
index fd1919e0cc12e9b5912c5dc180d3336a8f2b20e5..d97afaca1c4e82e2c8c62622b1acbcfc6843a77a 100644 (file)
@@ -8,8 +8,8 @@
 # The Arvados cluster ID; it needs to be 5 lowercase alphanumeric characters.
 CLUSTER="cluster_fixme_or_this_wont_work"
 
-# The domain name you want to give to your cluster's hosts
-# the end result hostnames will be $SERVICE.$CLUSTER.$DOMAIN
+# The domain name you want to give to your cluster's hosts;
+# the end result hostnames will be $SERVICE.$DOMAIN
 DOMAIN="domain_fixme_or_this_wont_work"
 
 # For multi-node installs, the SSH login for each node
@@ -19,23 +19,16 @@ DEPLOY_USER=admin
 INITIAL_USER=admin
 
 # If not specified, the initial user email will be composed as
-# INITIAL_USER@CLUSTER.DOMAIN
+# INITIAL_USER@DOMAIN
 INITIAL_USER_EMAIL="admin@cluster_fixme_or_this_wont_work.domain_fixme_or_this_wont_work"
-INITIAL_USER_PASSWORD="fixmepassword"
 
 # Use a public node as a jump host for SSH sessions. This allows running the
 # installer from outside the cluster's local network while still reaching
 # the internal servers for configuration deployment.
 # Comment out to disable.
-USE_SSH_JUMPHOST="controller.${CLUSTER}.${DOMAIN}"
+USE_SSH_JUMPHOST="controller.${DOMAIN}"
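+# For reference, a jump-host SSH session is equivalent to a ProxyJump
+# invocation along these lines (hypothetical target address shown):
+#   ssh -J "${DEPLOY_USER}@${USE_SSH_JUMPHOST}" "${DEPLOY_USER}@10.1.2.13"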
 
-# YOU SHOULD CHANGE THESE TO SOME RANDOM STRINGS
-BLOB_SIGNING_KEY=fixmeblobsigningkeymushaveatleast32characters
-MANAGEMENT_TOKEN=fixmemanagementtokenmushaveatleast32characters
-SYSTEM_ROOT_TOKEN=fixmesystemroottokenmushaveatleast32characters
-ANONYMOUS_USER_TOKEN=fixmeanonymoususertokenmushaveatleast32characters
-WORKBENCH_SECRET_KEY=fixmeworkbenchsecretkeymushaveatleast32characters
-DATABASE_PASSWORD=fixmeplease_set_this_to_some_secure_value
+AWS_REGION="fixme_or_this_wont_work"
 
 # SSL CERTIFICATES
 # Arvados requires SSL certificates to work correctly. This installer supports these options:
@@ -51,9 +44,19 @@ USE_LETSENCRYPT_ROUTE53="yes"
 # For that reason, you'll need to provide AWS credentials with permissions to manage
 # resource records (RRs) in the Route53 zone for the cluster.
 # WARNING: If AWS credentials files already exist on the hosts, they won't be replaced.
-LE_AWS_REGION="us-east-1"
-LE_AWS_ACCESS_KEY_ID="AKIABCDEFGHIJKLMNOPQ"
-LE_AWS_SECRET_ACCESS_KEY="thisistherandomstringthatisyoursecretkey"
+LE_AWS_REGION="${AWS_REGION}"
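+# For reference, the credentials files mentioned in the warning above follow
+# the standard AWS CLI format (placeholder values shown):
+#   ~/.aws/credentials:
+#     [default]
+#     aws_access_key_id = AKIA...
+#     aws_secret_access_key = ...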
+
+# Compute node configurations
+COMPUTE_AMI="ami_id_fixme_or_this_wont_work"
+COMPUTE_SG="security_group_fixme_or_this_wont_work"
+COMPUTE_SUBNET="subnet_fixme_or_this_wont_work"
+COMPUTE_AWS_REGION="${AWS_REGION}"
+COMPUTE_USER="${DEPLOY_USER}"
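+# Filled-in compute values might look like this (placeholder IDs, not real
+# AWS resources):
+#   COMPUTE_AMI="ami-0abcdef1234567890"
+#   COMPUTE_SG="sg-0abcdef1234567890"
+#   COMPUTE_SUBNET="subnet-0abcdef1234567890"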
+
+# Keep S3 backend settings
+KEEP_AWS_REGION="${AWS_REGION}"
+KEEP_AWS_S3_BUCKET="${CLUSTER}-nyw5e-000000000000000-volume"
+KEEP_AWS_IAM_ROLE="${CLUSTER}-keepstore-00-iam-role"
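+# To sanity-check these resources before deploying, something like the
+# following should work (assumes a configured AWS CLI):
+#   aws s3api head-bucket --bucket "${KEEP_AWS_S3_BUCKET}" --region "${KEEP_AWS_REGION}"
+#   aws iam get-role --role-name "${KEEP_AWS_IAM_ROLE}"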
 
 # If you are going to provide your own certificates for Arvados, the provision script can
 # help you deploy them. In order to do that, you need to set `SSL_MODE=bring-your-own` above,
@@ -83,23 +86,27 @@ LE_AWS_SECRET_ACCESS_KEY="thisistherandomstringthatisyoursecretkey"
 # a custom AWS secret name for each node to retrieve the password.
 SSL_KEY_ENCRYPTED="no"
 SSL_KEY_AWS_SECRET_NAME="${CLUSTER}-arvados-ssl-privkey-password"
-SSL_KEY_AWS_REGION="us-east-1"
+SSL_KEY_AWS_REGION="${AWS_REGION}"
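+# If SSL_KEY_ENCRYPTED is "yes", each node retrieves the key password from an
+# AWS secret; a matching Secrets Manager entry could be created with something
+# like the following (hypothetical password shown):
+#   aws secretsmanager create-secret --name "${SSL_KEY_AWS_SECRET_NAME}" \
+#     --region "${SSL_KEY_AWS_REGION}" --secret-string "your-privkey-password"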
 
 # Customize Prometheus & Grafana web UI access credentials
 MONITORING_USERNAME=${INITIAL_USER}
-MONITORING_PASSWORD=${INITIAL_USER_PASSWORD}
 MONITORING_EMAIL=${INITIAL_USER_EMAIL}
+
 # Sets the directory for Grafana dashboards
 # GRAFANA_DASHBOARDS_DIR="${SCRIPT_DIR}/local_config_dir/dashboards"
 
+# Sets how long Prometheus keeps data in its time-series database.
+# The default is 15 days.
+# PROMETHEUS_DATA_RETENTION_TIME="180d"
+
 # The mapping of nodes to roles
 # installer.sh will log in to each of these nodes and then provision
 # it for the specified roles.
 NODES=(
-  [controller.${CLUSTER}.${DOMAIN}]=database,api,controller,websocket,dispatcher,keepbalance
-  [workbench.${CLUSTER}.${DOMAIN}]=monitoring,workbench,workbench2,webshell,keepproxy,keepweb
-  [keep0.${CLUSTER}.${DOMAIN}]=keepstore
-  [shell.${CLUSTER}.${DOMAIN}]=shell
+  [controller.${DOMAIN}]=database,controller
+  [workbench.${DOMAIN}]=monitoring,workbench,workbench2,webshell,keepproxy,keepweb,websocket,dispatcher,keepbalance
+  [keep0.${DOMAIN}]=keepstore
+  [shell.${DOMAIN}]=shell
 )
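+# For illustration only: NODES is a bash associative array, so installer.sh
+# can iterate it roughly like this (simplified sketch, not the actual code):
+#   for host in "${!NODES[@]}"; do
+#     echo "Provisioning ${host} with roles: ${NODES[$host]}"
+#   done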
 
 # Host SSL port where you want to point your browser to access Arvados
@@ -121,20 +128,31 @@ CLUSTER_INT_CIDR=10.1.0.0/16
 # Note the IPs in this example are shared between roles, as suggested in
 # https://doc.arvados.org/main/install/salt-multi-host.html
 CONTROLLER_INT_IP=10.1.1.11
-WEBSOCKET_INT_IP=10.1.1.11
-KEEP_INT_IP=10.1.1.15
+DATABASE_INT_IP=${CONTROLLER_INT_IP}
+WORKBENCH1_INT_IP=10.1.1.15
+DISPATCHER_INT_IP=${WORKBENCH1_INT_IP}
+KEEPBALANCE_INT_IP=${WORKBENCH1_INT_IP}
+WEBSOCKET_INT_IP=${WORKBENCH1_INT_IP}
 # Both for collections and downloads
-KEEPWEB_INT_IP=10.1.1.15
+KEEPWEB_INT_IP=${WORKBENCH1_INT_IP}
+WORKBENCH2_INT_IP=${WORKBENCH1_INT_IP}
+WEBSHELL_INT_IP=${WORKBENCH1_INT_IP}
+KEEP_INT_IP=${WORKBENCH1_INT_IP}
 KEEPSTORE0_INT_IP=10.1.2.13
-WORKBENCH1_INT_IP=10.1.1.15
-WORKBENCH2_INT_IP=10.1.1.15
-WEBSHELL_INT_IP=10.1.1.15
-DATABASE_INT_IP=10.1.1.11
 SHELL_INT_IP=10.1.2.17
 
-# Performance tuning parameters
-#CONTROLLER_NGINX_WORKERS=
-#CONTROLLER_MAX_CONCURRENT_REQUESTS=
+DATABASE_NAME="${CLUSTER}_arvados"
+DATABASE_USER="${CLUSTER}_arvados"
+# Set these if using an external PostgreSQL service.
+#DATABASE_EXTERNAL_SERVICE_HOST_OR_IP=
+#DATABASE_POSTGRESQL_VERSION=
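+# For example, a hypothetical managed PostgreSQL service:
+#   DATABASE_EXTERNAL_SERVICE_HOST_OR_IP="db.example.com"
+#   DATABASE_POSTGRESQL_VERSION="15"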
+
+# Performance tuning parameters. If these are not set, the number of
+# workers defaults to the number of CPUs, queued requests defaults to
+# 128, and gateway tunnels defaults to 1000.
+#CONTROLLER_MAX_WORKERS=
+#CONTROLLER_MAX_QUEUED_REQUESTS=
+#CONTROLLER_MAX_GATEWAY_TUNNELS=
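+# For example, a controller host with 16 CPUs might be tuned like this
+# (illustrative values only):
+#   CONTROLLER_MAX_WORKERS=16
+#   CONTROLLER_MAX_QUEUED_REQUESTS=256
+#   CONTROLLER_MAX_GATEWAY_TUNNELS=2000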
 
 # The directory to check for the config files (pillars, states) you want to use.
 # There are a few examples under 'config_examples'.