20846: Merge branch 'main' into 20846-ruby3
[arvados.git] / tools / salt-install / local.params.example.multiple_hosts
index 12da2e3ed683b16b8d3b671a7cbeb793b8d41fab..909c3354ec15d88ce97328ed148a20e1895c581a 100644 (file)
@@ -53,8 +53,10 @@ COMPUTE_SUBNET="subnet_fixme_or_this_wont_work"
 COMPUTE_AWS_REGION="${AWS_REGION}"
 COMPUTE_USER="${DEPLOY_USER}"
 
-# Keep S3 backend region
+# Keep S3 backend settings
 KEEP_AWS_REGION="${AWS_REGION}"
+KEEP_AWS_S3_BUCKET="${CLUSTER}-nyw5e-000000000000000-volume"
+KEEP_AWS_IAM_ROLE="${CLUSTER}-keepstore-00-iam-role"
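
The bucket and IAM role values above follow the installer's default naming scheme; they can be overridden to point at resources created outside the installer. A minimal, hypothetical sketch (the names below are illustrative assumptions, not part of this file):

    # Hypothetical override for pre-existing AWS resources (illustrative names):
    # KEEP_AWS_S3_BUCKET="mycorp-keep-blocks"
    # KEEP_AWS_IAM_ROLE="mycorp-keepstore-role"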
 
 # If you are going to provide your own certificates for Arvados, the provision script can
 # help you deploy them. In order to do that, you need to set `SSL_MODE=bring-your-own` above,
@@ -89,15 +91,20 @@ SSL_KEY_AWS_REGION="${AWS_REGION}"
 # Customize Prometheus & Grafana web UI access credentials
 MONITORING_USERNAME=${INITIAL_USER}
 MONITORING_EMAIL=${INITIAL_USER_EMAIL}
+
 # Sets the directory for Grafana dashboards
 # GRAFANA_DASHBOARDS_DIR="${SCRIPT_DIR}/local_config_dir/dashboards"
 
+# Sets how long (as a duration) Prometheus keeps data in its
+# time-series database. Default is 15 days.
+# PROMETHEUS_DATA_RETENTION_TIME="180d"
+
 # The mapping of nodes to roles
 # installer.sh will log in to each of these nodes and then provision
 # it for the specified roles.
 NODES=(
-  [controller.${DOMAIN}]=database,controller,websocket,dispatcher,keepbalance
-  [workbench.${DOMAIN}]=monitoring,workbench,workbench2,webshell,keepproxy,keepweb
+  [controller.${DOMAIN}]=database,controller
+  [workbench.${DOMAIN}]=monitoring,workbench,workbench2,webshell,keepproxy,keepweb,websocket,dispatcher,keepbalance
   [keep0.${DOMAIN}]=keepstore
   [shell.${DOMAIN}]=shell
 )
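
As a hedged sketch of how the NODES mapping can be extended, a second keepstore host could be added as below; the keep1 hostname is illustrative and would likely also need its own *_INT_IP entry in the networking section further down:

    NODES=(
      [controller.${DOMAIN}]=database,controller
      [workbench.${DOMAIN}]=monitoring,workbench,workbench2,webshell,keepproxy,keepweb,websocket,dispatcher,keepbalance
      [keep0.${DOMAIN}]=keepstore
      [keep1.${DOMAIN}]=keepstore
      [shell.${DOMAIN}]=shell
    )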
@@ -121,11 +128,11 @@ CLUSTER_INT_CIDR=10.1.0.0/16
 # Note the IPs in this example are shared between roles, as suggested in
 # https://doc.arvados.org/main/install/salt-multi-host.html
 CONTROLLER_INT_IP=10.1.1.11
-DISPATCHER_INT_IP=${CONTROLLER_INT_IP}
-KEEPBALANCE_INT_IP=${CONTROLLER_INT_IP}
-WEBSOCKET_INT_IP=${CONTROLLER_INT_IP}
 DATABASE_INT_IP=${CONTROLLER_INT_IP}
 WORKBENCH1_INT_IP=10.1.1.15
+DISPATCHER_INT_IP=${WORKBENCH1_INT_IP}
+KEEPBALANCE_INT_IP=${WORKBENCH1_INT_IP}
+WEBSOCKET_INT_IP=${WORKBENCH1_INT_IP}
 # Both for collections and downloads
 KEEPWEB_INT_IP=${WORKBENCH1_INT_IP}
 WORKBENCH2_INT_IP=${WORKBENCH1_INT_IP}
@@ -134,12 +141,14 @@ KEEP_INT_IP=${WORKBENCH1_INT_IP}
 KEEPSTORE0_INT_IP=10.1.2.13
 SHELL_INT_IP=10.1.2.17
 
-# In a load balanced deployment, you can do rolling upgrades by specifying one
-# controller node name at a time, so that it gets removed from the pool and can
-# be upgraded.
-DISABLED_CONTROLLER=""
+DATABASE_NAME="${CLUSTER}_arvados"
+DATABASE_USER="${CLUSTER}_arvados"
+# Set these if using an external PostgreSQL service.
+#DATABASE_EXTERNAL_SERVICE_HOST_OR_IP=
+#DATABASE_POSTGRESQL_VERSION=
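
If the cluster will use an externally managed PostgreSQL service instead of the database role above, these two commented settings point the installer at it. A hedged example with made-up values (the hostname and version number are assumptions, not defaults):

    # Hypothetical external PostgreSQL service (illustrative values):
    #DATABASE_EXTERNAL_SERVICE_HOST_OR_IP="db.example.internal"
    #DATABASE_POSTGRESQL_VERSION="15"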
 
-# Performance tuning parameters
+# Performance tuning parameters.  If these are not set, the number of workers
+# defaults to the number of CPUs and the maximum queued requests defaults to 128.
 #CONTROLLER_MAX_WORKERS=
 #CONTROLLER_MAX_QUEUED_REQUESTS=
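
To pin these limits explicitly rather than relying on the defaults described in the comment above, the settings can be uncommented; the numbers below are purely illustrative assumptions:

    #CONTROLLER_MAX_WORKERS=8
    #CONTROLLER_MAX_QUEUED_REQUESTS=256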