20889: Adds configurable data retention parameter for Prometheus.
[arvados.git] / tools / salt-install / local.params.example.multiple_hosts
index 12da2e3ed683b16b8d3b671a7cbeb793b8d41fab..26cd16ed574b5264b62ad33e3cfe3bcbb8fda87f 100644 (file)
@@ -89,15 +89,20 @@ SSL_KEY_AWS_REGION="${AWS_REGION}"
 # Customize Prometheus & Grafana web UI access credentials
 MONITORING_USERNAME=${INITIAL_USER}
 MONITORING_EMAIL=${INITIAL_USER_EMAIL}
+
 # Sets the directory for Grafana dashboards
 # GRAFANA_DASHBOARDS_DIR="${SCRIPT_DIR}/local_config_dir/dashboards"
 
+# Sets how long Prometheus keeps data in its time-series database.
+# The default is 15 days.
+# PROMETHEUS_DATA_RETENTION_TIME="180d"
+
 # The mapping of nodes to roles
 # installer.sh will log in to each of these nodes and then provision
 # it for the specified roles.
 NODES=(
-  [controller.${DOMAIN}]=database,controller,websocket,dispatcher,keepbalance
-  [workbench.${DOMAIN}]=monitoring,workbench,workbench2,webshell,keepproxy,keepweb
+  [controller.${DOMAIN}]=database,controller
+  [workbench.${DOMAIN}]=monitoring,workbench,workbench2,webshell,keepproxy,keepweb,websocket,dispatcher,keepbalance
   [keep0.${DOMAIN}]=keepstore
   [shell.${DOMAIN}]=shell
 )
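
Note: the new PROMETHEUS_DATA_RETENTION_TIME variable presumably feeds Prometheus's --storage.tsdb.retention.time setting, whose built-in default is also 15d. A minimal sketch of overriding it, with an illustrative value; Prometheus accepts duration suffixes such as h, d, w and y:

    # Keep three months of metrics instead of the 15-day default
    PROMETHEUS_DATA_RETENTION_TIME="90d"
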
@@ -121,11 +126,11 @@ CLUSTER_INT_CIDR=10.1.0.0/16
 # Note the IPs in this example are shared between roles, as suggested in
 # https://doc.arvados.org/main/install/salt-multi-host.html
 CONTROLLER_INT_IP=10.1.1.11
-DISPATCHER_INT_IP=${CONTROLLER_INT_IP}
-KEEPBALANCE_INT_IP=${CONTROLLER_INT_IP}
-WEBSOCKET_INT_IP=${CONTROLLER_INT_IP}
 DATABASE_INT_IP=${CONTROLLER_INT_IP}
 WORKBENCH1_INT_IP=10.1.1.15
+DISPATCHER_INT_IP=${WORKBENCH1_INT_IP}
+KEEPBALANCE_INT_IP=${WORKBENCH1_INT_IP}
+WEBSOCKET_INT_IP=${WORKBENCH1_INT_IP}
 # Both for collections and downloads
 KEEPWEB_INT_IP=${WORKBENCH1_INT_IP}
 WORKBENCH2_INT_IP=${WORKBENCH1_INT_IP}
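
Note: the internal-IP changes mirror the role move in NODES above: dispatcher, keepbalance and websocket now run on the workbench host, so their *_INT_IP variables follow WORKBENCH1_INT_IP. A sketch of how the pairing would look if one of those roles were instead given its own node (the host name and address below are hypothetical):

    # [keepbalance.${DOMAIN}]=keepbalance   # extra entry in the NODES array
    # KEEPBALANCE_INT_IP=10.1.2.20          # matching internal IP for that host
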
@@ -139,7 +144,8 @@ SHELL_INT_IP=10.1.2.17
 # be upgraded.
 DISABLED_CONTROLLER=""
 
-# Performance tuning parameters
+# Performance tuning parameters.  If these are not set, workers
+# default to the number of CPUs and queued requests default to 128.
 #CONTROLLER_MAX_WORKERS=
 #CONTROLLER_MAX_QUEUED_REQUESTS=
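
Note: a sketch of setting the tuning parameters explicitly; the values below are illustrative only, not recommendations:

    CONTROLLER_MAX_WORKERS=8              # instead of one worker per CPU
    CONTROLLER_MAX_QUEUED_REQUESTS=256    # instead of the default of 128
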