COMPUTE_AWS_REGION="${AWS_REGION}"
COMPUTE_USER="${DEPLOY_USER}"
-# Keep S3 backend region
+# Keep S3 backend settings
KEEP_AWS_REGION="${AWS_REGION}"
+KEEP_AWS_S3_BUCKET="${CLUSTER}-nyw5e-000000000000000-volume"
+KEEP_AWS_IAM_ROLE="${CLUSTER}-keepstore-00-iam-role"
# If you going to provide your own certificates for Arvados, the provision script can
# help you deploy them. In order to do that, you need to set `SSL_MODE=bring-your-own` above,
# Customize Prometheus & Grafana web UI access credentials
MONITORING_USERNAME=${INITIAL_USER}
MONITORING_EMAIL=${INITIAL_USER_EMAIL}
+
# Sets the directory for Grafana dashboards
# GRAFANA_DASHBOARDS_DIR="${SCRIPT_DIR}/local_config_dir/dashboards"
+# Sets the amount of data (expressed in time) Prometheus keeps in its
+# time-series database. The default is 15 days.
+# PROMETHEUS_DATA_RETENTION_TIME="180d"
+
# The mapping of nodes to roles
# installer.sh will log in to each of these nodes and then provision
# it for the specified roles.
NODES=(
- [controller.${DOMAIN}]=database,api,controller,websocket,dispatcher,keepbalance
- [workbench.${DOMAIN}]=monitoring,workbench,workbench2,webshell,keepproxy,keepweb
+ [controller.${DOMAIN}]=database,controller
+ [workbench.${DOMAIN}]=monitoring,workbench,workbench2,webshell,keepproxy,keepweb,websocket,dispatcher,keepbalance
[keep0.${DOMAIN}]=keepstore
[shell.${DOMAIN}]=shell
)
-# Comma-separated list of nodes. This is used to dynamically adjust
-# salt pillars.
-NODELIST=""
-for node in "${!NODES[@]}"; do
- if [ -z "$NODELIST" ]; then
- NODELIST="$node"
- else
- NODELIST="$NODELIST,$node"
- fi
-done
-
-# The mapping of roles to nodes. This is used to dinamically adjust
-# salt pillars.
-declare -A ROLES
-for node in "${!NODES[@]}"; do
- roles="${NODES[$node]}"
-
- # Split the comma-separated roles into an array
- IFS=',' read -ra roles_array <<< "$roles"
-
- for role in "${roles_array[@]}"; do
- if [ -n "${ROLES[$role]:-}" ]; then
- ROLES["$role"]="${ROLES[$role]},$node"
- else
- ROLES["$role"]=$node
- fi
- done
-done
-
# Host SSL port where you want to point your browser to access Arvados
# Defaults to 443 for regular runs, and to 8443 when called in Vagrant.
# You can point it to another port if desired
# Note the IPs in this example are shared between roles, as suggested in
# https://doc.arvados.org/main/install/salt-multi-host.html
CONTROLLER_INT_IP=10.1.1.11
-DISPATCHER_INT_IP=${CONTROLLER_INT_IP}
-KEEPBALANCE_INT_IP=${CONTROLLER_INT_IP}
-WEBSOCKET_INT_IP=${CONTROLLER_INT_IP}
DATABASE_INT_IP=${CONTROLLER_INT_IP}
WORKBENCH1_INT_IP=10.1.1.15
+DISPATCHER_INT_IP=${WORKBENCH1_INT_IP}
+KEEPBALANCE_INT_IP=${WORKBENCH1_INT_IP}
+WEBSOCKET_INT_IP=${WORKBENCH1_INT_IP}
# Both for collections and downloads
KEEPWEB_INT_IP=${WORKBENCH1_INT_IP}
WORKBENCH2_INT_IP=${WORKBENCH1_INT_IP}
KEEPSTORE0_INT_IP=10.1.2.13
SHELL_INT_IP=10.1.2.17
-# Load balancing settings
-ENABLE_BALANCER="no"
-DISABLED_CONTROLLER=""
+DATABASE_NAME="${CLUSTER}_arvados"
+DATABASE_USER="${CLUSTER}_arvados"
+# Set these if using an external PostgreSQL service.
+#DATABASE_EXTERNAL_SERVICE_HOST_OR_IP=
+#DATABASE_POSTGRESQL_VERSION=
-# Performance tuning parameters
-#CONTROLLER_NGINX_WORKERS=
-#CONTROLLER_MAX_CONCURRENT_REQUESTS=
+# Performance tuning parameters. If these are not set, the worker count
+# defaults to the number of CPUs and queued requests default to 128.
+#CONTROLLER_MAX_WORKERS=
+#CONTROLLER_MAX_QUEUED_REQUESTS=
# The directory to check for the config files (pillars, states) you want to use.
# There are a few examples under 'config_examples'.