X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/8d438557f4f3da941eb9fa695da905d488c6301b..HEAD:/tools/salt-install/local.params.example.multiple_hosts

diff --git a/tools/salt-install/local.params.example.multiple_hosts b/tools/salt-install/local.params.example.multiple_hosts
index 4234a965d5..d97afaca1c 100644
--- a/tools/salt-install/local.params.example.multiple_hosts
+++ b/tools/salt-install/local.params.example.multiple_hosts
@@ -53,8 +53,10 @@ COMPUTE_SUBNET="subnet_fixme_or_this_wont_work"
 COMPUTE_AWS_REGION="${AWS_REGION}"
 COMPUTE_USER="${DEPLOY_USER}"
 
-# Keep S3 backend region
+# Keep S3 backend settings
 KEEP_AWS_REGION="${AWS_REGION}"
+KEEP_AWS_S3_BUCKET="${CLUSTER}-nyw5e-000000000000000-volume"
+KEEP_AWS_IAM_ROLE="${CLUSTER}-keepstore-00-iam-role"
 
 # If you're going to provide your own certificates for Arvados, the provision script can
 # help you deploy them. In order to do that, you need to set `SSL_MODE=bring-your-own` above,
@@ -89,48 +91,24 @@ SSL_KEY_AWS_REGION="${AWS_REGION}"
 # Customize Prometheus & Grafana web UI access credentials
 MONITORING_USERNAME=${INITIAL_USER}
 MONITORING_EMAIL=${INITIAL_USER_EMAIL}
+
 # Sets the directory for Grafana dashboards
 # GRAFANA_DASHBOARDS_DIR="${SCRIPT_DIR}/local_config_dir/dashboards"
 
+# Sets the amount of data (expressed in time) Prometheus keeps in its
+# time-series database. Default is 15 days.
+# PROMETHEUS_DATA_RETENTION_TIME="180d"
+
 # The mapping of nodes to roles
 # installer.sh will log in to each of these nodes and then provision
 # it for the specified roles.
 NODES=(
-  [controller.${DOMAIN}]=database,api,controller,websocket,dispatcher,keepbalance
-  [workbench.${DOMAIN}]=monitoring,workbench,workbench2,webshell,keepproxy,keepweb
+  [controller.${DOMAIN}]=database,controller
+  [workbench.${DOMAIN}]=monitoring,workbench,workbench2,webshell,keepproxy,keepweb,websocket,dispatcher,keepbalance
   [keep0.${DOMAIN}]=keepstore
   [shell.${DOMAIN}]=shell
 )
 
-# Comma-separated list of nodes. This is used to dynamically adjust
-# salt pillars.
-NODELIST=""
-for node in "${!NODES[@]}"; do
-  if [ -z "$NODELIST" ]; then
-    NODELIST="$node"
-  else
-    NODELIST="$NODELIST,$node"
-  fi
-done
-
-# The mapping of roles to nodes. This is used to dinamically adjust
-# salt pillars.
-declare -A ROLES
-for node in "${!NODES[@]}"; do
-  roles="${NODES[$node]}"
-
-  # Split the comma-separated roles into an array
-  IFS=',' read -ra roles_array <<< "$roles"
-
-  for role in "${roles_array[@]}"; do
-    if [ -n "${ROLES[$role]:-}" ]; then
-      ROLES["$role"]="${ROLES[$role]},$node"
-    else
-      ROLES["$role"]=$node
-    fi
-  done
-done
-
 # Host SSL port where you want to point your browser to access Arvados
 # Defaults to 443 for regular runs, and to 8443 when called in Vagrant.
 # You can point it to another port if desired
@@ -150,11 +128,11 @@ CLUSTER_INT_CIDR=10.1.0.0/16
 # Note the IPs in this example are shared between roles, as suggested in
 # https://doc.arvados.org/main/install/salt-multi-host.html
 CONTROLLER_INT_IP=10.1.1.11
-DISPATCHER_INT_IP=${CONTROLLER_INT_IP}
-KEEPBALANCE_INT_IP=${CONTROLLER_INT_IP}
-WEBSOCKET_INT_IP=${CONTROLLER_INT_IP}
 DATABASE_INT_IP=${CONTROLLER_INT_IP}
 WORKBENCH1_INT_IP=10.1.1.15
+DISPATCHER_INT_IP=${WORKBENCH1_INT_IP}
+KEEPBALANCE_INT_IP=${WORKBENCH1_INT_IP}
+WEBSOCKET_INT_IP=${WORKBENCH1_INT_IP}
 # Both for collections and downloads
 KEEPWEB_INT_IP=${WORKBENCH1_INT_IP}
 WORKBENCH2_INT_IP=${WORKBENCH1_INT_IP}
@@ -163,12 +141,18 @@ KEEP_INT_IP=${WORKBENCH1_INT_IP}
 KEEPSTORE0_INT_IP=10.1.2.13
 SHELL_INT_IP=10.1.2.17
 
-# Load balancing settings
-ENABLE_BALANCER="no"
-
-# Performance tuning parameters
-#CONTROLLER_NGINX_WORKERS=
-#CONTROLLER_MAX_CONCURRENT_REQUESTS=
+DATABASE_NAME="${CLUSTER}_arvados"
+DATABASE_USER="${CLUSTER}_arvados"
+# Set these if using an external PostgreSQL service.
+#DATABASE_EXTERNAL_SERVICE_HOST_OR_IP=
+#DATABASE_POSTGRESQL_VERSION=
+
+# Performance tuning parameters. If these are not set, workers
+# defaults to the number of CPUs, queued requests defaults to 128,
+# and gateway tunnels defaults to 1000.
+#CONTROLLER_MAX_WORKERS=
+#CONTROLLER_MAX_QUEUED_REQUESTS=
+#CONTROLLER_MAX_GATEWAY_TUNNELS=
 
 # The directory to check for the config files (pillars, states) you want to use.
 # There are a few examples under 'config_examples'.
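
The NODELIST/ROLES derivation removed above no longer lives in local.params; installer.sh is presumably expected to compute those mappings from NODES itself. The pattern is still useful when scripting against this file: NODES is a bash associative array keyed by hostname, and the reverse role-to-nodes map can be rebuilt from it. A minimal standalone sketch of that derivation, following the removed code, with DOMAIN as a stand-in value:

    #!/usr/bin/env bash
    set -euo pipefail

    DOMAIN="example.com"  # stand-in; local.params sets the real value earlier in the file

    # Same shape as the NODES array defined in this file.
    declare -A NODES=(
      [controller.${DOMAIN}]=database,controller
      [workbench.${DOMAIN}]=monitoring,workbench,workbench2,webshell,keepproxy,keepweb,websocket,dispatcher,keepbalance
      [keep0.${DOMAIN}]=keepstore
      [shell.${DOMAIN}]=shell
    )

    # Comma-separated node list, as the removed NODELIST loop produced.
    NODELIST=$(IFS=','; echo "${!NODES[*]}")

    # Reverse mapping role -> comma-separated nodes, as the removed ROLES loop produced.
    declare -A ROLES
    for node in "${!NODES[@]}"; do
      IFS=',' read -ra roles_array <<< "${NODES[$node]}"
      for role in "${roles_array[@]}"; do
        # Append with a comma only if the role already has at least one node.
        ROLES[$role]="${ROLES[$role]:+${ROLES[$role]},}$node"
      done
    done

    echo "NODELIST=$NODELIST"
    for role in "${!ROLES[@]}"; do
      printf '%s -> %s\n' "$role" "${ROLES[$role]}"
    done

Running this prints, among other lines, "websocket -> workbench.example.com", which makes the role move in the hunk above easy to verify at a glance.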
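The new KEEP_AWS_S3_BUCKET and KEEP_AWS_IAM_ROLE settings rely on ordinary shell parameter expansion, so the bucket and IAM role names are derived from the CLUSTER prefix set near the top of the file (the "nyw5e" infix matches the Keep volume naming in the Arvados Volumes configuration). A quick illustration with a hypothetical cluster id:

    # Hypothetical value: CLUSTER is normally set near the top of local.params.
    CLUSTER="xarv1"
    KEEP_AWS_S3_BUCKET="${CLUSTER}-nyw5e-000000000000000-volume"
    KEEP_AWS_IAM_ROLE="${CLUSTER}-keepstore-00-iam-role"
    echo "$KEEP_AWS_S3_BUCKET"   # xarv1-nyw5e-000000000000000-volume
    echo "$KEEP_AWS_IAM_ROLE"    # xarv1-keepstore-00-iam-role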