##########################################################
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: CC-BY-SA-3.0

# These are the basic parameters to configure the installation

# The Arvados cluster ID, needs to be 5 lowercase alphanumeric characters.
CLUSTER="cluster_fixme_or_this_wont_work"

# The domain name you want to give to your cluster's hosts;
# the end result hostnames will be $SERVICE.$DOMAIN
DOMAIN="domain_fixme_or_this_wont_work"

# For multi-node installs, the SSH login for each node
# must be root or able to sudo
DEPLOY_USER=admin

INITIAL_USER=admin

# If not specified, the initial user email will be composed as
# INITIAL_USER@DOMAIN
INITIAL_USER_EMAIL="admin@cluster_fixme_or_this_wont_work.domain_fixme_or_this_wont_work"

# Use a public node as a jump host for SSH sessions. This allows running the
# installer from outside the cluster's local network while still reaching
# the internal servers for configuration deployment.
# Comment out to disable.
USE_SSH_JUMPHOST="controller.${DOMAIN}"

AWS_REGION="fixme_or_this_wont_work"

# SSL CERTIFICATES
# Arvados requires SSL certificates to work correctly. This installer supports these options:
# * self-signed: let the installer create self-signed certificate(s)
# * bring-your-own: supply your own certificate(s) in the `certs` directory
# * lets-encrypt: automatically obtain and install SSL certificates for your hostname(s)
#
# See https://doc.arvados.org/install/salt-multi-host.html for more information.
SSL_MODE="lets-encrypt"
USE_LETSENCRYPT_ROUTE53="yes"
# For collections, we need to obtain a wildcard certificate for
# '*.collections.<cluster>.<domain>'. This is only possible through a DNS-01 challenge.
# For that reason, you'll need to provide AWS credentials with permissions to manage
# RRs in the route53 zone for the cluster.
# WARNING!: If AWS credentials files already exist on the hosts, they won't be replaced.
LE_AWS_REGION="${AWS_REGION}"

# Compute node configurations
COMPUTE_AMI="ami_id_fixme_or_this_wont_work"
COMPUTE_SG="security_group_fixme_or_this_wont_work"
COMPUTE_SUBNET="subnet_fixme_or_this_wont_work"
COMPUTE_AWS_REGION="${AWS_REGION}"
COMPUTE_USER="${DEPLOY_USER}"

# Keep S3 backend region
KEEP_AWS_REGION="${AWS_REGION}"

# If you're going to provide your own certificates for Arvados, the provision script can
# help you deploy them. In order to do that, you need to set `SSL_MODE=bring-your-own` above,
# and copy the required certificates under the directory specified in the next line.
# The certs will be copied from this directory by the provision script.
# Please set it to the FULL PATH to the certs dir if you're going to use a different dir
# Default is "${SCRIPT_DIR}/certs", where the variable "SCRIPT_DIR" has the path to the
# directory where the "provision.sh" script was copied in the destination host.
# CUSTOM_CERTS_DIR="${SCRIPT_DIR}/local_config_dir/certs"
# The script expects cert/key files with these basenames (matching the role except for
# keepweb, which is split into both download/collections):
#  "controller"
#  "websocket"
#  "workbench"
#  "workbench2"
#  "webshell"
#  "download"       # Part of keepweb
#  "collections"    # Part of keepweb
#  "keepproxy"      # Keepproxy
#  "prometheus"
#  "grafana"
# E.g., for 'keepproxy', the script will look for
# ${CUSTOM_CERTS_DIR}/keepproxy.crt
# ${CUSTOM_CERTS_DIR}/keepproxy.key
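
# A quick sanity check before deploying bring-your-own certs (illustrative
# only, not part of the installer; assumes RSA keys and the default
# CUSTOM_CERTS_DIR): a cert and its key match when both commands print the
# same digest.
#   openssl x509 -noout -modulus -in "${CUSTOM_CERTS_DIR}/keepproxy.crt" | openssl md5
#   openssl rsa  -noout -modulus -in "${CUSTOM_CERTS_DIR}/keepproxy.key" | openssl md5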

# Set the following to "yes" if the key files are encrypted and optionally set
# a custom AWS secret name for each node to retrieve the password.
SSL_KEY_ENCRYPTED="no"
SSL_KEY_AWS_SECRET_NAME="${CLUSTER}-arvados-ssl-privkey-password"
SSL_KEY_AWS_REGION="${AWS_REGION}"

# Customize Prometheus & Grafana web UI access credentials
MONITORING_USERNAME=${INITIAL_USER}
MONITORING_EMAIL=${INITIAL_USER_EMAIL}

# Sets the directory for Grafana dashboards
# GRAFANA_DASHBOARDS_DIR="${SCRIPT_DIR}/local_config_dir/dashboards"

# The mapping of nodes to roles
# installer.sh will log in to each of these nodes and then provision
# it for the specified roles.
declare -A NODES=(
  [controller.${DOMAIN}]=database,api,controller,websocket,dispatcher,keepbalance
  [workbench.${DOMAIN}]=monitoring,workbench,workbench2,webshell,keepproxy,keepweb
  [keep0.${DOMAIN}]=keepstore
  [shell.${DOMAIN}]=shell
)

# Comma-separated list of nodes. This is used to dynamically adjust
# salt pillars.
NODELIST=""
for node in "${!NODES[@]}"; do
  if [ -z "$NODELIST" ]; then
    NODELIST="$node"
  else
    NODELIST="$NODELIST,$node"
  fi
done

# The mapping of roles to nodes. This is used to dynamically adjust
# salt pillars.
declare -A ROLES
for node in "${!NODES[@]}"; do
  roles="${NODES[$node]}"
  # Split the comma-separated roles into an array
  IFS=',' read -ra roles_array <<< "$roles"
  for role in "${roles_array[@]}"; do
    if [ -n "${ROLES[$role]:-}" ]; then
      ROLES["$role"]="${ROLES[$role]},$node"
    else
      ROLES["$role"]=$node
    fi
  done
done

# Auto-detects load-balancing mode
if [ -z "${ROLES['balancer']:-}" ]; then
  ENABLE_BALANCER="no"
else
  ENABLE_BALANCER="yes"
fi
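
# (Illustrative only, not used by the installer.) To inspect the NODELIST and
# ROLES mappings computed above, you can source this file from an interactive
# bash session; "local.params" is the assumed filename here:
#   source local.params
#   echo "NODELIST: ${NODELIST}"
#   for role in "${!ROLES[@]}"; do echo "${role} => ${ROLES[$role]}"; done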

# Host SSL port where you want to point your browser to access Arvados
# Defaults to 443 for regular runs, and to 8443 when called in Vagrant.
# You can point it to another port if desired
# In Vagrant, make sure it matches what you set in the Vagrantfile (8443)
CONTROLLER_EXT_SSL_PORT=443
KEEP_EXT_SSL_PORT=443
# Both for collections and downloads
KEEPWEB_EXT_SSL_PORT=443
WEBSHELL_EXT_SSL_PORT=443
WEBSOCKET_EXT_SSL_PORT=443
WORKBENCH1_EXT_SSL_PORT=443
WORKBENCH2_EXT_SSL_PORT=443

# Internal IPs for the configuration
CLUSTER_INT_CIDR=10.1.0.0/16

# Note the IPs in this example are shared between roles, as suggested in
# https://doc.arvados.org/main/install/salt-multi-host.html
CONTROLLER_INT_IP=10.1.1.11
DISPATCHER_INT_IP=${CONTROLLER_INT_IP}
KEEPBALANCE_INT_IP=${CONTROLLER_INT_IP}
WEBSOCKET_INT_IP=${CONTROLLER_INT_IP}
DATABASE_INT_IP=${CONTROLLER_INT_IP}
WORKBENCH1_INT_IP=10.1.1.15
# Both for collections and downloads
KEEPWEB_INT_IP=${WORKBENCH1_INT_IP}
WORKBENCH2_INT_IP=${WORKBENCH1_INT_IP}
WEBSHELL_INT_IP=${WORKBENCH1_INT_IP}
KEEP_INT_IP=${WORKBENCH1_INT_IP}
KEEPSTORE0_INT_IP=10.1.2.13
SHELL_INT_IP=10.1.2.17

# Load balancing settings
DISABLED_CONTROLLER=""

# Performance tuning parameters
#CONTROLLER_NGINX_WORKERS=
#CONTROLLER_MAX_CONCURRENT_REQUESTS=

# The directory to check for the config files (pillars, states) you want to use.
# There are a few examples under 'config_examples'.
# CONFIG_DIR="local_config_dir"

# Extra states to apply. If you use your own subdir, change this value accordingly
# EXTRA_STATES_DIR="${CONFIG_DIR}/states"

# These are ARVADOS-related settings.
# Which release of Arvados repo you want to use
RELEASE="production"
# Which version of Arvados you want to install. Defaults to latest stable
# VERSION="2.1.2-1"

# This is an arvados-formula setting.
# If branch is set, the script will switch to it before running salt
# Usually not needed, only used for testing
# BRANCH="main"

##########################################################
# Usually there's no need to modify things below this line

# Formula versions
# ARVADOS_TAG="2.2.0"
# POSTGRES_TAG="v0.44.0"
# NGINX_TAG="v2.8.1"
# DOCKER_TAG="v2.4.2"
# LOCALE_TAG="v0.3.4"
# LETSENCRYPT_TAG="v2.1.0"
# PROMETHEUS_TAG="v5.6.5"
# GRAFANA_TAG="v3.1.3"
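
# (Usage note, assuming the standard Arvados multi-host install layout.) After
# replacing all the "fixme" values above, the deployment is typically driven
# by the companion installer script referenced earlier in this file, e.g.:
#   ./installer.sh deploy
# See https://doc.arvados.org/install/salt-multi-host.html for the full steps.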