X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/9539317a22d8ea16f94b0e086507ab595d758216..284f37a08fcdff15012b9f731000c57c1d7c56f1:/tools/salt-install/local.params.example.multiple_hosts

diff --git a/tools/salt-install/local.params.example.multiple_hosts b/tools/salt-install/local.params.example.multiple_hosts
index c770c8d74c..0064a78c5e 100644
--- a/tools/salt-install/local.params.example.multiple_hosts
+++ b/tools/salt-install/local.params.example.multiple_hosts
@@ -5,12 +5,29 @@

 # These are the basic parameters to configure the installation

-# The FIVE ALPHANUMERIC CHARACTERS name you want to give your cluster
+# The Arvados cluster ID, needs to be 5 lowercase alphanumeric characters.
 CLUSTER="cluster_fixme_or_this_wont_work"

-# The domainname you want tou give to your cluster's hosts
+# The domain name you want to give to your cluster's hosts
+# the end result hostnames will be $SERVICE.$CLUSTER.$DOMAIN
 DOMAIN="domain_fixme_or_this_wont_work"

+# For multi-node installs, the ssh log in for each node
+# must be root or able to sudo
+DEPLOY_USER=root
+
+# The mapping of nodes to roles
+# installer.sh will log in to each of these nodes and then provision
+# it for the specified roles.
+NODES=(
+  [controller.${CLUSTER}.${DOMAIN}]=database,api,controller,websocket,dispatcher,keepbalance
+  [keep0.${CLUSTER}.${DOMAIN}]=keepstore
+  [keep1.${CLUSTER}.${DOMAIN}]=keepstore
+  [keep.${CLUSTER}.${DOMAIN}]=keepproxy,keepweb
+  [workbench.${CLUSTER}.${DOMAIN}]=workbench,workbench2,webshell
+  [shell.${CLUSTER}.${DOMAIN}]=shell
+)
+
 # Host SSL port where you want to point your browser to access Arvados
 # Defaults to 443 for regular runs, and to 8443 when called in Vagrant.
 # You can point it to another port if desired
@@ -25,47 +42,47 @@ WORKBENCH1_EXT_SSL_PORT=443
 WORKBENCH2_EXT_SSL_PORT=443

 # Internal IPs for the configuration
-CLUSTER_INT_CIDR=10.0.0.0/16
+CLUSTER_INT_CIDR=10.1.0.0/16

 # Note the IPs in this example are shared between roles, as suggested in
 # https://doc.arvados.org/main/install/salt-multi-host.html
-CONTROLLER_INT_IP=10.0.0.1
-WEBSOCKET_INT_IP=10.0.0.1
-KEEP_INT_IP=10.0.0.2
+CONTROLLER_INT_IP=10.1.1.11
+WEBSOCKET_INT_IP=10.1.1.11
+KEEP_INT_IP=10.1.1.12
 # Both for collections and downloads
-KEEPWEB_INT_IP=10.0.0.2
-KEEPSTORE0_INT_IP=10.0.0.3
-KEEPSTORE1_INT_IP=10.0.0.4
-WORKBENCH1_INT_IP=10.0.0.5
-WORKBENCH2_INT_IP=10.0.0.5
-WEBSHELL_INT_IP=10.0.0.5
-DATABASE_INT_IP=10.0.0.6
-SHELL_INT_IP=10.0.0.7
+KEEPWEB_INT_IP=10.1.1.12
+KEEPSTORE0_INT_IP=10.1.1.13
+KEEPSTORE1_INT_IP=10.1.1.14
+WORKBENCH1_INT_IP=10.1.1.15
+WORKBENCH2_INT_IP=10.1.1.15
+WEBSHELL_INT_IP=10.1.1.15
+DATABASE_INT_IP=10.1.1.11
+SHELL_INT_IP=10.1.1.17

 INITIAL_USER="admin"

 # If not specified, the initial user email will be composed as
 # INITIAL_USER@CLUSTER.DOMAIN
 INITIAL_USER_EMAIL="admin@cluster_fixme_or_this_wont_work.domain_fixme_or_this_wont_work"
-INITIAL_USER_PASSWORD="password"
+INITIAL_USER_PASSWORD="fixmepassword"

 # YOU SHOULD CHANGE THESE TO SOME RANDOM STRINGS
-BLOB_SIGNING_KEY=blobsigningkeymushaveatleast32characters
-MANAGEMENT_TOKEN=managementtokenmushaveatleast32characters
-SYSTEM_ROOT_TOKEN=systemroottokenmushaveatleast32characters
-ANONYMOUS_USER_TOKEN=anonymoususertokenmushaveatleast32characters
-WORKBENCH_SECRET_KEY=workbenchsecretkeymushaveatleast32characters
-DATABASE_PASSWORD=please_set_this_to_some_secure_value
+BLOB_SIGNING_KEY=fixmeblobsigningkeymushaveatleast32characters
+MANAGEMENT_TOKEN=fixmemanagementtokenmushaveatleast32characters
+SYSTEM_ROOT_TOKEN=fixmesystemroottokenmushaveatleast32characters
+ANONYMOUS_USER_TOKEN=fixmeanonymoususertokenmushaveatleast32characters
+WORKBENCH_SECRET_KEY=fixmeworkbenchsecretkeymushaveatleast32characters
+DATABASE_PASSWORD=fixmeplease_set_this_to_some_secure_value

 # SSL CERTIFICATES
-# Arvados REQUIRES valid SSL to work correctly. Otherwise, some components will fail
-# to communicate and can silently drop traffic. You can try to use the Letsencrypt
-# salt formula (https://github.com/saltstack-formulas/letsencrypt-formula) to try to
-# automatically obtain and install SSL certificates for your instances or set this
-# variable to "no", provide and upload your own certificates to the instances and
-# modify the 'nginx_*' salt pillars accordingly (see CUSTOM_CERTS_DIR below)
-USE_LETSENCRYPT="yes"
-USE_LETSENCRYPT_IAM_USER="yes"
+# Arvados requires SSL certificates to work correctly. This installer supports these options:
+# * self-signed: let the installer create self-signed certificate(s)
+# * bring-your-own: supply your own certificate(s) in the `certs` directory
+# * lets-encrypt: automatically obtain and install SSL certificates for your hostname(s)
+#
+# See https://doc.arvados.org/install/salt-multi-host.html for more information.
+SSL_MODE="lets-encrypt"
+USE_LETSENCRYPT_ROUTE53="yes"
 # For collections, we need to obtain a wildcard certificate for
 # '*.collections.<cluster>.<domain>'. This is only possible through a DNS-01 challenge.
 # For that reason, you'll need to provide AWS credentials with permissions to manage
@@ -76,12 +93,15 @@ LE_AWS_ACCESS_KEY_ID="AKIABCDEFGHIJKLMNOPQ"
 LE_AWS_SECRET_ACCESS_KEY="thisistherandomstringthatisyoursecretkey"

 # If you going to provide your own certificates for Arvados, the provision script can
-# help you deploy them. In order to do that, you need to set `USE_LETSENCRYPT=no` above,
+# help you deploy them. In order to do that, you need to set `SSL_MODE=bring-your-own` above,
 # and copy the required certificates under the directory specified in the next line.
 # The certs will be copied from this directory by the provision script.
-CUSTOM_CERTS_DIR="./certs"
+# Please set it to the FULL PATH to the certs dir if you're going to use a different dir
+# Default is "${SCRIPT_DIR}/certs", where the variable "SCRIPT_DIR" has the path to the
+# directory where the "provision.sh" script was copied in the destination host.
+# CUSTOM_CERTS_DIR="${SCRIPT_DIR}/local_config_dir/certs"
 # The script expects cert/key files with these basenames (matching the role except for
-# keepweb, which is split in both downoad/collections):
+# keepweb, which is split in both download/collections):
 #  "controller"
 #  "websocket"
 #  "workbench"
@@ -89,10 +109,16 @@ CUSTOM_CERTS_DIR="./certs"
 #  "webshell"
 #  "download"       # Part of keepweb
 #  "collections"    # Part of keepweb
-#  "keep"           # Keepproxy
+#  "keepproxy"      # Keepproxy
 # Ie., 'keep', the script will lookup for
-# ${CUSTOM_CERTS_DIR}/keep.crt
-# ${CUSTOM_CERTS_DIR}/keep.key
+# ${CUSTOM_CERTS_DIR}/keepproxy.crt
+# ${CUSTOM_CERTS_DIR}/keepproxy.key
+
+# Set the following to "yes" if the key files are encrypted and optionally set
+# a custom AWS secret name for each node to retrieve the password.
+SSL_KEY_ENCRYPTED="no"
+SSL_KEY_AWS_SECRET_NAME="${CLUSTER}-arvados-ssl-privkey-password"
+SSL_KEY_AWS_REGION="us-east-1"

 # The directory to check for the config files (pillars, states) you want to use.
 # There are a few examples under 'config_examples'.
@@ -116,8 +142,8 @@ RELEASE="production"

 # Formulas versions
 # ARVADOS_TAG="2.2.0"
-# POSTGRES_TAG="v0.41.6"
-# NGINX_TAG="temp-fix-missing-statements-in-pillar"
-# DOCKER_TAG="v2.0.7"
+# POSTGRES_TAG="v0.44.0"
+# NGINX_TAG="v2.8.1"
+# DOCKER_TAG="v2.4.2"
 # LOCALE_TAG="v0.3.4"
 # LETSENCRYPT_TAG="v2.1.0"
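
Note on the NODES/DEPLOY_USER hunk above: NODES is a bash associative array that maps each node's hostname to a comma-separated list of roles, and installer.sh logs in to every listed host as DEPLOY_USER (root, or a user able to sudo) to provision those roles. Purely as an illustrative sketch of how such a map can be consumed (this is not the actual installer.sh code, and it assumes the example has been saved as ./local.params):

#!/bin/bash
# Sketch only: walk the NODES map from local.params and reach each host over SSH.
# The real installer also copies the configuration and runs provision.sh with the
# node's role list; here we only print the plan and test the SSH connection.
set -euo pipefail

declare -A NODES            # must be associative before sourcing local.params
source ./local.params       # assumed location of the edited example file

for host in "${!NODES[@]}"; do
  roles="${NODES[$host]}"
  echo "Would provision ${host} with roles: ${roles}"
  ssh "${DEPLOY_USER}@${host}" hostname
done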
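
The secrets block ("YOU SHOULD CHANGE THESE TO SOME RANDOM STRINGS") and the various "fixme" placeholders must be replaced before provisioning. A minimal sketch of one way to do that, assuming the example has been copied to ./local.params and that openssl and GNU sed are available (this is not part of the shipped tooling):

# Substitute a fresh 32-character random value for each secret, in place.
for var in BLOB_SIGNING_KEY MANAGEMENT_TOKEN SYSTEM_ROOT_TOKEN \
           ANONYMOUS_USER_TOKEN WORKBENCH_SECRET_KEY DATABASE_PASSWORD; do
  secret=$(openssl rand -hex 16)          # 32 hex characters, safe for sed
  sed -i "s/^${var}=.*/${var}=${secret}/" ./local.params
done

# Whatever still contains "fixme" (CLUSTER, DOMAIN, INITIAL_USER_PASSWORD, ...)
# has to be edited by hand.
grep -n fixme ./local.params || echo "No placeholders left."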
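
For the bring-your-own option, the provision script copies a cert/key pair per basename out of CUSTOM_CERTS_DIR. A quick pre-flight check could look like the following sketch; the path is only an example, and the basename list is limited to the entries visible in this diff (the hunk context may hide additional ones):

CUSTOM_CERTS_DIR="${SCRIPT_DIR:-$PWD}/local_config_dir/certs"   # example path only
for name in controller websocket workbench webshell download collections keepproxy; do
  for ext in crt key; do
    [ -f "${CUSTOM_CERTS_DIR}/${name}.${ext}" ] || echo "missing: ${name}.${ext}"
  done
done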