X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/ec3d70f727cf622db949b72d85cdb36504b07f13..5720f268fb6d6995042dd689ae760770fa3cf54e:/tools/salt-install/local.params.example.multiple_hosts

diff --git a/tools/salt-install/local.params.example.multiple_hosts b/tools/salt-install/local.params.example.multiple_hosts
index 221e7b35eb..17e937f2e3 100644
--- a/tools/salt-install/local.params.example.multiple_hosts
+++ b/tools/salt-install/local.params.example.multiple_hosts
@@ -5,12 +5,27 @@
 
 # These are the basic parameters to configure the installation
 
-# The FIVE ALPHANUMERIC CHARACTERS name you want to give your cluster
+# The Arvados cluster ID, needs to be 5 lowercase alphanumeric characters.
 CLUSTER="cluster_fixme_or_this_wont_work"
 
-# The domainname you want tou give to your cluster's hosts
+# The domain name you want to give to your cluster's hosts
+# the end result hostnames will be $SERVICE.$CLUSTER.$DOMAIN
 DOMAIN="domain_fixme_or_this_wont_work"
 
+# For multi-node installs, the ssh log in for each node
+# must be root or able to sudo
+DEPLOY_USER=root
+
+# The mapping of nodes to roles
+# installer.sh will log in to each of these nodes and then provision
+# it for the specified roles.
+NODES=(
+  [controller.${CLUSTER}.${DOMAIN}]=database,api,controller,websocket,dispatcher,keepbalance
+  [workbench.${CLUSTER}.${DOMAIN}]=workbench,workbench2,webshell,keepproxy,keepweb
+  [keep0.${CLUSTER}.${DOMAIN}]=keepstore
+  [shell.${CLUSTER}.${DOMAIN}]=shell
+)
+
 # Host SSL port where you want to point your browser to access Arvados
 # Defaults to 443 for regular runs, and to 8443 when called in Vagrant.
 # You can point it to another port if desired
@@ -25,37 +40,42 @@ WORKBENCH1_EXT_SSL_PORT=443
 WORKBENCH2_EXT_SSL_PORT=443
 
 # Internal IPs for the configuration
-CLUSTER_INT_CIDR=10.0.0.0/16
+CLUSTER_INT_CIDR=10.1.0.0/16
 
 # Note the IPs in this example are shared between roles, as suggested in
 # https://doc.arvados.org/main/install/salt-multi-host.html
-CONTROLLER_INT_IP=10.0.0.1
-WEBSOCKET_INT_IP=10.0.0.1
-KEEP_INT_IP=10.0.0.2
+CONTROLLER_INT_IP=10.1.1.11
+WEBSOCKET_INT_IP=10.1.1.11
+KEEP_INT_IP=10.1.1.15
 # Both for collections and downloads
-KEEPWEB_INT_IP=10.0.0.2
-KEEPSTORE0_INT_IP=10.0.0.3
-KEEPSTORE1_INT_IP=10.0.0.4
-WORKBENCH1_INT_IP=10.0.0.5
-WORKBENCH2_INT_IP=10.0.0.5
-WEBSHELL_INT_IP=10.0.0.5
-DATABASE_INT_IP=10.0.0.6
-SHELL_INT_IP=10.0.0.7
+KEEPWEB_INT_IP=10.1.1.15
+KEEPSTORE0_INT_IP=10.1.2.13
+WORKBENCH1_INT_IP=10.1.1.15
+WORKBENCH2_INT_IP=10.1.1.15
+WEBSHELL_INT_IP=10.1.1.15
+DATABASE_INT_IP=10.1.1.11
+SHELL_INT_IP=10.1.2.17
 
 INITIAL_USER="admin"
 
 # If not specified, the initial user email will be composed as
 # INITIAL_USER@CLUSTER.DOMAIN
 INITIAL_USER_EMAIL="admin@cluster_fixme_or_this_wont_work.domain_fixme_or_this_wont_work"
-INITIAL_USER_PASSWORD="password"
+INITIAL_USER_PASSWORD="fixmepassword"
+
+# Use a public node as a jump host for SSH sessions. This allows running the
+# installer from the outside of the cluster's local network and still reach
+# the internal servers for configuration deployment.
+# Comment out to disable.
+USE_SSH_JUMPHOST="controller.${CLUSTER}.${DOMAIN}"
 
 # YOU SHOULD CHANGE THESE TO SOME RANDOM STRINGS
-BLOB_SIGNING_KEY=blobsigningkeymushaveatleast32characters
-MANAGEMENT_TOKEN=managementtokenmushaveatleast32characters
-SYSTEM_ROOT_TOKEN=systemroottokenmushaveatleast32characters
-ANONYMOUS_USER_TOKEN=anonymoususertokenmushaveatleast32characters
-WORKBENCH_SECRET_KEY=workbenchsecretkeymushaveatleast32characters
-DATABASE_PASSWORD=please_set_this_to_some_secure_value
+BLOB_SIGNING_KEY=fixmeblobsigningkeymushaveatleast32characters
+MANAGEMENT_TOKEN=fixmemanagementtokenmushaveatleast32characters
+SYSTEM_ROOT_TOKEN=fixmesystemroottokenmushaveatleast32characters
+ANONYMOUS_USER_TOKEN=fixmeanonymoususertokenmushaveatleast32characters
+WORKBENCH_SECRET_KEY=fixmeworkbenchsecretkeymushaveatleast32characters
+DATABASE_PASSWORD=fixmeplease_set_this_to_some_secure_value
 
 # SSL CERTIFICATES
 # Arvados requires SSL certificates to work correctly. This installer supports these options:
@@ -82,7 +102,7 @@ LE_AWS_SECRET_ACCESS_KEY="thisistherandomstringthatisyoursecretkey"
 # Please set it to the FULL PATH to the certs dir if you're going to use a different dir
 # Default is "${SCRIPT_DIR}/certs", where the variable "SCRIPT_DIR" has the path to the
 # directory where the "provision.sh" script was copied in the destination host.
-# CUSTOM_CERTS_DIR="${SCRIPT_DIR}/certs"
+# CUSTOM_CERTS_DIR="${SCRIPT_DIR}/local_config_dir/certs"
 # The script expects cert/key files with these basenames (matching the role except for
 # keepweb, which is split in both download/collections):
 # "controller"
@@ -97,6 +117,12 @@ LE_AWS_SECRET_ACCESS_KEY="thisistherandomstringthatisyoursecretkey"
 # ${CUSTOM_CERTS_DIR}/keepproxy.crt
 # ${CUSTOM_CERTS_DIR}/keepproxy.key
 
+# Set the following to "yes" if the key files are encrypted and optionally set
+# a custom AWS secret name for each node to retrieve the password.
+SSL_KEY_ENCRYPTED="no"
+SSL_KEY_AWS_SECRET_NAME="${CLUSTER}-arvados-ssl-privkey-password"
+SSL_KEY_AWS_REGION="us-east-1"
+
 # The directory to check for the config files (pillars, states) you want to use.
 # There are a few examples under 'config_examples'.
 # CONFIG_DIR="local_config_dir"
@@ -119,8 +145,9 @@ RELEASE="production"
 
 # Formulas versions
 # ARVADOS_TAG="2.2.0"
-# POSTGRES_TAG="v0.43.0"
-# NGINX_TAG="v2.8.0"
-# DOCKER_TAG="v2.0.7"
+# POSTGRES_TAG="v0.44.0"
+# NGINX_TAG="v2.8.1"
+# DOCKER_TAG="v2.4.2"
+# POSTGRES_TAG="v0.44.0" is listed above with the other formula pins
 # LOCALE_TAG="v0.3.4"
 # LETSENCRYPT_TAG="v2.1.0"
+# PROMETHEUS_TAG="v5.6.5"
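Note on the "YOU SHOULD CHANGE THESE TO SOME RANDOM STRINGS" block in the diff above: each of those values is expected to be replaced with a random string of at least 32 characters before running the installer. A minimal sketch of one way to do that with standard tools follows; it assumes GNU sed and openssl(1) are available and that the example file has already been copied to a working local.params (the file name and tooling are assumptions for illustration, not something this diff prescribes):

    # Sketch: replace the "fixme..." secret placeholders in local.params with
    # random 64-character hex strings (well above the 32-character minimum).
    # Assumes GNU sed (-i) and openssl; local.params is a hypothetical copy of
    # local.params.example.multiple_hosts.
    for var in BLOB_SIGNING_KEY MANAGEMENT_TOKEN SYSTEM_ROOT_TOKEN \
               ANONYMOUS_USER_TOKEN WORKBENCH_SECRET_KEY DATABASE_PASSWORD; do
      secret=$(openssl rand -hex 32)
      # hex output never contains "/", so it is safe inside this sed expression
      sed -i "s/^${var}=.*/${var}=${secret}/" local.params
    done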