X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/2f69ea48b9ca370be4e7aa65b32a7b8aec35d7c3..46fe1e60a1cd96a39163911edd821b3e316ca606:/tools/salt-install/local.params.example.multiple_hosts

diff --git a/tools/salt-install/local.params.example.multiple_hosts b/tools/salt-install/local.params.example.multiple_hosts
index 80df62d57d..909c3354ec 100644
--- a/tools/salt-install/local.params.example.multiple_hosts
+++ b/tools/salt-install/local.params.example.multiple_hosts
@@ -8,71 +8,27 @@
 # The Arvados cluster ID, needs to be 5 lowercase alphanumeric characters.
 CLUSTER="cluster_fixme_or_this_wont_work"
 
-# The domain name you want to give to your cluster's hosts
-# the end result hostnames will be $SERVICE.$CLUSTER.$DOMAIN
+# The domain name you want to give to your cluster's hosts;
+# the end result hostnames will be $SERVICE.$DOMAIN
 DOMAIN="domain_fixme_or_this_wont_work"
 
 # For multi-node installs, the ssh log in for each node
 # must be root or able to sudo
-DEPLOY_USER=root
+DEPLOY_USER=admin
 
-# The mapping of nodes to roles
-# installer.sh will log in to each of these nodes and then provision
-# it for the specified roles.
-NODES=(
-  [controller.${CLUSTER}.${DOMAIN}]=database,api,controller,websocket,dispatcher,keepbalance
-  [keep0.${CLUSTER}.${DOMAIN}]=keepstore
-  [keep1.${CLUSTER}.${DOMAIN}]=keepstore
-  [keep.${CLUSTER}.${DOMAIN}]=keepproxy,keepweb
-  [workbench.${CLUSTER}.${DOMAIN}]=workbench,workbench2,webshell
-  [shell.${CLUSTER}.${DOMAIN}]=shell
-)
-
-# Host SSL port where you want to point your browser to access Arvados
-# Defaults to 443 for regular runs, and to 8443 when called in Vagrant.
-# You can point it to another port if desired
-# In Vagrant, make sure it matches what you set in the Vagrantfile (8443)
-CONTROLLER_EXT_SSL_PORT=443
-KEEP_EXT_SSL_PORT=443
-# Both for collections and downloads
-KEEPWEB_EXT_SSL_PORT=443
-WEBSHELL_EXT_SSL_PORT=443
-WEBSOCKET_EXT_SSL_PORT=443
-WORKBENCH1_EXT_SSL_PORT=443
-WORKBENCH2_EXT_SSL_PORT=443
-
-# Internal IPs for the configuration
-CLUSTER_INT_CIDR=10.1.0.0/16
-
-# Note the IPs in this example are shared between roles, as suggested in
-# https://doc.arvados.org/main/install/salt-multi-host.html
-CONTROLLER_INT_IP=10.1.1.11
-WEBSOCKET_INT_IP=10.1.1.11
-KEEP_INT_IP=10.1.1.12
-# Both for collections and downloads
-KEEPWEB_INT_IP=10.1.1.12
-KEEPSTORE0_INT_IP=10.1.1.13
-KEEPSTORE1_INT_IP=10.1.1.14
-WORKBENCH1_INT_IP=10.1.1.15
-WORKBENCH2_INT_IP=10.1.1.15
-WEBSHELL_INT_IP=10.1.1.15
-DATABASE_INT_IP=10.1.1.11
-SHELL_INT_IP=10.1.1.17
-
-INITIAL_USER="admin"
+INITIAL_USER=admin
 
 # If not specified, the initial user email will be composed as
-# INITIAL_USER@CLUSTER.DOMAIN
+# INITIAL_USER@DOMAIN
 INITIAL_USER_EMAIL="admin@cluster_fixme_or_this_wont_work.domain_fixme_or_this_wont_work"
 
-INITIAL_USER_PASSWORD="fixmepassword"
-# YOU SHOULD CHANGE THESE TO SOME RANDOM STRINGS
-BLOB_SIGNING_KEY=fixmeblobsigningkeymushaveatleast32characters
-MANAGEMENT_TOKEN=fixmemanagementtokenmushaveatleast32characters
-SYSTEM_ROOT_TOKEN=fixmesystemroottokenmushaveatleast32characters
-ANONYMOUS_USER_TOKEN=fixmeanonymoususertokenmushaveatleast32characters
-WORKBENCH_SECRET_KEY=fixmeworkbenchsecretkeymushaveatleast32characters
-DATABASE_PASSWORD=fixmeplease_set_this_to_some_secure_value
+# Use a public node as a jump host for SSH sessions. This allows running the
+# installer from the outside of the cluster's local network and still reach
+# the internal servers for configuration deployment.
+# Comment out to disable.
+USE_SSH_JUMPHOST="controller.${DOMAIN}"
+
+AWS_REGION="fixme_or_this_wont_work"
 
 # SSL CERTIFICATES
 # Arvados requires SSL certificates to work correctly. This installer supports these options:
@@ -88,9 +44,19 @@ USE_LETSENCRYPT_ROUTE53="yes"
 # For that reason, you'll need to provide AWS credentials with permissions to manage
 # RRs in the route53 zone for the cluster.
 # WARNING!: If AWS credentials files already exist in the hosts, they won't be replaced.
-LE_AWS_REGION="us-east-1"
-LE_AWS_ACCESS_KEY_ID="AKIABCDEFGHIJKLMNOPQ"
-LE_AWS_SECRET_ACCESS_KEY="thisistherandomstringthatisyoursecretkey"
+LE_AWS_REGION="${AWS_REGION}"
+
+# Compute node configurations
+COMPUTE_AMI="ami_id_fixme_or_this_wont_work"
+COMPUTE_SG="security_group_fixme_or_this_wont_work"
+COMPUTE_SUBNET="subnet_fixme_or_this_wont_work"
+COMPUTE_AWS_REGION="${AWS_REGION}"
+COMPUTE_USER="${DEPLOY_USER}"
+
+# Keep S3 backend settings
+KEEP_AWS_REGION="${AWS_REGION}"
+KEEP_AWS_S3_BUCKET="${CLUSTER}-nyw5e-000000000000000-volume"
+KEEP_AWS_IAM_ROLE="${CLUSTER}-keepstore-00-iam-role"
 
 # If you going to provide your own certificates for Arvados, the provision script can
 # help you deploy them. In order to do that, you need to set `SSL_MODE=bring-your-own` above,
@@ -110,13 +76,86 @@ LE_AWS_SECRET_ACCESS_KEY="thisistherandomstringthatisyoursecretkey"
 # "download"     # Part of keepweb
 # "collections"  # Part of keepweb
 # "keepproxy"    # Keepproxy
+# "prometheus"
+# "grafana"
 # Ie., 'keep', the script will lookup for
 # ${CUSTOM_CERTS_DIR}/keepproxy.crt
 # ${CUSTOM_CERTS_DIR}/keepproxy.key
 
+# Set the following to "yes" if the key files are encrypted and optionally set
+# a custom AWS secret name for each node to retrieve the password.
+SSL_KEY_ENCRYPTED="no"
+SSL_KEY_AWS_SECRET_NAME="${CLUSTER}-arvados-ssl-privkey-password"
+SSL_KEY_AWS_REGION="${AWS_REGION}"
+
+# Customize Prometheus & Grafana web UI access credentials
+MONITORING_USERNAME=${INITIAL_USER}
+MONITORING_EMAIL=${INITIAL_USER_EMAIL}
+
+# Sets the directory for Grafana dashboards
+# GRAFANA_DASHBOARDS_DIR="${SCRIPT_DIR}/local_config_dir/dashboards"
+
+# Sets the amount of data (expressed in time) Prometheus keeps on its
+# time-series database. Default is 15 days.
+# PROMETHEUS_DATA_RETENTION_TIME="180d"
+
+# The mapping of nodes to roles
+# installer.sh will log in to each of these nodes and then provision
+# it for the specified roles.
+NODES=(
+  [controller.${DOMAIN}]=database,controller
+  [workbench.${DOMAIN}]=monitoring,workbench,workbench2,webshell,keepproxy,keepweb,websocket,dispatcher,keepbalance
+  [keep0.${DOMAIN}]=keepstore
+  [shell.${DOMAIN}]=shell
+)
+
+# Host SSL port where you want to point your browser to access Arvados
+# Defaults to 443 for regular runs, and to 8443 when called in Vagrant.
+# You can point it to another port if desired
+# In Vagrant, make sure it matches what you set in the Vagrantfile (8443)
+CONTROLLER_EXT_SSL_PORT=443
+KEEP_EXT_SSL_PORT=443
+# Both for collections and downloads
+KEEPWEB_EXT_SSL_PORT=443
+WEBSHELL_EXT_SSL_PORT=443
+WEBSOCKET_EXT_SSL_PORT=443
+WORKBENCH1_EXT_SSL_PORT=443
+WORKBENCH2_EXT_SSL_PORT=443
+
+# Internal IPs for the configuration
+CLUSTER_INT_CIDR=10.1.0.0/16
+
+# Note the IPs in this example are shared between roles, as suggested in
+# https://doc.arvados.org/main/install/salt-multi-host.html
+CONTROLLER_INT_IP=10.1.1.11
+DATABASE_INT_IP=${CONTROLLER_INT_IP}
+WORKBENCH1_INT_IP=10.1.1.15
+DISPATCHER_INT_IP=${WORKBENCH1_INT_IP}
+KEEPBALANCE_INT_IP=${WORKBENCH1_INT_IP}
+WEBSOCKET_INT_IP=${WORKBENCH1_INT_IP}
+# Both for collections and downloads
+KEEPWEB_INT_IP=${WORKBENCH1_INT_IP}
+WORKBENCH2_INT_IP=${WORKBENCH1_INT_IP}
+WEBSHELL_INT_IP=${WORKBENCH1_INT_IP}
+KEEP_INT_IP=${WORKBENCH1_INT_IP}
+KEEPSTORE0_INT_IP=10.1.2.13
+SHELL_INT_IP=10.1.2.17
+
+DATABASE_NAME="${CLUSTER}_arvados"
+DATABASE_USER="${CLUSTER}_arvados"
+# Set these if using an external PostgreSQL service.
+#DATABASE_EXTERNAL_SERVICE_HOST_OR_IP=
+#DATABASE_POSTGRESQL_VERSION=
+
+# Performance tuning parameters. If these are not set, workers
+# defaults on the number of cpus and queued requests defaults to 128.
+#CONTROLLER_MAX_WORKERS=
+#CONTROLLER_MAX_QUEUED_REQUESTS=
+
 # The directory to check for the config files (pillars, states) you want to use.
 # There are a few examples under 'config_examples'.
 # CONFIG_DIR="local_config_dir"
+
 # Extra states to apply. If you use your own subdir, change this value accordingly
 # EXTRA_STATES_DIR="${CONFIG_DIR}/states"
 
@@ -141,3 +180,5 @@ RELEASE="production"
 # DOCKER_TAG="v2.4.2"
 # LOCALE_TAG="v0.3.4"
 # LETSENCRYPT_TAG="v2.1.0"
+# PROMETHEUS_TAG="v5.6.5"
+# GRAFANA_TAG="v3.1.3"
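A few hedged illustrations of how the settings touched by this change are typically consumed follow. None of them are taken from installer.sh or the salt states, and every hostname, IP and resource name in them is a placeholder. First, NODES is a bash associative array mapping each hostname to a comma-separated list of roles, which the installer walks when it logs in to provision each node; a minimal sketch of iterating such a map:

    #!/bin/bash
    # Illustration only: iterate a NODES-style associative array (bash 4+).
    declare -A NODES=(
      [controller.example.com]=database,controller
      [keep0.example.com]=keepstore
    )
    for host in "${!NODES[@]}"; do
      echo "would provision ${host} with roles: ${NODES[${host}]}"
    done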
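Second, USE_SSH_JUMPHOST names a publicly reachable node to hop through when the installer runs from outside the cluster's private network. Assuming stock OpenSSH on the machine running the installer, the equivalent manual command and client configuration would look roughly like this:

    # Reach an internal-only node through the public controller (ProxyJump).
    ssh -J admin@controller.example.com admin@10.1.2.13

    # Or persistently, in ~/.ssh/config:
    # Host 10.1.*
    #   User admin
    #   ProxyJump admin@controller.example.com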
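Third, the Keep S3 backend settings assume the bucket and the keepstore IAM role already exist before provisioning (for example, created by whatever infrastructure step precedes the salt install). A pre-flight sanity check with the standard AWS CLI, substituting the real cluster prefix for xxxxx and the real region, might be:

    # Confirm the Keep volume bucket and the keepstore IAM role exist.
    aws s3api head-bucket --bucket xxxxx-nyw5e-000000000000000-volume --region us-east-1
    aws iam get-role --role-name xxxxx-keepstore-00-iam-role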
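Finally, switching SSL_KEY_ENCRYPTED to "yes" makes each node retrieve the passphrase for its encrypted TLS private key under SSL_KEY_AWS_SECRET_NAME. Assuming that lookup targets AWS Secrets Manager in SSL_KEY_AWS_REGION (the variable names suggest it, but check the installer documentation), creating the secret ahead of time could be sketched as:

    # Store the passphrase protecting the encrypted TLS private keys.
    aws secretsmanager create-secret \
      --name xxxxx-arvados-ssl-privkey-password \
      --secret-string "use-a-strong-passphrase-here" \
      --region us-east-1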