##########################################################
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: CC-BY-SA-3.0

# These are the basic parameters to configure the installation

# The Arvados cluster ID; it needs to be 5 lowercase alphanumeric characters.
CLUSTER="cluster_fixme_or_this_wont_work"

# The domain name you want to give to your cluster's hosts;
# the end result hostnames will be $SERVICE.$DOMAIN
DOMAIN="domain_fixme_or_this_wont_work"

# For multi-node installs, the ssh login for each node
# must be root or a user able to sudo.
DEPLOY_USER=admin

INITIAL_USER=admin

# If not specified, the initial user email will be composed as
# INITIAL_USER@DOMAIN
INITIAL_USER_EMAIL="admin@cluster_fixme_or_this_wont_work.domain_fixme_or_this_wont_work"

# Use a public node as a jump host for SSH sessions. This allows running the
# installer from outside the cluster's local network and still reach
# the internal servers for configuration deployment.
# Comment out to disable.
USE_SSH_JUMPHOST="controller.${DOMAIN}"
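
# As a rough illustration only (the installer handles this for you), reaching an
# internal node through the jump host with plain OpenSSH would look roughly like
# this, using the example hostnames from this file:
#   ssh -J ${DEPLOY_USER}@controller.${DOMAIN} ${DEPLOY_USER}@keep0.${DOMAIN}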

AWS_REGION="fixme_or_this_wont_work"

# Arvados requires SSL certificates to work correctly. This installer supports these options:
# * self-signed: let the installer create self-signed certificate(s)
# * bring-your-own: supply your own certificate(s) in the `certs` directory
# * lets-encrypt: automatically obtain and install SSL certificates for your hostname(s)
#
# See https://doc.arvados.org/install/salt-multi-host.html for more information.
SSL_MODE="lets-encrypt"
USE_LETSENCRYPT_ROUTE53="yes"
# For collections, we need to obtain a wildcard certificate for
# '*.collections.<cluster>.<domain>'. This is only possible through a DNS-01 challenge.
# For that reason, you'll need to provide AWS credentials with permissions to manage
# RRs (resource records) in the Route 53 zone for the cluster.
# WARNING!: If AWS credentials files already exist on the hosts, they won't be replaced.
LE_AWS_REGION="${AWS_REGION}"
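
# For reference, a standard AWS shared credentials file (typically
# ~/.aws/credentials) for an IAM user allowed to manage the Route 53 zone
# looks like the sketch below; the key values are placeholders:
#   [default]
#   aws_access_key_id = AKIAXXXXXXXXXXXXXXXX
#   aws_secret_access_key = xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx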

# Compute node configurations
COMPUTE_AMI="ami_id_fixme_or_this_wont_work"
COMPUTE_SG="security_group_fixme_or_this_wont_work"
COMPUTE_SUBNET="subnet_fixme_or_this_wont_work"
COMPUTE_AWS_REGION="${AWS_REGION}"
COMPUTE_USER="${DEPLOY_USER}"

# Keep S3 backend settings
KEEP_AWS_REGION="${AWS_REGION}"
KEEP_AWS_S3_BUCKET="${CLUSTER}-nyw5e-000000000000000-volume"
KEEP_AWS_IAM_ROLE="${CLUSTER}-keepstore-00-iam-role"
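
# The IAM role above is expected to give keepstore access to the bucket. As a
# rough, hypothetical sketch (your actual policy may differ), that usually means
# an IAM policy statement allowing object read/write/delete plus bucket listing:
#   {
#     "Effect": "Allow",
#     "Action": ["s3:GetObject", "s3:PutObject", "s3:DeleteObject", "s3:ListBucket"],
#     "Resource": ["arn:aws:s3:::<bucket>", "arn:aws:s3:::<bucket>/*"]
#   }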

# If you are going to provide your own certificates for Arvados, the provision script can
# help you deploy them. In order to do that, you need to set `SSL_MODE=bring-your-own` above,
# and copy the required certificates under the directory specified on the next line.
# The certs will be copied from this directory by the provision script.
# Please set it to the FULL PATH of the certs dir if you're going to use a different dir.
# Default is "${SCRIPT_DIR}/certs", where the variable "SCRIPT_DIR" holds the path to the
# directory where the "provision.sh" script was copied on the destination host.
# CUSTOM_CERTS_DIR="${SCRIPT_DIR}/local_config_dir/certs"
# The script expects cert/key files with these basenames (matching the role, except for
# keepweb, which is split into download/collections):
#  "download"      # Part of keepweb
#  "collections"   # Part of keepweb
#  "keepproxy"     # Keepproxy
# E.g., for 'keepproxy', the script will look for
#  ${CUSTOM_CERTS_DIR}/keepproxy.crt
#  ${CUSTOM_CERTS_DIR}/keepproxy.key
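
# For example, to stage a bring-your-own certificate for keepproxy, copy your
# PEM files (source paths here are hypothetical) into place following the naming
# convention above:
#   cp /path/to/your/keepproxy-cert.pem "${CUSTOM_CERTS_DIR}/keepproxy.crt"
#   cp /path/to/your/keepproxy-key.pem  "${CUSTOM_CERTS_DIR}/keepproxy.key"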

# Set the following to "yes" if the key files are encrypted and optionally set
# a custom AWS secret name for each node to retrieve the password.
SSL_KEY_ENCRYPTED="no"
SSL_KEY_AWS_SECRET_NAME="${CLUSTER}-arvados-ssl-privkey-password"
SSL_KEY_AWS_REGION="${AWS_REGION}"
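
# If SSL_KEY_ENCRYPTED is set to "yes", each node retrieves the key password
# from an AWS secret with the name above (presumably via Secrets Manager). One
# way to create such a secret with the AWS CLI (the value is a placeholder):
#   aws secretsmanager create-secret \
#     --region "${SSL_KEY_AWS_REGION}" \
#     --name "${SSL_KEY_AWS_SECRET_NAME}" \
#     --secret-string "your-private-key-password"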

# Customize Prometheus, Grafana and Loki web UI access credentials
MONITORING_USERNAME=${INITIAL_USER}
MONITORING_EMAIL=${INITIAL_USER_EMAIL}

# Sets the directory for Grafana dashboards
# GRAFANA_DASHBOARDS_DIR="${SCRIPT_DIR}/local_config_dir/dashboards"

# Sets the amount of data (expressed in time) Prometheus keeps in its
# time-series database. Default is 15 days.
# PROMETHEUS_DATA_RETENTION_TIME="180d"

# Loki S3 storage settings
LOKI_AWS_S3_BUCKET="${CLUSTER}-loki-object-storage"
LOKI_LOG_RETENTION_TIME="180d"
LOKI_AWS_REGION="${AWS_REGION}"

# The mapping of nodes to roles.
# installer.sh will log in to each of these nodes and then provision
# it for the specified roles.
declare -A NODES=(
  [controller.${DOMAIN}]=database,controller
  [workbench.${DOMAIN}]=monitoring,workbench,workbench2,webshell,keepproxy,keepweb,websocket,dispatcher,keepbalance
  [keep0.${DOMAIN}]=keepstore
  [shell.${DOMAIN}]=shell
)
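
# Conceptually (simplified; see installer.sh for the real logic), the installer
# iterates over this map and provisions each host for its role list:
#   for host in "${!NODES[@]}"; do
#     echo "provisioning ${host} with roles: ${NODES[$host]}"
#   done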

# Host SSL port where you want to point your browser to access Arvados
# Defaults to 443 for regular runs, and to 8443 when called in Vagrant.
# You can point it to another port if desired.
# In Vagrant, make sure it matches what you set in the Vagrantfile (8443)
CONTROLLER_EXT_SSL_PORT=443
KEEP_EXT_SSL_PORT=443
# Both for collections and downloads
KEEPWEB_EXT_SSL_PORT=443
WEBSHELL_EXT_SSL_PORT=443
WEBSOCKET_EXT_SSL_PORT=443
WORKBENCH1_EXT_SSL_PORT=443
WORKBENCH2_EXT_SSL_PORT=443

# Internal IPs for the configuration
CLUSTER_INT_CIDR=10.1.0.0/16

# Note the IPs in this example are shared between roles, as suggested in
# https://doc.arvados.org/main/install/salt-multi-host.html
CONTROLLER_INT_IP=10.1.1.11
DATABASE_INT_IP=${CONTROLLER_INT_IP}
WORKBENCH1_INT_IP=10.1.1.15
DISPATCHER_INT_IP=${WORKBENCH1_INT_IP}
KEEPBALANCE_INT_IP=${WORKBENCH1_INT_IP}
WEBSOCKET_INT_IP=${WORKBENCH1_INT_IP}
# Both for collections and downloads
KEEPWEB_INT_IP=${WORKBENCH1_INT_IP}
WORKBENCH2_INT_IP=${WORKBENCH1_INT_IP}
WEBSHELL_INT_IP=${WORKBENCH1_INT_IP}
KEEP_INT_IP=${WORKBENCH1_INT_IP}
KEEPSTORE0_INT_IP=10.1.2.13
SHELL_INT_IP=10.1.2.17

DATABASE_NAME="${CLUSTER}_arvados"
DATABASE_USER="${CLUSTER}_arvados"
# Set these if using an external PostgreSQL service.
#DATABASE_EXTERNAL_SERVICE_HOST_OR_IP=
#DATABASE_POSTGRESQL_VERSION=
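
# For example (host and version are placeholders), pointing the cluster at an
# existing PostgreSQL service could look like:
#   DATABASE_EXTERNAL_SERVICE_HOST_OR_IP="postgres.example.internal"
#   DATABASE_POSTGRESQL_VERSION="15"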

# Performance tuning parameters. If these are not set, workers
# defaults to the number of CPUs, queued requests defaults to 128,
# and gateway tunnels defaults to 1000.
#CONTROLLER_MAX_WORKERS=
#CONTROLLER_MAX_QUEUED_REQUESTS=
#CONTROLLER_MAX_GATEWAY_TUNNELS=

# The directory to check for the config files (pillars, states) you want to use.
# There are a few examples under 'config_examples'.
# CONFIG_DIR="local_config_dir"

# Extra states to apply. If you use your own subdir, change this value accordingly.
# EXTRA_STATES_DIR="${CONFIG_DIR}/states"

# These are ARVADOS-related settings.
# Which release of the Arvados repo you want to use
# Which version of Arvados you want to install. Defaults to latest stable.

# This is an arvados-formula setting.
# If branch is set, the script will switch to it before running salt.
# Usually not needed, only used for testing.

##########################################################
# Usually there's no need to modify things below this line

# ARVADOS_TAG="2.2.0"
# POSTGRES_TAG="v0.44.0"
# DOCKER_TAG="v2.4.2"
# LOCALE_TAG="v0.3.4"
# LETSENCRYPT_TAG="v2.1.0"
# PROMETHEUS_TAG="v5.6.5"
# GRAFANA_TAG="v3.1.3"