##########################################################
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: CC-BY-SA-3.0

# These are the basic parameters to configure the installation

# The Arvados cluster ID; it must be exactly 5 lowercase alphanumeric characters.
CLUSTER="cluster_fixme_or_this_wont_work"

# The domain name you want to give to your cluster's hosts;
# the resulting hostnames will be $SERVICE.$DOMAIN
DOMAIN="domain_fixme_or_this_wont_work"

# For multi-node installs, the SSH user for each node
# must be root or able to sudo to root.
DEPLOY_USER=admin

INITIAL_USER=admin

# If not specified, the initial user email will be composed as
# INITIAL_USER@DOMAIN
INITIAL_USER_EMAIL="admin@cluster_fixme_or_this_wont_work.domain_fixme_or_this_wont_work"

# Use a public node as a jump host for SSH sessions. This allows running the
# installer from outside the cluster's local network while still reaching the
# internal servers for configuration deployment.
# Comment out to disable.
USE_SSH_JUMPHOST="controller.${DOMAIN}"
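# With the jumphost set, the installer's SSH sessions are roughly equivalent
# to the following (illustrative only; the internal address is an example):
# ssh -J "${DEPLOY_USER}@controller.${DOMAIN}" "${DEPLOY_USER}@10.1.2.13"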

AWS_REGION="fixme_or_this_wont_work"

# SSL CERTIFICATES
# Arvados requires SSL certificates to work correctly. This installer supports these options:
# * self-signed: let the installer create self-signed certificate(s)
# * bring-your-own: supply your own certificate(s) in the `certs` directory
# * lets-encrypt: automatically obtain and install SSL certificates for your hostname(s)
#
# See https://doc.arvados.org/install/salt-multi-host.html for more information.
SSL_MODE="lets-encrypt"
USE_LETSENCRYPT_ROUTE53="yes"
# For collections, we need to obtain a wildcard certificate for
# '*.collections.<cluster>.<domain>', which is only possible through a DNS-01
# challenge. For that reason, you'll need to provide AWS credentials with
# permissions to manage resource records in the cluster's Route53 zone.
# WARNING: if AWS credentials files already exist on the hosts, they won't be replaced.
LE_AWS_REGION="${AWS_REGION}"
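# The deployed credentials are assumed to follow the standard AWS CLI layout,
# e.g. a minimal ~/.aws/credentials file (values are placeholders):
# [default]
# aws_access_key_id = AKIA...
# aws_secret_access_key = ...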

# Compute node configurations
COMPUTE_AMI="ami_id_fixme_or_this_wont_work"
COMPUTE_SG="security_group_fixme_or_this_wont_work"
COMPUTE_SUBNET="subnet_fixme_or_this_wont_work"
COMPUTE_AWS_REGION="${AWS_REGION}"
COMPUTE_USER="${DEPLOY_USER}"

# Keep S3 backend settings
KEEP_AWS_REGION="${AWS_REGION}"
KEEP_AWS_S3_BUCKET="${CLUSTER}-nyw5e-000000000000000-volume"
KEEP_AWS_IAM_ROLE="${CLUSTER}-keepstore-00-iam-role"
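# Optional sanity checks before deploying, assuming an AWS CLI configured with
# credentials that can read these resources:
# aws s3api head-bucket --bucket "${KEEP_AWS_S3_BUCKET}" --region "${KEEP_AWS_REGION}"
# aws iam get-role --role-name "${KEEP_AWS_IAM_ROLE}"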

# If you are going to provide your own certificates for Arvados, the provision
# script can help you deploy them. To do that, set `SSL_MODE=bring-your-own`
# above and copy the required certificates into the directory specified below.
# The provision script will copy the certs from this directory.
# Set it to the FULL PATH of the certs dir if you're going to use a different one.
# The default is "${SCRIPT_DIR}/certs", where "SCRIPT_DIR" is the directory the
# "provision.sh" script was copied to on the destination host.
# CUSTOM_CERTS_DIR="${SCRIPT_DIR}/local_config_dir/certs"
# The script expects cert/key files with these basenames (matching the role except for
# keepweb, which is split in both download/collections):
#  "controller"
#  "websocket"
#  "workbench"
#  "workbench2"
#  "webshell"
#  "download"         # Part of keepweb
#  "collections"      # Part of keepweb
#  "keepproxy"        # Keepproxy
#  "prometheus"
#  "grafana"
# E.g., for 'keepproxy', the script will look for:
# ${CUSTOM_CERTS_DIR}/keepproxy.crt
# ${CUSTOM_CERTS_DIR}/keepproxy.key
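# For example, to stage a bring-your-own certificate for keepproxy
# (source paths are illustrative):
# mkdir -p "${CUSTOM_CERTS_DIR}"
# cp /path/to/fullchain.pem "${CUSTOM_CERTS_DIR}/keepproxy.crt"
# cp /path/to/privkey.pem "${CUSTOM_CERTS_DIR}/keepproxy.key"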

# Set the following to "yes" if the key files are encrypted and optionally set
# a custom AWS secret name for each node to retrieve the password.
SSL_KEY_ENCRYPTED="no"
SSL_KEY_AWS_SECRET_NAME="${CLUSTER}-arvados-ssl-privkey-password"
SSL_KEY_AWS_REGION="${AWS_REGION}"
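# A sketch of creating that secret with the AWS CLI (assumes suitable IAM
# permissions; the secret value is assumed to be the key password as plain text):
# aws secretsmanager create-secret \
#   --name "${SSL_KEY_AWS_SECRET_NAME}" \
#   --secret-string "your-key-password" \
#   --region "${SSL_KEY_AWS_REGION}"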

# Customize Prometheus & Grafana web UI access credentials
MONITORING_USERNAME=${INITIAL_USER}
MONITORING_EMAIL=${INITIAL_USER_EMAIL}

# Sets the directory for Grafana dashboards
# GRAFANA_DASHBOARDS_DIR="${SCRIPT_DIR}/local_config_dir/dashboards"

# Sets the amount of data (expressed as a time span) Prometheus keeps in its
# time-series database. The default is 15 days.
# PROMETHEUS_DATA_RETENTION_TIME="180d"

# The mapping of nodes to roles
# installer.sh will log in to each of these nodes and then provision
# it for the specified roles.
declare -A NODES=(
  [controller.${DOMAIN}]=database,controller
  [workbench.${DOMAIN}]=monitoring,workbench,workbench2,webshell,keepproxy,keepweb,websocket,dispatcher,keepbalance
  [keep0.${DOMAIN}]=keepstore
  [shell.${DOMAIN}]=shell
)
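# A quick, optional pre-flight check that DEPLOY_USER can log in and sudo on
# every node (run it manually; don't leave it uncommented in this file):
# for node in "${!NODES[@]}"; do
#   ssh "${DEPLOY_USER}@${node}" sudo -n true && echo "${node}: OK"
# done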

# Host SSL port where you want to point your browser to access Arvados.
# Defaults to 443 for regular runs, and to 8443 when run under Vagrant.
# You can point it to another port if desired.
# In Vagrant, make sure it matches what you set in the Vagrantfile (8443).
CONTROLLER_EXT_SSL_PORT=443
KEEP_EXT_SSL_PORT=443
# Both for collections and downloads
KEEPWEB_EXT_SSL_PORT=443
WEBSHELL_EXT_SSL_PORT=443
WEBSOCKET_EXT_SSL_PORT=443
WORKBENCH1_EXT_SSL_PORT=443
WORKBENCH2_EXT_SSL_PORT=443

# Internal IPs for the configuration
CLUSTER_INT_CIDR=10.1.0.0/16

# Note the IPs in this example are shared between roles, as suggested in
# https://doc.arvados.org/main/install/salt-multi-host.html
CONTROLLER_INT_IP=10.1.1.11
DATABASE_INT_IP=${CONTROLLER_INT_IP}
WORKBENCH1_INT_IP=10.1.1.15
DISPATCHER_INT_IP=${WORKBENCH1_INT_IP}
KEEPBALANCE_INT_IP=${WORKBENCH1_INT_IP}
WEBSOCKET_INT_IP=${WORKBENCH1_INT_IP}
# Both for collections and downloads
KEEPWEB_INT_IP=${WORKBENCH1_INT_IP}
WORKBENCH2_INT_IP=${WORKBENCH1_INT_IP}
WEBSHELL_INT_IP=${WORKBENCH1_INT_IP}
KEEP_INT_IP=${WORKBENCH1_INT_IP}
KEEPSTORE0_INT_IP=10.1.2.13
SHELL_INT_IP=10.1.2.17

DATABASE_NAME="${CLUSTER}_arvados"
DATABASE_USER="${CLUSTER}_arvados"
# Set these if using an external PostgreSQL service.
#DATABASE_EXTERNAL_SERVICE_HOST_OR_IP=
#DATABASE_POSTGRESQL_VERSION=
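# For example, when pointing at a managed PostgreSQL service (hypothetical values):
# DATABASE_EXTERNAL_SERVICE_HOST_OR_IP="db.example.com"
# DATABASE_POSTGRESQL_VERSION="15"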

# Performance tuning parameters. If these are not set, workers defaults to
# the number of CPUs, queued requests defaults to 128, and gateway tunnels
# defaults to 1000.
#CONTROLLER_MAX_WORKERS=
#CONTROLLER_MAX_QUEUED_REQUESTS=
#CONTROLLER_MAX_GATEWAY_TUNNELS=

# The directory to check for the config files (pillars, states) you want to use.
# There are a few examples under 'config_examples'.
# CONFIG_DIR="local_config_dir"

# Extra states to apply. If you use your own subdir, change this value accordingly
# EXTRA_STATES_DIR="${CONFIG_DIR}/states"

# These are ARVADOS-related settings.
# Which release of Arvados repo you want to use
RELEASE="production"
# Which version of Arvados you want to install. Defaults to latest stable
# VERSION="2.1.2-1"

# This is an arvados-formula setting.
# If BRANCH is set, the script will switch to it before running Salt.
# Usually not needed; only used for testing.
# BRANCH="main"

##########################################################
# Usually there's no need to modify things below this line

# Formulas versions
# ARVADOS_TAG="2.2.0"
# POSTGRES_TAG="v0.44.0"
# NGINX_TAG="v2.8.1"
# DOCKER_TAG="v2.4.2"
# LOCALE_TAG="v0.3.4"
# LETSENCRYPT_TAG="v2.1.0"
# PROMETHEUS_TAG="v5.6.5"
# GRAFANA_TAG="v3.1.3"